/[linux-patches]/genpatches-2.6/tags/2.6.34-10/1000_linux-2.6.34.1.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.34-10/1000_linux-2.6.34.1.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1771 - (show annotations) (download)
Tue Aug 31 14:13:10 2010 UTC (4 years, 3 months ago) by mpagano
File size: 295155 byte(s)
2.6.34-10 release
1 diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
2 index 02838a4..86b5880 100644
3 --- a/Documentation/hwmon/ltc4245
4 +++ b/Documentation/hwmon/ltc4245
5 @@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm
6 in7_min_alarm 3v output undervoltage alarm
7 in8_min_alarm Vee (-12v) output undervoltage alarm
8
9 -in9_input GPIO #1 voltage data
10 -in10_input GPIO #2 voltage data
11 -in11_input GPIO #3 voltage data
12 +in9_input GPIO voltage data
13
14 power1_input 12v power usage (mW)
15 power2_input 5v power usage (mW)
16 diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
17 index a52a27c..6f80665 100644
18 --- a/arch/arm/common/sa1111.c
19 +++ b/arch/arm/common/sa1111.c
20 @@ -951,8 +951,6 @@ static int sa1111_resume(struct platform_device *dev)
21 if (!save)
22 return 0;
23
24 - spin_lock_irqsave(&sachip->lock, flags);
25 -
26 /*
27 * Ensure that the SA1111 is still here.
28 * FIXME: shouldn't do this here.
29 @@ -969,6 +967,13 @@ static int sa1111_resume(struct platform_device *dev)
30 * First of all, wake up the chip.
31 */
32 sa1111_wake(sachip);
33 +
34 + /*
35 + * Only lock for write ops. Also, sa1111_wake must be called with
36 + * released spinlock!
37 + */
38 + spin_lock_irqsave(&sachip->lock, flags);
39 +
40 sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
41 sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
42
43 diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c
44 index b91e412..04f36d8 100644
45 --- a/arch/arm/mach-mx2/devices.c
46 +++ b/arch/arm/mach-mx2/devices.c
47 @@ -483,8 +483,8 @@ int __init mxc_register_gpios(void)
48 #ifdef CONFIG_MACH_MX21
49 static struct resource mx21_usbhc_resources[] = {
50 {
51 - .start = MX21_BASE_ADDR,
52 - .end = MX21_BASE_ADDR + 0x1FFF,
53 + .start = MX21_USBOTG_BASE_ADDR,
54 + .end = MX21_USBOTG_BASE_ADDR + SZ_8K - 1,
55 .flags = IORESOURCE_MEM,
56 },
57 {
58 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
59 index 06a90dc..37c8157 100644
60 --- a/arch/arm/mm/cache-v7.S
61 +++ b/arch/arm/mm/cache-v7.S
62 @@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all)
63 THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
64 bl v7_flush_dcache_all
65 mov r0, #0
66 +#ifdef CONFIG_SMP
67 + mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
68 +#else
69 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
70 +#endif
71 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
72 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
73 mov pc, lr
74 diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
75 index 5eb4fd9..ac163de 100644
76 --- a/arch/arm/mm/copypage-feroceon.c
77 +++ b/arch/arm/mm/copypage-feroceon.c
78 @@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
79 {
80 asm("\
81 stmfd sp!, {r4-r9, lr} \n\
82 - mov ip, %0 \n\
83 + mov ip, %2 \n\
84 1: mov lr, r1 \n\
85 ldmia r1!, {r2 - r9} \n\
86 pld [lr, #32] \n\
87 @@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
88 mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
89 ldmfd sp!, {r4-r9, pc}"
90 :
91 - : "I" (PAGE_SIZE));
92 + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
93 }
94
95 void feroceon_copy_user_highpage(struct page *to, struct page *from,
96 diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
97 index 7c2eb55..cb589cb 100644
98 --- a/arch/arm/mm/copypage-v4wb.c
99 +++ b/arch/arm/mm/copypage-v4wb.c
100 @@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
101 {
102 asm("\
103 stmfd sp!, {r4, lr} @ 2\n\
104 - mov r2, %0 @ 1\n\
105 + mov r2, %2 @ 1\n\
106 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
107 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
108 stmia r0!, {r3, r4, ip, lr} @ 4\n\
109 @@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
110 mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
111 ldmfd sp!, {r4, pc} @ 3"
112 :
113 - : "I" (PAGE_SIZE / 64));
114 + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
115 }
116
117 void v4wb_copy_user_highpage(struct page *to, struct page *from,
118 diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
119 index 172e6a5..30c7d04 100644
120 --- a/arch/arm/mm/copypage-v4wt.c
121 +++ b/arch/arm/mm/copypage-v4wt.c
122 @@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
123 {
124 asm("\
125 stmfd sp!, {r4, lr} @ 2\n\
126 - mov r2, %0 @ 1\n\
127 + mov r2, %2 @ 1\n\
128 ldmia r1!, {r3, r4, ip, lr} @ 4\n\
129 1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
130 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
131 @@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
132 mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
133 ldmfd sp!, {r4, pc} @ 3"
134 :
135 - : "I" (PAGE_SIZE / 64));
136 + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
137 }
138
139 void v4wt_copy_user_highpage(struct page *to, struct page *from,
140 diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
141 index 747ad41..f9cde07 100644
142 --- a/arch/arm/mm/copypage-xsc3.c
143 +++ b/arch/arm/mm/copypage-xsc3.c
144 @@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
145 {
146 asm("\
147 stmfd sp!, {r4, r5, lr} \n\
148 - mov lr, %0 \n\
149 + mov lr, %2 \n\
150 \n\
151 pld [r1, #0] \n\
152 pld [r1, #32] \n\
153 @@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
154 \n\
155 ldmfd sp!, {r4, r5, pc}"
156 :
157 - : "I" (PAGE_SIZE / 64 - 1));
158 + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
159 }
160
161 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
162 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
163 index 9d40c34..8ad75e9 100644
164 --- a/arch/arm/mm/fault.c
165 +++ b/arch/arm/mm/fault.c
166 @@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
167 if (addr < TASK_SIZE)
168 return do_page_fault(addr, fsr, regs);
169
170 + if (user_mode(regs))
171 + goto bad_area;
172 +
173 index = pgd_index(addr);
174
175 /*
176 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
177 index 0ed29bf..55d07c8 100644
178 --- a/arch/arm/mm/init.c
179 +++ b/arch/arm/mm/init.c
180 @@ -712,10 +712,10 @@ void __init mem_init(void)
181 void free_initmem(void)
182 {
183 #ifdef CONFIG_HAVE_TCM
184 - extern char *__tcm_start, *__tcm_end;
185 + extern char __tcm_start, __tcm_end;
186
187 - totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
188 - __phys_to_pfn(__pa(__tcm_end)),
189 + totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
190 + __phys_to_pfn(__pa(&__tcm_end)),
191 "TCM link");
192 #endif
193
194 diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
195 index 66dc2d0..d66cead 100644
196 --- a/arch/arm/vfp/vfphw.S
197 +++ b/arch/arm/vfp/vfphw.S
198 @@ -277,7 +277,7 @@ ENTRY(vfp_put_double)
199 #ifdef CONFIG_VFPv3
200 @ d16 - d31 registers
201 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
202 -1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr
203 +1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
204 mov pc, lr
205 .org 1b + 8
206 .endr
207 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
208 index 8542bc3..93f6c63 100644
209 --- a/arch/blackfin/include/asm/cache.h
210 +++ b/arch/blackfin/include/asm/cache.h
211 @@ -15,6 +15,8 @@
212 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
213 #define SMP_CACHE_BYTES L1_CACHE_BYTES
214
215 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
216 +
217 #ifdef CONFIG_SMP
218 #define __cacheline_aligned
219 #else
220 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
221 index 2797163..7dc0f0f 100644
222 --- a/arch/frv/include/asm/cache.h
223 +++ b/arch/frv/include/asm/cache.h
224 @@ -17,6 +17,8 @@
225 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
226 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
227
228 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
229 +
230 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
231 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
232
233 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
234 index fed3fd3..ecafbe1 100644
235 --- a/arch/m68k/include/asm/cache.h
236 +++ b/arch/m68k/include/asm/cache.h
237 @@ -8,4 +8,6 @@
238 #define L1_CACHE_SHIFT 4
239 #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
240
241 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
242 +
243 #endif
244 diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
245 index e03cfa2..6e2fe28 100644
246 --- a/arch/mn10300/include/asm/cache.h
247 +++ b/arch/mn10300/include/asm/cache.h
248 @@ -21,6 +21,8 @@
249 #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
250 #endif
251
252 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
253 +
254 /* data cache purge registers
255 * - read from the register to unconditionally purge that cache line
256 * - write address & 0xffffff00 to conditionally purge that cache line
257 diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
258 index 3ca1c61..27a7492 100644
259 --- a/arch/parisc/math-emu/decode_exc.c
260 +++ b/arch/parisc/math-emu/decode_exc.c
261 @@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
262 return SIGNALCODE(SIGFPE, FPE_FLTINV);
263 case DIVISIONBYZEROEXCEPTION:
264 update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
265 + Clear_excp_register(exception_index);
266 return SIGNALCODE(SIGFPE, FPE_FLTDIV);
267 case INEXACTEXCEPTION:
268 update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
269 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
270 index c09138d..b894721 100644
271 --- a/arch/powerpc/kernel/asm-offsets.c
272 +++ b/arch/powerpc/kernel/asm-offsets.c
273 @@ -447,6 +447,14 @@ int main(void)
274 DEFINE(PGD_T_LOG2, PGD_T_LOG2);
275 DEFINE(PTE_T_LOG2, PTE_T_LOG2);
276 #endif
277 +#ifdef CONFIG_FSL_BOOKE
278 + DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
279 + DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
280 + DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
281 + DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
282 + DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
283 + DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
284 +#endif
285
286 #ifdef CONFIG_KVM_EXIT_TIMING
287 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
288 diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
289 index 7255265..edd4a57 100644
290 --- a/arch/powerpc/kernel/head_fsl_booke.S
291 +++ b/arch/powerpc/kernel/head_fsl_booke.S
292 @@ -639,6 +639,13 @@ interrupt_base:
293 rlwinm r12,r12,0,16,1
294 mtspr SPRN_MAS1,r12
295
296 + /* Make up the required permissions for kernel code */
297 +#ifdef CONFIG_PTE_64BIT
298 + li r13,_PAGE_PRESENT | _PAGE_BAP_SX
299 + oris r13,r13,_PAGE_ACCESSED@h
300 +#else
301 + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
302 +#endif
303 b 4f
304
305 /* Get the PGD for the current thread */
306 @@ -646,15 +653,15 @@ interrupt_base:
307 mfspr r11,SPRN_SPRG_THREAD
308 lwz r11,PGDIR(r11)
309
310 -4:
311 - /* Make up the required permissions */
312 + /* Make up the required permissions for user code */
313 #ifdef CONFIG_PTE_64BIT
314 - li r13,_PAGE_PRESENT | _PAGE_EXEC
315 + li r13,_PAGE_PRESENT | _PAGE_BAP_UX
316 oris r13,r13,_PAGE_ACCESSED@h
317 #else
318 li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
319 #endif
320
321 +4:
322 FIND_PTE
323 andc. r13,r13,r11 /* Check permission */
324
325 diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
326 index 604af29..ecb532b 100644
327 --- a/arch/powerpc/kvm/book3s.c
328 +++ b/arch/powerpc/kvm/book3s.c
329 @@ -922,6 +922,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
330 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
331 int i;
332
333 + vcpu_load(vcpu);
334 +
335 sregs->pvr = vcpu->arch.pvr;
336
337 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
338 @@ -940,6 +942,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
339 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
340 }
341 }
342 +
343 + vcpu_put(vcpu);
344 +
345 return 0;
346 }
347
348 @@ -949,6 +954,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
349 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
350 int i;
351
352 + vcpu_load(vcpu);
353 +
354 kvmppc_set_pvr(vcpu, sregs->pvr);
355
356 vcpu3s->sdr1 = sregs->u.s.sdr1;
357 @@ -975,6 +982,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
358
359 /* Flush the MMU after messing with the segments */
360 kvmppc_mmu_pte_flush(vcpu, 0, 0);
361 +
362 + vcpu_put(vcpu);
363 +
364 return 0;
365 }
366
367 diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
368 index 2a3a195..df0182a 100644
369 --- a/arch/powerpc/kvm/booke.c
370 +++ b/arch/powerpc/kvm/booke.c
371 @@ -479,6 +479,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
372 {
373 int i;
374
375 + vcpu_load(vcpu);
376 +
377 regs->pc = vcpu->arch.pc;
378 regs->cr = kvmppc_get_cr(vcpu);
379 regs->ctr = vcpu->arch.ctr;
380 @@ -499,6 +501,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
381 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
382 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
383
384 + vcpu_put(vcpu);
385 +
386 return 0;
387 }
388
389 @@ -506,6 +510,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
390 {
391 int i;
392
393 + vcpu_load(vcpu);
394 +
395 vcpu->arch.pc = regs->pc;
396 kvmppc_set_cr(vcpu, regs->cr);
397 vcpu->arch.ctr = regs->ctr;
398 @@ -525,6 +531,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
399 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
400 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
401
402 + vcpu_put(vcpu);
403 +
404 return 0;
405 }
406
407 @@ -553,7 +561,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
408 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
409 struct kvm_translation *tr)
410 {
411 - return kvmppc_core_vcpu_translate(vcpu, tr);
412 + int r;
413 +
414 + vcpu_load(vcpu);
415 + r = kvmppc_core_vcpu_translate(vcpu, tr);
416 + vcpu_put(vcpu);
417 + return r;
418 }
419
420 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
421 diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
422 index 297fcd2..bf36a9d 100644
423 --- a/arch/powerpc/kvm/powerpc.c
424 +++ b/arch/powerpc/kvm/powerpc.c
425 @@ -193,7 +193,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
426 {
427 struct kvm_vcpu *vcpu;
428 vcpu = kvmppc_core_vcpu_create(kvm, id);
429 - kvmppc_create_vcpu_debugfs(vcpu, id);
430 + if (!IS_ERR(vcpu))
431 + kvmppc_create_vcpu_debugfs(vcpu, id);
432 return vcpu;
433 }
434
435 diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
436 index 64e2e49..3ac0cd3 100644
437 --- a/arch/powerpc/lib/string.S
438 +++ b/arch/powerpc/lib/string.S
439 @@ -71,7 +71,7 @@ _GLOBAL(strcmp)
440
441 _GLOBAL(strncmp)
442 PPC_LCMPI r5,0
443 - beqlr
444 + ble- 2f
445 mtctr r5
446 addi r5,r3,-1
447 addi r4,r4,-1
448 @@ -82,6 +82,8 @@ _GLOBAL(strncmp)
449 beqlr 1
450 bdnzt eq,1b
451 blr
452 +2: li r3,0
453 + blr
454
455 _GLOBAL(strlen)
456 addi r4,r3,-1
457 diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
458 index 1ed6b52..cdc7526 100644
459 --- a/arch/powerpc/mm/fsl_booke_mmu.c
460 +++ b/arch/powerpc/mm/fsl_booke_mmu.c
461 @@ -2,7 +2,7 @@
462 * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
463 * E500 Book E processors.
464 *
465 - * Copyright 2004 Freescale Semiconductor, Inc
466 + * Copyright 2004,2010 Freescale Semiconductor, Inc.
467 *
468 * This file contains the routines for initializing the MMU
469 * on the 4xx series of chips.
470 @@ -56,19 +56,13 @@
471
472 unsigned int tlbcam_index;
473
474 -#define NUM_TLBCAMS (64)
475
476 #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
477 #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
478 #endif
479
480 -struct tlbcam {
481 - u32 MAS0;
482 - u32 MAS1;
483 - unsigned long MAS2;
484 - u32 MAS3;
485 - u32 MAS7;
486 -} TLBCAM[NUM_TLBCAMS];
487 +#define NUM_TLBCAMS (64)
488 +struct tlbcam TLBCAM[NUM_TLBCAMS];
489
490 struct tlbcamrange {
491 unsigned long start;
492 @@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
493 return 0;
494 }
495
496 -void loadcam_entry(int idx)
497 -{
498 - mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
499 - mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
500 - mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
501 - mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
502 -
503 - if (mmu_has_feature(MMU_FTR_BIG_PHYS))
504 - mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
505 -
506 - asm volatile("isync;tlbwe;isync" : : : "memory");
507 -}
508 -
509 /*
510 * Set up one of the I/D BAT (block address translation) register pairs.
511 * The parameters are not checked; in particular size must be a power
512 diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
513 index d49a775..0591f25 100644
514 --- a/arch/powerpc/mm/mmu_decl.h
515 +++ b/arch/powerpc/mm/mmu_decl.h
516 @@ -149,7 +149,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top);
517 extern void MMU_init_hw(void);
518 extern unsigned long mmu_mapin_ram(unsigned long top);
519 extern void adjust_total_lowmem(void);
520 -
521 +extern void loadcam_entry(unsigned int index);
522 +
523 +struct tlbcam {
524 + u32 MAS0;
525 + u32 MAS1;
526 + unsigned long MAS2;
527 + u32 MAS3;
528 + u32 MAS7;
529 +};
530 #elif defined(CONFIG_PPC32)
531 /* anything 32-bit except 4xx or 8xx */
532 extern void MMU_init_hw(void);
533 diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
534 index b9243e7..767b0cf 100644
535 --- a/arch/powerpc/mm/pgtable_32.c
536 +++ b/arch/powerpc/mm/pgtable_32.c
537 @@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
538 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
539 flags &= ~(_PAGE_USER | _PAGE_EXEC);
540
541 +#ifdef _PAGE_BAP_SR
542 + /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
543 + * which means that we just cleared supervisor access... oops ;-) This
544 + * restores it
545 + */
546 + flags |= _PAGE_BAP_SR;
547 +#endif
548 +
549 return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
550 }
551 EXPORT_SYMBOL(ioremap_flags);
552 diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
553 index d95679a..d050fc8 100644
554 --- a/arch/powerpc/mm/pgtable_64.c
555 +++ b/arch/powerpc/mm/pgtable_64.c
556 @@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
557 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
558 flags &= ~(_PAGE_USER | _PAGE_EXEC);
559
560 +#ifdef _PAGE_BAP_SR
561 + /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
562 + * which means that we just cleared supervisor access... oops ;-) This
563 + * restores it
564 + */
565 + flags |= _PAGE_BAP_SR;
566 +#endif
567 +
568 if (ppc_md.ioremap)
569 return ppc_md.ioremap(addr, size, flags, caller);
570 return __ioremap_caller(addr, size, flags, caller);
571 diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
572 index bbdc5b5..8656ecf 100644
573 --- a/arch/powerpc/mm/tlb_nohash_low.S
574 +++ b/arch/powerpc/mm/tlb_nohash_low.S
575 @@ -271,3 +271,31 @@ _GLOBAL(set_context)
576 #else
577 #error Unsupported processor type !
578 #endif
579 +
580 +#if defined(CONFIG_FSL_BOOKE)
581 +/*
582 + * extern void loadcam_entry(unsigned int index)
583 + *
584 + * Load TLBCAM[index] entry in to the L2 CAM MMU
585 + */
586 +_GLOBAL(loadcam_entry)
587 + LOAD_REG_ADDR(r4, TLBCAM)
588 + mulli r5,r3,TLBCAM_SIZE
589 + add r3,r5,r4
590 + lwz r4,TLBCAM_MAS0(r3)
591 + mtspr SPRN_MAS0,r4
592 + lwz r4,TLBCAM_MAS1(r3)
593 + mtspr SPRN_MAS1,r4
594 + PPC_LL r4,TLBCAM_MAS2(r3)
595 + mtspr SPRN_MAS2,r4
596 + lwz r4,TLBCAM_MAS3(r3)
597 + mtspr SPRN_MAS3,r4
598 +BEGIN_MMU_FTR_SECTION
599 + lwz r4,TLBCAM_MAS7(r3)
600 + mtspr SPRN_MAS7,r4
601 +END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
602 + isync
603 + tlbwe
604 + isync
605 + blr
606 +#endif
607 diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
608 index 2c9e522..7fd90d0 100644
609 --- a/arch/powerpc/oprofile/op_model_cell.c
610 +++ b/arch/powerpc/oprofile/op_model_cell.c
611 @@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n)
612 index = ENTRIES-1;
613
614 /* make sure index is valid */
615 - if ((index > ENTRIES) || (index < 0))
616 + if ((index >= ENTRIES) || (index < 0))
617 index = ENTRIES-1;
618
619 return initial_lfsr[index];
620 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
621 index a8e1d5d..b0760d7 100644
622 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
623 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
624 @@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
625 for(;;);
626 }
627
628 -static int qcss_tok; /* query-cpu-stopped-state token */
629 -
630 -/* Get state of physical CPU.
631 - * Return codes:
632 - * 0 - The processor is in the RTAS stopped state
633 - * 1 - stop-self is in progress
634 - * 2 - The processor is not in the RTAS stopped state
635 - * -1 - Hardware Error
636 - * -2 - Hardware Busy, Try again later.
637 - */
638 -static int query_cpu_stopped(unsigned int pcpu)
639 -{
640 - int cpu_status, status;
641 -
642 - status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
643 - if (status != 0) {
644 - printk(KERN_ERR
645 - "RTAS query-cpu-stopped-state failed: %i\n", status);
646 - return status;
647 - }
648 -
649 - return cpu_status;
650 -}
651 -
652 static int pseries_cpu_disable(void)
653 {
654 int cpu = smp_processor_id();
655 @@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
656 } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
657
658 for (tries = 0; tries < 25; tries++) {
659 - cpu_status = query_cpu_stopped(pcpu);
660 - if (cpu_status == 0 || cpu_status == -1)
661 + cpu_status = smp_query_cpu_stopped(pcpu);
662 + if (cpu_status == QCSS_STOPPED ||
663 + cpu_status == QCSS_HARDWARE_ERROR)
664 break;
665 cpu_relax();
666 }
667 @@ -388,6 +365,7 @@ static int __init pseries_cpu_hotplug_init(void)
668 struct device_node *np;
669 const char *typep;
670 int cpu;
671 + int qcss_tok;
672
673 for_each_node_by_name(np, "interrupt-controller") {
674 typep = of_get_property(np, "compatible", NULL);
675 diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
676 index a05f8d4..6c4fd2c 100644
677 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
678 +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
679 @@ -4,6 +4,14 @@
680 #include <asm/hvcall.h>
681 #include <asm/page.h>
682
683 +/* Get state of physical CPU from query_cpu_stopped */
684 +int smp_query_cpu_stopped(unsigned int pcpu);
685 +#define QCSS_STOPPED 0
686 +#define QCSS_STOPPING 1
687 +#define QCSS_NOT_STOPPED 2
688 +#define QCSS_HARDWARE_ERROR -1
689 +#define QCSS_HARDWARE_BUSY -2
690 +
691 static inline long poll_pending(void)
692 {
693 return plpar_hcall_norets(H_POLL_PENDING);
694 diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
695 index 4e7f89a..8979982 100644
696 --- a/arch/powerpc/platforms/pseries/smp.c
697 +++ b/arch/powerpc/platforms/pseries/smp.c
698 @@ -57,6 +57,28 @@
699 */
700 static cpumask_t of_spin_map;
701
702 +/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
703 +int smp_query_cpu_stopped(unsigned int pcpu)
704 +{
705 + int cpu_status, status;
706 + int qcss_tok = rtas_token("query-cpu-stopped-state");
707 +
708 + if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
709 + printk(KERN_INFO "Firmware doesn't support "
710 + "query-cpu-stopped-state\n");
711 + return QCSS_HARDWARE_ERROR;
712 + }
713 +
714 + status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
715 + if (status != 0) {
716 + printk(KERN_ERR
717 + "RTAS query-cpu-stopped-state failed: %i\n", status);
718 + return status;
719 + }
720 +
721 + return cpu_status;
722 +}
723 +
724 /**
725 * smp_startup_cpu() - start the given cpu
726 *
727 @@ -82,6 +104,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
728
729 pcpu = get_hard_smp_processor_id(lcpu);
730
731 + /* Check to see if the CPU out of FW already for kexec */
732 + if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
733 + cpu_set(lcpu, of_spin_map);
734 + return 1;
735 + }
736 +
737 /* Fixup atomic count: it exited inside IRQ handler. */
738 task_thread_info(paca[lcpu].__current)->preempt_count = 0;
739
740 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
741 index 4929286..ee7c713 100644
742 --- a/arch/s390/kvm/kvm-s390.c
743 +++ b/arch/s390/kvm/kvm-s390.c
744 @@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
745
746 rc = kvm_vcpu_init(vcpu, kvm, id);
747 if (rc)
748 - goto out_free_cpu;
749 + goto out_free_sie_block;
750 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
751 vcpu->arch.sie_block);
752
753 return vcpu;
754 +out_free_sie_block:
755 + free_page((unsigned long)(vcpu->arch.sie_block));
756 out_free_cpu:
757 kfree(vcpu);
758 out_nomem:
759 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
760 index 06d9e79..63400e6 100644
761 --- a/arch/x86/include/asm/kvm_host.h
762 +++ b/arch/x86/include/asm/kvm_host.h
763 @@ -180,6 +180,7 @@ union kvm_mmu_page_role {
764 unsigned invalid:1;
765 unsigned cr4_pge:1;
766 unsigned nxe:1;
767 + unsigned cr0_wp:1;
768 };
769 };
770
771 @@ -541,6 +542,8 @@ struct kvm_x86_ops {
772 int (*get_lpage_level)(void);
773 bool (*rdtscp_supported)(void);
774
775 + void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
776 +
777 const struct trace_print_flags *exit_reasons_str;
778 };
779
780 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
781 index 4604e6a..d86da72 100644
782 --- a/arch/x86/include/asm/msr-index.h
783 +++ b/arch/x86/include/asm/msr-index.h
784 @@ -199,8 +199,9 @@
785 #define MSR_IA32_EBL_CR_POWERON 0x0000002a
786 #define MSR_IA32_FEATURE_CONTROL 0x0000003a
787
788 -#define FEATURE_CONTROL_LOCKED (1<<0)
789 -#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
790 +#define FEATURE_CONTROL_LOCKED (1<<0)
791 +#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
792 +#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
793
794 #define MSR_IA32_APICBASE 0x0000001b
795 #define MSR_IA32_APICBASE_BSP (1<<8)
796 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
797 index f854d89..29e5e6e 100644
798 --- a/arch/x86/kernel/amd_iommu.c
799 +++ b/arch/x86/kernel/amd_iommu.c
800 @@ -1420,6 +1420,7 @@ static int __attach_device(struct device *dev,
801 struct protection_domain *domain)
802 {
803 struct iommu_dev_data *dev_data, *alias_data;
804 + int ret;
805
806 dev_data = get_dev_data(dev);
807 alias_data = get_dev_data(dev_data->alias);
808 @@ -1431,13 +1432,14 @@ static int __attach_device(struct device *dev,
809 spin_lock(&domain->lock);
810
811 /* Some sanity checks */
812 + ret = -EBUSY;
813 if (alias_data->domain != NULL &&
814 alias_data->domain != domain)
815 - return -EBUSY;
816 + goto out_unlock;
817
818 if (dev_data->domain != NULL &&
819 dev_data->domain != domain)
820 - return -EBUSY;
821 + goto out_unlock;
822
823 /* Do real assignment */
824 if (dev_data->alias != dev) {
825 @@ -1453,10 +1455,14 @@ static int __attach_device(struct device *dev,
826
827 atomic_inc(&dev_data->bind);
828
829 + ret = 0;
830 +
831 +out_unlock:
832 +
833 /* ready */
834 spin_unlock(&domain->lock);
835
836 - return 0;
837 + return ret;
838 }
839
840 /*
841 @@ -2257,10 +2263,6 @@ int __init amd_iommu_init_dma_ops(void)
842
843 iommu_detected = 1;
844 swiotlb = 0;
845 -#ifdef CONFIG_GART_IOMMU
846 - gart_iommu_aperture_disabled = 1;
847 - gart_iommu_aperture = 0;
848 -#endif
849
850 /* Make the driver finally visible to the drivers */
851 dma_ops = &amd_iommu_dma_ops;
852 diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
853 index 6360abf..6f8ce75 100644
854 --- a/arch/x86/kernel/amd_iommu_init.c
855 +++ b/arch/x86/kernel/amd_iommu_init.c
856 @@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(u64 address)
857 {
858 u8 *ret;
859
860 - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
861 + if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
862 + pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
863 + address);
864 + pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
865 return NULL;
866 + }
867
868 ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
869 if (ret != NULL)
870 @@ -1313,7 +1317,7 @@ static int __init amd_iommu_init(void)
871 ret = amd_iommu_init_dma_ops();
872
873 if (ret)
874 - goto free;
875 + goto free_disable;
876
877 amd_iommu_init_api();
878
879 @@ -1331,9 +1335,10 @@ static int __init amd_iommu_init(void)
880 out:
881 return ret;
882
883 -free:
884 +free_disable:
885 disable_iommus();
886
887 +free:
888 amd_iommu_uninit_devices();
889
890 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
891 @@ -1352,6 +1357,15 @@ free:
892
893 free_unity_maps();
894
895 +#ifdef CONFIG_GART_IOMMU
896 + /*
897 + * We failed to initialize the AMD IOMMU - try fallback to GART
898 + * if possible.
899 + */
900 + gart_iommu_init();
901 +
902 +#endif
903 +
904 goto out;
905 }
906
907 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
908 index db5bdc8..c5e8b53 100644
909 --- a/arch/x86/kernel/cpu/perf_event.c
910 +++ b/arch/x86/kernel/cpu/perf_event.c
911 @@ -460,8 +460,11 @@ static int __hw_perf_event_init(struct perf_event *event)
912 if (atomic_read(&active_events) == 0) {
913 if (!reserve_pmc_hardware())
914 err = -EBUSY;
915 - else
916 + else {
917 err = reserve_bts_hardware();
918 + if (err)
919 + release_pmc_hardware();
920 + }
921 }
922 if (!err)
923 atomic_inc(&active_events);
924 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
925 index 03801f2..dfdfe46 100644
926 --- a/arch/x86/kernel/pvclock.c
927 +++ b/arch/x86/kernel/pvclock.c
928 @@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
929 return pv_tsc_khz;
930 }
931
932 +static atomic64_t last_value = ATOMIC64_INIT(0);
933 +
934 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
935 {
936 struct pvclock_shadow_time shadow;
937 unsigned version;
938 cycle_t ret, offset;
939 + u64 last;
940
941 do {
942 version = pvclock_get_time_values(&shadow, src);
943 @@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
944 barrier();
945 } while (version != src->version);
946
947 + /*
948 + * Assumption here is that last_value, a global accumulator, always goes
949 + * forward. If we are less than that, we should not be much smaller.
950 + * We assume there is an error marging we're inside, and then the correction
951 + * does not sacrifice accuracy.
952 + *
953 + * For reads: global may have changed between test and return,
954 + * but this means someone else updated poked the clock at a later time.
955 + * We just need to make sure we are not seeing a backwards event.
956 + *
957 + * For updates: last_value = ret is not enough, since two vcpus could be
958 + * updating at the same time, and one of them could be slightly behind,
959 + * making the assumption that last_value always go forward fail to hold.
960 + */
961 + last = atomic64_read(&last_value);
962 + do {
963 + if (ret < last)
964 + return last;
965 + last = atomic64_cmpxchg(&last_value, last, ret);
966 + } while (unlikely(last != ret));
967 +
968 return ret;
969 }
970
971 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
972 index c4851ef..47ae912 100644
973 --- a/arch/x86/kernel/setup.c
974 +++ b/arch/x86/kernel/setup.c
975 @@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
976 DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
977 },
978 },
979 + /*
980 + * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
981 + * match on the product name.
982 + */
983 + {
984 + .callback = dmi_low_memory_corruption,
985 + .ident = "Phoenix BIOS",
986 + .matches = {
987 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
988 + },
989 + },
990 #endif
991 {}
992 };
993 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
994 index 86c9f91..46b8277 100644
995 --- a/arch/x86/kernel/tboot.c
996 +++ b/arch/x86/kernel/tboot.c
997 @@ -46,6 +46,7 @@
998
999 /* Global pointer to shared data; NULL means no measured launch. */
1000 struct tboot *tboot __read_mostly;
1001 +EXPORT_SYMBOL(tboot);
1002
1003 /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
1004 #define AP_WAIT_TIMEOUT 1
1005 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1006 index 19a8906..62fd8e6 100644
1007 --- a/arch/x86/kvm/mmu.c
1008 +++ b/arch/x86/kvm/mmu.c
1009 @@ -223,7 +223,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
1010 }
1011 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
1012
1013 -static int is_write_protection(struct kvm_vcpu *vcpu)
1014 +static bool is_write_protection(struct kvm_vcpu *vcpu)
1015 {
1016 return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
1017 }
1018 @@ -2085,11 +2085,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1019 direct = 1;
1020 if (mmu_check_root(vcpu, root_gfn))
1021 return 1;
1022 + spin_lock(&vcpu->kvm->mmu_lock);
1023 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1024 PT64_ROOT_LEVEL, direct,
1025 ACC_ALL, NULL);
1026 root = __pa(sp->spt);
1027 ++sp->root_count;
1028 + spin_unlock(&vcpu->kvm->mmu_lock);
1029 vcpu->arch.mmu.root_hpa = root;
1030 return 0;
1031 }
1032 @@ -2111,11 +2113,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1033 root_gfn = 0;
1034 if (mmu_check_root(vcpu, root_gfn))
1035 return 1;
1036 + spin_lock(&vcpu->kvm->mmu_lock);
1037 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1038 PT32_ROOT_LEVEL, direct,
1039 ACC_ALL, NULL);
1040 root = __pa(sp->spt);
1041 ++sp->root_count;
1042 + spin_unlock(&vcpu->kvm->mmu_lock);
1043 +
1044 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1045 }
1046 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1047 @@ -2439,6 +2444,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1048 r = paging32_init_context(vcpu);
1049
1050 vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
1051 + vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
1052
1053 return r;
1054 }
1055 @@ -2478,7 +2484,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
1056 goto out;
1057 spin_lock(&vcpu->kvm->mmu_lock);
1058 kvm_mmu_free_some_pages(vcpu);
1059 + spin_unlock(&vcpu->kvm->mmu_lock);
1060 r = mmu_alloc_roots(vcpu);
1061 + spin_lock(&vcpu->kvm->mmu_lock);
1062 mmu_sync_roots(vcpu);
1063 spin_unlock(&vcpu->kvm->mmu_lock);
1064 if (r)
1065 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1066 index 737361f..1185b55 100644
1067 --- a/arch/x86/kvm/svm.c
1068 +++ b/arch/x86/kvm/svm.c
1069 @@ -129,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
1070 static void svm_complete_interrupts(struct vcpu_svm *svm);
1071
1072 static int nested_svm_exit_handled(struct vcpu_svm *svm);
1073 +static int nested_svm_intercept(struct vcpu_svm *svm);
1074 static int nested_svm_vmexit(struct vcpu_svm *svm);
1075 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1076 bool has_error_code, u32 error_code);
1077 @@ -1384,6 +1385,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
1078 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1079 bool has_error_code, u32 error_code)
1080 {
1081 + int vmexit;
1082 +
1083 if (!is_nested(svm))
1084 return 0;
1085
1086 @@ -1392,19 +1395,24 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1087 svm->vmcb->control.exit_info_1 = error_code;
1088 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1089
1090 - return nested_svm_exit_handled(svm);
1091 + vmexit = nested_svm_intercept(svm);
1092 + if (vmexit == NESTED_EXIT_DONE)
1093 + svm->nested.exit_required = true;
1094 +
1095 + return vmexit;
1096 }
1097
1098 -static inline int nested_svm_intr(struct vcpu_svm *svm)
1099 +/* This function returns true if it is safe to enable the irq window */
1100 +static inline bool nested_svm_intr(struct vcpu_svm *svm)
1101 {
1102 if (!is_nested(svm))
1103 - return 0;
1104 + return true;
1105
1106 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1107 - return 0;
1108 + return true;
1109
1110 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
1111 - return 0;
1112 + return false;
1113
1114 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1115
1116 @@ -1417,13 +1425,13 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
1117 */
1118 svm->nested.exit_required = true;
1119 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1120 - return 1;
1121 + return false;
1122 }
1123
1124 - return 0;
1125 + return true;
1126 }
1127
1128 -static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
1129 +static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
1130 {
1131 struct page *page;
1132
1133 @@ -1431,7 +1439,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
1134 if (is_error_page(page))
1135 goto error;
1136
1137 - return kmap_atomic(page, idx);
1138 + *_page = page;
1139 +
1140 + return kmap(page);
1141
1142 error:
1143 kvm_release_page_clean(page);
1144 @@ -1440,16 +1450,9 @@ error:
1145 return NULL;
1146 }
1147
1148 -static void nested_svm_unmap(void *addr, enum km_type idx)
1149 +static void nested_svm_unmap(struct page *page)
1150 {
1151 - struct page *page;
1152 -
1153 - if (!addr)
1154 - return;
1155 -
1156 - page = kmap_atomic_to_page(addr);
1157 -
1158 - kunmap_atomic(addr, idx);
1159 + kunmap(page);
1160 kvm_release_page_dirty(page);
1161 }
1162
1163 @@ -1459,16 +1462,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1164 u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1165 bool ret = false;
1166 u32 t0, t1;
1167 - u8 *msrpm;
1168 + u8 val;
1169
1170 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1171 return false;
1172
1173 - msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1174 -
1175 - if (!msrpm)
1176 - goto out;
1177 -
1178 switch (msr) {
1179 case 0 ... 0x1fff:
1180 t0 = (msr * 2) % 8;
1181 @@ -1489,11 +1487,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1182 goto out;
1183 }
1184
1185 - ret = msrpm[t1] & ((1 << param) << t0);
1186 + if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1))
1187 + ret = val & ((1 << param) << t0);
1188
1189 out:
1190 - nested_svm_unmap(msrpm, KM_USER0);
1191 -
1192 return ret;
1193 }
1194
1195 @@ -1525,7 +1522,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
1196 /*
1197 * If this function returns true, this #vmexit was already handled
1198 */
1199 -static int nested_svm_exit_handled(struct vcpu_svm *svm)
1200 +static int nested_svm_intercept(struct vcpu_svm *svm)
1201 {
1202 u32 exit_code = svm->vmcb->control.exit_code;
1203 int vmexit = NESTED_EXIT_HOST;
1204 @@ -1571,9 +1568,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
1205 }
1206 }
1207
1208 - if (vmexit == NESTED_EXIT_DONE) {
1209 + return vmexit;
1210 +}
1211 +
1212 +static int nested_svm_exit_handled(struct vcpu_svm *svm)
1213 +{
1214 + int vmexit;
1215 +
1216 + vmexit = nested_svm_intercept(svm);
1217 +
1218 + if (vmexit == NESTED_EXIT_DONE)
1219 nested_svm_vmexit(svm);
1220 - }
1221
1222 return vmexit;
1223 }
1224 @@ -1615,6 +1620,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1225 struct vmcb *nested_vmcb;
1226 struct vmcb *hsave = svm->nested.hsave;
1227 struct vmcb *vmcb = svm->vmcb;
1228 + struct page *page;
1229
1230 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
1231 vmcb->control.exit_info_1,
1232 @@ -1622,7 +1628,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1233 vmcb->control.exit_int_info,
1234 vmcb->control.exit_int_info_err);
1235
1236 - nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
1237 + nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
1238 if (!nested_vmcb)
1239 return 1;
1240
1241 @@ -1635,9 +1641,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1242 nested_vmcb->save.ds = vmcb->save.ds;
1243 nested_vmcb->save.gdtr = vmcb->save.gdtr;
1244 nested_vmcb->save.idtr = vmcb->save.idtr;
1245 + nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
1246 if (npt_enabled)
1247 nested_vmcb->save.cr3 = vmcb->save.cr3;
1248 + else
1249 + nested_vmcb->save.cr3 = svm->vcpu.arch.cr3;
1250 nested_vmcb->save.cr2 = vmcb->save.cr2;
1251 + nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
1252 nested_vmcb->save.rflags = vmcb->save.rflags;
1253 nested_vmcb->save.rip = vmcb->save.rip;
1254 nested_vmcb->save.rsp = vmcb->save.rsp;
1255 @@ -1712,7 +1722,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1256 /* Exit nested SVM mode */
1257 svm->nested.vmcb = 0;
1258
1259 - nested_svm_unmap(nested_vmcb, KM_USER0);
1260 + nested_svm_unmap(page);
1261
1262 kvm_mmu_reset_context(&svm->vcpu);
1263 kvm_mmu_load(&svm->vcpu);
1264 @@ -1723,9 +1733,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1265 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1266 {
1267 u32 *nested_msrpm;
1268 + struct page *page;
1269 int i;
1270
1271 - nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1272 + nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page);
1273 if (!nested_msrpm)
1274 return false;
1275
1276 @@ -1734,7 +1745,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1277
1278 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1279
1280 - nested_svm_unmap(nested_msrpm, KM_USER0);
1281 + nested_svm_unmap(page);
1282
1283 return true;
1284 }
1285 @@ -1744,8 +1755,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1286 struct vmcb *nested_vmcb;
1287 struct vmcb *hsave = svm->nested.hsave;
1288 struct vmcb *vmcb = svm->vmcb;
1289 + struct page *page;
1290
1291 - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1292 + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1293 if (!nested_vmcb)
1294 return false;
1295
1296 @@ -1819,21 +1831,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1297 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1298 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1299
1300 - /* We don't want a nested guest to be more powerful than the guest,
1301 - so all intercepts are ORed */
1302 - svm->vmcb->control.intercept_cr_read |=
1303 - nested_vmcb->control.intercept_cr_read;
1304 - svm->vmcb->control.intercept_cr_write |=
1305 - nested_vmcb->control.intercept_cr_write;
1306 - svm->vmcb->control.intercept_dr_read |=
1307 - nested_vmcb->control.intercept_dr_read;
1308 - svm->vmcb->control.intercept_dr_write |=
1309 - nested_vmcb->control.intercept_dr_write;
1310 - svm->vmcb->control.intercept_exceptions |=
1311 - nested_vmcb->control.intercept_exceptions;
1312 -
1313 - svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1314 -
1315 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1316
1317 /* cache intercepts */
1318 @@ -1851,13 +1848,38 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
1319 else
1320 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1321
1322 + if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
1323 + /* We only want the cr8 intercept bits of the guest */
1324 + svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
1325 + svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1326 + }
1327 +
1328 + /* We don't want to see VMMCALLs from a nested guest */
1329 + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
1330 +
1331 + /* We don't want a nested guest to be more powerful than the guest,
1332 + so all intercepts are ORed */
1333 + svm->vmcb->control.intercept_cr_read |=
1334 + nested_vmcb->control.intercept_cr_read;
1335 + svm->vmcb->control.intercept_cr_write |=
1336 + nested_vmcb->control.intercept_cr_write;
1337 + svm->vmcb->control.intercept_dr_read |=
1338 + nested_vmcb->control.intercept_dr_read;
1339 + svm->vmcb->control.intercept_dr_write |=
1340 + nested_vmcb->control.intercept_dr_write;
1341 + svm->vmcb->control.intercept_exceptions |=
1342 + nested_vmcb->control.intercept_exceptions;
1343 +
1344 + svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1345 +
1346 + svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
1347 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1348 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1349 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1350 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1351 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1352
1353 - nested_svm_unmap(nested_vmcb, KM_USER0);
1354 + nested_svm_unmap(page);
1355
1356 enable_gif(svm);
1357
1358 @@ -1883,6 +1905,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1359 static int vmload_interception(struct vcpu_svm *svm)
1360 {
1361 struct vmcb *nested_vmcb;
1362 + struct page *page;
1363
1364 if (nested_svm_check_permissions(svm))
1365 return 1;
1366 @@ -1890,12 +1913,12 @@ static int vmload_interception(struct vcpu_svm *svm)
1367 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1368 skip_emulated_instruction(&svm->vcpu);
1369
1370 - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1371 + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1372 if (!nested_vmcb)
1373 return 1;
1374
1375 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1376 - nested_svm_unmap(nested_vmcb, KM_USER0);
1377 + nested_svm_unmap(page);
1378
1379 return 1;
1380 }
1381 @@ -1903,6 +1926,7 @@ static int vmload_interception(struct vcpu_svm *svm)
1382 static int vmsave_interception(struct vcpu_svm *svm)
1383 {
1384 struct vmcb *nested_vmcb;
1385 + struct page *page;
1386
1387 if (nested_svm_check_permissions(svm))
1388 return 1;
1389 @@ -1910,12 +1934,12 @@ static int vmsave_interception(struct vcpu_svm *svm)
1390 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1391 skip_emulated_instruction(&svm->vcpu);
1392
1393 - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1394 + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
1395 if (!nested_vmcb)
1396 return 1;
1397
1398 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1399 - nested_svm_unmap(nested_vmcb, KM_USER0);
1400 + nested_svm_unmap(page);
1401
1402 return 1;
1403 }
1404 @@ -2511,6 +2535,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
1405 {
1406 struct vcpu_svm *svm = to_svm(vcpu);
1407
1408 + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1409 + return;
1410 +
1411 if (irr == -1)
1412 return;
1413
1414 @@ -2568,13 +2595,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
1415 {
1416 struct vcpu_svm *svm = to_svm(vcpu);
1417
1418 - nested_svm_intr(svm);
1419 -
1420 /* In case GIF=0 we can't rely on the CPU to tell us when
1421 * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
1422 * The next time we get that intercept, this function will be
1423 * called again though and we'll get the vintr intercept. */
1424 - if (gif_set(svm)) {
1425 + if (gif_set(svm) && nested_svm_intr(svm)) {
1426 svm_set_vintr(svm);
1427 svm_inject_irq(svm, 0x0);
1428 }
1429 @@ -2614,6 +2639,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1430 {
1431 struct vcpu_svm *svm = to_svm(vcpu);
1432
1433 + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1434 + return;
1435 +
1436 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1437 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1438 kvm_set_cr8(vcpu, cr8);
1439 @@ -2625,6 +2653,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1440 struct vcpu_svm *svm = to_svm(vcpu);
1441 u64 cr8;
1442
1443 + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
1444 + return;
1445 +
1446 cr8 = kvm_get_cr8(vcpu);
1447 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1448 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1449 @@ -2879,6 +2910,20 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
1450 {
1451 }
1452
1453 +static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
1454 +{
1455 + switch (func) {
1456 + case 0x8000000A:
1457 + entry->eax = 1; /* SVM revision 1 */
1458 + entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
1459 + ASID emulation to nested SVM */
1460 + entry->ecx = 0; /* Reserved */
1461 + entry->edx = 0; /* Do not support any additional features */
1462 +
1463 + break;
1464 + }
1465 +}
1466 +
1467 static const struct trace_print_flags svm_exit_reasons_str[] = {
1468 { SVM_EXIT_READ_CR0, "read_cr0" },
1469 { SVM_EXIT_READ_CR3, "read_cr3" },
1470 @@ -3023,6 +3068,8 @@ static struct kvm_x86_ops svm_x86_ops = {
1471 .cpuid_update = svm_cpuid_update,
1472
1473 .rdtscp_supported = svm_rdtscp_supported,
1474 +
1475 + .set_supported_cpuid = svm_set_supported_cpuid,
1476 };
1477
1478 static int __init svm_init(void)
1479 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1480 index 2f8db0e..3c86c42 100644
1481 --- a/arch/x86/kvm/vmx.c
1482 +++ b/arch/x86/kvm/vmx.c
1483 @@ -27,6 +27,7 @@
1484 #include <linux/moduleparam.h>
1485 #include <linux/ftrace_event.h>
1486 #include <linux/slab.h>
1487 +#include <linux/tboot.h>
1488 #include "kvm_cache_regs.h"
1489 #include "x86.h"
1490
1491 @@ -1176,9 +1177,16 @@ static __init int vmx_disabled_by_bios(void)
1492 u64 msr;
1493
1494 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
1495 - return (msr & (FEATURE_CONTROL_LOCKED |
1496 - FEATURE_CONTROL_VMXON_ENABLED))
1497 - == FEATURE_CONTROL_LOCKED;
1498 + if (msr & FEATURE_CONTROL_LOCKED) {
1499 + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
1500 + && tboot_enabled())
1501 + return 1;
1502 + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
1503 + && !tboot_enabled())
1504 + return 1;
1505 + }
1506 +
1507 + return 0;
1508 /* locked but not enabled */
1509 }
1510
1511 @@ -1186,21 +1194,23 @@ static int hardware_enable(void *garbage)
1512 {
1513 int cpu = raw_smp_processor_id();
1514 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1515 - u64 old;
1516 + u64 old, test_bits;
1517
1518 if (read_cr4() & X86_CR4_VMXE)
1519 return -EBUSY;
1520
1521 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1522 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1523 - if ((old & (FEATURE_CONTROL_LOCKED |
1524 - FEATURE_CONTROL_VMXON_ENABLED))
1525 - != (FEATURE_CONTROL_LOCKED |
1526 - FEATURE_CONTROL_VMXON_ENABLED))
1527 +
1528 + test_bits = FEATURE_CONTROL_LOCKED;
1529 + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
1530 + if (tboot_enabled())
1531 + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
1532 +
1533 + if ((old & test_bits) != test_bits) {
1534 /* enable and lock */
1535 - wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
1536 - FEATURE_CONTROL_LOCKED |
1537 - FEATURE_CONTROL_VMXON_ENABLED);
1538 + wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
1539 + }
1540 write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
1541 asm volatile (ASM_VMX_VMXON_RAX
1542 : : "a"(&phys_addr), "m"(phys_addr)
1543 @@ -4115,6 +4125,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
1544 }
1545 }
1546
1547 +static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
1548 +{
1549 +}
1550 +
1551 static struct kvm_x86_ops vmx_x86_ops = {
1552 .cpu_has_kvm_support = cpu_has_kvm_support,
1553 .disabled_by_bios = vmx_disabled_by_bios,
1554 @@ -4186,6 +4200,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
1555 .cpuid_update = vmx_cpuid_update,
1556
1557 .rdtscp_supported = vmx_rdtscp_supported,
1558 +
1559 + .set_supported_cpuid = vmx_set_supported_cpuid,
1560 };
1561
1562 static int __init vmx_init(void)
1563 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1564 index c4f35b5..a6517a2 100644
1565 --- a/arch/x86/kvm/x86.c
1566 +++ b/arch/x86/kvm/x86.c
1567 @@ -484,7 +484,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
1568
1569 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1570 {
1571 - kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f));
1572 + kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
1573 }
1574 EXPORT_SYMBOL_GPL(kvm_lmsw);
1575
1576 @@ -624,48 +624,42 @@ static u32 emulated_msrs[] = {
1577 MSR_IA32_MISC_ENABLE,
1578 };
1579
1580 -static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1581 +static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
1582 {
1583 - if (efer & efer_reserved_bits) {
1584 - kvm_inject_gp(vcpu, 0);
1585 - return;
1586 - }
1587 + if (efer & efer_reserved_bits)
1588 + return 1;
1589
1590 if (is_paging(vcpu)
1591 - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
1592 - kvm_inject_gp(vcpu, 0);
1593 - return;
1594 - }
1595 + && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1596 + return 1;
1597
1598 if (efer & EFER_FFXSR) {
1599 struct kvm_cpuid_entry2 *feat;
1600
1601 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1602 - if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
1603 - kvm_inject_gp(vcpu, 0);
1604 - return;
1605 - }
1606 + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
1607 + return 1;
1608 }
1609
1610 if (efer & EFER_SVME) {
1611 struct kvm_cpuid_entry2 *feat;
1612
1613 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
1614 - if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
1615 - kvm_inject_gp(vcpu, 0);
1616 - return;
1617 - }
1618 + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
1619 + return 1;
1620 }
1621
1622 - kvm_x86_ops->set_efer(vcpu, efer);
1623 -
1624 efer &= ~EFER_LMA;
1625 efer |= vcpu->arch.efer & EFER_LMA;
1626
1627 + kvm_x86_ops->set_efer(vcpu, efer);
1628 +
1629 vcpu->arch.efer = efer;
1630
1631 vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
1632 kvm_mmu_reset_context(vcpu);
1633 +
1634 + return 0;
1635 }
1636
1637 void kvm_enable_efer_bits(u64 mask)
1638 @@ -695,14 +689,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1639
1640 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1641 {
1642 - static int version;
1643 + int version;
1644 + int r;
1645 struct pvclock_wall_clock wc;
1646 struct timespec boot;
1647
1648 if (!wall_clock)
1649 return;
1650
1651 - version++;
1652 + r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1653 + if (r)
1654 + return;
1655 +
1656 + if (version & 1)
1657 + ++version; /* first time write, random junk */
1658 +
1659 + ++version;
1660
1661 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1662
1663 @@ -1086,8 +1088,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1664 {
1665 switch (msr) {
1666 case MSR_EFER:
1667 - set_efer(vcpu, data);
1668 - break;
1669 + return set_efer(vcpu, data);
1670 case MSR_K7_HWCR:
1671 data &= ~(u64)0x40; /* ignore flush filter disable */
1672 if (data != 0) {
1673 @@ -1768,6 +1769,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1674 {
1675 int r;
1676
1677 + vcpu_load(vcpu);
1678 r = -E2BIG;
1679 if (cpuid->nent < vcpu->arch.cpuid_nent)
1680 goto out;
1681 @@ -1779,6 +1781,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1682
1683 out:
1684 cpuid->nent = vcpu->arch.cpuid_nent;
1685 + vcpu_put(vcpu);
1686 return r;
1687 }
1688
1689 @@ -1917,6 +1920,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1690 entry->ecx &= kvm_supported_word6_x86_features;
1691 break;
1692 }
1693 +
1694 + kvm_x86_ops->set_supported_cpuid(function, entry);
1695 +
1696 put_cpu();
1697 }
1698
1699 @@ -2031,6 +2037,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1700 int r;
1701 unsigned bank_num = mcg_cap & 0xff, bank;
1702
1703 + vcpu_load(vcpu);
1704 r = -EINVAL;
1705 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
1706 goto out;
1707 @@ -2045,6 +2052,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1708 for (bank = 0; bank < bank_num; bank++)
1709 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1710 out:
1711 + vcpu_put(vcpu);
1712 return r;
1713 }
1714
1715 @@ -2312,7 +2320,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1716 r = -EFAULT;
1717 if (copy_from_user(&mce, argp, sizeof mce))
1718 goto out;
1719 + vcpu_load(vcpu);
1720 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1721 + vcpu_put(vcpu);
1722 break;
1723 }
1724 case KVM_GET_VCPU_EVENTS: {
1725 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
1726 index 2c505ee..f1fb411 100644
1727 --- a/arch/x86/oprofile/nmi_int.c
1728 +++ b/arch/x86/oprofile/nmi_int.c
1729 @@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
1730 static void nmi_cpu_start(void *dummy)
1731 {
1732 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
1733 - model->start(msrs);
1734 + if (!msrs->controls)
1735 + WARN_ON_ONCE(1);
1736 + else
1737 + model->start(msrs);
1738 }
1739
1740 static int nmi_start(void)
1741 @@ -107,7 +110,10 @@ static int nmi_start(void)
1742 static void nmi_cpu_stop(void *dummy)
1743 {
1744 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
1745 - model->stop(msrs);
1746 + if (!msrs->controls)
1747 + WARN_ON_ONCE(1);
1748 + else
1749 + model->stop(msrs);
1750 }
1751
1752 static void nmi_stop(void)
1753 diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
1754 index 987267f..a9c6611 100644
1755 --- a/arch/x86/xen/suspend.c
1756 +++ b/arch/x86/xen/suspend.c
1757 @@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data)
1758
1759 void xen_arch_resume(void)
1760 {
1761 - smp_call_function(xen_vcpu_notify_restore,
1762 - (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
1763 + on_each_cpu(xen_vcpu_notify_restore,
1764 + (void *)CLOCK_EVT_NOTIFY_RESUME, 1);
1765 }
1766 diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
1767 index f04c989..ed8cd3c 100644
1768 --- a/arch/xtensa/include/asm/cache.h
1769 +++ b/arch/xtensa/include/asm/cache.h
1770 @@ -29,5 +29,6 @@
1771 # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
1772 #endif
1773
1774 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1775
1776 #endif /* _XTENSA_CACHE_H */
1777 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1778 index 5f127cf..002c0ce 100644
1779 --- a/block/cfq-iosched.c
1780 +++ b/block/cfq-iosched.c
1781 @@ -2503,15 +2503,10 @@ static void cfq_free_io_context(struct io_context *ioc)
1782 __call_for_each_cic(ioc, cic_free_func);
1783 }
1784
1785 -static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1786 +static void cfq_put_cooperator(struct cfq_queue *cfqq)
1787 {
1788 struct cfq_queue *__cfqq, *next;
1789
1790 - if (unlikely(cfqq == cfqd->active_queue)) {
1791 - __cfq_slice_expired(cfqd, cfqq, 0);
1792 - cfq_schedule_dispatch(cfqd);
1793 - }
1794 -
1795 /*
1796 * If this queue was scheduled to merge with another queue, be
1797 * sure to drop the reference taken on that queue (and others in
1798 @@ -2527,6 +2522,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1799 cfq_put_queue(__cfqq);
1800 __cfqq = next;
1801 }
1802 +}
1803 +
1804 +static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1805 +{
1806 + if (unlikely(cfqq == cfqd->active_queue)) {
1807 + __cfq_slice_expired(cfqd, cfqq, 0);
1808 + cfq_schedule_dispatch(cfqd);
1809 + }
1810 +
1811 + cfq_put_cooperator(cfqq);
1812
1813 cfq_put_queue(cfqq);
1814 }
1815 @@ -3470,6 +3475,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
1816 }
1817
1818 cic_set_cfqq(cic, NULL, 1);
1819 +
1820 + cfq_put_cooperator(cfqq);
1821 +
1822 cfq_put_queue(cfqq);
1823 return NULL;
1824 }
1825 diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1826 index fc2f26b..c5fef01 100644
1827 --- a/drivers/acpi/video_detect.c
1828 +++ b/drivers/acpi/video_detect.c
1829 @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
1830 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
1831 if (!strcmp("video", str))
1832 acpi_video_support |=
1833 - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
1834 + ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
1835 }
1836 return 1;
1837 }
1838 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1839 index 49cffb6..5abab5d 100644
1840 --- a/drivers/ata/libata-core.c
1841 +++ b/drivers/ata/libata-core.c
1842 @@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
1843 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
1844 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
1845
1846 +static int atapi_an;
1847 +module_param(atapi_an, int, 0444);
1848 +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
1849 +
1850 MODULE_AUTHOR("Jeff Garzik");
1851 MODULE_DESCRIPTION("Library module for ATA devices");
1852 MODULE_LICENSE("GPL");
1853 @@ -2572,7 +2576,8 @@ int ata_dev_configure(struct ata_device *dev)
1854 * to enable ATAPI AN to discern between PHY status
1855 * changed notifications and ATAPI ANs.
1856 */
1857 - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
1858 + if (atapi_an &&
1859 + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
1860 (!sata_pmp_attached(ap) ||
1861 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
1862 unsigned int err_mask;
1863 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1864 index e3877b6..4723648 100644
1865 --- a/drivers/ata/libata-sff.c
1866 +++ b/drivers/ata/libata-sff.c
1867 @@ -894,7 +894,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1868 do_write);
1869 }
1870
1871 - if (!do_write)
1872 + if (!do_write && !PageSlab(page))
1873 flush_dcache_page(page);
1874
1875 qc->curbytes += qc->sect_size;
1876 diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
1877 index 2a98b09..7f3d179 100644
1878 --- a/drivers/ata/sata_nv.c
1879 +++ b/drivers/ata/sata_nv.c
1880 @@ -1674,7 +1674,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
1881 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1882 mask &= ~(NV_INT_ALL_MCP55 << shift);
1883 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1884 - ata_sff_freeze(ap);
1885 }
1886
1887 static void nv_mcp55_thaw(struct ata_port *ap)
1888 @@ -1688,7 +1687,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1889 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1890 mask |= (NV_INT_MASK_MCP55 << shift);
1891 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1892 - ata_sff_thaw(ap);
1893 }
1894
1895 static void nv_adma_error_handler(struct ata_port *ap)
1896 @@ -2479,8 +2477,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1897 }
1898
1899 pci_set_master(pdev);
1900 - return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
1901 - IRQF_SHARED, ipriv->sht);
1902 + return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
1903 }
1904
1905 #ifdef CONFIG_PM
1906 diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
1907 index 08f6549..0553455 100644
1908 --- a/drivers/ata/sata_via.c
1909 +++ b/drivers/ata/sata_via.c
1910 @@ -575,6 +575,19 @@ static void svia_configure(struct pci_dev *pdev)
1911 tmp8 |= NATIVE_MODE_ALL;
1912 pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
1913 }
1914 +
1915 + /*
1916 + * vt6421 has problems talking to some drives. The following
1917 + * is the magic fix from Joseph Chan <JosephChan@via.com.tw>.
1918 + * Please add proper documentation if possible.
1919 + *
1920 + * https://bugzilla.kernel.org/show_bug.cgi?id=15173
1921 + */
1922 + if (pdev->device == 0x3249) {
1923 + pci_read_config_byte(pdev, 0x52, &tmp8);
1924 + tmp8 |= 1 << 2;
1925 + pci_write_config_byte(pdev, 0x52, tmp8);
1926 + }
1927 }
1928
1929 static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1930 diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
1931 index f35719a..251acea 100644
1932 --- a/drivers/base/cpu.c
1933 +++ b/drivers/base/cpu.c
1934 @@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
1935 /* display offline cpus < nr_cpu_ids */
1936 if (!alloc_cpumask_var(&offline, GFP_KERNEL))
1937 return -ENOMEM;
1938 - cpumask_complement(offline, cpu_online_mask);
1939 + cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
1940 n = cpulist_scnprintf(buf, len, offline);
1941 free_cpumask_var(offline);
1942
1943 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
1944 index 4462b11..9ead05d 100644
1945 --- a/drivers/char/ipmi/ipmi_si_intf.c
1946 +++ b/drivers/char/ipmi/ipmi_si_intf.c
1947 @@ -314,9 +314,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
1948 {
1949 /* Deliver the message to the upper layer with the lock
1950 released. */
1951 - spin_unlock(&(smi_info->si_lock));
1952 - ipmi_smi_msg_received(smi_info->intf, msg);
1953 - spin_lock(&(smi_info->si_lock));
1954 +
1955 + if (smi_info->run_to_completion) {
1956 + ipmi_smi_msg_received(smi_info->intf, msg);
1957 + } else {
1958 + spin_unlock(&(smi_info->si_lock));
1959 + ipmi_smi_msg_received(smi_info->intf, msg);
1960 + spin_lock(&(smi_info->si_lock));
1961 + }
1962 }
1963
1964 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
1965 diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
1966 index 744f748..a860ec0 100644
1967 --- a/drivers/clocksource/sh_cmt.c
1968 +++ b/drivers/clocksource/sh_cmt.c
1969 @@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
1970 static int sh_cmt_clocksource_enable(struct clocksource *cs)
1971 {
1972 struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
1973 - int ret;
1974
1975 p->total_cycles = 0;
1976
1977 - ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
1978 - if (ret)
1979 - return ret;
1980 -
1981 - /* TODO: calculate good shift from rate and counter bit width */
1982 - cs->shift = 0;
1983 - cs->mult = clocksource_hz2mult(p->rate, cs->shift);
1984 - return 0;
1985 + return sh_cmt_start(p, FLAG_CLOCKSOURCE);
1986 }
1987
1988 static void sh_cmt_clocksource_disable(struct clocksource *cs)
1989 @@ -451,7 +443,18 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
1990 cs->resume = sh_cmt_clocksource_resume;
1991 cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
1992 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
1993 +
1994 + /* clk_get_rate() needs an enabled clock */
1995 + clk_enable(p->clk);
1996 + p->rate = clk_get_rate(p->clk) / (p->width == 16) ? 512 : 8;
1997 + clk_disable(p->clk);
1998 +
1999 + /* TODO: calculate good shift from rate and counter bit width */
2000 + cs->shift = 10;
2001 + cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2002 +
2003 pr_info("sh_cmt: %s used as clock source\n", cs->name);
2004 +
2005 clocksource_register(cs);
2006 return 0;
2007 }
2008 diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
2009 index fc9ff1e..7a24160 100644
2010 --- a/drivers/clocksource/sh_tmu.c
2011 +++ b/drivers/clocksource/sh_tmu.c
2012 @@ -200,16 +200,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
2013 static int sh_tmu_clocksource_enable(struct clocksource *cs)
2014 {
2015 struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
2016 - int ret;
2017 -
2018 - ret = sh_tmu_enable(p);
2019 - if (ret)
2020 - return ret;
2021
2022 - /* TODO: calculate good shift from rate and counter bit width */
2023 - cs->shift = 10;
2024 - cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2025 - return 0;
2026 + return sh_tmu_enable(p);
2027 }
2028
2029 static void sh_tmu_clocksource_disable(struct clocksource *cs)
2030 @@ -229,6 +221,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
2031 cs->disable = sh_tmu_clocksource_disable;
2032 cs->mask = CLOCKSOURCE_MASK(32);
2033 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
2034 +
2035 + /* clk_get_rate() needs an enabled clock */
2036 + clk_enable(p->clk);
2037 + /* channel will be configured at parent clock / 4 */
2038 + p->rate = clk_get_rate(p->clk) / 4;
2039 + clk_disable(p->clk);
2040 + /* TODO: calculate good shift from rate and counter bit width */
2041 + cs->shift = 10;
2042 + cs->mult = clocksource_hz2mult(p->rate, cs->shift);
2043 +
2044 pr_info("sh_tmu: %s used as clock source\n", cs->name);
2045 clocksource_register(cs);
2046 return 0;
2047 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
2048 index 5045156..991447b 100644
2049 --- a/drivers/firewire/core-card.c
2050 +++ b/drivers/firewire/core-card.c
2051 @@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
2052 static void fw_card_bm_work(struct work_struct *work)
2053 {
2054 struct fw_card *card = container_of(work, struct fw_card, work.work);
2055 - struct fw_device *root_device;
2056 + struct fw_device *root_device, *irm_device;
2057 struct fw_node *root_node;
2058 unsigned long flags;
2059 int root_id, new_root_id, irm_id, local_id;
2060 @@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work)
2061 bool do_reset = false;
2062 bool root_device_is_running;
2063 bool root_device_is_cmc;
2064 + bool irm_is_1394_1995_only;
2065
2066 spin_lock_irqsave(&card->lock, flags);
2067
2068 @@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work)
2069 }
2070
2071 generation = card->generation;
2072 +
2073 root_node = card->root_node;
2074 fw_node_get(root_node);
2075 root_device = root_node->data;
2076 root_device_is_running = root_device &&
2077 atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
2078 root_device_is_cmc = root_device && root_device->cmc;
2079 +
2080 + irm_device = card->irm_node->data;
2081 + irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
2082 + (irm_device->config_rom[2] & 0x000000f0) == 0;
2083 +
2084 root_id = root_node->node_id;
2085 irm_id = card->irm_node->node_id;
2086 local_id = card->local_node->node_id;
2087 @@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work)
2088
2089 if (!card->irm_node->link_on) {
2090 new_root_id = local_id;
2091 - fw_notify("IRM has link off, making local node (%02x) root.\n",
2092 - new_root_id);
2093 + fw_notify("%s, making local node (%02x) root.\n",
2094 + "IRM has link off", new_root_id);
2095 + goto pick_me;
2096 + }
2097 +
2098 + if (irm_is_1394_1995_only) {
2099 + new_root_id = local_id;
2100 + fw_notify("%s, making local node (%02x) root.\n",
2101 + "IRM is not 1394a compliant", new_root_id);
2102 goto pick_me;
2103 }
2104
2105 @@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work)
2106 * root, and thus, IRM.
2107 */
2108 new_root_id = local_id;
2109 - fw_notify("BM lock failed, making local node (%02x) root.\n",
2110 - new_root_id);
2111 + fw_notify("%s, making local node (%02x) root.\n",
2112 + "BM lock failed", new_root_id);
2113 goto pick_me;
2114 }
2115 } else if (card->bm_generation != generation) {
2116 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
2117 index 18f41d7..10348d3 100644
2118 --- a/drivers/gpu/drm/drm_edid.c
2119 +++ b/drivers/gpu/drm/drm_edid.c
2120 @@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
2121 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2122 /* 1024x768@85Hz */
2123 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
2124 - 1072, 1376, 0, 768, 769, 772, 808, 0,
2125 + 1168, 1376, 0, 768, 769, 772, 808, 0,
2126 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2127 /* 1152x864@75Hz */
2128 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2129 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2130 index ef3d91d..691701a 100644
2131 --- a/drivers/gpu/drm/i915/i915_gem.c
2132 +++ b/drivers/gpu/drm/i915/i915_gem.c
2133 @@ -2688,6 +2688,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2134 return -EINVAL;
2135 }
2136
2137 + /* If the object is bigger than the entire aperture, reject it early
2138 + * before evicting everything in a vain attempt to find space.
2139 + */
2140 + if (obj->size > dev->gtt_total) {
2141 + DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2142 + return -E2BIG;
2143 + }
2144 +
2145 search_free:
2146 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2147 obj->size, alignment, 0);
2148 @@ -4231,6 +4239,17 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2149 int ret;
2150
2151 i915_verify_inactive(dev, __FILE__, __LINE__);
2152 +
2153 + if (obj_priv->gtt_space != NULL) {
2154 + if (alignment == 0)
2155 + alignment = i915_gem_get_gtt_alignment(obj);
2156 + if (obj_priv->gtt_offset & (alignment - 1)) {
2157 + ret = i915_gem_object_unbind(obj);
2158 + if (ret)
2159 + return ret;
2160 + }
2161 + }
2162 +
2163 if (obj_priv->gtt_space == NULL) {
2164 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2165 if (ret)
2166 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2167 index c7502b6..70765cf 100644
2168 --- a/drivers/gpu/drm/i915/intel_display.c
2169 +++ b/drivers/gpu/drm/i915/intel_display.c
2170 @@ -4155,12 +4155,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
2171 spin_lock_irqsave(&dev->event_lock, flags);
2172 work = intel_crtc->unpin_work;
2173 if (work == NULL || !work->pending) {
2174 - if (work && !work->pending) {
2175 - obj_priv = to_intel_bo(work->pending_flip_obj);
2176 - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
2177 - obj_priv,
2178 - atomic_read(&obj_priv->pending_flip));
2179 - }
2180 spin_unlock_irqrestore(&dev->event_lock, flags);
2181 return;
2182 }
2183 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2184 index 77e40cf..7c28ff1 100644
2185 --- a/drivers/gpu/drm/i915/intel_dp.c
2186 +++ b/drivers/gpu/drm/i915/intel_dp.c
2187 @@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *connector)
2188 if (HAS_PCH_SPLIT(dev))
2189 return ironlake_dp_detect(connector);
2190
2191 - temp = I915_READ(PORT_HOTPLUG_EN);
2192 -
2193 - I915_WRITE(PORT_HOTPLUG_EN,
2194 - temp |
2195 - DPB_HOTPLUG_INT_EN |
2196 - DPC_HOTPLUG_INT_EN |
2197 - DPD_HOTPLUG_INT_EN);
2198 -
2199 - POSTING_READ(PORT_HOTPLUG_EN);
2200 -
2201 switch (dp_priv->output_reg) {
2202 case DP_B:
2203 bit = DPB_HOTPLUG_INT_STATUS;
2204 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
2205 index 034218c..4c7204a 100644
2206 --- a/drivers/gpu/drm/radeon/radeon.h
2207 +++ b/drivers/gpu/drm/radeon/radeon.h
2208 @@ -566,6 +566,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
2209 */
2210 int radeon_agp_init(struct radeon_device *rdev);
2211 void radeon_agp_resume(struct radeon_device *rdev);
2212 +void radeon_agp_suspend(struct radeon_device *rdev);
2213 void radeon_agp_fini(struct radeon_device *rdev);
2214
2215
2216 diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
2217 index 28e473f..f40dfb7 100644
2218 --- a/drivers/gpu/drm/radeon/radeon_agp.c
2219 +++ b/drivers/gpu/drm/radeon/radeon_agp.c
2220 @@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
2221 }
2222 #endif
2223 }
2224 +
2225 +void radeon_agp_suspend(struct radeon_device *rdev)
2226 +{
2227 + radeon_agp_fini(rdev);
2228 +}
2229 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2230 index 9916d82..1a4fa9b 100644
2231 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2232 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2233 @@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
2234 }
2235
2236 /* look up gpio for ddc, hpd */
2237 + ddc_bus.valid = false;
2238 + hpd.hpd = RADEON_HPD_NONE;
2239 if ((le16_to_cpu(path->usDeviceTag) &
2240 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
2241 for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
2242 @@ -585,9 +587,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
2243 break;
2244 }
2245 }
2246 - } else {
2247 - hpd.hpd = RADEON_HPD_NONE;
2248 - ddc_bus.valid = false;
2249 }
2250
2251 /* needed for aux chan transactions */
2252 @@ -1174,7 +1173,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
2253 lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
2254 le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
2255 lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
2256 - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
2257 + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
2258 lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
2259 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
2260 lvds->panel_pwr_delay =
2261 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2262 index 7b629e3..ed6a724 100644
2263 --- a/drivers/gpu/drm/radeon/radeon_device.c
2264 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2265 @@ -748,6 +748,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
2266 /* evict remaining vram memory */
2267 radeon_bo_evict_vram(rdev);
2268
2269 + radeon_agp_suspend(rdev);
2270 +
2271 pci_save_state(dev->pdev);
2272 if (state.event == PM_EVENT_SUSPEND) {
2273 /* Shut down the device */
2274 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
2275 index bb1c122..c2848e0 100644
2276 --- a/drivers/gpu/drm/radeon/radeon_display.c
2277 +++ b/drivers/gpu/drm/radeon/radeon_display.c
2278 @@ -978,8 +978,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
2279 /* set display priority to high for r3xx, rv515 chips
2280 * this avoids flickering due to underflow to the
2281 * display controllers during heavy acceleration.
2282 + * Don't force high on rs4xx igp chips as it seems to
2283 + * affect the sound card. See kernel bug 15982.
2284 */
2285 - if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
2286 + if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
2287 + !(rdev->flags & RADEON_IS_IGP))
2288 rdev->disp_priority = 2;
2289 else
2290 rdev->disp_priority = 0;
2291 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
2292 index cc5316d..b3ba44c 100644
2293 --- a/drivers/gpu/drm/radeon/radeon_state.c
2294 +++ b/drivers/gpu/drm/radeon/radeon_state.c
2295 @@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
2296 flags |= RADEON_FRONT;
2297 }
2298 if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
2299 - if (!dev_priv->have_z_offset)
2300 + if (!dev_priv->have_z_offset) {
2301 printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
2302 - flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
2303 + flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
2304 + }
2305 }
2306
2307 if (flags & (RADEON_FRONT | RADEON_BACK)) {
2308 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2309 index 143e788..0010efa 100644
2310 --- a/drivers/hid/hid-core.c
2311 +++ b/drivers/hid/hid-core.c
2312 @@ -1305,6 +1305,7 @@ static const struct hid_device_id hid_blacklist[] = {
2313 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
2314 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
2315 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
2316 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
2317 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
2318 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
2319 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
2320 diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
2321 index 62416e6..3975e03 100644
2322 --- a/drivers/hid/hid-gyration.c
2323 +++ b/drivers/hid/hid-gyration.c
2324 @@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
2325 static const struct hid_device_id gyration_devices[] = {
2326 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
2327 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
2328 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
2329 { }
2330 };
2331 MODULE_DEVICE_TABLE(hid, gyration_devices);
2332 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2333 index 09d2764..b681cbf 100644
2334 --- a/drivers/hid/hid-ids.h
2335 +++ b/drivers/hid/hid-ids.h
2336 @@ -267,6 +267,7 @@
2337 #define USB_VENDOR_ID_GYRATION 0x0c16
2338 #define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
2339 #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
2340 +#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
2341
2342 #define USB_VENDOR_ID_HAPP 0x078b
2343 #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
2344 diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
2345 index 65c232a..21d201b 100644
2346 --- a/drivers/hwmon/ltc4245.c
2347 +++ b/drivers/hwmon/ltc4245.c
2348 @@ -45,9 +45,7 @@ enum ltc4245_cmd {
2349 LTC4245_VEEIN = 0x19,
2350 LTC4245_VEESENSE = 0x1a,
2351 LTC4245_VEEOUT = 0x1b,
2352 - LTC4245_GPIOADC1 = 0x1c,
2353 - LTC4245_GPIOADC2 = 0x1d,
2354 - LTC4245_GPIOADC3 = 0x1e,
2355 + LTC4245_GPIOADC = 0x1c,
2356 };
2357
2358 struct ltc4245_data {
2359 @@ -61,7 +59,7 @@ struct ltc4245_data {
2360 u8 cregs[0x08];
2361
2362 /* Voltage registers */
2363 - u8 vregs[0x0f];
2364 + u8 vregs[0x0d];
2365 };
2366
2367 static struct ltc4245_data *ltc4245_update_device(struct device *dev)
2368 @@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
2369 data->cregs[i] = val;
2370 }
2371
2372 - /* Read voltage registers -- 0x10 to 0x1f */
2373 + /* Read voltage registers -- 0x10 to 0x1c */
2374 for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
2375 val = i2c_smbus_read_byte_data(client, i+0x10);
2376 if (unlikely(val < 0))
2377 @@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
2378 case LTC4245_VEEOUT:
2379 voltage = regval * -55;
2380 break;
2381 - case LTC4245_GPIOADC1:
2382 - case LTC4245_GPIOADC2:
2383 - case LTC4245_GPIOADC3:
2384 + case LTC4245_GPIOADC:
2385 voltage = regval * 10;
2386 break;
2387 default:
2388 @@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
2389 LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
2390
2391 /* GPIO voltages */
2392 -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1);
2393 -LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
2394 -LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
2395 +LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
2396
2397 /* Power Consumption (virtual) */
2398 LTC4245_POWER(power1_input, LTC4245_12VSENSE);
2399 @@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
2400 &sensor_dev_attr_in8_min_alarm.dev_attr.attr,
2401
2402 &sensor_dev_attr_in9_input.dev_attr.attr,
2403 - &sensor_dev_attr_in10_input.dev_attr.attr,
2404 - &sensor_dev_attr_in11_input.dev_attr.attr,
2405
2406 &sensor_dev_attr_power1_input.dev_attr.attr,
2407 &sensor_dev_attr_power2_input.dev_attr.attr,
2408 diff --git a/drivers/md/linear.c b/drivers/md/linear.c
2409 index 09437e9..0a1042b 100644
2410 --- a/drivers/md/linear.c
2411 +++ b/drivers/md/linear.c
2412 @@ -282,6 +282,7 @@ static int linear_stop (mddev_t *mddev)
2413 rcu_barrier();
2414 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2415 kfree(conf);
2416 + mddev->private = NULL;
2417
2418 return 0;
2419 }
2420 diff --git a/drivers/md/md.c b/drivers/md/md.c
2421 index cefd63d..336792e 100644
2422 --- a/drivers/md/md.c
2423 +++ b/drivers/md/md.c
2424 @@ -508,9 +508,36 @@ static inline int mddev_trylock(mddev_t * mddev)
2425 return mutex_trylock(&mddev->reconfig_mutex);
2426 }
2427
2428 -static inline void mddev_unlock(mddev_t * mddev)
2429 -{
2430 - mutex_unlock(&mddev->reconfig_mutex);
2431 +static struct attribute_group md_redundancy_group;
2432 +
2433 +static void mddev_unlock(mddev_t * mddev)
2434 +{
2435 + if (mddev->to_remove) {
2436 + /* These cannot be removed under reconfig_mutex as
2437 + * an access to the files will try to take reconfig_mutex
2438 + * while holding the file unremovable, which leads to
2439 + * a deadlock.
2440 + * So hold open_mutex instead - we are allowed to take
2441 + * it while holding reconfig_mutex, and md_run can
2442 + * use it to wait for the remove to complete.
2443 + */
2444 + struct attribute_group *to_remove = mddev->to_remove;
2445 + mddev->to_remove = NULL;
2446 + mutex_lock(&mddev->open_mutex);
2447 + mutex_unlock(&mddev->reconfig_mutex);
2448 +
2449 + if (to_remove != &md_redundancy_group)
2450 + sysfs_remove_group(&mddev->kobj, to_remove);
2451 + if (mddev->pers == NULL ||
2452 + mddev->pers->sync_request == NULL) {
2453 + sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2454 + if (mddev->sysfs_action)
2455 + sysfs_put(mddev->sysfs_action);
2456 + mddev->sysfs_action = NULL;
2457 + }
2458 + mutex_unlock(&mddev->open_mutex);
2459 + } else
2460 + mutex_unlock(&mddev->reconfig_mutex);
2461
2462 md_wakeup_thread(mddev->thread);
2463 }
2464 @@ -2980,6 +3007,23 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2465 /* Looks like we have a winner */
2466 mddev_suspend(mddev);
2467 mddev->pers->stop(mddev);
2468 +
2469 + if (mddev->pers->sync_request == NULL &&
2470 + pers->sync_request != NULL) {
2471 + /* need to add the md_redundancy_group */
2472 + if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
2473 + printk(KERN_WARNING
2474 + "md: cannot register extra attributes for %s\n",
2475 + mdname(mddev));
2476 + mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
2477 + }
2478 + if (mddev->pers->sync_request != NULL &&
2479 + pers->sync_request == NULL) {
2480 + /* need to remove the md_redundancy_group */
2481 + if (mddev->to_remove == NULL)
2482 + mddev->to_remove = &md_redundancy_group;
2483 + }
2484 +
2485 module_put(mddev->pers->owner);
2486 /* Invalidate devices that are now superfluous */
2487 list_for_each_entry(rdev, &mddev->disks, same_set)
2488 @@ -4082,15 +4126,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
2489 {
2490 mddev_t *mddev = container_of(ws, mddev_t, del_work);
2491
2492 - if (mddev->private) {
2493 - sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2494 - if (mddev->private != (void*)1)
2495 - sysfs_remove_group(&mddev->kobj, mddev->private);
2496 - if (mddev->sysfs_action)
2497 - sysfs_put(mddev->sysfs_action);
2498 - mddev->sysfs_action = NULL;
2499 - mddev->private = NULL;
2500 - }
2501 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
2502 kobject_del(&mddev->kobj);
2503 kobject_put(&mddev->kobj);
2504 @@ -4248,6 +4283,13 @@ static int do_md_run(mddev_t * mddev)
2505 if (mddev->pers)
2506 return -EBUSY;
2507
2508 + /* These two calls synchronise us with the
2509 + * sysfs_remove_group calls in mddev_unlock,
2510 + * so they must have completed.
2511 + */
2512 + mutex_lock(&mddev->open_mutex);
2513 + mutex_unlock(&mddev->open_mutex);
2514 +
2515 /*
2516 * Analyze all RAID superblock(s)
2517 */
2518 @@ -4536,8 +4578,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
2519 mddev->queue->unplug_fn = NULL;
2520 mddev->queue->backing_dev_info.congested_fn = NULL;
2521 module_put(mddev->pers->owner);
2522 - if (mddev->pers->sync_request && mddev->private == NULL)
2523 - mddev->private = (void*)1;
2524 + if (mddev->pers->sync_request && mddev->to_remove == NULL)
2525 + mddev->to_remove = &md_redundancy_group;
2526 mddev->pers = NULL;
2527 /* tell userspace to handle 'inactive' */
2528 sysfs_notify_dirent(mddev->sysfs_state);
2529 @@ -5496,6 +5538,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2530 int err = 0;
2531 void __user *argp = (void __user *)arg;
2532 mddev_t *mddev = NULL;
2533 + int ro;
2534
2535 if (!capable(CAP_SYS_ADMIN))
2536 return -EACCES;
2537 @@ -5631,6 +5674,34 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2538 err = do_md_stop(mddev, 1, 1);
2539 goto done_unlock;
2540
2541 + case BLKROSET:
2542 + if (get_user(ro, (int __user *)(arg))) {
2543 + err = -EFAULT;
2544 + goto done_unlock;
2545 + }
2546 + err = -EINVAL;
2547 +
2548 + /* if the bdev is going readonly the value of mddev->ro
2549 + * does not matter, no writes are coming
2550 + */
2551 + if (ro)
2552 + goto done_unlock;
2553 +
2554 + /* are we are already prepared for writes? */
2555 + if (mddev->ro != 1)
2556 + goto done_unlock;
2557 +
2558 + /* transitioning to readauto need only happen for
2559 + * arrays that call md_write_start
2560 + */
2561 + if (mddev->pers) {
2562 + err = restart_array(mddev);
2563 + if (err == 0) {
2564 + mddev->ro = 2;
2565 + set_disk_ro(mddev->gendisk, 0);
2566 + }
2567 + }
2568 + goto done_unlock;
2569 }
2570
2571 /*
2572 diff --git a/drivers/md/md.h b/drivers/md/md.h
2573 index 8e4c75c..722f5df 100644
2574 --- a/drivers/md/md.h
2575 +++ b/drivers/md/md.h
2576 @@ -305,6 +305,7 @@ struct mddev_s
2577 atomic_t max_corr_read_errors; /* max read retries */
2578 struct list_head all_mddevs;
2579
2580 + struct attribute_group *to_remove;
2581 /* Generic barrier handling.
2582 * If there is a pending barrier request, all other
2583 * writes are blocked while the devices are flushed.
2584 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2585 index e59b10e..84d3bf0 100644
2586 --- a/drivers/md/raid1.c
2587 +++ b/drivers/md/raid1.c
2588 @@ -418,7 +418,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
2589 */
2590 static int read_balance(conf_t *conf, r1bio_t *r1_bio)
2591 {
2592 - const unsigned long this_sector = r1_bio->sector;
2593 + const sector_t this_sector = r1_bio->sector;
2594 int new_disk = conf->last_used, disk = new_disk;
2595 int wonly_disk = -1;
2596 const int sectors = r1_bio->sectors;
2597 @@ -434,7 +434,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
2598 retry:
2599 if (conf->mddev->recovery_cp < MaxSector &&
2600 (this_sector + sectors >= conf->next_resync)) {
2601 - /* Choose the first operation device, for consistancy */
2602 + /* Choose the first operational device, for consistency */
2603 new_disk = 0;
2604
2605 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
2606 @@ -912,9 +912,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
2607 if (test_bit(Faulty, &rdev->flags)) {
2608 rdev_dec_pending(rdev, mddev);
2609 r1_bio->bios[i] = NULL;
2610 - } else
2611 + } else {
2612 r1_bio->bios[i] = bio;
2613 - targets++;
2614 + targets++;
2615 + }
2616 } else
2617 r1_bio->bios[i] = NULL;
2618 }
2619 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2620 index e2766d8..ad945cc 100644
2621 --- a/drivers/md/raid10.c
2622 +++ b/drivers/md/raid10.c
2623 @@ -494,7 +494,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
2624 */
2625 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
2626 {
2627 - const unsigned long this_sector = r10_bio->sector;
2628 + const sector_t this_sector = r10_bio->sector;
2629 int disk, slot, nslot;
2630 const int sectors = r10_bio->sectors;
2631 sector_t new_distance, current_distance;
2632 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2633 index 15348c3..6af0a6d 100644
2634 --- a/drivers/md/raid5.c
2635 +++ b/drivers/md/raid5.c
2636 @@ -5087,7 +5087,9 @@ static int run(mddev_t *mddev)
2637 }
2638
2639 /* Ok, everything is just fine now */
2640 - if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
2641 + if (mddev->to_remove == &raid5_attrs_group)
2642 + mddev->to_remove = NULL;
2643 + else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
2644 printk(KERN_WARNING
2645 "raid5: failed to create sysfs attributes for %s\n",
2646 mdname(mddev));
2647 @@ -5134,7 +5136,8 @@ static int stop(mddev_t *mddev)
2648 mddev->queue->backing_dev_info.congested_fn = NULL;
2649 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2650 free_conf(conf);
2651 - mddev->private = &raid5_attrs_group;
2652 + mddev->private = NULL;
2653 + mddev->to_remove = &raid5_attrs_group;
2654 return 0;
2655 }
2656
2657 diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
2658 index 6d3850b..2194da5 100644
2659 --- a/drivers/media/video/uvc/uvc_ctrl.c
2660 +++ b/drivers/media/video/uvc/uvc_ctrl.c
2661 @@ -1047,6 +1047,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
2662 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
2663 step = mapping->get(mapping, UVC_GET_RES,
2664 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
2665 + if (step == 0)
2666 + step = 1;
2667
2668 xctrl->value = min + (xctrl->value - min + step/2) / step * step;
2669 xctrl->value = clamp(xctrl->value, min, max);
2670 diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
2671 index e7161c4..ad8fb09 100644
2672 --- a/drivers/misc/vmware_balloon.c
2673 +++ b/drivers/misc/vmware_balloon.c
2674 @@ -45,7 +45,7 @@
2675
2676 MODULE_AUTHOR("VMware, Inc.");
2677 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
2678 -MODULE_VERSION("1.2.1.0-K");
2679 +MODULE_VERSION("1.2.1.1-k");
2680 MODULE_ALIAS("dmi:*:svnVMware*:*");
2681 MODULE_ALIAS("vmware_vmmemctl");
2682 MODULE_LICENSE("GPL");
2683 @@ -101,6 +101,8 @@ MODULE_LICENSE("GPL");
2684 /* Maximum number of page allocations without yielding processor */
2685 #define VMW_BALLOON_YIELD_THRESHOLD 1024
2686
2687 +/* Maximum number of refused pages we accumulate during inflation cycle */
2688 +#define VMW_BALLOON_MAX_REFUSED 16
2689
2690 /*
2691 * Hypervisor communication port definitions.
2692 @@ -183,6 +185,7 @@ struct vmballoon {
2693
2694 /* transient list of non-balloonable pages */
2695 struct list_head refused_pages;
2696 + unsigned int n_refused_pages;
2697
2698 /* balloon size in pages */
2699 unsigned int size;
2700 @@ -428,14 +431,21 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
2701 /* inform monitor */
2702 locked = vmballoon_send_lock_page(b, page_to_pfn(page));
2703 if (!locked) {
2704 + STATS_INC(b->stats.refused_alloc);
2705 +
2706 if (b->reset_required) {
2707 __free_page(page);
2708 return -EIO;
2709 }
2710
2711 - /* place on list of non-balloonable pages, retry allocation */
2712 + /*
2713 + * Place page on the list of non-balloonable pages
2714 + * and retry allocation, unless we already accumulated
2715 + * too many of them, in which case take a breather.
2716 + */
2717 list_add(&page->lru, &b->refused_pages);
2718 - STATS_INC(b->stats.refused_alloc);
2719 + if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
2720 + return -EIO;
2721 }
2722 } while (!locked);
2723
2724 @@ -483,6 +493,8 @@ static void vmballoon_release_refused_pages(struct vmballoon *b)
2725 __free_page(page);
2726 STATS_INC(b->stats.refused_free);
2727 }
2728 +
2729 + b->n_refused_pages = 0;
2730 }
2731
2732 /*
2733 diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
2734 index 2c712af..48a1dbf 100644
2735 --- a/drivers/net/arcnet/com20020-pci.c
2736 +++ b/drivers/net/arcnet/com20020-pci.c
2737 @@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
2738 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2739 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2740 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2741 - { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2742 - { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2743 + { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
2744 + { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
2745 { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2746 { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
2747 {0,}
2748 diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
2749 index 145b1a7..dd70d0c 100644
2750 --- a/drivers/net/can/sja1000/sja1000.c
2751 +++ b/drivers/net/can/sja1000/sja1000.c
2752 @@ -84,6 +84,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
2753 .brp_inc = 1,
2754 };
2755
2756 +static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
2757 +{
2758 + unsigned long flags;
2759 +
2760 + /*
2761 + * The command register needs some locking and time to settle
2762 + * the write_reg() operation - especially on SMP systems.
2763 + */
2764 + spin_lock_irqsave(&priv->cmdreg_lock, flags);
2765 + priv->write_reg(priv, REG_CMR, val);
2766 + priv->read_reg(priv, REG_SR);
2767 + spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
2768 +}
2769 +
2770 static int sja1000_probe_chip(struct net_device *dev)
2771 {
2772 struct sja1000_priv *priv = netdev_priv(dev);
2773 @@ -297,7 +311,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
2774
2775 can_put_echo_skb(skb, dev, 0);
2776
2777 - priv->write_reg(priv, REG_CMR, CMD_TR);
2778 + sja1000_write_cmdreg(priv, CMD_TR);
2779
2780 return NETDEV_TX_OK;
2781 }
2782 @@ -346,7 +360,7 @@ static void sja1000_rx(struct net_device *dev)
2783 cf->can_id = id;
2784
2785 /* release receive buffer */
2786 - priv->write_reg(priv, REG_CMR, CMD_RRB);
2787 + sja1000_write_cmdreg(priv, CMD_RRB);
2788
2789 netif_rx(skb);
2790
2791 @@ -374,7 +388,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
2792 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
2793 stats->rx_over_errors++;
2794 stats->rx_errors++;
2795 - priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */
2796 + sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
2797 }
2798
2799 if (isrc & IRQ_EI) {
2800 diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
2801 index 97a622b..de8e778 100644
2802 --- a/drivers/net/can/sja1000/sja1000.h
2803 +++ b/drivers/net/can/sja1000/sja1000.h
2804 @@ -167,6 +167,7 @@ struct sja1000_priv {
2805
2806 void __iomem *reg_base; /* ioremap'ed address to registers */
2807 unsigned long irq_flags; /* for request_irq() */
2808 + spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
2809
2810 u16 flags; /* custom mode flags */
2811 u8 ocr; /* output control register */
2812 diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
2813 index 57288ca..ef62f17 100644
2814 --- a/drivers/net/mlx4/icm.c
2815 +++ b/drivers/net/mlx4/icm.c
2816 @@ -175,9 +175,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
2817
2818 if (chunk->nsg <= 0)
2819 goto fail;
2820 + }
2821
2822 + if (chunk->npages == MLX4_ICM_CHUNK_LEN)
2823 chunk = NULL;
2824 - }
2825
2826 npages -= 1 << cur_order;
2827 } else {
2828 diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
2829 index 0a1d4c2..06f1f3c 100644
2830 --- a/drivers/net/wireless/ath/ar9170/hw.h
2831 +++ b/drivers/net/wireless/ath/ar9170/hw.h
2832 @@ -425,5 +425,6 @@ enum ar9170_txq {
2833
2834 #define AR9170_TXQ_DEPTH 32
2835 #define AR9170_TX_MAX_PENDING 128
2836 +#define AR9170_RX_STREAM_MAX_SIZE 65535
2837
2838 #endif /* __AR9170_HW_H */
2839 diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
2840 index c536929..144db02 100644
2841 --- a/drivers/net/wireless/ath/ar9170/main.c
2842 +++ b/drivers/net/wireless/ath/ar9170/main.c
2843 @@ -2516,7 +2516,7 @@ void *ar9170_alloc(size_t priv_size)
2844 * tends to split the streams into separate rx descriptors.
2845 */
2846
2847 - skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
2848 + skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
2849 if (!skb)
2850 goto err_nomem;
2851
2852 diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
2853 index e1c2fca..7bae7fd 100644
2854 --- a/drivers/net/wireless/ath/ar9170/usb.c
2855 +++ b/drivers/net/wireless/ath/ar9170/usb.c
2856 @@ -67,18 +67,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
2857 { USB_DEVICE(0x0cf3, 0x1001) },
2858 /* TP-Link TL-WN821N v2 */
2859 { USB_DEVICE(0x0cf3, 0x1002) },
2860 + /* 3Com Dual Band 802.11n USB Adapter */
2861 + { USB_DEVICE(0x0cf3, 0x1010) },
2862 + /* H3C Dual Band 802.11n USB Adapter */
2863 + { USB_DEVICE(0x0cf3, 0x1011) },
2864 /* Cace Airpcap NX */
2865 { USB_DEVICE(0xcace, 0x0300) },
2866 /* D-Link DWA 160 A1 */
2867 { USB_DEVICE(0x07d1, 0x3c10) },
2868 /* D-Link DWA 160 A2 */
2869 { USB_DEVICE(0x07d1, 0x3a09) },
2870 + /* Netgear WNA1000 */
2871 + { USB_DEVICE(0x0846, 0x9040) },
2872 /* Netgear WNDA3100 */
2873 { USB_DEVICE(0x0846, 0x9010) },
2874 /* Netgear WN111 v2 */
2875 { USB_DEVICE(0x0846, 0x9001) },
2876 /* Zydas ZD1221 */
2877 { USB_DEVICE(0x0ace, 0x1221) },
2878 + /* Proxim ORiNOCO 802.11n USB */
2879 + { USB_DEVICE(0x1435, 0x0804) },
2880 + /* WNC Generic 11n USB Dongle */
2881 + { USB_DEVICE(0x1435, 0x0326) },
2882 /* ZyXEL NWD271N */
2883 { USB_DEVICE(0x0586, 0x3417) },
2884 /* Z-Com UB81 BG */
2885 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
2886 index 3abbe75..ea90997 100644
2887 --- a/drivers/net/wireless/ath/ath5k/base.c
2888 +++ b/drivers/net/wireless/ath/ath5k/base.c
2889 @@ -1211,6 +1211,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2890 struct ath5k_hw *ah = sc->ah;
2891 struct sk_buff *skb = bf->skb;
2892 struct ath5k_desc *ds;
2893 + int ret;
2894
2895 if (!skb) {
2896 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
2897 @@ -1237,9 +1238,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2898 ds = bf->desc;
2899 ds->ds_link = bf->daddr; /* link to self */
2900 ds->ds_data = bf->skbaddr;
2901 - ah->ah_setup_rx_desc(ah, ds,
2902 - skb_tailroom(skb), /* buffer size */
2903 - 0);
2904 + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
2905 + if (ret)
2906 + return ret;
2907
2908 if (sc->rxlink != NULL)
2909 *sc->rxlink = bf->daddr;
2910 @@ -2993,13 +2994,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2911
2912 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
2913 if (*new_flags & FIF_PROMISC_IN_BSS) {
2914 - rfilt |= AR5K_RX_FILTER_PROM;
2915 __set_bit(ATH_STAT_PROMISC, sc->status);
2916 } else {
2917 __clear_bit(ATH_STAT_PROMISC, sc->status);
2918 }
2919 }
2920
2921 + if (test_bit(ATH_STAT_PROMISC, sc->status))
2922 + rfilt |= AR5K_RX_FILTER_PROM;
2923 +
2924 /* Note, AR5K_RX_FILTER_MCAST is already enabled */
2925 if (*new_flags & FIF_ALLMULTI) {
2926 mfilt[0] = ~0;
2927 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2928 index 78b5711..20d5414 100644
2929 --- a/drivers/net/wireless/ath/ath9k/hw.c
2930 +++ b/drivers/net/wireless/ath/ath9k/hw.c
2931 @@ -1241,7 +1241,7 @@ void ath9k_hw_deinit(struct ath_hw *ah)
2932 {
2933 struct ath_common *common = ath9k_hw_common(ah);
2934
2935 - if (common->state <= ATH_HW_INITIALIZED)
2936 + if (common->state < ATH_HW_INITIALIZED)
2937 goto free_hw;
2938
2939 if (!AR_SREV_9100(ah))
2940 @@ -1252,8 +1252,6 @@ void ath9k_hw_deinit(struct ath_hw *ah)
2941 free_hw:
2942 if (!AR_SREV_9280_10_OR_LATER(ah))
2943 ath9k_hw_rf_free_ext_banks(ah);
2944 - kfree(ah);
2945 - ah = NULL;
2946 }
2947 EXPORT_SYMBOL(ath9k_hw_deinit);
2948
2949 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2950 index 3d4d897..b78308c 100644
2951 --- a/drivers/net/wireless/ath/ath9k/init.c
2952 +++ b/drivers/net/wireless/ath/ath9k/init.c
2953 @@ -760,6 +760,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
2954
2955 tasklet_kill(&sc->intr_tq);
2956 tasklet_kill(&sc->bcon_tasklet);
2957 +
2958 + kfree(sc->sc_ah);
2959 + sc->sc_ah = NULL;
2960 }
2961
2962 void ath9k_deinit_device(struct ath_softc *sc)
2963 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2964 index 1460116..d3ef2a9 100644
2965 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2966 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
2967 @@ -2077,10 +2077,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2968 }
2969 /* Else we have enough samples; calculate estimate of
2970 * actual average throughput */
2971 -
2972 - /* Sanity-check TPT calculations */
2973 - BUG_ON(window->average_tpt != ((window->success_ratio *
2974 - tbl->expected_tpt[index] + 64) / 128));
2975 + if (window->average_tpt != ((window->success_ratio *
2976 + tbl->expected_tpt[index] + 64) / 128)) {
2977 + IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2978 + window->average_tpt = ((window->success_ratio *
2979 + tbl->expected_tpt[index] + 64) / 128);
2980 + }
2981
2982 /* If we are searching for better modulation mode, check success. */
2983 if (lq_sta->search_better_tbl &&
2984 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
2985 index 741e65e..661e36b 100644
2986 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
2987 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
2988 @@ -561,6 +561,11 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
2989
2990 mutex_lock(&priv->mutex);
2991
2992 + if (priv->is_internal_short_scan == true) {
2993 + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
2994 + goto unlock;
2995 + }
2996 +
2997 if (!iwl_is_ready_rf(priv)) {
2998 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
2999 goto unlock;
3000 @@ -958,17 +963,27 @@ void iwl_bg_scan_completed(struct work_struct *work)
3001 {
3002 struct iwl_priv *priv =
3003 container_of(work, struct iwl_priv, scan_completed);
3004 + bool internal = false;
3005
3006 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
3007
3008 cancel_delayed_work(&priv->scan_check);
3009
3010 - if (!priv->is_internal_short_scan)
3011 - ieee80211_scan_completed(priv->hw, false);
3012 - else {
3013 + mutex_lock(&priv->mutex);
3014 + if (priv->is_internal_short_scan) {
3015 priv->is_internal_short_scan = false;
3016 IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
3017 + internal = true;
3018 }
3019 + mutex_unlock(&priv->mutex);
3020 +
3021 + /*
3022 + * Do not hold mutex here since this will cause mac80211 to call
3023 + * into driver again into functions that will attempt to take
3024 + * mutex.
3025 + */
3026 + if (!internal)
3027 + ieee80211_scan_completed(priv->hw, false);
3028
3029 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3030 return;
3031 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
3032 index 8dd0c03..c243df7 100644
3033 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
3034 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
3035 @@ -1198,6 +1198,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
3036 struct ieee80211_sta *sta;
3037 struct iwl_station_priv *sta_priv;
3038
3039 + rcu_read_lock();
3040 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
3041 if (sta) {
3042 sta_priv = (void *)sta->drv_priv;
3043 @@ -1206,6 +1207,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
3044 atomic_dec_return(&sta_priv->pending_frames) == 0)
3045 ieee80211_sta_block_awake(priv->hw, sta, false);
3046 }
3047 + rcu_read_unlock();
3048
3049 ieee80211_tx_status_irqsafe(priv->hw, skb);
3050 }
3051 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
3052 index 743a6c6..186dc71 100644
3053 --- a/drivers/net/wireless/p54/p54usb.c
3054 +++ b/drivers/net/wireless/p54/p54usb.c
3055 @@ -80,6 +80,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
3056 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
3057 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
3058 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
3059 + {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
3060 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
3061 {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
3062 {}
3063 diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
3064 index 2131a44..de632ec 100644
3065 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
3066 +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
3067 @@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
3068 info->flags |= IEEE80211_TX_STAT_ACK;
3069
3070 info->status.rates[0].count = (flags & 0xFF) + 1;
3071 + info->status.rates[1].idx = -1;
3072
3073 ieee80211_tx_status_irqsafe(dev, skb);
3074 if (ring->entries - skb_queue_len(&ring->queue) == 2)
3075 diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
3076 index 9423f22..d74b89b 100644
3077 --- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
3078 +++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
3079 @@ -160,6 +160,7 @@ disable:
3080 sdio_disable_func(func);
3081 release:
3082 sdio_release_host(func);
3083 + wl1251_free_hw(wl);
3084 return ret;
3085 }
3086
3087 diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
3088 index 166b67e..de82183 100644
3089 --- a/drivers/oprofile/cpu_buffer.c
3090 +++ b/drivers/oprofile/cpu_buffer.c
3091 @@ -30,23 +30,7 @@
3092
3093 #define OP_BUFFER_FLAGS 0
3094
3095 -/*
3096 - * Read and write access is using spin locking. Thus, writing to the
3097 - * buffer by NMI handler (x86) could occur also during critical
3098 - * sections when reading the buffer. To avoid this, there are 2
3099 - * buffers for independent read and write access. Read access is in
3100 - * process context only, write access only in the NMI handler. If the
3101 - * read buffer runs empty, both buffers are swapped atomically. There
3102 - * is potentially a small window during swapping where the buffers are
3103 - * disabled and samples could be lost.
3104 - *
3105 - * Using 2 buffers is a little bit overhead, but the solution is clear
3106 - * and does not require changes in the ring buffer implementation. It
3107 - * can be changed to a single buffer solution when the ring buffer
3108 - * access is implemented as non-locking atomic code.
3109 - */
3110 -static struct ring_buffer *op_ring_buffer_read;
3111 -static struct ring_buffer *op_ring_buffer_write;
3112 +static struct ring_buffer *op_ring_buffer;
3113 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
3114
3115 static void wq_sync_buffer(struct work_struct *work);
3116 @@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
3117
3118 void free_cpu_buffers(void)
3119 {
3120 - if (op_ring_buffer_read)
3121 - ring_buffer_free(op_ring_buffer_read);
3122 - op_ring_buffer_read = NULL;
3123 - if (op_ring_buffer_write)
3124 - ring_buffer_free(op_ring_buffer_write);
3125 - op_ring_buffer_write = NULL;
3126 + if (op_ring_buffer)
3127 + ring_buffer_free(op_ring_buffer);
3128 + op_ring_buffer = NULL;
3129 }
3130
3131 #define RB_EVENT_HDR_SIZE 4
3132 @@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
3133 unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
3134 RB_EVENT_HDR_SIZE);
3135
3136 - op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
3137 - if (!op_ring_buffer_read)
3138 - goto fail;
3139 - op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
3140 - if (!op_ring_buffer_write)
3141 + op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
3142 + if (!op_ring_buffer)
3143 goto fail;
3144
3145 for_each_possible_cpu(i) {
3146 @@ -162,16 +140,11 @@ struct op_sample
3147 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
3148 {
3149 entry->event = ring_buffer_lock_reserve
3150 - (op_ring_buffer_write, sizeof(struct op_sample) +
3151 + (op_ring_buffer, sizeof(struct op_sample) +
3152 size * sizeof(entry->sample->data[0]));
3153 - if (entry->event)
3154 - entry->sample = ring_buffer_event_data(entry->event);
3155 - else
3156 - entry->sample = NULL;
3157 -
3158 - if (!entry->sample)
3159 + if (!entry->event)
3160 return NULL;
3161 -
3162 + entry->sample = ring_buffer_event_data(entry->event);
3163 entry->size = size;
3164 entry->data = entry->sample->data;
3165
3166 @@ -180,25 +153,16 @@ struct op_sample
3167
3168 int op_cpu_buffer_write_commit(struct op_entry *entry)
3169 {
3170 - return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
3171 + return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
3172 }
3173
3174 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
3175 {
3176 struct ring_buffer_event *e;
3177 - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
3178 - if (e)
3179 - goto event;
3180 - if (ring_buffer_swap_cpu(op_ring_buffer_read,
3181 - op_ring_buffer_write,
3182 - cpu))
3183 + e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
3184 + if (!e)
3185 return NULL;
3186 - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
3187 - if (e)
3188 - goto event;
3189 - return NULL;
3190
3191 -event:
3192 entry->event = e;
3193 entry->sample = ring_buffer_event_data(e);
3194 entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
3195 @@ -209,8 +173,7 @@ event:
3196
3197 unsigned long op_cpu_buffer_entries(int cpu)
3198 {
3199 - return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
3200 - + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
3201 + return ring_buffer_entries_cpu(op_ring_buffer, cpu);
3202 }
3203
3204 static int
3205 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3206 index 27c0e6e..2f2d0ec 100644
3207 --- a/drivers/pci/quirks.c
3208 +++ b/drivers/pci/quirks.c
3209 @@ -1457,7 +1457,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
3210 conf5 &= ~(1 << 24); /* Clear bit 24 */
3211
3212 switch (pdev->device) {
3213 - case PCI_DEVICE_ID_JMICRON_JMB360:
3214 + case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
3215 + case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
3216 /* The controller should be in single function ahci mode */
3217 conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
3218 break;
3219 @@ -1493,12 +1494,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
3220 }
3221 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
3222 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
3223 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
3224 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
3225 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
3226 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
3227 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
3228 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
3229 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
3230 +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
3231 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
3232 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
3233 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
3234 @@ -2127,6 +2130,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
3235 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
3236 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
3237 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
3238 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
3239
3240 /* Go through the list of Hypertransport capabilities and
3241 * return 1 if a HT MSI capability is found and enabled */
3242 @@ -2218,15 +2222,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
3243 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
3244 ht_enable_msi_mapping);
3245
3246 -/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
3247 +/* The P5N32-SLI motherboards from Asus have a problem with msi
3248 * for the MCP55 NIC. It is not yet determined whether the msi problem
3249 * also affects other devices. As for now, turn off msi for this device.
3250 */
3251 static void __devinit nvenet_msi_disable(struct pci_dev *dev)
3252 {
3253 - if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
3254 + if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
3255 + dmi_name_in_vendors("P5N32-E SLI")) {
3256 dev_info(&dev->dev,
3257 - "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
3258 + "Disabling msi for MCP55 NIC on P5N32-SLI\n");
3259 dev->no_msi = 1;
3260 }
3261 }
3262 diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
3263 index 041eee4..6df5dff 100644
3264 --- a/drivers/pcmcia/ds.c
3265 +++ b/drivers/pcmcia/ds.c
3266 @@ -682,6 +682,7 @@ static void pcmcia_requery(struct pcmcia_socket *s)
3267 if (old_funcs != new_funcs) {
3268 /* we need to re-start */
3269 pcmcia_card_remove(s, NULL);
3270 + s->functions = 0;
3271 pcmcia_card_add(s);
3272 }
3273 }
3274 diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
3275 index 83ace27..6bb6cb9 100644
3276 --- a/drivers/pcmcia/yenta_socket.c
3277 +++ b/drivers/pcmcia/yenta_socket.c
3278 @@ -975,7 +975,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
3279 /* probes the PCI interrupt, use only on override functions */
3280 static int yenta_probe_cb_irq(struct yenta_socket *socket)
3281 {
3282 - u8 reg;
3283 + u8 reg = 0;
3284
3285 if (!socket->cb_irq)
3286 return -1;
3287 @@ -989,7 +989,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
3288 }
3289
3290 /* generate interrupt, wait */
3291 - reg = exca_readb(socket, I365_CSCINT);
3292 + if (!socket->dev->irq)
3293 + reg = exca_readb(socket, I365_CSCINT);
3294 exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
3295 cb_writel(socket, CB_SOCKET_EVENT, -1);
3296 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
3297 diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
3298 index 6c3320d..50601d9 100644
3299 --- a/drivers/platform/x86/Kconfig
3300 +++ b/drivers/platform/x86/Kconfig
3301 @@ -390,6 +390,7 @@ config EEEPC_WMI
3302 depends on ACPI_WMI
3303 depends on INPUT
3304 depends on EXPERIMENTAL
3305 + depends on BACKLIGHT_CLASS_DEVICE
3306 select INPUT_SPARSEKMAP
3307 ---help---
3308 Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
3309 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
3310 index e9aa814..aa13875 100644
3311 --- a/drivers/rtc/rtc-cmos.c
3312 +++ b/drivers/rtc/rtc-cmos.c
3313 @@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
3314 }
3315 }
3316
3317 + cmos_rtc.dev = dev;
3318 + dev_set_drvdata(dev, &cmos_rtc);
3319 +
3320 cmos_rtc.rtc = rtc_device_register(driver_name, dev,
3321 &cmos_rtc_ops, THIS_MODULE);
3322 if (IS_ERR(cmos_rtc.rtc)) {
3323 @@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
3324 goto cleanup0;
3325 }
3326
3327 - cmos_rtc.dev = dev;
3328 - dev_set_drvdata(dev, &cmos_rtc);
3329 rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
3330
3331 spin_lock_irq(&rtc_lock);
3332 diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
3333 index 4969b60..3793ea6 100644
3334 --- a/drivers/rtc/rtc-s3c.c
3335 +++ b/drivers/rtc/rtc-s3c.c
3336 @@ -457,8 +457,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
3337 pr_debug("s3c2410_rtc: RTCCON=%02x\n",
3338 readb(s3c_rtc_base + S3C2410_RTCCON));
3339
3340 - s3c_rtc_setfreq(&pdev->dev, 1);
3341 -
3342 device_init_wakeup(&pdev->dev, 1);
3343
3344 /* register RTC and exit */
3345 @@ -475,6 +473,9 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
3346 rtc->max_user_freq = 128;
3347
3348 platform_set_drvdata(pdev, rtc);
3349 +
3350 + s3c_rtc_setfreq(&pdev->dev, 1);
3351 +
3352 return 0;
3353
3354 err_nortc:
3355 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
3356 index 88f7446..8c496b5 100644
3357 --- a/drivers/scsi/libsas/sas_ata.c
3358 +++ b/drivers/scsi/libsas/sas_ata.c
3359 @@ -395,12 +395,13 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
3360 void sas_ata_task_abort(struct sas_task *task)
3361 {
3362 struct ata_queued_cmd *qc = task->uldd_task;
3363 - struct request_queue *q = qc->scsicmd->device->request_queue;
3364 struct completion *waiting;
3365 - unsigned long flags;
3366
3367 /* Bounce SCSI-initiated commands to the SCSI EH */
3368 if (qc->scsicmd) {
3369 + struct request_queue *q = qc->scsicmd->device->request_queue;
3370 + unsigned long flags;
3371 +
3372 spin_lock_irqsave(q->queue_lock, flags);
3373 blk_abort_request(qc->scsicmd->request);
3374 spin_unlock_irqrestore(q->queue_lock, flags);
3375 diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
3376 index 8228350..53849f2 100644
3377 --- a/drivers/scsi/libsas/sas_scsi_host.c
3378 +++ b/drivers/scsi/libsas/sas_scsi_host.c
3379 @@ -1030,8 +1030,6 @@ int __sas_task_abort(struct sas_task *task)
3380 void sas_task_abort(struct sas_task *task)
3381 {
3382 struct scsi_cmnd *sc = task->uldd_task;
3383 - struct request_queue *q = sc->device->request_queue;
3384 - unsigned long flags;
3385
3386 /* Escape for libsas internal commands */
3387 if (!sc) {
3388 @@ -1043,13 +1041,15 @@ void sas_task_abort(struct sas_task *task)
3389
3390 if (dev_is_sata(task->dev)) {
3391 sas_ata_task_abort(task);
3392 - return;
3393 - }
3394 + } else {
3395 + struct request_queue *q = sc->device->request_queue;
3396 + unsigned long flags;
3397
3398 - spin_lock_irqsave(q->queue_lock, flags);
3399 - blk_abort_request(sc->request);
3400 - spin_unlock_irqrestore(q->queue_lock, flags);
3401 - scsi_schedule_eh(sc->device->host);
3402 + spin_lock_irqsave(q->queue_lock, flags);
3403 + blk_abort_request(sc->request);
3404 + spin_unlock_irqrestore(q->queue_lock, flags);
3405 + scsi_schedule_eh(sc->device->host);
3406 + }
3407 }
3408
3409 int sas_slave_alloc(struct scsi_device *scsi_dev)
3410 diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
3411 index 78ed24b..3046386 100644
3412 --- a/drivers/serial/68328serial.c
3413 +++ b/drivers/serial/68328serial.c
3414 @@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg)
3415 for (i = 0; i < ARRAY_SIZE(baud_table); i++)
3416 if (baud_table[i] == n)
3417 break;
3418 - if (i < BAUD_TABLE_SIZE) {
3419 + if (i < ARRAY_SIZE(baud_table)) {
3420 m68328_console_baud = n;
3421 m68328_console_cbaud = 0;
3422 if (i > 15) {
3423 diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c
3424 index 7de60e8..c9366bc 100644
3425 --- a/drivers/staging/batman-adv/proc.c
3426 +++ b/drivers/staging/batman-adv/proc.c
3427 @@ -41,7 +41,7 @@ static int proc_interfaces_read(struct seq_file *seq, void *offset)
3428
3429 rcu_read_lock();
3430 list_for_each_entry_rcu(batman_if, &if_list, list) {
3431 - seq_printf(seq, "[%8s] %s %s \n",
3432 + seq_printf(seq, "[%8s] %s %s\n",
3433 (batman_if->if_active == IF_ACTIVE ?
3434 "active" : "inactive"),
3435 batman_if->dev,
3436 @@ -188,18 +188,18 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
3437 rcu_read_lock();
3438 if (list_empty(&if_list)) {
3439 rcu_read_unlock();
3440 - seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
3441 + seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
3442 goto end;
3443 }
3444
3445 if (((struct batman_if *)if_list.next)->if_active != IF_ACTIVE) {
3446 rcu_read_unlock();
3447 - seq_printf(seq, "BATMAN disabled - primary interface not active \n");
3448 + seq_printf(seq, "BATMAN disabled - primary interface not active\n");
3449 goto end;
3450 }
3451
3452 seq_printf(seq,
3453 - " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s] \n",
3454 + " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s]\n",
3455 "Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
3456 "Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR,
3457 ((struct batman_if *)if_list.next)->dev,
3458 @@ -240,7 +240,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset)
3459 spin_unlock_irqrestore(&orig_hash_lock, flags);
3460
3461 if (batman_count == 0)
3462 - seq_printf(seq, "No batman nodes in range ... \n");
3463 + seq_printf(seq, "No batman nodes in range ...\n");
3464
3465 end:
3466 return 0;
3467 @@ -262,7 +262,7 @@ static int proc_transt_local_read(struct seq_file *seq, void *offset)
3468 rcu_read_lock();
3469 if (list_empty(&if_list)) {
3470 rcu_read_unlock();
3471 - seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
3472 + seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
3473 goto end;
3474 }
3475
3476 @@ -294,7 +294,7 @@ static int proc_transt_global_read(struct seq_file *seq, void *offset)
3477 rcu_read_lock();
3478 if (list_empty(&if_list)) {
3479 rcu_read_unlock();
3480 - seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n");
3481 + seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n");
3482 goto end;
3483 }
3484 rcu_read_unlock();
3485 @@ -350,9 +350,9 @@ static int proc_vis_srv_read(struct seq_file *seq, void *offset)
3486 {
3487 int vis_server = atomic_read(&vis_mode);
3488
3489 - seq_printf(seq, "[%c] client mode (server disabled) \n",
3490 + seq_printf(seq, "[%c] client mode (server disabled)\n",
3491 (vis_server == VIS_TYPE_CLIENT_UPDATE) ? 'x' : ' ');
3492 - seq_printf(seq, "[%c] server mode (server enabled) \n",
3493 + seq_printf(seq, "[%c] server mode (server enabled)\n",
3494 (vis_server == VIS_TYPE_SERVER_SYNC) ? 'x' : ' ');
3495
3496 return 0;
3497 @@ -369,6 +369,8 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
3498 struct vis_info *info;
3499 struct vis_info_entry *entries;
3500 HLIST_HEAD(vis_if_list);
3501 + struct if_list_entry *entry;
3502 + struct hlist_node *pos, *n;
3503 int i;
3504 char tmp_addr_str[ETH_STR_LEN];
3505 unsigned long flags;
3506 @@ -387,17 +389,34 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset)
3507 info = hashit.bucket->data;
3508 entries = (struct vis_info_entry *)
3509 ((char *)info + sizeof(struct vis_info));
3510 - addr_to_string(tmp_addr_str, info->packet.vis_orig);
3511 - seq_printf(seq, "%s,", tmp_addr_str);
3512
3513 for (i = 0; i < info->packet.entries; i++) {
3514 - proc_vis_read_entry(seq, &entries[i], &vis_if_list,
3515 - info->packet.vis_orig);
3516 + if (entries[i].quality == 0)
3517 + continue;
3518 + proc_vis_insert_interface(entries[i].src, &vis_if_list,
3519 + compare_orig(entries[i].src,
3520 + info->packet.vis_orig));
3521 }
3522
3523 - /* add primary/secondary records */
3524 - proc_vis_read_prim_sec(seq, &vis_if_list);
3525 - seq_printf(seq, "\n");
3526 + hlist_for_each_entry(entry, pos, &vis_if_list, list) {
3527 + addr_to_string(tmp_addr_str, entry->addr);
3528 + seq_printf(seq, "%s,", tmp_addr_str);
3529 +
3530 + for (i = 0; i < info->packet.entries; i++)
3531 + proc_vis_read_entry(seq, &entries[i],
3532 + entry->addr, entry->primary);
3533 +
3534 + /* add primary/secondary records */
3535 + if (compare_orig(entry->addr, info->packet.vis_orig))
3536 + proc_vis_read_prim_sec(seq, &vis_if_list);
3537 +
3538 + seq_printf(seq, "\n");
3539 + }
3540 +
3541 + hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
3542 + hlist_del(&entry->list);
3543 + kfree(entry);
3544 + }
3545 }
3546 spin_unlock_irqrestore(&vis_hash_lock, flags);
3547
3548 diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
3549 index fedec1b..28eac7e 100644
3550 --- a/drivers/staging/batman-adv/vis.c
3551 +++ b/drivers/staging/batman-adv/vis.c
3552 @@ -27,24 +27,44 @@
3553 #include "hard-interface.h"
3554 #include "hash.h"
3555
3556 +/* Returns the smallest signed integer in two's complement with the sizeof x */
3557 +#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
3558 +
3559 +/* Checks if a sequence number x is a predecessor/successor of y.
3560 + they handle overflows/underflows and can correctly check for a
3561 + predecessor/successor unless the variable sequence number has grown by
3562 + more then 2**(bitwidth(x)-1)-1.
3563 + This means that for a uint8_t with the maximum value 255, it would think:
3564 + * when adding nothing - it is neither a predecessor nor a successor
3565 + * before adding more than 127 to the starting value - it is a predecessor,
3566 + * when adding 128 - it is neither a predecessor nor a successor,
3567 + * after adding more than 127 to the starting value - it is a successor */
3568 +#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
3569 + _dummy > smallest_signed_int(_dummy); })
3570 +#define seq_after(x, y) seq_before(y, x)
3571 +
3572 struct hashtable_t *vis_hash;
3573 DEFINE_SPINLOCK(vis_hash_lock);
3574 +static DEFINE_SPINLOCK(recv_list_lock);
3575 static struct vis_info *my_vis_info;
3576 static struct list_head send_list; /* always locked with vis_hash_lock */
3577
3578 static void start_vis_timer(void);
3579
3580 /* free the info */
3581 -static void free_info(void *data)
3582 +static void free_info(struct kref *ref)
3583 {
3584 - struct vis_info *info = data;
3585 + struct vis_info *info = container_of(ref, struct vis_info, refcount);
3586 struct recvlist_node *entry, *tmp;
3587 + unsigned long flags;
3588
3589 list_del_init(&info->send_list);
3590 + spin_lock_irqsave(&recv_list_lock, flags);
3591 list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
3592 list_del(&entry->list);
3593 kfree(entry);
3594 }
3595 + spin_unlock_irqrestore(&recv_list_lock, flags);
3596 kfree(info);
3597 }
3598
3599 @@ -82,7 +102,7 @@ static int vis_info_choose(void *data, int size)
3600
3601 /* insert interface to the list of interfaces of one originator, if it
3602 * does not already exist in the list */
3603 -static void proc_vis_insert_interface(const uint8_t *interface,
3604 +void proc_vis_insert_interface(const uint8_t *interface,
3605 struct hlist_head *if_list,
3606 bool primary)
3607 {
3608 @@ -107,38 +127,51 @@ void proc_vis_read_prim_sec(struct seq_file *seq,
3609 struct hlist_head *if_list)
3610 {
3611 struct if_list_entry *entry;
3612 - struct hlist_node *pos, *n;
3613 + struct hlist_node *pos;
3614 char tmp_addr_str[ETH_STR_LEN];
3615
3616 - hlist_for_each_entry_safe(entry, pos, n, if_list, list) {
3617 - if (entry->primary) {
3618 + hlist_for_each_entry(entry, pos, if_list, list) {
3619 + if (entry->primary)
3620 seq_printf(seq, "PRIMARY, ");
3621 - } else {
3622 + else {
3623 addr_to_string(tmp_addr_str, entry->addr);
3624 seq_printf(seq, "SEC %s, ", tmp_addr_str);
3625 }
3626 -
3627 - hlist_del(&entry->list);
3628 - kfree(entry);
3629 }
3630 }
3631
3632 /* read an entry */
3633 void proc_vis_read_entry(struct seq_file *seq,
3634 struct vis_info_entry *entry,
3635 - struct hlist_head *if_list,
3636 - uint8_t *vis_orig)
3637 + uint8_t *src,
3638 + bool primary)
3639 {
3640 char to[40];
3641
3642 addr_to_string(to, entry->dest);
3643 - if (entry->quality == 0) {
3644 - proc_vis_insert_interface(vis_orig, if_list, true);
3645 + if (primary && entry->quality == 0)
3646 seq_printf(seq, "HNA %s, ", to);
3647 - } else {
3648 - proc_vis_insert_interface(entry->src, if_list,
3649 - compare_orig(entry->src, vis_orig));
3650 + else if (compare_orig(entry->src, src))
3651 seq_printf(seq, "TQ %s %d, ", to, entry->quality);
3652 +}
3653 +
3654 +/* add the info packet to the send list, if it was not
3655 + * already linked in. */
3656 +static void send_list_add(struct vis_info *info)
3657 +{
3658 + if (list_empty(&info->send_list)) {
3659 + kref_get(&info->refcount);
3660 + list_add_tail(&info->send_list, &send_list);
3661 + }
3662 +}
3663 +
3664 +/* delete the info packet from the send list, if it was
3665 + * linked in. */
3666 +static void send_list_del(struct vis_info *info)
3667 +{
3668 + if (!list_empty(&info->send_list)) {
3669 + list_del_init(&info->send_list);
3670 + kref_put(&info->refcount, free_info);
3671 }
3672 }
3673
3674 @@ -146,32 +179,41 @@ void proc_vis_read_entry(struct seq_file *seq,
3675 static void recv_list_add(struct list_head *recv_list, char *mac)
3676 {
3677 struct recvlist_node *entry;
3678 + unsigned long flags;
3679 +
3680 entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
3681 if (!entry)
3682 return;
3683
3684 memcpy(entry->mac, mac, ETH_ALEN);
3685 + spin_lock_irqsave(&recv_list_lock, flags);
3686 list_add_tail(&entry->list, recv_list);
3687 + spin_unlock_irqrestore(&recv_list_lock, flags);
3688 }
3689
3690 /* returns 1 if this mac is in the recv_list */
3691 static int recv_list_is_in(struct list_head *recv_list, char *mac)
3692 {
3693 struct recvlist_node *entry;
3694 + unsigned long flags;
3695
3696 + spin_lock_irqsave(&recv_list_lock, flags);
3697 list_for_each_entry(entry, recv_list, list) {
3698 - if (memcmp(entry->mac, mac, ETH_ALEN) == 0)
3699 + if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
3700 + spin_unlock_irqrestore(&recv_list_lock, flags);
3701 return 1;
3702 + }
3703 }
3704 -
3705 + spin_unlock_irqrestore(&recv_list_lock, flags);
3706 return 0;
3707 }
3708
3709 /* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
3710 - * broken.. ). vis hash must be locked outside. is_new is set when the packet
3711 + * broken.. ). vis hash must be locked outside. is_new is set when the packet
3712 * is newer than old entries in the hash. */
3713 static struct vis_info *add_packet(struct vis_packet *vis_packet,
3714 - int vis_info_len, int *is_new)
3715 + int vis_info_len, int *is_new,
3716 + int make_broadcast)
3717 {
3718 struct vis_info *info, *old_info;
3719 struct vis_info search_elem;
3720 @@ -186,7 +228,7 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3721 old_info = hash_find(vis_hash, &search_elem);
3722
3723 if (old_info != NULL) {
3724 - if (vis_packet->seqno - old_info->packet.seqno <= 0) {
3725 + if (!seq_after(vis_packet->seqno, old_info->packet.seqno)) {
3726 if (old_info->packet.seqno == vis_packet->seqno) {
3727 recv_list_add(&old_info->recv_list,
3728 vis_packet->sender_orig);
3729 @@ -198,13 +240,15 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3730 }
3731 /* remove old entry */
3732 hash_remove(vis_hash, old_info);
3733 - free_info(old_info);
3734 + send_list_del(old_info);
3735 + kref_put(&old_info->refcount, free_info);
3736 }
3737
3738 info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC);
3739 if (info == NULL)
3740 return NULL;
3741
3742 + kref_init(&info->refcount);
3743 INIT_LIST_HEAD(&info->send_list);
3744 INIT_LIST_HEAD(&info->recv_list);
3745 info->first_seen = jiffies;
3746 @@ -214,16 +258,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3747 /* initialize and add new packet. */
3748 *is_new = 1;
3749
3750 + /* Make it a broadcast packet, if required */
3751 + if (make_broadcast)
3752 + memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3753 +
3754 /* repair if entries is longer than packet. */
3755 if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
3756 - info->packet.entries = vis_info_len / sizeof(struct vis_info_entry);
3757 + info->packet.entries = vis_info_len /
3758 + sizeof(struct vis_info_entry);
3759
3760 recv_list_add(&info->recv_list, info->packet.sender_orig);
3761
3762 /* try to add it */
3763 if (hash_add(vis_hash, info) < 0) {
3764 /* did not work (for some reason) */
3765 - free_info(info);
3766 + kref_put(&old_info->refcount, free_info);
3767 info = NULL;
3768 }
3769
3770 @@ -234,22 +283,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
3771 void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len)
3772 {
3773 struct vis_info *info;
3774 - int is_new;
3775 + int is_new, make_broadcast;
3776 unsigned long flags;
3777 int vis_server = atomic_read(&vis_mode);
3778
3779 + make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
3780 +
3781 spin_lock_irqsave(&vis_hash_lock, flags);
3782 - info = add_packet(vis_packet, vis_info_len, &is_new);
3783 + info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
3784 if (info == NULL)
3785 goto end;
3786
3787 /* only if we are server ourselves and packet is newer than the one in
3788 * hash.*/
3789 - if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) {
3790 - memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3791 - if (list_empty(&info->send_list))
3792 - list_add_tail(&info->send_list, &send_list);
3793 - }
3794 + if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
3795 + send_list_add(info);
3796 end:
3797 spin_unlock_irqrestore(&vis_hash_lock, flags);
3798 }
3799 @@ -262,31 +310,32 @@ void receive_client_update_packet(struct vis_packet *vis_packet,
3800 int is_new;
3801 unsigned long flags;
3802 int vis_server = atomic_read(&vis_mode);
3803 + int are_target = 0;
3804
3805 /* clients shall not broadcast. */
3806 if (is_bcast(vis_packet->target_orig))
3807 return;
3808
3809 + /* Are we the target for this VIS packet? */
3810 + if (vis_server == VIS_TYPE_SERVER_SYNC &&
3811 + is_my_mac(vis_packet->target_orig))
3812 + are_target = 1;
3813 +
3814 spin_lock_irqsave(&vis_hash_lock, flags);
3815 - info = add_packet(vis_packet, vis_info_len, &is_new);
3816 + info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
3817 if (info == NULL)
3818 goto end;
3819 /* note that outdated packets will be dropped at this point. */
3820
3821
3822 /* send only if we're the target server or ... */
3823 - if (vis_server == VIS_TYPE_SERVER_SYNC &&
3824 - is_my_mac(info->packet.target_orig) &&
3825 - is_new) {
3826 + if (are_target && is_new) {
3827 info->packet.vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
3828 - memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3829 - if (list_empty(&info->send_list))
3830 - list_add_tail(&info->send_list, &send_list);
3831 + send_list_add(info);
3832
3833 /* ... we're not the recipient (and thus need to forward). */
3834 } else if (!is_my_mac(info->packet.target_orig)) {
3835 - if (list_empty(&info->send_list))
3836 - list_add_tail(&info->send_list, &send_list);
3837 + send_list_add(info);
3838 }
3839 end:
3840 spin_unlock_irqrestore(&vis_hash_lock, flags);
3841 @@ -361,14 +410,17 @@ static int generate_vis_packet(void)
3842 while (hash_iterate(orig_hash, &hashit_global)) {
3843 orig_node = hashit_global.bucket->data;
3844 if (orig_node->router != NULL
3845 - && compare_orig(orig_node->router->addr, orig_node->orig)
3846 + && compare_orig(orig_node->router->addr,
3847 + orig_node->orig)
3848 && orig_node->batman_if
3849 && (orig_node->batman_if->if_active == IF_ACTIVE)
3850 && orig_node->router->tq_avg > 0) {
3851
3852 /* fill one entry into buffer. */
3853 entry = &entry_array[info->packet.entries];
3854 - memcpy(entry->src, orig_node->batman_if->net_dev->dev_addr, ETH_ALEN);
3855 + memcpy(entry->src,
3856 + orig_node->batman_if->net_dev->dev_addr,
3857 + ETH_ALEN);
3858 memcpy(entry->dest, orig_node->orig, ETH_ALEN);
3859 entry->quality = orig_node->router->tq_avg;
3860 info->packet.entries++;
3861 @@ -400,6 +452,8 @@ static int generate_vis_packet(void)
3862 return 0;
3863 }
3864
3865 +/* free old vis packets. Must be called with this vis_hash_lock
3866 + * held */
3867 static void purge_vis_packets(void)
3868 {
3869 HASHIT(hashit);
3870 @@ -412,7 +466,8 @@ static void purge_vis_packets(void)
3871 if (time_after(jiffies,
3872 info->first_seen + (VIS_TIMEOUT*HZ)/1000)) {
3873 hash_remove_bucket(vis_hash, &hashit);
3874 - free_info(info);
3875 + send_list_del(info);
3876 + kref_put(&info->refcount, free_info);
3877 }
3878 }
3879 }
3880 @@ -422,6 +477,8 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
3881 HASHIT(hashit);
3882 struct orig_node *orig_node;
3883 unsigned long flags;
3884 + struct batman_if *batman_if;
3885 + uint8_t dstaddr[ETH_ALEN];
3886
3887 spin_lock_irqsave(&orig_hash_lock, flags);
3888
3889 @@ -430,45 +487,56 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
3890 orig_node = hashit.bucket->data;
3891
3892 /* if it's a vis server and reachable, send it. */
3893 - if (orig_node &&
3894 - (orig_node->flags & VIS_SERVER) &&
3895 - orig_node->batman_if &&
3896 - orig_node->router) {
3897 + if ((!orig_node) || (!orig_node->batman_if) ||
3898 + (!orig_node->router))
3899 + continue;
3900 + if (!(orig_node->flags & VIS_SERVER))
3901 + continue;
3902 + /* don't send it if we already received the packet from
3903 + * this node. */
3904 + if (recv_list_is_in(&info->recv_list, orig_node->orig))
3905 + continue;
3906
3907 - /* don't send it if we already received the packet from
3908 - * this node. */
3909 - if (recv_list_is_in(&info->recv_list, orig_node->orig))
3910 - continue;
3911 + memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN);
3912 + batman_if = orig_node->batman_if;
3913 + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
3914 + spin_unlock_irqrestore(&orig_hash_lock, flags);
3915
3916 - memcpy(info->packet.target_orig,
3917 - orig_node->orig, ETH_ALEN);
3918 + send_raw_packet((unsigned char *)&info->packet,
3919 + packet_length, batman_if, dstaddr);
3920 +
3921 + spin_lock_irqsave(&orig_hash_lock, flags);
3922
3923 - send_raw_packet((unsigned char *) &info->packet,
3924 - packet_length,
3925 - orig_node->batman_if,
3926 - orig_node->router->addr);
3927 - }
3928 }
3929 - memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3930 spin_unlock_irqrestore(&orig_hash_lock, flags);
3931 + memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN);
3932 }
3933
3934 static void unicast_vis_packet(struct vis_info *info, int packet_length)
3935 {
3936 struct orig_node *orig_node;
3937 unsigned long flags;
3938 + struct batman_if *batman_if;
3939 + uint8_t dstaddr[ETH_ALEN];
3940
3941 spin_lock_irqsave(&orig_hash_lock, flags);
3942 orig_node = ((struct orig_node *)
3943 hash_find(orig_hash, info->packet.target_orig));
3944
3945 - if ((orig_node != NULL) &&
3946 - (orig_node->batman_if != NULL) &&
3947 - (orig_node->router != NULL)) {
3948 - send_raw_packet((unsigned char *) &info->packet, packet_length,
3949 - orig_node->batman_if,
3950 - orig_node->router->addr);
3951 - }
3952 + if ((!orig_node) || (!orig_node->batman_if) || (!orig_node->router))
3953 + goto out;
3954 +
3955 + /* don't lock while sending the packets ... we therefore
3956 + * copy the required data before sending */
3957 + batman_if = orig_node->batman_if;
3958 + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
3959 + spin_unlock_irqrestore(&orig_hash_lock, flags);
3960 +
3961 + send_raw_packet((unsigned char *)&info->packet,
3962 + packet_length, batman_if, dstaddr);
3963 + return;
3964 +
3965 +out:
3966 spin_unlock_irqrestore(&orig_hash_lock, flags);
3967 }
3968
3969 @@ -502,15 +570,24 @@ static void send_vis_packets(struct work_struct *work)
3970 unsigned long flags;
3971
3972 spin_lock_irqsave(&vis_hash_lock, flags);
3973 +
3974 purge_vis_packets();
3975
3976 - if (generate_vis_packet() == 0)
3977 + if (generate_vis_packet() == 0) {
3978 /* schedule if generation was successful */
3979 - list_add_tail(&my_vis_info->send_list, &send_list);
3980 + send_list_add(my_vis_info);
3981 + }
3982
3983 list_for_each_entry_safe(info, temp, &send_list, send_list) {
3984 - list_del_init(&info->send_list);
3985 +
3986 + kref_get(&info->refcount);
3987 + spin_unlock_irqrestore(&vis_hash_lock, flags);
3988 +
3989 send_vis_packet(info);
3990 +
3991 + spin_lock_irqsave(&vis_hash_lock, flags);
3992 + send_list_del(info);
3993 + kref_put(&info->refcount, free_info);
3994 }
3995 spin_unlock_irqrestore(&vis_hash_lock, flags);
3996 start_vis_timer();
3997 @@ -543,6 +620,7 @@ int vis_init(void)
3998 my_vis_info->first_seen = jiffies - atomic_read(&vis_interval);
3999 INIT_LIST_HEAD(&my_vis_info->recv_list);
4000 INIT_LIST_HEAD(&my_vis_info->send_list);
4001 + kref_init(&my_vis_info->refcount);
4002 my_vis_info->packet.version = COMPAT_VERSION;
4003 my_vis_info->packet.packet_type = BAT_VIS;
4004 my_vis_info->packet.ttl = TTL;
4005 @@ -556,9 +634,9 @@ int vis_init(void)
4006
4007 if (hash_add(vis_hash, my_vis_info) < 0) {
4008 printk(KERN_ERR
4009 - "batman-adv:Can't add own vis packet into hash\n");
4010 - free_info(my_vis_info); /* not in hash, need to remove it
4011 - * manually. */
4012 + "batman-adv:Can't add own vis packet into hash\n");
4013 + /* not in hash, need to remove it manually. */
4014 + kref_put(&my_vis_info->refcount, free_info);
4015 goto err;
4016 }
4017
4018 @@ -572,6 +650,15 @@ err:
4019 return 0;
4020 }
4021
4022 +/* Decrease the reference count on a hash item info */
4023 +static void free_info_ref(void *data)
4024 +{
4025 + struct vis_info *info = data;
4026 +
4027 + send_list_del(info);
4028 + kref_put(&info->refcount, free_info);
4029 +}
4030 +
4031 /* shutdown vis-server */
4032 void vis_quit(void)
4033 {
4034 @@ -583,7 +670,7 @@ void vis_quit(void)
4035
4036 spin_lock_irqsave(&vis_hash_lock, flags);
4037 /* properly remove, kill timers ... */
4038 - hash_delete(vis_hash, free_info);
4039 + hash_delete(vis_hash, free_info_ref);
4040 vis_hash = NULL;
4041 my_vis_info = NULL;
4042 spin_unlock_irqrestore(&vis_hash_lock, flags);
4043 diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
4044 index 0cdafde..a1f92a4 100644
4045 --- a/drivers/staging/batman-adv/vis.h
4046 +++ b/drivers/staging/batman-adv/vis.h
4047 @@ -29,6 +29,7 @@ struct vis_info {
4048 /* list of server-neighbors we received a vis-packet
4049 * from. we should not reply to them. */
4050 struct list_head send_list;
4051 + struct kref refcount;
4052 /* this packet might be part of the vis send queue. */
4053 struct vis_packet packet;
4054 /* vis_info may follow here*/
4055 @@ -48,10 +49,13 @@ struct recvlist_node {
4056 extern struct hashtable_t *vis_hash;
4057 extern spinlock_t vis_hash_lock;
4058
4059 +void proc_vis_insert_interface(const uint8_t *interface,
4060 + struct hlist_head *if_list,
4061 + bool primary);
4062 void proc_vis_read_entry(struct seq_file *seq,
4063 struct vis_info_entry *entry,
4064 - struct hlist_head *if_list,
4065 - uint8_t *vis_orig);
4066 + uint8_t *src,
4067 + bool primary);
4068 void proc_vis_read_prim_sec(struct seq_file *seq,
4069 struct hlist_head *if_list);
4070 void receive_server_sync_packet(struct vis_packet *vis_packet,
4071 diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
4072 index dc4849a..9855608 100644
4073 --- a/drivers/staging/comedi/drivers/ni_mio_cs.c
4074 +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
4075 @@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = {
4076 .adbits = 12,
4077 .ai_fifo_depth = 1024,
4078 .alwaysdither = 0,
4079 - .gainlkup = ai_gain_16,
4080 + .gainlkup = ai_gain_4,
4081 .ai_speed = 5000,
4082 .n_aochan = 2,
4083 .aobits = 12,
4084 diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
4085 index 740db0c..2ffd0fe 100644
4086 --- a/drivers/staging/rt2860/usb_main_dev.c
4087 +++ b/drivers/staging/rt2860/usb_main_dev.c
4088 @@ -98,6 +98,7 @@ struct usb_device_id rtusb_usb_id[] = {
4089 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
4090 {USB_DEVICE(0x7392, 0x7718)},
4091 {USB_DEVICE(0x7392, 0x7717)},
4092 + {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */
4093 {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */
4094 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
4095 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
4096 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
4097 index e40a2e9..fea0e99 100644
4098 --- a/drivers/staging/vt6655/device_main.c
4099 +++ b/drivers/staging/vt6655/device_main.c
4100 @@ -1090,11 +1090,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent)
4101 }
4102 //2008-07-21-01<Add>by MikeLiu
4103 //register wpadev
4104 +#if 0
4105 if(wpa_set_wpadev(pDevice, 1)!=0) {
4106 printk("Fail to Register WPADEV?\n");
4107 unregister_netdev(pDevice->dev);
4108 free_netdev(dev);
4109 }
4110 +#endif
4111 device_print_info(pDevice);
4112 pci_set_drvdata(pcid, pDevice);
4113 return 0;
4114 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
4115 index 5e1a253..3c73add 100644
4116 --- a/drivers/usb/class/cdc-acm.c
4117 +++ b/drivers/usb/class/cdc-acm.c
4118 @@ -1201,7 +1201,7 @@ made_compressed_probe:
4119 if (rcv->urb == NULL) {
4120 dev_dbg(&intf->dev,
4121 "out of memory (read urbs usb_alloc_urb)\n");
4122 - goto alloc_fail7;
4123 + goto alloc_fail6;
4124 }
4125
4126 rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
4127 @@ -1225,7 +1225,7 @@ made_compressed_probe:
4128 if (snd->urb == NULL) {
4129 dev_dbg(&intf->dev,
4130 "out of memory (write urbs usb_alloc_urb)");
4131 - goto alloc_fail7;
4132 + goto alloc_fail8;
4133 }
4134
4135 if (usb_endpoint_xfer_int(epwrite))
4136 @@ -1264,6 +1264,7 @@ made_compressed_probe:
4137 i = device_create_file(&intf->dev,
4138 &dev_attr_iCountryCodeRelDate);
4139 if (i < 0) {
4140 + device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
4141 kfree(acm->country_codes);
4142 goto skip_countries;
4143 }
4144 @@ -1300,6 +1301,7 @@ alloc_fail8:
4145 usb_free_urb(acm->wb[i].urb);
4146 alloc_fail7:
4147 acm_read_buffers_free(acm);
4148 +alloc_fail6:
4149 for (i = 0; i < num_rx_buf; i++)
4150 usb_free_urb(acm->ru[i].urb);
4151 usb_free_urb(acm->ctrlurb);
4152 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
4153 index 2f3dc4c..9d02dc6 100644
4154 --- a/drivers/usb/core/driver.c
4155 +++ b/drivers/usb/core/driver.c
4156 @@ -1322,6 +1322,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
4157
4158 /* For all other calls, take the device back to full power and
4159 * tell the PM core in case it was autosuspended previously.
4160 + * Unbind the interfaces that will need rebinding later.
4161 */
4162 } else {
4163 status = usb_resume_both(udev, msg);
4164 @@ -1330,6 +1331,7 @@ int usb_resume(struct device *dev, pm_message_t msg)
4165 pm_runtime_set_active(dev);
4166 pm_runtime_enable(dev);
4167 udev->last_busy = jiffies;
4168 + do_unbind_rebind(udev, DO_REBIND);
4169 }
4170 }
4171
4172 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4173 index 2f8cedd..b60a14d 100644
4174 --- a/drivers/usb/core/hcd.c
4175 +++ b/drivers/usb/core/hcd.c
4176 @@ -1261,6 +1261,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
4177 *dma_handle = 0;
4178 }
4179
4180 +static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
4181 +{
4182 + enum dma_data_direction dir;
4183 +
4184 + if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
4185 + dma_unmap_single(hcd->self.controller,
4186 + urb->setup_dma,
4187 + sizeof(struct usb_ctrlrequest),
4188 + DMA_TO_DEVICE);
4189 + else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
4190 + hcd_free_coherent(urb->dev->bus,
4191 + &urb->setup_dma,
4192 + (void **) &urb->setup_packet,
4193 + sizeof(struct usb_ctrlrequest),
4194 + DMA_TO_DEVICE);
4195 +
4196 + dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
4197 + if (urb->transfer_flags & URB_DMA_MAP_SG)
4198 + dma_unmap_sg(hcd->self.controller,
4199 + urb->sg->sg,
4200 + urb->num_sgs,
4201 + dir);
4202 + else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
4203 + dma_unmap_page(hcd->self.controller,
4204 + urb->transfer_dma,
4205 + urb->transfer_buffer_length,
4206 + dir);
4207 + else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
4208 + dma_unmap_single(hcd->self.controller,
4209 + urb->transfer_dma,
4210 + urb->transfer_buffer_length,
4211 + dir);
4212 + else if (urb->transfer_flags & URB_MAP_LOCAL)
4213 + hcd_free_coherent(urb->dev->bus,
4214 + &urb->transfer_dma,
4215 + &urb->transfer_buffer,
4216 + urb->transfer_buffer_length,
4217 + dir);
4218 +
4219 + /* Make it safe to call this routine more than once */
4220 + urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
4221 + URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
4222 + URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
4223 +}
4224 +
4225 static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4226 gfp_t mem_flags)
4227 {
4228 @@ -1272,8 +1317,6 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4229 * unless it uses pio or talks to another transport,
4230 * or uses the provided scatter gather list for bulk.
4231 */
4232 - if (is_root_hub(urb->dev))
4233 - return 0;
4234
4235 if (usb_endpoint_xfer_control(&urb->ep->desc)
4236 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
4237 @@ -1286,6 +1329,7 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4238 if (dma_mapping_error(hcd->self.controller,
4239 urb->setup_dma))
4240 return -EAGAIN;
4241 + urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
4242 } else if (hcd->driver->flags & HCD_LOCAL_MEM)
4243 ret = hcd_alloc_coherent(
4244 urb->dev->bus, mem_flags,
4245 @@ -1293,20 +1337,57 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4246 (void **)&urb->setup_packet,
4247 sizeof(struct usb_ctrlrequest),
4248 DMA_TO_DEVICE);
4249 + if (ret)
4250 + return ret;
4251 + urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
4252 }
4253
4254 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
4255 - if (ret == 0 && urb->transfer_buffer_length != 0
4256 + if (urb->transfer_buffer_length != 0
4257 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
4258 if (hcd->self.uses_dma) {
4259 - urb->transfer_dma = dma_map_single (
4260 - hcd->self.controller,
4261 - urb->transfer_buffer,
4262 - urb->transfer_buffer_length,
4263 - dir);
4264 - if (dma_mapping_error(hcd->self.controller,
4265 + if (urb->num_sgs) {
4266 + int n = dma_map_sg(
4267 + hcd->self.controller,
4268 + urb->sg->sg,
4269 + urb->num_sgs,
4270 + dir);
4271 + if (n <= 0)
4272 + ret = -EAGAIN;
4273 + else
4274 + urb->transfer_flags |= URB_DMA_MAP_SG;
4275 + if (n != urb->num_sgs) {
4276 + urb->num_sgs = n;
4277 + urb->transfer_flags |=
4278 + URB_DMA_SG_COMBINED;
4279 + }
4280 + } else if (urb->sg) {
4281 + struct scatterlist *sg;
4282 +
4283 + sg = (struct scatterlist *) urb->sg;
4284 + urb->transfer_dma = dma_map_page(
4285 + hcd->self.controller,
4286 + sg_page(sg),
4287 + sg->offset,
4288 + urb->transfer_buffer_length,
4289 + dir);
4290 + if (dma_mapping_error(hcd->self.controller,
4291 urb->transfer_dma))
4292 - return -EAGAIN;
4293 + ret = -EAGAIN;
4294 + else
4295 + urb->transfer_flags |= URB_DMA_MAP_PAGE;
4296 + } else {
4297 + urb->transfer_dma = dma_map_single(
4298 + hcd->self.controller,
4299 + urb->transfer_buffer,
4300 + urb->transfer_buffer_length,
4301 + dir);
4302 + if (dma_mapping_error(hcd->self.controller,
4303 + urb->transfer_dma))
4304 + ret = -EAGAIN;
4305 + else
4306 + urb->transfer_flags |= URB_DMA_MAP_SINGLE;
4307 + }
4308 } else if (hcd->driver->flags & HCD_LOCAL_MEM) {
4309 ret = hcd_alloc_coherent(
4310 urb->dev->bus, mem_flags,
4311 @@ -1314,55 +1395,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
4312 &urb->transfer_buffer,
4313 urb->transfer_buffer_length,
4314 dir);
4315 -
4316 - if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
4317 - && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
4318 - hcd_free_coherent(urb->dev->bus,
4319 - &urb->setup_dma,
4320 - (void **)&urb->setup_packet,
4321 - sizeof(struct usb_ctrlrequest),
4322 - DMA_TO_DEVICE);
4323 + if (ret == 0)
4324 + urb->transfer_flags |= URB_MAP_LOCAL;
4325 }
4326 + if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
4327 + URB_SETUP_MAP_LOCAL)))
4328 + unmap_urb_for_dma(hcd, urb);
4329 }
4330 return ret;
4331 }
4332
4333 -static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
4334 -{
4335 - enum dma_data_direction dir;
4336 -
4337 - if (is_root_hub(urb->dev))
4338 - return;
4339 -
4340 - if (usb_endpoint_xfer_control(&urb->ep->desc)
4341 - && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
4342 - if (hcd->self.uses_dma)
4343 - dma_unmap_single(hcd->self.controller, urb->setup_dma,
4344 - sizeof(struct usb_ctrlrequest),
4345 - DMA_TO_DEVICE);
4346 - else if (hcd->driver->flags & HCD_LOCAL_MEM)
4347 - hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
4348 - (void **)&urb->setup_packet,
4349 - sizeof(struct usb_ctrlrequest),
4350 - DMA_TO_DEVICE);
4351 - }
4352 -
4353 - dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
4354 - if (urb->transfer_buffer_length != 0
4355 - && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
4356 - if (hcd->self.uses_dma)
4357 - dma_unmap_single(hcd->self.controller,
4358 - urb->transfer_dma,
4359 - urb->transfer_buffer_length,
4360 - dir);
4361 - else if (hcd->driver->flags & HCD_LOCAL_MEM)
4362 - hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
4363 - &urb->transfer_buffer,
4364 - urb->transfer_buffer_length,
4365 - dir);
4366 - }
4367 -}
4368 -
4369 /*-------------------------------------------------------------------------*/
4370
4371 /* may be called in any context with a valid urb->dev usecount
4372 @@ -1391,21 +1433,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
4373 * URBs must be submitted in process context with interrupts
4374 * enabled.
4375 */
4376 - status = map_urb_for_dma(hcd, urb, mem_flags);
4377 - if (unlikely(status)) {
4378 - usbmon_urb_submit_error(&hcd->self, urb, status);
4379 - goto error;
4380 - }
4381
4382 - if (is_root_hub(urb->dev))
4383 + if (is_root_hub(urb->dev)) {
4384 status = rh_urb_enqueue(hcd, urb);
4385 - else
4386 - status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
4387 + } else {
4388 + status = map_urb_for_dma(hcd, urb, mem_flags);
4389 + if (likely(status == 0)) {
4390 + status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
4391 + if (unlikely(status))
4392 + unmap_urb_for_dma(hcd, urb);
4393 + }
4394 + }
4395
4396 if (unlikely(status)) {
4397 usbmon_urb_submit_error(&hcd->self, urb, status);
4398 - unmap_urb_for_dma(hcd, urb);
4399 - error:
4400 urb->hcpriv = NULL;
4401 INIT_LIST_HEAD(&urb->urb_list);
4402 atomic_dec(&urb->use_count);
4403 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
4404 index cd22027..794dca2 100644
4405 --- a/drivers/usb/core/message.c
4406 +++ b/drivers/usb/core/message.c
4407 @@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io)
4408 kfree(io->urbs);
4409 io->urbs = NULL;
4410 }
4411 - if (io->dev->dev.dma_mask != NULL)
4412 - usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
4413 - io->sg, io->nents);
4414 io->dev = NULL;
4415 }
4416
4417 @@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4418 {
4419 int i;
4420 int urb_flags;
4421 - int dma;
4422 int use_sg;
4423
4424 if (!io || !dev || !sg
4425 @@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4426 io->pipe = pipe;
4427 io->sg = sg;
4428 io->nents = nents;
4429 -
4430 - /* not all host controllers use DMA (like the mainstream pci ones);
4431 - * they can use PIO (sl811) or be software over another transport.
4432 - */
4433 - dma = (dev->dev.dma_mask != NULL);
4434 - if (dma)
4435 - io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
4436 - sg, nents);
4437 - else
4438 - io->entries = nents;
4439 + io->entries = nents;
4440
4441 /* initialize all the urbs we'll use */
4442 - if (io->entries <= 0)
4443 - return io->entries;
4444 -
4445 if (dev->bus->sg_tablesize > 0) {
4446 io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
4447 use_sg = true;
4448 @@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4449 goto nomem;
4450
4451 urb_flags = 0;
4452 - if (dma)
4453 - urb_flags |= URB_NO_TRANSFER_DMA_MAP;
4454 if (usb_pipein(pipe))
4455 urb_flags |= URB_SHORT_NOT_OK;
4456
4457 @@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4458
4459 io->urbs[0]->complete = sg_complete;
4460 io->urbs[0]->context = io;
4461 +
4462 /* A length of zero means transfer the whole sg list */
4463 io->urbs[0]->transfer_buffer_length = length;
4464 if (length == 0) {
4465 for_each_sg(sg, sg, io->entries, i) {
4466 io->urbs[0]->transfer_buffer_length +=
4467 - sg_dma_len(sg);
4468 + sg->length;
4469 }
4470 }
4471 io->urbs[0]->sg = io;
4472 @@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4473 io->urbs[i]->context = io;
4474
4475 /*
4476 - * Some systems need to revert to PIO when DMA is temporarily
4477 - * unavailable. For their sakes, both transfer_buffer and
4478 - * transfer_dma are set when possible.
4479 - *
4480 - * Note that if IOMMU coalescing occurred, we cannot
4481 - * trust sg_page anymore, so check if S/G list shrunk.
4482 + * Some systems can't use DMA; they use PIO instead.
4483 + * For their sakes, transfer_buffer is set whenever
4484 + * possible.
4485 */
4486 - if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
4487 + if (!PageHighMem(sg_page(sg)))
4488 io->urbs[i]->transfer_buffer = sg_virt(sg);
4489 else
4490 io->urbs[i]->transfer_buffer = NULL;
4491
4492 - if (dma) {
4493 - io->urbs[i]->transfer_dma = sg_dma_address(sg);
4494 - len = sg_dma_len(sg);
4495 - } else {
4496 - /* hc may use _only_ transfer_buffer */
4497 - len = sg->length;
4498 - }
4499 -
4500 + len = sg->length;
4501 if (length) {
4502 len = min_t(unsigned, len, length);
4503 length -= len;
4504 @@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
4505 io->entries = i + 1;
4506 }
4507 io->urbs[i]->transfer_buffer_length = len;
4508 +
4509 + io->urbs[i]->sg = (struct usb_sg_request *) sg;
4510 }
4511 io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
4512 }
4513 diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
4514 index 45a32da..fec46d0 100644
4515 --- a/drivers/usb/core/urb.c
4516 +++ b/drivers/usb/core/urb.c
4517 @@ -333,9 +333,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
4518 is_out = usb_endpoint_dir_out(&ep->desc);
4519 }
4520
4521 - /* Cache the direction for later use */
4522 - urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
4523 - (is_out ? URB_DIR_OUT : URB_DIR_IN);
4524 + /* Clear the internal flags and cache the direction for later use */
4525 + urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
4526 + URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
4527 + URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
4528 + URB_DMA_SG_COMBINED);
4529 + urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
4530
4531 if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
4532 dev->state < USB_STATE_CONFIGURED)
4533 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
4534 index 0561430..956108e 100644
4535 --- a/drivers/usb/core/usb.c
4536 +++ b/drivers/usb/core/usb.c
4537 @@ -893,6 +893,7 @@ void usb_buffer_unmap(struct urb *urb)
4538 EXPORT_SYMBOL_GPL(usb_buffer_unmap);
4539 #endif /* 0 */
4540
4541 +#if 0
4542 /**
4543 * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
4544 * @dev: device to which the scatterlist will be mapped
4545 @@ -936,6 +937,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
4546 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
4547 }
4548 EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
4549 +#endif
4550
4551 /* XXX DISABLED, no users currently. If you wish to re-enable this
4552 * XXX please determine whether the sync is to transfer ownership of
4553 @@ -972,6 +974,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
4554 EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
4555 #endif
4556
4557 +#if 0
4558 /**
4559 * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
4560 * @dev: device to which the scatterlist will be mapped
4561 @@ -997,6 +1000,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
4562 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
4563 }
4564 EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
4565 +#endif
4566
4567 /* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
4568 #ifdef MODULE
4569 diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
4570 index fa3d142..08a9a62 100644
4571 --- a/drivers/usb/gadget/fsl_udc_core.c
4572 +++ b/drivers/usb/gadget/fsl_udc_core.c
4573 @@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
4574 case USB_ENDPOINT_XFER_ISOC:
4575 /* Calculate transactions needed for high bandwidth iso */
4576 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
4577 - max = max & 0x8ff; /* bit 0~10 */
4578 + max = max & 0x7ff; /* bit 0~10 */
4579 /* 3 transactions at most */
4580 if (mult > 3)
4581 goto en_done;
4582 diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
4583 index e3a74e7..a422a1b 100644
4584 --- a/drivers/usb/host/ehci-au1xxx.c
4585 +++ b/drivers/usb/host/ehci-au1xxx.c
4586 @@ -215,26 +215,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
4587 msleep(10);
4588
4589 /* Root hub was already suspended. Disable irq emission and
4590 - * mark HW unaccessible, bail out if RH has been resumed. Use
4591 - * the spinlock to properly synchronize with possible pending
4592 - * RH suspend or resume activity.
4593 - *
4594 - * This is still racy as hcd->state is manipulated outside of
4595 - * any locks =P But that will be a different fix.
4596 + * mark HW unaccessible. The PM and USB cores make sure that
4597 + * the root hub is either suspended or stopped.
4598 */
4599 spin_lock_irqsave(&ehci->lock, flags);
4600 - if (hcd->state != HC_STATE_SUSPENDED) {
4601 - rc = -EINVAL;
4602 - goto bail;
4603 - }
4604 + ehci_prepare_ports_for_controller_suspend(ehci);
4605 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
4606 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
4607
4608 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4609
4610 au1xxx_stop_ehc();
4611 -
4612 -bail:
4613 spin_unlock_irqrestore(&ehci->lock, flags);
4614
4615 // could save FLADJ in case of Vaux power loss
4616 @@ -264,6 +255,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
4617 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
4618 int mask = INTR_MASK;
4619
4620 + ehci_prepare_ports_for_controller_resume(ehci);
4621 if (!hcd->self.root_hub->do_remote_wakeup)
4622 mask &= ~STS_PCD;
4623 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
4624 diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
4625 index 0e26aa1..5cd967d 100644
4626 --- a/drivers/usb/host/ehci-fsl.c
4627 +++ b/drivers/usb/host/ehci-fsl.c
4628 @@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
4629 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
4630 void __iomem *non_ehci = hcd->regs;
4631
4632 + ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd));
4633 if (!fsl_deep_sleep())
4634 return 0;
4635
4636 @@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
4637 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
4638 void __iomem *non_ehci = hcd->regs;
4639
4640 + ehci_prepare_ports_for_controller_resume(ehci);
4641 if (!fsl_deep_sleep())
4642 return 0;
4643
4644 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
4645 index c7178bc..1b2af4d 100644
4646 --- a/drivers/usb/host/ehci-hub.c
4647 +++ b/drivers/usb/host/ehci-hub.c
4648 @@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
4649 ehci->owned_ports = 0;
4650 }
4651
4652 +static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
4653 + bool suspending)
4654 +{
4655 + int port;
4656 + u32 temp;
4657 +
4658 + /* If remote wakeup is enabled for the root hub but disabled
4659 + * for the controller, we must adjust all the port wakeup flags
4660 + * when the controller is suspended or resumed. In all other
4661 + * cases they don't need to be changed.
4662 + */
4663 + if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup ||
4664 + device_may_wakeup(ehci_to_hcd(ehci)->self.controller))
4665 + return;
4666 +
4667 + /* clear phy low-power mode before changing wakeup flags */
4668 + if (ehci->has_hostpc) {
4669 + port = HCS_N_PORTS(ehci->hcs_params);
4670 + while (port--) {
4671 + u32 __iomem *hostpc_reg;
4672 +
4673 + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4674 + + HOSTPC0 + 4 * port);
4675 + temp = ehci_readl(ehci, hostpc_reg);
4676 + ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
4677 + }
4678 + msleep(5);
4679 + }
4680 +
4681 + port = HCS_N_PORTS(ehci->hcs_params);
4682 + while (port--) {
4683 + u32 __iomem *reg = &ehci->regs->port_status[port];
4684 + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
4685 + u32 t2 = t1 & ~PORT_WAKE_BITS;
4686 +
4687 + /* If we are suspending the controller, clear the flags.
4688 + * If we are resuming the controller, set the wakeup flags.
4689 + */
4690 + if (!suspending) {
4691 + if (t1 & PORT_CONNECT)
4692 + t2 |= PORT_WKOC_E | PORT_WKDISC_E;
4693 + else
4694 + t2 |= PORT_WKOC_E | PORT_WKCONN_E;
4695 + }
4696 + ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
4697 + port + 1, t1, t2);
4698 + ehci_writel(ehci, t2, reg);
4699 + }
4700 +
4701 + /* enter phy low-power mode again */
4702 + if (ehci->has_hostpc) {
4703 + port = HCS_N_PORTS(ehci->hcs_params);
4704 + while (port--) {
4705 + u32 __iomem *hostpc_reg;
4706 +
4707 + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4708 + + HOSTPC0 + 4 * port);
4709 + temp = ehci_readl(ehci, hostpc_reg);
4710 + ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
4711 + }
4712 + }
4713 +}
4714 +
4715 static int ehci_bus_suspend (struct usb_hcd *hcd)
4716 {
4717 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
4718 int port;
4719 int mask;
4720 - u32 __iomem *hostpc_reg = NULL;
4721 + int changed;
4722
4723 ehci_dbg(ehci, "suspend root hub\n");
4724
4725 @@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
4726 */
4727 ehci->bus_suspended = 0;
4728 ehci->owned_ports = 0;
4729 + changed = 0;
4730 port = HCS_N_PORTS(ehci->hcs_params);
4731 while (port--) {
4732 u32 __iomem *reg = &ehci->regs->port_status [port];
4733 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
4734 - u32 t2 = t1;
4735 + u32 t2 = t1 & ~PORT_WAKE_BITS;
4736
4737 - if (ehci->has_hostpc)
4738 - hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
4739 - + HOSTPC0 + 4 * (port & 0xff));
4740 /* keep track of which ports we suspend */
4741 if (t1 & PORT_OWNER)
4742 set_bit(port, &ehci->owned_ports);
4743 @@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
4744 set_bit(port, &ehci->bus_suspended);
4745 }
4746
4747 - /* enable remote wakeup on all ports */
4748 + /* enable remote wakeup on all ports, if told to do so */
4749 if (hcd->self.root_hub->do_remote_wakeup) {
4750 /* only enable appropriate wake bits, otherwise the
4751 * hardware can not go phy low power mode. If a race
4752 * condition happens here(connection change during bits
4753 * set), the port change detection will finally fix it.
4754 */
4755 - if (t1 & PORT_CONNECT) {
4756 + if (t1 & PORT_CONNECT)
4757 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
4758 - t2 &= ~PORT_WKCONN_E;
4759 - } else {
4760 + else
4761 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
4762 - t2 &= ~PORT_WKDISC_E;
4763 - }
4764 - } else
4765 - t2 &= ~PORT_WAKE_BITS;
4766 + }
4767
4768 if (t1 != t2) {
4769 ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
4770 port + 1, t1, t2);
4771 ehci_writel(ehci, t2, reg);
4772 - if (hostpc_reg) {
4773 - u32 t3;
4774 + changed = 1;
4775 + }
4776 + }
4777
4778 - spin_unlock_irq(&ehci->lock);
4779 - msleep(5);/* 5ms for HCD enter low pwr mode */
4780 - spin_lock_irq(&ehci->lock);
4781 - t3 = ehci_readl(ehci, hostpc_reg);
4782 - ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
4783 - t3 = ehci_readl(ehci, hostpc_reg);
4784 - ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
4785 + if (changed && ehci->has_hostpc) {
4786 + spin_unlock_irq(&ehci->lock);
4787 + msleep(5); /* 5 ms for HCD to enter low-power mode */
4788 + spin_lock_irq(&ehci->lock);
4789 +
4790 + port = HCS_N_PORTS(ehci->hcs_params);
4791 + while (port--) {
4792 + u32 __iomem *hostpc_reg;
4793 + u32 t3;
4794 +
4795 + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4796 + + HOSTPC0 + 4 * port);
4797 + t3 = ehci_readl(ehci, hostpc_reg);
4798 + ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
4799 + t3 = ehci_readl(ehci, hostpc_reg);
4800 + ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
4801 port, (t3 & HOSTPC_PHCD) ?
4802 "succeeded" : "failed");
4803 - }
4804 }
4805 }
4806
4807 @@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
4808 msleep(8);
4809 spin_lock_irq(&ehci->lock);
4810
4811 + /* clear phy low-power mode before resume */
4812 + if (ehci->bus_suspended && ehci->has_hostpc) {
4813 + i = HCS_N_PORTS(ehci->hcs_params);
4814 + while (i--) {
4815 + if (test_bit(i, &ehci->bus_suspended)) {
4816 + u32 __iomem *hostpc_reg;
4817 +
4818 + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs
4819 + + HOSTPC0 + 4 * i);
4820 + temp = ehci_readl(ehci, hostpc_reg);
4821 + ehci_writel(ehci, temp & ~HOSTPC_PHCD,
4822 + hostpc_reg);
4823 + }
4824 + }
4825 + spin_unlock_irq(&ehci->lock);
4826 + msleep(5);
4827 + spin_lock_irq(&ehci->lock);
4828 + }
4829 +
4830 /* manually resume the ports we suspended during bus_suspend() */
4831 i = HCS_N_PORTS (ehci->hcs_params);
4832 while (i--) {
4833 @@ -675,16 +760,25 @@ static int ehci_hub_control (
4834 goto error;
4835 if (ehci->no_selective_suspend)
4836 break;
4837 - if (temp & PORT_SUSPEND) {
4838 - if ((temp & PORT_PE) == 0)
4839 - goto error;
4840 - /* resume signaling for 20 msec */
4841 - temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
4842 - ehci_writel(ehci, temp | PORT_RESUME,
4843 - status_reg);
4844 - ehci->reset_done [wIndex] = jiffies
4845 - + msecs_to_jiffies (20);
4846 + if (!(temp & PORT_SUSPEND))
4847 + break;
4848 + if ((temp & PORT_PE) == 0)
4849 + goto error;
4850 +
4851 + /* clear phy low-power mode before resume */
4852 + if (hostpc_reg) {
4853 + temp1 = ehci_readl(ehci, hostpc_reg);
4854 + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
4855 + hostpc_reg);
4856 + spin_unlock_irqrestore(&ehci->lock, flags);
4857 + msleep(5);/* wait to leave low-power mode */
4858 + spin_lock_irqsave(&ehci->lock, flags);
4859 }
4860 + /* resume signaling for 20 msec */
4861 + temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
4862 + ehci_writel(ehci, temp | PORT_RESUME, status_reg);
4863 + ehci->reset_done[wIndex] = jiffies
4864 + + msecs_to_jiffies(20);
4865 break;
4866 case USB_PORT_FEAT_C_SUSPEND:
4867 clear_bit(wIndex, &ehci->port_c_suspend);
4868 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
4869 index ead5f4f..c5f662d 100644
4870 --- a/drivers/usb/host/ehci-pci.c
4871 +++ b/drivers/usb/host/ehci-pci.c
4872 @@ -284,23 +284,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd)
4873 msleep(10);
4874
4875 /* Root hub was already suspended. Disable irq emission and
4876 - * mark HW unaccessible, bail out if RH has been resumed. Use
4877 - * the spinlock to properly synchronize with possible pending
4878 - * RH suspend or resume activity.
4879 - *
4880 - * This is still racy as hcd->state is manipulated outside of
4881 - * any locks =P But that will be a different fix.
4882 + * mark HW unaccessible. The PM and USB cores make sure that
4883 + * the root hub is either suspended or stopped.
4884 */
4885 spin_lock_irqsave (&ehci->lock, flags);
4886 - if (hcd->state != HC_STATE_SUSPENDED) {
4887 - rc = -EINVAL;
4888 - goto bail;
4889 - }
4890 + ehci_prepare_ports_for_controller_suspend(ehci);
4891 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
4892 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
4893
4894 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4895 - bail:
4896 spin_unlock_irqrestore (&ehci->lock, flags);
4897
4898 // could save FLADJ in case of Vaux power loss
4899 @@ -330,6 +322,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
4900 !hibernated) {
4901 int mask = INTR_MASK;
4902
4903 + ehci_prepare_ports_for_controller_resume(ehci);
4904 if (!hcd->self.root_hub->do_remote_wakeup)
4905 mask &= ~STS_PCD;
4906 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
4907 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
4908 index 556c0b4..ddf61c3 100644
4909 --- a/drivers/usb/host/ehci.h
4910 +++ b/drivers/usb/host/ehci.h
4911 @@ -536,6 +536,16 @@ struct ehci_fstn {
4912
4913 /*-------------------------------------------------------------------------*/
4914
4915 +/* Prepare the PORTSC wakeup flags during controller suspend/resume */
4916 +
4917 +#define ehci_prepare_ports_for_controller_suspend(ehci) \
4918 + ehci_adjust_port_wakeup_flags(ehci, true);
4919 +
4920 +#define ehci_prepare_ports_for_controller_resume(ehci) \
4921 + ehci_adjust_port_wakeup_flags(ehci, false);
4922 +
4923 +/*-------------------------------------------------------------------------*/
4924 +
4925 #ifdef CONFIG_USB_EHCI_ROOT_HUB_TT
4926
4927 /*
4928 diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
4929 index 72dae1c..3b6e864 100644
4930 --- a/drivers/usb/host/fhci.h
4931 +++ b/drivers/usb/host/fhci.h
4932 @@ -20,6 +20,7 @@
4933
4934 #include <linux/kernel.h>
4935 #include <linux/types.h>
4936 +#include <linux/bug.h>
4937 #include <linux/spinlock.h>
4938 #include <linux/interrupt.h>
4939 #include <linux/kfifo.h>
4940 @@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p)
4941
4942 static inline void *cq_get(struct kfifo *kfifo)
4943 {
4944 - void *p = NULL;
4945 + unsigned int sz;
4946 + void *p;
4947 +
4948 + sz = kfifo_out(kfifo, (void *)&p, sizeof(p));
4949 + if (sz != sizeof(p))
4950 + return NULL;
4951
4952 - kfifo_out(kfifo, (void *)&p, sizeof(p));
4953 return p;
4954 }
4955
4956 diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
4957 index 141d049..b388dd1 100644
4958 --- a/drivers/usb/host/whci/qset.c
4959 +++ b/drivers/usb/host/whci/qset.c
4960 @@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
4961 wurb->urb = urb;
4962 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
4963
4964 - if (urb->sg) {
4965 + if (urb->num_sgs) {
4966 ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
4967 if (ret == -EINVAL) {
4968 qset_free_stds(qset, urb);
4969 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4970 index 417d37a..98a73cd 100644
4971 --- a/drivers/usb/host/xhci-pci.c
4972 +++ b/drivers/usb/host/xhci-pci.c
4973 @@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
4974 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
4975 int retval;
4976
4977 - hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
4978 + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;
4979
4980 xhci->cap_regs = hcd->regs;
4981 xhci->op_regs = hcd->regs +
4982 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4983 index 85d7e8f..40cba25 100644
4984 --- a/drivers/usb/host/xhci-ring.c
4985 +++ b/drivers/usb/host/xhci-ring.c
4986 @@ -242,10 +242,27 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
4987 int i;
4988 union xhci_trb *enq = ring->enqueue;
4989 struct xhci_segment *enq_seg = ring->enq_seg;
4990 + struct xhci_segment *cur_seg;
4991 + unsigned int left_on_ring;
4992
4993 /* Check if ring is empty */
4994 - if (enq == ring->dequeue)
4995 + if (enq == ring->dequeue) {
4996 + /* Can't use link trbs */
4997 + left_on_ring = TRBS_PER_SEGMENT - 1;
4998 + for (cur_seg = enq_seg->next; cur_seg != enq_seg;
4999 + cur_seg = cur_seg->next)
5000 + left_on_ring += TRBS_PER_SEGMENT - 1;
5001 +
5002 + /* Always need one TRB free in the ring. */
5003 + left_on_ring -= 1;
5004 + if (num_trbs > left_on_ring) {
5005 + xhci_warn(xhci, "Not enough room on ring; "
5006 + "need %u TRBs, %u TRBs left\n",
5007 + num_trbs, left_on_ring);
5008 + return 0;
5009 + }
5010 return 1;
5011 + }
5012 /* Make sure there's an extra empty TRB available */
5013 for (i = 0; i <= num_trbs; ++i) {
5014 if (enq == ring->dequeue)
5015 @@ -334,7 +351,8 @@ static struct xhci_segment *find_trb_seg(
5016 while (cur_seg->trbs > trb ||
5017 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
5018 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
5019 - if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
5020 + if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
5021 + TRB_TYPE(TRB_LINK) &&
5022 (generic_trb->field[3] & LINK_TOGGLE))
5023 *cycle_state = ~(*cycle_state) & 0x1;
5024 cur_seg = cur_seg->next;
5025 @@ -390,7 +408,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
5026 BUG();
5027
5028 trb = &state->new_deq_ptr->generic;
5029 - if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
5030 + if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
5031 (trb->field[3] & LINK_TOGGLE))
5032 state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
5033 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
5034 @@ -578,6 +596,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
5035 /* Otherwise just ring the doorbell to restart the ring */
5036 ring_ep_doorbell(xhci, slot_id, ep_index);
5037 }
5038 + ep->stopped_td = NULL;
5039 + ep->stopped_trb = NULL;
5040
5041 /*
5042 * Drop the lock and complete the URBs in the cancelled TD list.
5043 @@ -1061,8 +1081,13 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
5044 ep->ep_state |= EP_HALTED;
5045 ep->stopped_td = td;
5046 ep->stopped_trb = event_trb;
5047 +
5048 xhci_queue_reset_ep(xhci, slot_id, ep_index);
5049 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
5050 +
5051 + ep->stopped_td = NULL;
5052 + ep->stopped_trb = NULL;
5053 +
5054 xhci_ring_cmd_db(xhci);
5055 }
5056
5057 @@ -1390,8 +1415,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
5058 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
5059 cur_trb != event_trb;
5060 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
5061 - if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
5062 - TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
5063 + if ((cur_trb->generic.field[3] &
5064 + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
5065 + (cur_trb->generic.field[3] &
5066 + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
5067 td->urb->actual_length +=
5068 TRB_LEN(cur_trb->generic.field[2]);
5069 }
5070 @@ -1938,7 +1965,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5071 int running_total, trb_buff_len, ret;
5072 u64 addr;
5073
5074 - if (urb->sg)
5075 + if (urb->num_sgs)
5076 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
5077
5078 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
5079 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5080 index 7e42772..5a752d6 100644
5081 --- a/drivers/usb/host/xhci.c
5082 +++ b/drivers/usb/host/xhci.c
5083 @@ -105,6 +105,33 @@ int xhci_halt(struct xhci_hcd *xhci)
5084 }
5085
5086 /*
5087 + * Set the run bit and wait for the host to be running.
5088 + */
5089 +int xhci_start(struct xhci_hcd *xhci)
5090 +{
5091 + u32 temp;
5092 + int ret;
5093 +
5094 + temp = xhci_readl(xhci, &xhci->op_regs->command);
5095 + temp |= (CMD_RUN);
5096 + xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
5097 + temp);
5098 + xhci_writel(xhci, temp, &xhci->op_regs->command);
5099 +
5100 + /*
5101 + * Wait for the HCHalted Status bit to be 0 to indicate the host is
5102 + * running.
5103 + */
5104 + ret = handshake(xhci, &xhci->op_regs->status,
5105 + STS_HALT, 0, XHCI_MAX_HALT_USEC);
5106 + if (ret == -ETIMEDOUT)
5107 + xhci_err(xhci, "Host took too long to start, "
5108 + "waited %u microseconds.\n",
5109 + XHCI_MAX_HALT_USEC);
5110 + return ret;
5111 +}
5112 +
5113 +/*
5114 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
5115 *
5116 * This resets pipelines, timers, counters, state machines, etc.
5117 @@ -115,6 +142,7 @@ int xhci_reset(struct xhci_hcd *xhci)
5118 {
5119 u32 command;
5120 u32 state;
5121 + int ret;
5122
5123 state = xhci_readl(xhci, &xhci->op_regs->status);
5124 if ((state & STS_HALT) == 0) {
5125 @@ -129,7 +157,17 @@ int xhci_reset(struct xhci_hcd *xhci)
5126 /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
5127 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
5128
5129 - return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
5130 + ret = handshake(xhci, &xhci->op_regs->command,
5131 + CMD_RESET, 0, 250 * 1000);
5132 + if (ret)
5133 + return ret;
5134 +
5135 + xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
5136 + /*
5137 + * xHCI cannot write to any doorbells or operational registers other
5138 + * than status until the "Controller Not Ready" flag is cleared.
5139 + */
5140 + return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
5141 }
5142
5143
5144 @@ -452,13 +490,11 @@ int xhci_run(struct usb_hcd *hcd)
5145 if (NUM_TEST_NOOPS > 0)
5146 doorbell = xhci_setup_one_noop(xhci);
5147
5148 - temp = xhci_readl(xhci, &xhci->op_regs->command);
5149 - temp |= (CMD_RUN);
5150 - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
5151 - temp);
5152 - xhci_writel(xhci, temp, &xhci->op_regs->command);
5153 - /* Flush PCI posted writes */
5154 - temp = xhci_readl(xhci, &xhci->op_regs->command);
5155 + if (xhci_start(xhci)) {
5156 + xhci_halt(xhci);
5157 + return -ENODEV;
5158 + }
5159 +
5160 xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
5161 if (doorbell)
5162 (*doorbell)(xhci);
5163 @@ -1438,6 +1474,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
5164 kfree(virt_ep->stopped_td);
5165 xhci_ring_cmd_db(xhci);
5166 }
5167 + virt_ep->stopped_td = NULL;
5168 + virt_ep->stopped_trb = NULL;
5169 spin_unlock_irqrestore(&xhci->lock, flags);
5170
5171 if (ret)
5172 diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
5173 index ddf7f9a..8a7968d 100644
5174 --- a/drivers/usb/mon/mon_bin.c
5175 +++ b/drivers/usb/mon/mon_bin.c
5176 @@ -416,7 +416,7 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
5177
5178 } else {
5179 /* If IOMMU coalescing occurred, we cannot trust sg_page */
5180 - if (urb->sg->nents != urb->num_sgs) {
5181 + if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
5182 *flag = 'D';
5183 return length;
5184 }
5185 diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
5186 index 4d0be13..d562602 100644
5187 --- a/drivers/usb/mon/mon_text.c
5188 +++ b/drivers/usb/mon/mon_text.c
5189 @@ -161,9 +161,7 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
5190 } else {
5191 struct scatterlist *sg = urb->sg->sg;
5192
5193 - /* If IOMMU coalescing occurred, we cannot trust sg_page */
5194 - if (urb->sg->nents != urb->num_sgs ||
5195 - PageHighMem(sg_page(sg)))
5196 + if (PageHighMem(sg_page(sg)))
5197 return 'D';
5198
5199 /* For the text interface we copy only the first sg buffer */
5200 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
5201 index ec9b044..009e26c 100644
5202 --- a/drivers/usb/serial/cp210x.c
5203 +++ b/drivers/usb/serial/cp210x.c
5204 @@ -61,6 +61,8 @@ static const struct usb_device_id id_table[] = {
5205 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
5206 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
5207 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
5208 + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
5209 + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
5210 { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
5211 { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
5212 { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
5213 @@ -72,9 +74,12 @@ static const struct usb_device_id id_table[] = {
5214 { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */
5215 { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */
5216 { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */
5217 + { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */
5218 + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
5219 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
5220 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
5221 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
5222 + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
5223 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
5224 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
5225 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
5226 @@ -82,12 +87,15 @@ static const struct usb_device_id id_table[] = {
5227 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
5228 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
5229 { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
5230 + { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
5231 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
5232 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
5233 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
5234 + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
5235 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
5236 { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
5237 { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
5238 + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
5239 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
5240 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
5241 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
5242 @@ -105,6 +113,7 @@ static const struct usb_device_id id_table[] = {
5243 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
5244 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
5245 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
5246 + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
5247 { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
5248 { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
5249 { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
5250 @@ -115,6 +124,8 @@ static const struct usb_device_id id_table[] = {
5251 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
5252 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
5253 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
5254 + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
5255 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
5256 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
5257 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
5258 { } /* Terminating Entry */
5259 diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
5260 index e23c779..582e832 100644
5261 --- a/drivers/usb/serial/cypress_m8.c
5262 +++ b/drivers/usb/serial/cypress_m8.c
5263 @@ -1309,7 +1309,7 @@ static void cypress_read_int_callback(struct urb *urb)
5264 /* process read if there is data other than line status */
5265 if (tty && bytes > i) {
5266 tty_insert_flip_string_fixed_flag(tty, data + i,
5267 - bytes - i, tty_flag);
5268 + tty_flag, bytes - i);
5269 tty_flip_buffer_push(tty);
5270 }
5271
5272 diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
5273 index 68b0aa5..3edda3e 100644
5274 --- a/drivers/usb/serial/digi_acceleport.c
5275 +++ b/drivers/usb/serial/digi_acceleport.c
5276 @@ -1703,8 +1703,8 @@ static int digi_read_inb_callback(struct urb *urb)
5277 /* data length is len-1 (one byte of len is port_status) */
5278 --len;
5279 if (len > 0) {
5280 - tty_insert_flip_string_fixed_flag(tty, data, len,
5281 - flag);
5282 + tty_insert_flip_string_fixed_flag(tty, data, flag,
5283 + len);
5284 tty_flip_buffer_push(tty);
5285 }
5286 }
5287 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
5288 index 1d7c4fa..3f5676e 100644
5289 --- a/drivers/usb/serial/ftdi_sio.c
5290 +++ b/drivers/usb/serial/ftdi_sio.c
5291 @@ -2289,6 +2289,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
5292 "urb failed to set to rts/cts flow control\n");
5293 }
5294
5295 + /* raise DTR/RTS */
5296 + set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
5297 } else {
5298 /*
5299 * Xon/Xoff code
5300 @@ -2336,6 +2338,8 @@ static void ftdi_set_termios(struct tty_struct *tty,
5301 }
5302 }
5303
5304 + /* lower DTR/RTS */
5305 + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
5306 }
5307 return;
5308 }
5309 diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
5310 index 4a0f519..71bdbe0 100644
5311 --- a/drivers/usb/serial/ir-usb.c
5312 +++ b/drivers/usb/serial/ir-usb.c
5313 @@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
5314 kfree(port->read_urb->transfer_buffer);
5315 port->read_urb->transfer_buffer = buffer;
5316 port->read_urb->transfer_buffer_length = buffer_size;
5317 + port->bulk_in_buffer = buffer;
5318
5319 buffer = kmalloc(buffer_size, GFP_KERNEL);
5320 if (!buffer) {
5321 @@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
5322 kfree(port->write_urb->transfer_buffer);
5323 port->write_urb->transfer_buffer = buffer;
5324 port->write_urb->transfer_buffer_length = buffer_size;
5325 + port->bulk_out_buffer = buffer;
5326 port->bulk_out_size = buffer_size;
5327 }
5328
5329 diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
5330 index 8eef91b..cc0ba38 100644
5331 --- a/drivers/usb/serial/kl5kusb105.c
5332 +++ b/drivers/usb/serial/kl5kusb105.c
5333 @@ -321,6 +321,7 @@ err_cleanup:
5334 usb_free_urb(priv->write_urb_pool[j]);
5335 }
5336 }
5337 + kfree(priv);
5338 usb_set_serial_port_data(serial->port[i], NULL);
5339 }
5340 return -ENOMEM;
5341 diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
5342 index c113a2a..bd5bd85 100644
5343 --- a/drivers/usb/serial/kobil_sct.c
5344 +++ b/drivers/usb/serial/kobil_sct.c
5345 @@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port)
5346
5347 /* FIXME: Add rts/dtr methods */
5348 if (port->write_urb) {
5349 - usb_kill_urb(port->write_urb);
5350 + usb_poison_urb(port->write_urb);
5351 + kfree(port->write_urb->transfer_buffer);
5352 usb_free_urb(port->write_urb);
5353 port->write_urb = NULL;
5354 }
5355 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
5356 index 2fda1c0..68f6a1d 100644
5357 --- a/drivers/usb/serial/mos7840.c
5358 +++ b/drivers/usb/serial/mos7840.c
5359 @@ -731,7 +731,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
5360 mos7840_port = urb->context;
5361 if (!mos7840_port) {
5362 dbg("%s", "NULL mos7840_port pointer");
5363 - mos7840_port->read_urb_busy = false;
5364 return;
5365 }
5366
5367 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5368 index 84d0eda..8b2e612 100644
5369 --- a/drivers/usb/serial/option.c
5370 +++ b/drivers/usb/serial/option.c
5371 @@ -380,6 +380,10 @@ static int option_resume(struct usb_serial *serial);
5372
5373 #define CINTERION_VENDOR_ID 0x0681
5374
5375 +/* Olivetti products */
5376 +#define OLIVETTI_VENDOR_ID 0x0b3c
5377 +#define OLIVETTI_PRODUCT_OLICARD100 0xc000
5378 +
5379 /* some devices interfaces need special handling due to a number of reasons */
5380 enum option_blacklist_reason {
5381 OPTION_BLACKLIST_NONE = 0,
5382 @@ -675,6 +679,180 @@ static const struct usb_device_id option_ids[] = {
5383 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
5384 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
5385 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
5386 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
5387 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
5388 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
5389 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
5390 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
5391 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
5392 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) },
5393 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) },
5394 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) },
5395 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) },
5396 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) },
5397 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) },
5398 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) },
5399 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) },
5400 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) },
5401 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) },
5402 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) },
5403 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) },
5404 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) },
5405 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) },
5406 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) },
5407 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) },
5408 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) },
5409 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) },
5410 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) },
5411 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) },
5412 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) },
5413 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) },
5414 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) },
5415 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) },
5416 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) },
5417 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) },
5418 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) },
5419 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) },
5420 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) },
5421 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) },
5422 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) },
5423 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) },
5424 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) },
5425 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) },
5426 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) },
5427 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) },
5428 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) },
5429 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) },
5430 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) },
5431 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) },
5432 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) },
5433 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) },
5434 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) },
5435 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) },
5436 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) },
5437 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) },
5438 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) },
5439 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) },
5440 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) },
5441 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) },
5442 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) },
5443 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) },
5444 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) },
5445 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) },
5446 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) },
5447 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) },
5448 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) },
5449 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) },
5450 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) },
5451 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) },
5452 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) },
5453 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) },
5454 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) },
5455 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) },
5456 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) },
5457 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) },
5458 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) },
5459 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) },
5460 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) },
5461 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) },
5462 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) },
5463 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) },
5464 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) },
5465 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) },
5466 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) },
5467 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) },
5468 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) },
5469 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) },
5470 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) },
5471 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) },
5472 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) },
5473 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) },
5474 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) },
5475 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) },
5476 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) },
5477 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) },
5478 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) },
5479 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) },
5480 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) },
5481 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) },
5482 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) },
5483 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) },
5484 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) },
5485 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) },
5486 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) },
5487 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) },
5488 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) },
5489 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) },
5490 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) },
5491 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) },
5492 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) },
5493 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) },
5494 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) },
5495 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) },
5496 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) },
5497 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) },
5498 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) },
5499 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) },
5500 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) },
5501 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
5502 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
5503 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
5504 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
5505 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
5506 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
5507 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
5508 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
5509 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
5510 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
5511 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
5512 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
5513 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
5514 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
5515 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
5516 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
5517 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
5518 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
5519 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) },
5520 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) },
5521 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) },
5522 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) },
5523 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) },
5524 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) },
5525 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) },
5526 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
5527 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
5528 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
5529 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
5530 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
5531 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
5532 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
5533 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
5534 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
5535 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
5536 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
5537 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
5538 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) },
5539 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) },
5540 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) },
5541 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) },
5542 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) },
5543 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) },
5544 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) },
5545 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) },
5546 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) },
5547 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) },
5548 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) },
5549 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) },
5550 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) },
5551 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) },
5552 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) },
5553 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) },
5554 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) },
5555 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) },
5556 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) },
5557 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
5558 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
5559 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
5560 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
5561 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
5562 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
5563 @@ -726,6 +904,8 @@ static const struct usb_device_id option_ids[] = {
5564 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
5565
5566 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
5567 +
5568 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
5569 { } /* Terminating entry */
5570 };
5571 MODULE_DEVICE_TABLE(usb, option_ids);
5572 diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
5573 index 7e3bea2..214a3e5 100644
5574 --- a/drivers/usb/serial/qcaux.c
5575 +++ b/drivers/usb/serial/qcaux.c
5576 @@ -50,6 +50,10 @@
5577 #define SANYO_VENDOR_ID 0x0474
5578 #define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
5579
5580 +/* Samsung devices */
5581 +#define SAMSUNG_VENDOR_ID 0x04e8
5582 +#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */
5583 +
5584 static struct usb_device_id id_table[] = {
5585 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
5586 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
5587 @@ -61,6 +65,7 @@ static struct usb_device_id id_table[] = {
5588 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
5589 { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
5590 { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
5591 + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
5592 { },
5593 };
5594 MODULE_DEVICE_TABLE(usb, id_table);
5595 diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
5596 index 5d39191..2ea32c5 100644
5597 --- a/drivers/usb/serial/spcp8x5.c
5598 +++ b/drivers/usb/serial/spcp8x5.c
5599 @@ -726,8 +726,8 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
5600 /* overrun is special, not associated with a char */
5601 if (status & UART_OVERRUN_ERROR)
5602 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
5603 - tty_insert_flip_string_fixed_flag(tty, data,
5604 - urb->actual_length, tty_flag);
5605 + tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
5606 + urb->actual_length);
5607 tty_flip_buffer_push(tty);
5608 }
5609 tty_kref_put(tty);
5610 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
5611 index 0949427..fb7fc40 100644
5612 --- a/drivers/usb/serial/visor.c
5613 +++ b/drivers/usb/serial/visor.c
5614 @@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5_device = {
5615 .throttle = visor_throttle,
5616 .unthrottle = visor_unthrottle,
5617 .attach = clie_3_5_startup,
5618 + .release = visor_release,
5619 .write = visor_write,
5620 .write_room = visor_write_room,
5621 .write_bulk_callback = visor_write_bulk_callback,
5622 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
5623 index ccf1dbb..55b3cd1 100644
5624 --- a/drivers/usb/storage/unusual_devs.h
5625 +++ b/drivers/usb/storage/unusual_devs.h
5626 @@ -1853,6 +1853,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
5627 US_SC_DEVICE, US_PR_DEVICE, NULL,
5628 US_FL_IGNORE_RESIDUE ),
5629
5630 +/* Reported by Hans de Goede <hdegoede@redhat.com>
5631 + * These Appotech controllers are found in Picture Frames, they provide a
5632 + * (buggy) emulation of a cdrom drive which contains the windows software
5633 + * Uploading of pictures happens over the corresponding /dev/sg device. */
5634 +UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
5635 + "BUILDWIN",
5636 + "Photo Frame",
5637 + US_SC_DEVICE, US_PR_DEVICE, NULL,
5638 + US_FL_BAD_SENSE ),
5639 +UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
5640 + "BUILDWIN",
5641 + "Photo Frame",
5642 + US_SC_DEVICE, US_PR_DEVICE, NULL,
5643 + US_FL_BAD_SENSE ),
5644 +
5645 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
5646 "ST",
5647 "2A",
5648 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
5649 index 9777583..c9d0c79 100644
5650 --- a/drivers/vhost/net.c
5651 +++ b/drivers/vhost/net.c
5652 @@ -637,7 +637,7 @@ const static struct file_operations vhost_net_fops = {
5653 };
5654
5655 static struct miscdevice vhost_net_misc = {
5656 - VHOST_NET_MINOR,
5657 + MISC_DYNAMIC_MINOR,
5658 "vhost-net",
5659 &vhost_net_fops,
5660 };
5661 diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
5662 index 8d406fb..f3d7440 100644
5663 --- a/drivers/video/arcfb.c
5664 +++ b/drivers/video/arcfb.c
5665 @@ -80,7 +80,7 @@ struct arcfb_par {
5666 spinlock_t lock;
5667 };
5668
5669 -static struct fb_fix_screeninfo arcfb_fix __initdata = {
5670 +static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
5671 .id = "arcfb",
5672 .type = FB_TYPE_PACKED_PIXELS,
5673 .visual = FB_VISUAL_MONO01,
5674 @@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = {
5675 .accel = FB_ACCEL_NONE,
5676 };
5677
5678 -static struct fb_var_screeninfo arcfb_var __initdata = {
5679 +static struct fb_var_screeninfo arcfb_var __devinitdata = {
5680 .xres = 128,
5681 .yres = 64,
5682 .xres_virtual = 128,
5683 @@ -588,7 +588,7 @@ err:
5684 return retval;
5685 }
5686
5687 -static int arcfb_remove(struct platform_device *dev)
5688 +static int __devexit arcfb_remove(struct platform_device *dev)
5689 {
5690 struct fb_info *info = platform_get_drvdata(dev);
5691
5692 @@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev)
5693
5694 static struct platform_driver arcfb_driver = {
5695 .probe = arcfb_probe,
5696 - .remove = arcfb_remove,
5697 + .remove = __devexit_p(arcfb_remove),
5698 .driver = {
5699 .name = "arcfb",
5700 },
5701 diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
5702 index 8bbf251..af8f0f2 100644
5703 --- a/drivers/video/hgafb.c
5704 +++ b/drivers/video/hgafb.c
5705 @@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
5706
5707 /* Framebuffer driver structures */
5708
5709 -static struct fb_var_screeninfo __initdata hga_default_var = {
5710 +static struct fb_var_screeninfo hga_default_var __devinitdata = {
5711 .xres = 720,
5712 .yres = 348,
5713 .xres_virtual = 720,
5714 @@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = {
5715 .width = -1,
5716 };
5717
5718 -static struct fb_fix_screeninfo __initdata hga_fix = {
5719 +static struct fb_fix_screeninfo hga_fix __devinitdata = {
5720 .id = "HGA",
5721 .type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
5722 .visual = FB_VISUAL_MONO10,
5723 @@ -276,7 +276,7 @@ static void hga_blank(int blank_mode)
5724 spin_unlock_irqrestore(&hga_reg_lock, flags);
5725 }
5726
5727 -static int __init hga_card_detect(void)
5728 +static int __devinit hga_card_detect(void)
5729 {
5730 int count = 0;
5731 void __iomem *p, *q;
5732 @@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev)
5733 return 0;
5734 }
5735
5736 -static int hgafb_remove(struct platform_device *pdev)
5737 +static int __devexit hgafb_remove(struct platform_device *pdev)
5738 {
5739 struct fb_info *info = platform_get_drvdata(pdev);
5740
5741 @@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev)
5742
5743 static struct platform_driver hgafb_driver = {
5744 .probe = hgafb_probe,
5745 - .remove = hgafb_remove,
5746 + .remove = __devexit_p(hgafb_remove),
5747 .driver = {
5748 .name = "hgafb",
5749 },
5750 diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
5751 index 9b5532b..bc67251 100644
5752 --- a/drivers/video/vfb.c
5753 +++ b/drivers/video/vfb.c
5754 @@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
5755 vfree(mem);
5756 }
5757
5758 -static struct fb_var_screeninfo vfb_default __initdata = {
5759 +static struct fb_var_screeninfo vfb_default __devinitdata = {
5760 .xres = 640,
5761 .yres = 480,
5762 .xres_virtual = 640,
5763 @@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = {
5764 .vmode = FB_VMODE_NONINTERLACED,
5765 };
5766
5767 -static struct fb_fix_screeninfo vfb_fix __initdata = {
5768 +static struct fb_fix_screeninfo vfb_fix __devinitdata = {
5769 .id = "Virtual FB",
5770 .type = FB_TYPE_PACKED_PIXELS,
5771 .visual = FB_VISUAL_PSEUDOCOLOR,
5772 diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
5773 index bf638a4..2ab3cc7 100644
5774 --- a/drivers/video/vga16fb.c
5775 +++ b/drivers/video/vga16fb.c
5776 @@ -65,7 +65,7 @@ struct vga16fb_par {
5777
5778 /* --------------------------------------------------------------------- */
5779
5780 -static struct fb_var_screeninfo vga16fb_defined __initdata = {
5781 +static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
5782 .xres = 640,
5783 .yres = 480,
5784 .xres_virtual = 640,
5785 @@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = {
5786 };
5787
5788 /* name should not depend on EGA/VGA */
5789 -static struct fb_fix_screeninfo vga16fb_fix __initdata = {
5790 +static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
5791 .id = "VGA16 VGA",
5792 .smem_start = VGA_FB_PHYS,
5793 .smem_len = VGA_FB_PHYS_LEN,
5794 @@ -1278,7 +1278,7 @@ static struct fb_ops vga16fb_ops = {
5795 };
5796
5797 #ifndef MODULE
5798 -static int vga16fb_setup(char *options)
5799 +static int __init vga16fb_setup(char *options)
5800 {
5801 char *this_opt;
5802
5803 @@ -1376,7 +1376,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
5804 return ret;
5805 }
5806
5807 -static int vga16fb_remove(struct platform_device *dev)
5808 +static int __devexit vga16fb_remove(struct platform_device *dev)
5809 {
5810 struct fb_info *info = platform_get_drvdata(dev);
5811
5812 @@ -1393,7 +1393,7 @@ static int vga16fb_remove(struct platform_device *dev)
5813
5814 static struct platform_driver vga16fb_driver = {
5815 .probe = vga16fb_probe,
5816 - .remove = vga16fb_remove,
5817 + .remove = __devexit_p(vga16fb_remove),
5818 .driver = {
5819 .name = "vga16fb",
5820 },
5821 diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
5822 index 31b0e17..e66b8b1 100644
5823 --- a/drivers/video/w100fb.c
5824 +++ b/drivers/video/w100fb.c
5825 @@ -53,7 +53,7 @@ static void w100_update_enable(void);
5826 static void w100_update_disable(void);
5827 static void calc_hsync(struct w100fb_par *par);
5828 static void w100_init_graphic_engine(struct w100fb_par *par);
5829 -struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
5830 +struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
5831
5832 /* Pseudo palette size */
5833 #define MAX_PALETTES 16
5834 @@ -782,7 +782,7 @@ out:
5835 }
5836
5837
5838 -static int w100fb_remove(struct platform_device *pdev)
5839 +static int __devexit w100fb_remove(struct platform_device *pdev)
5840 {
5841 struct fb_info *info = platform_get_drvdata(pdev);
5842 struct w100fb_par *par=info->par;
5843 @@ -1020,7 +1020,7 @@ static struct pll_entries {
5844 { 0 },
5845 };
5846
5847 -struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
5848 +struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
5849 {
5850 struct pll_entries *pll_entry = w100_pll_tables;
5851
5852 @@ -1611,7 +1611,7 @@ static void w100_vsync(void)
5853
5854 static struct platform_driver w100fb_driver = {
5855 .probe = w100fb_probe,
5856 - .remove = w100fb_remove,
5857 + .remove = __devexit_p(w100fb_remove),
5858 .suspend = w100fb_suspend,
5859 .resume = w100fb_resume,
5860 .driver = {
5861 @@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = {
5862 },
5863 };
5864
5865 -int __devinit w100fb_init(void)
5866 +int __init w100fb_init(void)
5867 {
5868 return platform_driver_register(&w100fb_driver);
5869 }
5870 diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
5871 index eab33f1..7b547f5 100644
5872 --- a/drivers/xen/xenbus/xenbus_xs.c
5873 +++ b/drivers/xen/xenbus/xenbus_xs.c
5874 @@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t,
5875 #define PRINTF_BUFFER_SIZE 4096
5876 char *printf_buffer;
5877
5878 - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
5879 + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
5880 if (printf_buffer == NULL)
5881 return -ENOMEM;
5882
5883 diff --git a/fs/aio.c b/fs/aio.c
5884 index 1cf12b3..48fdeeb 100644
5885 --- a/fs/aio.c
5886 +++ b/fs/aio.c
5887 @@ -36,6 +36,7 @@
5888 #include <linux/blkdev.h>
5889 #include <linux/mempool.h>
5890 #include <linux/hash.h>
5891 +#include <linux/compat.h>
5892
5893 #include <asm/kmap_types.h>
5894 #include <asm/uaccess.h>
5895 @@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb)
5896 return ret;
5897 }
5898
5899 -static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
5900 +static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
5901 {
5902 ssize_t ret;
5903
5904 - ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
5905 - kiocb->ki_nbytes, 1,
5906 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
5907 +#ifdef CONFIG_COMPAT
5908 + if (compat)
5909 + ret = compat_rw_copy_check_uvector(type,
5910 + (struct compat_iovec __user *)kiocb->ki_buf,
5911 + kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
5912 + &kiocb->ki_iovec);
5913 + else
5914 +#endif
5915 + ret = rw_copy_check_uvector(type,
5916 + (struct iovec __user *)kiocb->ki_buf,
5917 + kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
5918 + &kiocb->ki_iovec);
5919 if (ret < 0)
5920 goto out;
5921
5922 @@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
5923 * Performs the initial checks and aio retry method
5924 * setup for the kiocb at the time of io submission.
5925 */
5926 -static ssize_t aio_setup_iocb(struct kiocb *kiocb)
5927 +static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
5928 {
5929 struct file *file = kiocb->ki_filp;
5930 ssize_t ret = 0;
5931 @@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
5932 ret = security_file_permission(file, MAY_READ);
5933 if (unlikely(ret))
5934 break;
5935 - ret = aio_setup_vectored_rw(READ, kiocb);
5936 + ret = aio_setup_vectored_rw(READ, kiocb, compat);
5937 if (ret)
5938 break;
5939 ret = -EINVAL;
5940 @@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
5941 ret = security_file_permission(file, MAY_WRITE);
5942 if (unlikely(ret))
5943 break;
5944 - ret = aio_setup_vectored_rw(WRITE, kiocb);
5945 + ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
5946 if (ret)
5947 break;
5948 ret = -EINVAL;
5949 @@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash)
5950 }
5951
5952 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
5953 - struct iocb *iocb, struct hlist_head *batch_hash)
5954 + struct iocb *iocb, struct hlist_head *batch_hash,
5955 + bool compat)
5956 {
5957 struct kiocb *req;
5958 struct file *file;
5959 @@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
5960 req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
5961 req->ki_opcode = iocb->aio_lio_opcode;
5962
5963 - ret = aio_setup_iocb(req);
5964 + ret = aio_setup_iocb(req, compat);
5965
5966 if (ret)
5967 goto out_put_req;
5968 @@ -1637,20 +1648,8 @@ out_put_req:
5969 return ret;
5970 }
5971
5972 -/* sys_io_submit:
5973 - * Queue the nr iocbs pointed to by iocbpp for processing. Returns
5974 - * the number of iocbs queued. May return -EINVAL if the aio_context
5975 - * specified by ctx_id is invalid, if nr is < 0, if the iocb at
5976 - * *iocbpp[0] is not properly initialized, if the operation specified
5977 - * is invalid for the file descriptor in the iocb. May fail with
5978 - * -EFAULT if any of the data structures point to invalid data. May
5979 - * fail with -EBADF if the file descriptor specified in the first
5980 - * iocb is invalid. May fail with -EAGAIN if insufficient resources
5981 - * are available to queue any iocbs. Will return 0 if nr is 0. Will
5982 - * fail with -ENOSYS if not implemented.
5983 - */
5984 -SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
5985 - struct iocb __user * __user *, iocbpp)
5986 +long do_io_submit(aio_context_t ctx_id, long nr,
5987 + struct iocb __user *__user *iocbpp, bool compat)
5988 {
5989 struct kioctx *ctx;
5990 long ret = 0;
5991 @@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
5992 break;
5993 }
5994
5995 - ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
5996 + ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
5997 if (ret)
5998 break;
5999 }
6000 @@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
6001 return i ? i : ret;
6002 }
6003
6004 +/* sys_io_submit:
6005 + * Queue the nr iocbs pointed to by iocbpp for processing. Returns
6006 + * the number of iocbs queued. May return -EINVAL if the aio_context
6007 + * specified by ctx_id is invalid, if nr is < 0, if the iocb at
6008 + * *iocbpp[0] is not properly initialized, if the operation specified
6009 + * is invalid for the file descriptor in the iocb. May fail with
6010 + * -EFAULT if any of the data structures point to invalid data. May
6011 + * fail with -EBADF if the file descriptor specified in the first
6012 + * iocb is invalid. May fail with -EAGAIN if insufficient resources
6013 + * are available to queue any iocbs. Will return 0 if nr is 0. Will
6014 + * fail with -ENOSYS if not implemented.
6015 + */
6016 +SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
6017 + struct iocb __user * __user *, iocbpp)
6018 +{
6019 + return do_io_submit(ctx_id, nr, iocbpp, 0);
6020 +}
6021 +
6022 /* lookup_kiocb
6023 * Finds a given iocb for cancellation.
6024 */
6025 diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
6026 index 6ef7b26..6b4d0cc 100644
6027 --- a/fs/btrfs/acl.c
6028 +++ b/fs/btrfs/acl.c
6029 @@ -160,6 +160,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
6030 int ret;
6031 struct posix_acl *acl = NULL;
6032
6033 + if (!is_owner_or_cap(dentry->d_inode))
6034 + return -EPERM;
6035 +
6036 if (value) {
6037 acl = posix_acl_from_xattr(value, size);
6038 if (acl == NULL) {
6039 diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
6040 index 39e47f4..a6db615 100644
6041 --- a/fs/cifs/cifsproto.h
6042 +++ b/fs/cifs/cifsproto.h
6043 @@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
6044 __u16 fileHandle, struct file *file,
6045 struct vfsmount *mnt, unsigned int oflags);
6046 extern int cifs_posix_open(char *full_path, struct inode **pinode,
6047 - struct vfsmount *mnt, int mode, int oflags,
6048 - __u32 *poplock, __u16 *pnetfid, int xid);
6049 + struct vfsmount *mnt,
6050 + struct super_block *sb,
6051 + int mode, int oflags,
6052 + __u32 *poplock, __u16 *pnetfid, int xid);
6053 extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
6054 FILE_UNIX_BASIC_INFO *info,
6055 struct cifs_sb_info *cifs_sb);
6056 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
6057 index e9f7ecc..ff3d891 100644
6058 --- a/fs/cifs/dir.c
6059 +++ b/fs/cifs/dir.c
6060 @@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
6061 }
6062
6063 int cifs_posix_open(char *full_path, struct inode **pinode,
6064 - struct vfsmount *mnt, int mode, int oflags,
6065 - __u32 *poplock, __u16 *pnetfid, int xid)
6066 + struct vfsmount *mnt, struct super_block *sb,
6067 + int mode, int oflags,
6068 + __u32 *poplock, __u16 *pnetfid, int xid)
6069 {
6070 int rc;
6071 FILE_UNIX_BASIC_INFO *presp_data;
6072 __u32 posix_flags = 0;
6073 - struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
6074 + struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
6075 struct cifs_fattr fattr;
6076
6077 cFYI(1, ("posix open %s", full_path));
6078 @@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
6079
6080 /* get new inode and set it up */
6081 if (*pinode == NULL) {
6082 - *pinode = cifs_iget(mnt->mnt_sb, &fattr);
6083 + *pinode = cifs_iget(sb, &fattr);
6084 if (!*pinode) {
6085 rc = -ENOMEM;
6086 goto posix_open_ret;
6087 @@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
6088 cifs_fattr_to_inode(*pinode, &fattr);
6089 }
6090
6091 - cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
6092 + if (mnt)
6093 + cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
6094
6095 posix_open_ret:
6096 kfree(presp_data);
6097 @@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
6098 if (nd && (nd->flags & LOOKUP_OPEN))
6099 oflags = nd->intent.open.flags;
6100 else
6101 - oflags = FMODE_READ;
6102 + oflags = FMODE_READ | SMB_O_CREAT;
6103
6104 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
6105 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
6106 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
6107 - rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
6108 - mode, oflags, &oplock, &fileHandle, xid);
6109 + rc = cifs_posix_open(full_path, &newinode,
6110 + nd ? nd->path.mnt : NULL,
6111 + inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
6112 /* EIO could indicate that (posix open) operation is not
6113 supported, despite what server claimed in capability
6114 negotation. EREMOTE indicates DFS junction, which is not
6115 @@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
6116 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
6117 (nd->intent.open.flags & O_CREAT)) {
6118 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
6119 + parent_dir_inode->i_sb,
6120 nd->intent.open.create_mode,
6121 nd->intent.open.flags, &oplock,
6122 &fileHandle, xid);
6123 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
6124 index 9b11a8f..4cbdb20 100644
6125 --- a/fs/cifs/file.c
6126 +++ b/fs/cifs/file.c
6127 @@ -298,10 +298,12 @@ int cifs_open(struct inode *inode, struct file *file)
6128 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
6129 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
6130 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
6131 + oflags |= SMB_O_CREAT;
6132 /* can not refresh inode info since size could be stale */
6133 rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
6134 - cifs_sb->mnt_file_mode /* ignored */,
6135 - oflags, &oplock, &netfid, xid);
6136 + inode->i_sb,
6137 + cifs_sb->mnt_file_mode /* ignored */,
6138 + oflags, &oplock, &netfid, xid);
6139 if (rc == 0) {
6140 cFYI(1, ("posix open succeeded"));
6141 /* no need for special case handling of setting mode
6142 @@ -513,8 +515,9 @@ reopen_error_exit:
6143 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
6144 /* can not refresh inode info since size could be stale */
6145 rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
6146 - cifs_sb->mnt_file_mode /* ignored */,
6147 - oflags, &oplock, &netfid, xid);
6148 + inode->i_sb,
6149 + cifs_sb->mnt_file_mode /* ignored */,
6150 + oflags, &oplock, &netfid, xid);
6151 if (rc == 0) {
6152 cFYI(1, ("posix reopen succeeded"));
6153 goto reopen_success;
6154 diff --git a/fs/compat.c b/fs/compat.c
6155 index 0544873..6490d21 100644
6156 --- a/fs/compat.c
6157 +++ b/fs/compat.c
6158 @@ -568,6 +568,79 @@ out:
6159 return ret;
6160 }
6161
6162 +/* A write operation does a read from user space and vice versa */
6163 +#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
6164 +
6165 +ssize_t compat_rw_copy_check_uvector(int type,
6166 + const struct compat_iovec __user *uvector, unsigned long nr_segs,
6167 + unsigned long fast_segs, struct iovec *fast_pointer,
6168 + struct iovec **ret_pointer)
6169 +{
6170 + compat_ssize_t tot_len;
6171 + struct iovec *iov = *ret_pointer = fast_pointer;
6172 + ssize_t ret = 0;
6173 + int seg;
6174 +
6175 + /*
6176 + * SuS says "The readv() function *may* fail if the iovcnt argument
6177 + * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
6178 + * traditionally returned zero for zero segments, so...
6179 + */
6180 + if (nr_segs == 0)
6181 + goto out;
6182 +
6183 + ret = -EINVAL;
6184 + if (nr_segs > UIO_MAXIOV || nr_segs < 0)
6185 + goto out;
6186 + if (nr_segs > fast_segs) {
6187 + ret = -ENOMEM;
6188 + iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
6189 + if (iov == NULL) {
6190 + *ret_pointer = fast_pointer;
6191 + goto out;
6192 + }
6193 + }
6194 + *ret_pointer = iov;
6195 +
6196 + /*
6197 + * Single unix specification:
6198 + * We should -EINVAL if an element length is not >= 0 and fitting an
6199 + * ssize_t. The total length is fitting an ssize_t
6200 + *
6201 + * Be careful here because iov_len is a size_t not an ssize_t
6202 + */
6203 + tot_len = 0;
6204 + ret = -EINVAL;
6205 + for (seg = 0; seg < nr_segs; seg++) {
6206 + compat_ssize_t tmp = tot_len;
6207 + compat_uptr_t buf;
6208 + compat_ssize_t len;
6209 +
6210 + if (__get_user(len, &uvector->iov_len) ||
6211 + __get_user(buf, &uvector->iov_base)) {
6212 + ret = -EFAULT;
6213 + goto out;
6214 + }
6215 + if (len < 0) /* size_t not fitting in compat_ssize_t .. */
6216 + goto out;
6217 + tot_len += len;
6218 + if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
6219 + goto out;
6220 + if (!access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
6221 + ret = -EFAULT;
6222 + goto out;
6223 + }
6224 + iov->iov_base = compat_ptr(buf);
6225 + iov->iov_len = (compat_size_t) len;
6226 + uvector++;
6227 + iov++;
6228 + }
6229 + ret = tot_len;
6230 +
6231 +out:
6232 + return ret;
6233 +}
6234 +
6235 static inline long
6236 copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
6237 {
6238 @@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
6239 iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
6240 ret = copy_iocb(nr, iocb, iocb64);
6241 if (!ret)
6242 - ret = sys_io_submit(ctx_id, nr, iocb64);
6243 + ret = do_io_submit(ctx_id, nr, iocb64, 1);
6244 return ret;
6245 }
6246
6247 @@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
6248 {
6249 compat_ssize_t tot_len;
6250 struct iovec iovstack[UIO_FASTIOV];
6251 - struct iovec *iov=iovstack, *vector;
6252 + struct iovec *iov;
6253 ssize_t ret;
6254 - int seg;
6255 io_fn_t fn;
6256 iov_fn_t fnv;
6257
6258 - /*
6259 - * SuS says "The readv() function *may* fail if the iovcnt argument
6260 - * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
6261 - * traditionally returned zero for zero segments, so...
6262 - */
6263 - ret = 0;
6264 - if (nr_segs == 0)
6265 - goto out;
6266 -
6267 - /*
6268 - * First get the "struct iovec" from user memory and
6269 - * verify all the pointers
6270 - */
6271 ret = -EINVAL;
6272 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
6273 - goto out;
6274 if (!file->f_op)
6275 goto out;
6276 - if (nr_segs > UIO_FASTIOV) {
6277 - ret = -ENOMEM;
6278 - iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
6279 - if (!iov)
6280 - goto out;
6281 - }
6282 +
6283 ret = -EFAULT;
6284 if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
6285 goto out;
6286
6287 - /*
6288 - * Single unix specification:
6289 - * We should -EINVAL if an element length is not >= 0 and fitting an
6290 - * ssize_t. The total length is fitting an ssize_t
6291 - *
6292 - * Be careful here because iov_len is a size_t not an ssize_t
6293 - */
6294 - tot_len = 0;
6295 - vector = iov;
6296 - ret = -EINVAL;
6297 - for (seg = 0 ; seg < nr_segs; seg++) {
6298 - compat_ssize_t tmp = tot_len;
6299 - compat_ssize_t len;
6300 - compat_uptr_t buf;
6301 -
6302 - if (__get_user(len, &uvector->iov_len) ||
6303 - __get_user(buf, &uvector->iov_base)) {
6304 - ret = -EFAULT;
6305 - goto out;
6306 - }
6307 - if (len < 0) /* size_t not fitting an compat_ssize_t .. */
6308 - goto out;
6309 - tot_len += len;
6310 - if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
6311 - goto out;
6312 - vector->iov_base = compat_ptr(buf);
6313 - vector->iov_len = (compat_size_t) len;
6314 - uvector++;
6315 - vector++;
6316 - }
6317 + tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
6318 + UIO_FASTIOV, iovstack, &iov);
6319 if (tot_len == 0) {
6320 ret = 0;
6321 goto out;
6322 diff --git a/fs/dcache.c b/fs/dcache.c
6323 index f1358e5..2b6f09a 100644
6324 --- a/fs/dcache.c
6325 +++ b/fs/dcache.c
6326 @@ -1529,6 +1529,7 @@ void d_delete(struct dentry * dentry)
6327 spin_lock(&dentry->d_lock);
6328 isdir = S_ISDIR(dentry->d_inode->i_mode);
6329 if (atomic_read(&dentry->d_count) == 1) {
6330 + dentry->d_flags &= ~DCACHE_CANT_MOUNT;
6331 dentry_iput(dentry);
6332