/[linux-patches]/genpatches-2.6/tags/3.0-30/1022_linux-3.0.23.patch

Contents of /genpatches-2.6/tags/3.0-30/1022_linux-3.0.23.patch

Revision 2206
Mon Sep 17 18:58:14 2012 UTC by mpagano
File size: 126038 bytes
3.0-30 release
1 diff --git a/Makefile b/Makefile
2 index a5b2253..d14684e 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 0
8 -SUBLEVEL = 22
9 +SUBLEVEL = 23
10 EXTRAVERSION =
11 NAME = Sneaky Weasel
12
13 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
14 index 65c3f24..4e25f18 100644
15 --- a/arch/arm/include/asm/assembler.h
16 +++ b/arch/arm/include/asm/assembler.h
17 @@ -137,6 +137,11 @@
18 disable_irq
19 .endm
20
21 + .macro save_and_disable_irqs_notrace, oldcpsr
22 + mrs \oldcpsr, cpsr
23 + disable_irq_notrace
24 + .endm
25 +
26 /*
27 * Restore interrupt state previously stored in a register. We don't
28 * guarantee that this will preserve the flags.
29 diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
30 index 3593119..1ed1fd3 100644
31 --- a/arch/arm/mm/cache-v7.S
32 +++ b/arch/arm/mm/cache-v7.S
33 @@ -54,9 +54,15 @@ loop1:
34 and r1, r1, #7 @ mask of the bits for current cache only
35 cmp r1, #2 @ see what cache we have at this level
36 blt skip @ skip if no cache, or just i-cache
37 +#ifdef CONFIG_PREEMPT
38 + save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
39 +#endif
40 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
41 isb @ isb to sych the new cssr&csidr
42 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
43 +#ifdef CONFIG_PREEMPT
44 + restore_irqs_notrace r9
45 +#endif
46 and r2, r1, #7 @ extract the length of the cache lines
47 add r2, r2, #4 @ add 4 (line length offset)
48 ldr r4, =0x3ff
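
The two cache-v7.S hunks close a preemption race: the CSSELR write and the
CCSIDR read must pair up, or a preempting task can reselect the cache level
in between.  A minimal C analogue of the same pattern (illustrative only;
write_selector()/read_selected() are made-up accessors):

	static u32 read_indexed_reg(unsigned int index)
	{
		unsigned long flags;
		u32 val;

		local_irq_save(flags);	/* nothing runs between select and read */
		write_selector(index);	/* hypothetical: pick the register bank */
		val = read_selected();	/* hypothetical: read the selected bank */
		local_irq_restore(flags);

		return val;
	}
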
49 diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
50 index 822f630..5793c4b 100644
51 --- a/arch/powerpc/kernel/perf_event.c
52 +++ b/arch/powerpc/kernel/perf_event.c
53 @@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
54 {
55 unsigned long flags;
56 s64 left;
57 + unsigned long val;
58
59 if (!event->hw.idx || !event->hw.sample_period)
60 return;
61 @@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
62
63 event->hw.state = 0;
64 left = local64_read(&event->hw.period_left);
65 - write_pmc(event->hw.idx, left);
66 +
67 + val = 0;
68 + if (left < 0x80000000L)
69 + val = 0x80000000L - left;
70 +
71 + write_pmc(event->hw.idx, val);
72
73 perf_event_update_userpage(event);
74 perf_pmu_enable(event->pmu);
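
For context on the write_pmc() change: PowerPC performance counters raise
their interrupt when bit 31 of the counter becomes set, so a remaining
period of `left` events has to be armed as 0x80000000 - left, not as `left`
itself.  A worked example (illustrative only):

	/* left = 1000 remaining events:
	 *     val = 0x80000000 - 1000 = 0x7ffffc18
	 * after exactly 1000 increments the counter reaches 0x80000000,
	 * bit 31 flips on and the PMI fires.  Writing `left` directly, as
	 * the old code did, would instead fire after 0x80000000 - 1000
	 * events - roughly two billion too late.
	 */
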
75 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
76 index c9e09ea..a850b4d 100644
77 --- a/arch/x86/include/asm/i387.h
78 +++ b/arch/x86/include/asm/i387.h
79 @@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
80 extern void fpu_init(void);
81 extern void mxcsr_feature_mask_init(void);
82 extern int init_fpu(struct task_struct *child);
83 -extern asmlinkage void math_state_restore(void);
84 -extern void __math_state_restore(void);
85 +extern void __math_state_restore(struct task_struct *);
86 +extern void math_state_restore(void);
87 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
88
89 extern user_regset_active_fn fpregs_active, xfpregs_active;
90 @@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
91
92 #endif /* CONFIG_X86_64 */
93
94 -/* We need a safe address that is cheap to find and that is already
95 - in L1 during context switch. The best choices are unfortunately
96 - different for UP and SMP */
97 -#ifdef CONFIG_SMP
98 -#define safe_address (__per_cpu_offset[0])
99 -#else
100 -#define safe_address (kstat_cpu(0).cpustat.user)
101 -#endif
102 -
103 /*
104 - * These must be called with preempt disabled
105 + * These must be called with preempt disabled. Returns
106 + * 'true' if the FPU state is still intact.
107 */
108 -static inline void fpu_save_init(struct fpu *fpu)
109 +static inline int fpu_save_init(struct fpu *fpu)
110 {
111 if (use_xsave()) {
112 fpu_xsave(fpu);
113 @@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
114 * xsave header may indicate the init state of the FP.
115 */
116 if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
117 - return;
118 + return 1;
119 } else if (use_fxsr()) {
120 fpu_fxsave(fpu);
121 } else {
122 asm volatile("fnsave %[fx]; fwait"
123 : [fx] "=m" (fpu->state->fsave));
124 - return;
125 + return 0;
126 }
127
128 - if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
129 + /*
130 + * If exceptions are pending, we need to clear them so
131 + * that we don't randomly get exceptions later.
132 + *
133 + * FIXME! Is this perhaps only true for the old-style
134 + * irq13 case? Maybe we could leave the x87 state
135 + * intact otherwise?
136 + */
137 + if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
138 asm volatile("fnclex");
139 -
140 - /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
141 - is pending. Clear the x87 state here by setting it to fixed
142 - values. safe_address is a random variable that should be in L1 */
143 - alternative_input(
144 - ASM_NOP8 ASM_NOP2,
145 - "emms\n\t" /* clear stack tags */
146 - "fildl %P[addr]", /* set F?P to defined value */
147 - X86_FEATURE_FXSAVE_LEAK,
148 - [addr] "m" (safe_address));
149 + return 0;
150 + }
151 + return 1;
152 }
153
154 -static inline void __save_init_fpu(struct task_struct *tsk)
155 +static inline int __save_init_fpu(struct task_struct *tsk)
156 {
157 - fpu_save_init(&tsk->thread.fpu);
158 - task_thread_info(tsk)->status &= ~TS_USEDFPU;
159 + return fpu_save_init(&tsk->thread.fpu);
160 }
161
162 static inline int fpu_fxrstor_checking(struct fpu *fpu)
163 @@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
164 }
165
166 /*
167 - * Signal frame handlers...
168 + * Software FPU state helpers. Careful: these need to
169 + * be preemption protection *and* they need to be
170 + * properly paired with the CR0.TS changes!
171 */
172 -extern int save_i387_xstate(void __user *buf);
173 -extern int restore_i387_xstate(void __user *buf);
174 +static inline int __thread_has_fpu(struct task_struct *tsk)
175 +{
176 + return tsk->thread.has_fpu;
177 +}
178
179 -static inline void __unlazy_fpu(struct task_struct *tsk)
180 +/* Must be paired with an 'stts' after! */
181 +static inline void __thread_clear_has_fpu(struct task_struct *tsk)
182 {
183 - if (task_thread_info(tsk)->status & TS_USEDFPU) {
184 - __save_init_fpu(tsk);
185 - stts();
186 - } else
187 - tsk->fpu_counter = 0;
188 + tsk->thread.has_fpu = 0;
189 +}
190 +
191 +/* Must be paired with a 'clts' before! */
192 +static inline void __thread_set_has_fpu(struct task_struct *tsk)
193 +{
194 + tsk->thread.has_fpu = 1;
195 }
196
197 +/*
198 + * Encapsulate the CR0.TS handling together with the
199 + * software flag.
200 + *
201 + * These generally need preemption protection to work,
202 + * do try to avoid using these on their own.
203 + */
204 +static inline void __thread_fpu_end(struct task_struct *tsk)
205 +{
206 + __thread_clear_has_fpu(tsk);
207 + stts();
208 +}
209 +
210 +static inline void __thread_fpu_begin(struct task_struct *tsk)
211 +{
212 + clts();
213 + __thread_set_has_fpu(tsk);
214 +}
215 +
216 +/*
217 + * FPU state switching for scheduling.
218 + *
219 + * This is a two-stage process:
220 + *
221 + * - switch_fpu_prepare() saves the old state and
222 + * sets the new state of the CR0.TS bit. This is
223 + * done within the context of the old process.
224 + *
225 + * - switch_fpu_finish() restores the new state as
226 + * necessary.
227 + */
228 +typedef struct { int preload; } fpu_switch_t;
229 +
230 +/*
231 + * FIXME! We could do a totally lazy restore, but we need to
232 + * add a per-cpu "this was the task that last touched the FPU
233 + * on this CPU" variable, and the task needs to have a "I last
234 + * touched the FPU on this CPU" and check them.
235 + *
236 + * We don't do that yet, so "fpu_lazy_restore()" always returns
237 + * false, but some day..
238 + */
239 +#define fpu_lazy_restore(tsk) (0)
240 +#define fpu_lazy_state_intact(tsk) do { } while (0)
241 +
242 +static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
243 +{
244 + fpu_switch_t fpu;
245 +
246 + fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
247 + if (__thread_has_fpu(old)) {
248 + if (__save_init_fpu(old))
249 + fpu_lazy_state_intact(old);
250 + __thread_clear_has_fpu(old);
251 + old->fpu_counter++;
252 +
253 + /* Don't change CR0.TS if we just switch! */
254 + if (fpu.preload) {
255 + __thread_set_has_fpu(new);
256 + prefetch(new->thread.fpu.state);
257 + } else
258 + stts();
259 + } else {
260 + old->fpu_counter = 0;
261 + if (fpu.preload) {
262 + if (fpu_lazy_restore(new))
263 + fpu.preload = 0;
264 + else
265 + prefetch(new->thread.fpu.state);
266 + __thread_fpu_begin(new);
267 + }
268 + }
269 + return fpu;
270 +}
271 +
272 +/*
273 + * By the time this gets called, we've already cleared CR0.TS and
274 + * given the process the FPU if we are going to preload the FPU
275 + * state - all we need to do is to conditionally restore the register
276 + * state itself.
277 + */
278 +static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
279 +{
280 + if (fpu.preload)
281 + __math_state_restore(new);
282 +}
283 +
284 +/*
285 + * Signal frame handlers...
286 + */
287 +extern int save_i387_xstate(void __user *buf);
288 +extern int restore_i387_xstate(void __user *buf);
289 +
290 static inline void __clear_fpu(struct task_struct *tsk)
291 {
292 - if (task_thread_info(tsk)->status & TS_USEDFPU) {
293 + if (__thread_has_fpu(tsk)) {
294 /* Ignore delayed exceptions from user space */
295 asm volatile("1: fwait\n"
296 "2:\n"
297 _ASM_EXTABLE(1b, 2b));
298 - task_thread_info(tsk)->status &= ~TS_USEDFPU;
299 - stts();
300 + __thread_fpu_end(tsk);
301 }
302 }
303
304 +/*
305 + * Were we in an interrupt that interrupted kernel mode?
306 + *
307 + * We can do a kernel_fpu_begin/end() pair *ONLY* if that
308 + * pair does nothing at all: the thread must not have fpu (so
309 + * that we don't try to save the FPU state), and TS must
310 + * be set (so that the clts/stts pair does nothing that is
311 + * visible in the interrupted kernel thread).
312 + */
313 +static inline bool interrupted_kernel_fpu_idle(void)
314 +{
315 + return !__thread_has_fpu(current) &&
316 + (read_cr0() & X86_CR0_TS);
317 +}
318 +
319 +/*
320 + * Were we in user mode (or vm86 mode) when we were
321 + * interrupted?
322 + *
323 + * Doing kernel_fpu_begin/end() is ok if we are running
324 + * in an interrupt context from user mode - we'll just
325 + * save the FPU state as required.
326 + */
327 +static inline bool interrupted_user_mode(void)
328 +{
329 + struct pt_regs *regs = get_irq_regs();
330 + return regs && user_mode_vm(regs);
331 +}
332 +
333 +/*
334 + * Can we use the FPU in kernel mode with the
335 + * whole "kernel_fpu_begin/end()" sequence?
336 + *
337 + * It's always ok in process context (ie "not interrupt")
338 + * but it is sometimes ok even from an irq.
339 + */
340 +static inline bool irq_fpu_usable(void)
341 +{
342 + return !in_interrupt() ||
343 + interrupted_user_mode() ||
344 + interrupted_kernel_fpu_idle();
345 +}
346 +
347 static inline void kernel_fpu_begin(void)
348 {
349 - struct thread_info *me = current_thread_info();
350 + struct task_struct *me = current;
351 +
352 + WARN_ON_ONCE(!irq_fpu_usable());
353 preempt_disable();
354 - if (me->status & TS_USEDFPU)
355 - __save_init_fpu(me->task);
356 - else
357 + if (__thread_has_fpu(me)) {
358 + __save_init_fpu(me);
359 + __thread_clear_has_fpu(me);
360 + /* We do 'stts()' in kernel_fpu_end() */
361 + } else
362 clts();
363 }
364
365 @@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void)
366 preempt_enable();
367 }
368
369 -static inline bool irq_fpu_usable(void)
370 -{
371 - struct pt_regs *regs;
372 -
373 - return !in_interrupt() || !(regs = get_irq_regs()) || \
374 - user_mode(regs) || (read_cr0() & X86_CR0_TS);
375 -}
376 -
377 /*
378 * Some instructions like VIA's padlock instructions generate a spurious
379 * DNA fault but don't modify SSE registers. And these instructions
380 @@ -363,20 +493,64 @@ static inline void irq_ts_restore(int TS_state)
381 }
382
383 /*
384 + * The question "does this thread have fpu access?"
385 + * is slightly racy, since preemption could come in
386 + * and revoke it immediately after the test.
387 + *
388 + * However, even in that very unlikely scenario,
389 + * we can just assume we have FPU access - typically
390 + * to save the FP state - we'll just take a #NM
391 + * fault and get the FPU access back.
392 + *
393 + * The actual user_fpu_begin/end() functions
394 + * need to be preemption-safe, though.
395 + *
396 + * NOTE! user_fpu_end() must be used only after you
397 + * have saved the FP state, and user_fpu_begin() must
398 + * be used only immediately before restoring it.
399 + * These functions do not do any save/restore on
400 + * their own.
401 + */
402 +static inline int user_has_fpu(void)
403 +{
404 + return __thread_has_fpu(current);
405 +}
406 +
407 +static inline void user_fpu_end(void)
408 +{
409 + preempt_disable();
410 + __thread_fpu_end(current);
411 + preempt_enable();
412 +}
413 +
414 +static inline void user_fpu_begin(void)
415 +{
416 + preempt_disable();
417 + if (!user_has_fpu())
418 + __thread_fpu_begin(current);
419 + preempt_enable();
420 +}
421 +
422 +/*
423 * These disable preemption on their own and are safe
424 */
425 static inline void save_init_fpu(struct task_struct *tsk)
426 {
427 + WARN_ON_ONCE(!__thread_has_fpu(tsk));
428 preempt_disable();
429 __save_init_fpu(tsk);
430 - stts();
431 + __thread_fpu_end(tsk);
432 preempt_enable();
433 }
434
435 static inline void unlazy_fpu(struct task_struct *tsk)
436 {
437 preempt_disable();
438 - __unlazy_fpu(tsk);
439 + if (__thread_has_fpu(tsk)) {
440 + __save_init_fpu(tsk);
441 + __thread_fpu_end(tsk);
442 + } else
443 + tsk->fpu_counter = 0;
444 preempt_enable();
445 }
446
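
The reworked <asm/i387.h> API above is easiest to read from the caller's
side.  A minimal sketch of the usual calling convention - the helpers
checksum_block_sse()/checksum_block_scalar() are made up for illustration:

	static void checksum_block(const void *buf, size_t len)
	{
		if (!irq_fpu_usable()) {
			/* interrupt context where the FPU can't be borrowed */
			checksum_block_scalar(buf, len);
			return;
		}

		kernel_fpu_begin();	/* saves the current owner's state if needed */
		checksum_block_sse(buf, len);
		kernel_fpu_end();	/* stts() + preempt_enable() */
	}
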
447 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
448 index 2193715..5d9c61d 100644
449 --- a/arch/x86/include/asm/processor.h
450 +++ b/arch/x86/include/asm/processor.h
451 @@ -454,6 +454,7 @@ struct thread_struct {
452 unsigned long trap_no;
453 unsigned long error_code;
454 /* floating point and extended processor state */
455 + unsigned long has_fpu;
456 struct fpu fpu;
457 #ifdef CONFIG_X86_32
458 /* Virtual 86 mode info */
459 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
460 index 1f2e61e..278d3d5 100644
461 --- a/arch/x86/include/asm/thread_info.h
462 +++ b/arch/x86/include/asm/thread_info.h
463 @@ -242,8 +242,6 @@ static inline struct thread_info *current_thread_info(void)
464 * ever touches our thread-synchronous status, so we don't
465 * have to worry about atomic accesses.
466 */
467 -#define TS_USEDFPU 0x0001 /* FPU was used by this task
468 - this quantum (SMP) */
469 #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
470 #define TS_POLLING 0x0004 /* idle task polling need_resched,
471 skip sending interrupt */
472 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
473 index c105c53..fde4428 100644
474 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
475 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
476 @@ -330,8 +330,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
477 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
478 }
479
480 -static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
481 - int index)
482 +static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
483 {
484 static struct amd_l3_cache *__cpuinitdata l3_caches;
485 int node;
486 @@ -748,14 +747,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
487 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
488
489 #ifdef CONFIG_SMP
490 -static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
491 +
492 +static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
493 {
494 - struct _cpuid4_info *this_leaf, *sibling_leaf;
495 - unsigned long num_threads_sharing;
496 - int index_msb, i, sibling;
497 + struct _cpuid4_info *this_leaf;
498 + int ret, i, sibling;
499 struct cpuinfo_x86 *c = &cpu_data(cpu);
500
501 - if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
502 + ret = 0;
503 + if (index == 3) {
504 + ret = 1;
505 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
506 if (!per_cpu(ici_cpuid4_info, i))
507 continue;
508 @@ -766,8 +767,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
509 set_bit(sibling, this_leaf->shared_cpu_map);
510 }
511 }
512 - return;
513 + } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
514 + ret = 1;
515 + for_each_cpu(i, cpu_sibling_mask(cpu)) {
516 + if (!per_cpu(ici_cpuid4_info, i))
517 + continue;
518 + this_leaf = CPUID4_INFO_IDX(i, index);
519 + for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
520 + if (!cpu_online(sibling))
521 + continue;
522 + set_bit(sibling, this_leaf->shared_cpu_map);
523 + }
524 + }
525 }
526 +
527 + return ret;
528 +}
529 +
530 +static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
531 +{
532 + struct _cpuid4_info *this_leaf, *sibling_leaf;
533 + unsigned long num_threads_sharing;
534 + int index_msb, i;
535 + struct cpuinfo_x86 *c = &cpu_data(cpu);
536 +
537 + if (c->x86_vendor == X86_VENDOR_AMD) {
538 + if (cache_shared_amd_cpu_map_setup(cpu, index))
539 + return;
540 + }
541 +
542 this_leaf = CPUID4_INFO_IDX(cpu, index);
543 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
544
545 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
546 index a3d0dc5..fcdb1b3 100644
547 --- a/arch/x86/kernel/process_32.c
548 +++ b/arch/x86/kernel/process_32.c
549 @@ -293,22 +293,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
550 *next = &next_p->thread;
551 int cpu = smp_processor_id();
552 struct tss_struct *tss = &per_cpu(init_tss, cpu);
553 - bool preload_fpu;
554 + fpu_switch_t fpu;
555
556 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
557
558 - /*
559 - * If the task has used fpu the last 5 timeslices, just do a full
560 - * restore of the math state immediately to avoid the trap; the
561 - * chances of needing FPU soon are obviously high now
562 - */
563 - preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
564 -
565 - __unlazy_fpu(prev_p);
566 -
567 - /* we're going to use this soon, after a few expensive things */
568 - if (preload_fpu)
569 - prefetch(next->fpu.state);
570 + fpu = switch_fpu_prepare(prev_p, next_p);
571
572 /*
573 * Reload esp0.
574 @@ -348,11 +337,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
575 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
576 __switch_to_xtra(prev_p, next_p, tss);
577
578 - /* If we're going to preload the fpu context, make sure clts
579 - is run while we're batching the cpu state updates. */
580 - if (preload_fpu)
581 - clts();
582 -
583 /*
584 * Leave lazy mode, flushing any hypercalls made here.
585 * This must be done before restoring TLS segments so
586 @@ -362,15 +346,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
587 */
588 arch_end_context_switch(next_p);
589
590 - if (preload_fpu)
591 - __math_state_restore();
592 -
593 /*
594 * Restore %gs if needed (which is common)
595 */
596 if (prev->gs | next->gs)
597 lazy_load_gs(next->gs);
598
599 + switch_fpu_finish(next_p, fpu);
600 +
601 percpu_write(current_task, next_p);
602
603 return prev_p;
604 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
605 index ca6f7ab..b01898d 100644
606 --- a/arch/x86/kernel/process_64.c
607 +++ b/arch/x86/kernel/process_64.c
608 @@ -377,18 +377,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
609 int cpu = smp_processor_id();
610 struct tss_struct *tss = &per_cpu(init_tss, cpu);
611 unsigned fsindex, gsindex;
612 - bool preload_fpu;
613 + fpu_switch_t fpu;
614
615 - /*
616 - * If the task has used fpu the last 5 timeslices, just do a full
617 - * restore of the math state immediately to avoid the trap; the
618 - * chances of needing FPU soon are obviously high now
619 - */
620 - preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
621 -
622 - /* we're going to use this soon, after a few expensive things */
623 - if (preload_fpu)
624 - prefetch(next->fpu.state);
625 + fpu = switch_fpu_prepare(prev_p, next_p);
626
627 /*
628 * Reload esp0, LDT and the page table pointer:
629 @@ -418,13 +409,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
630
631 load_TLS(next, cpu);
632
633 - /* Must be after DS reload */
634 - __unlazy_fpu(prev_p);
635 -
636 - /* Make sure cpu is ready for new context */
637 - if (preload_fpu)
638 - clts();
639 -
640 /*
641 * Leave lazy mode, flushing any hypercalls made here.
642 * This must be done before restoring TLS segments so
643 @@ -465,6 +449,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
644 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
645 prev->gsindex = gsindex;
646
647 + switch_fpu_finish(next_p, fpu);
648 +
649 /*
650 * Switch the PDA and FPU contexts.
651 */
652 @@ -483,13 +469,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
653 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
654 __switch_to_xtra(prev_p, next_p, tss);
655
656 - /*
657 - * Preload the FPU context, now that we've determined that the
658 - * task is likely to be using it.
659 - */
660 - if (preload_fpu)
661 - __math_state_restore();
662 -
663 return prev_p;
664 }
665
666 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
667 index b9b6716..1b26e01 100644
668 --- a/arch/x86/kernel/traps.c
669 +++ b/arch/x86/kernel/traps.c
670 @@ -717,25 +717,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
671 }
672
673 /*
674 - * __math_state_restore assumes that cr0.TS is already clear and the
675 - * fpu state is all ready for use. Used during context switch.
676 + * This gets called with the process already owning the
677 + * FPU state, and with CR0.TS cleared. It just needs to
678 + * restore the FPU register state.
679 */
680 -void __math_state_restore(void)
681 +void __math_state_restore(struct task_struct *tsk)
682 {
683 - struct thread_info *thread = current_thread_info();
684 - struct task_struct *tsk = thread->task;
685 + /* We need a safe address that is cheap to find and that is already
686 + in L1. We've just brought in "tsk->thread.has_fpu", so use that */
687 +#define safe_address (tsk->thread.has_fpu)
688 +
689 + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
690 + is pending. Clear the x87 state here by setting it to fixed
691 + values. safe_address is a random variable that should be in L1 */
692 + alternative_input(
693 + ASM_NOP8 ASM_NOP2,
694 + "emms\n\t" /* clear stack tags */
695 + "fildl %P[addr]", /* set F?P to defined value */
696 + X86_FEATURE_FXSAVE_LEAK,
697 + [addr] "m" (safe_address));
698
699 /*
700 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
701 */
702 if (unlikely(restore_fpu_checking(tsk))) {
703 - stts();
704 + __thread_fpu_end(tsk);
705 force_sig(SIGSEGV, tsk);
706 return;
707 }
708 -
709 - thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
710 - tsk->fpu_counter++;
711 }
712
713 /*
714 @@ -745,13 +754,12 @@ void __math_state_restore(void)
715 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
716 * Don't touch unless you *really* know how it works.
717 *
718 - * Must be called with kernel preemption disabled (in this case,
719 - * local interrupts are disabled at the call-site in entry.S).
720 + * Must be called with kernel preemption disabled (eg with local
721 + * local interrupts as in the case of do_device_not_available).
722 */
723 -asmlinkage void math_state_restore(void)
724 +void math_state_restore(void)
725 {
726 - struct thread_info *thread = current_thread_info();
727 - struct task_struct *tsk = thread->task;
728 + struct task_struct *tsk = current;
729
730 if (!tsk_used_math(tsk)) {
731 local_irq_enable();
732 @@ -768,9 +776,10 @@ asmlinkage void math_state_restore(void)
733 local_irq_disable();
734 }
735
736 - clts(); /* Allow maths ops (or we recurse) */
737 + __thread_fpu_begin(tsk);
738 + __math_state_restore(tsk);
739
740 - __math_state_restore();
741 + tsk->fpu_counter++;
742 }
743 EXPORT_SYMBOL_GPL(math_state_restore);
744
745 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
746 index a391134..7110911 100644
747 --- a/arch/x86/kernel/xsave.c
748 +++ b/arch/x86/kernel/xsave.c
749 @@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
750 if (!fx)
751 return;
752
753 - BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
754 + BUG_ON(__thread_has_fpu(tsk));
755
756 xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
757
758 @@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
759 if (!used_math())
760 return 0;
761
762 - if (task_thread_info(tsk)->status & TS_USEDFPU) {
763 + if (user_has_fpu()) {
764 if (use_xsave())
765 err = xsave_user(buf);
766 else
767 @@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
768
769 if (err)
770 return err;
771 - task_thread_info(tsk)->status &= ~TS_USEDFPU;
772 - stts();
773 + user_fpu_end();
774 } else {
775 sanitize_i387_state(tsk);
776 if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
777 @@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
778 return err;
779 }
780
781 - if (!(task_thread_info(current)->status & TS_USEDFPU)) {
782 - clts();
783 - task_thread_info(current)->status |= TS_USEDFPU;
784 - }
785 + user_fpu_begin();
786 if (use_xsave())
787 err = restore_user_xstate(buf);
788 else
789 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
790 index d48ec60..2ad060a 100644
791 --- a/arch/x86/kvm/vmx.c
792 +++ b/arch/x86/kvm/vmx.c
793 @@ -948,7 +948,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
794 #ifdef CONFIG_X86_64
795 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
796 #endif
797 - if (current_thread_info()->status & TS_USEDFPU)
798 + if (__thread_has_fpu(current))
799 clts();
800 load_gdt(&__get_cpu_var(host_gdt));
801 }
802 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
803 index b693cbd..cc6471a 100644
804 --- a/drivers/cdrom/cdrom.c
805 +++ b/drivers/cdrom/cdrom.c
806 @@ -2114,11 +2114,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
807 if (!nr)
808 return -ENOMEM;
809
810 - if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
811 - ret = -EFAULT;
812 - goto out;
813 - }
814 -
815 cgc.data_direction = CGC_DATA_READ;
816 while (nframes > 0) {
817 if (nr > nframes)
818 @@ -2127,7 +2122,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
819 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
820 if (ret)
821 break;
822 - if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
823 + if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
824 ret = -EFAULT;
825 break;
826 }
827 @@ -2135,7 +2130,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
828 nframes -= nr;
829 lba += nr;
830 }
831 -out:
832 kfree(cgc.buffer);
833 return ret;
834 }
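
The cdrom hunks lean on a standard uaccess identity: copy_to_user() is the
checked form of __copy_to_user(), roughly (a sketch of the generic version,
not code from this patch):

	static inline unsigned long
	copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		if (access_ok(VERIFY_WRITE, to, n))
			n = __copy_to_user(to, from, n);
		return n;	/* bytes left uncopied; 0 on success */
	}

Once the loop body re-validates each chunk itself, the up-front access_ok()
check - and the out: label it jumped to - are dead code.
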
835 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
836 index 7642495..d94f440 100644
837 --- a/drivers/gpu/drm/radeon/r100.c
838 +++ b/drivers/gpu/drm/radeon/r100.c
839 @@ -681,9 +681,7 @@ int r100_irq_process(struct radeon_device *rdev)
840 WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
841 break;
842 default:
843 - msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
844 - WREG32(RADEON_MSI_REARM_EN, msi_rearm);
845 - WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
846 + WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
847 break;
848 }
849 }
850 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
851 index 21acfb5..2026c2d 100644
852 --- a/drivers/gpu/drm/radeon/rs600.c
853 +++ b/drivers/gpu/drm/radeon/rs600.c
854 @@ -698,9 +698,7 @@ int rs600_irq_process(struct radeon_device *rdev)
855 WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
856 break;
857 default:
858 - msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
859 - WREG32(RADEON_MSI_REARM_EN, msi_rearm);
860 - WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
861 + WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
862 break;
863 }
864 }
865 diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
866 index e9beeda..9a5af38 100644
867 --- a/drivers/hwmon/ads1015.c
868 +++ b/drivers/hwmon/ads1015.c
869 @@ -284,7 +284,7 @@ static int ads1015_probe(struct i2c_client *client,
870 continue;
871 err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
872 if (err)
873 - goto exit_free;
874 + goto exit_remove;
875 }
876
877 data->hwmon_dev = hwmon_device_register(&client->dev);
878 @@ -298,7 +298,6 @@ static int ads1015_probe(struct i2c_client *client,
879 exit_remove:
880 for (k = 0; k < ADS1015_CHANNELS; ++k)
881 device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
882 -exit_free:
883 kfree(data);
884 exit:
885 return err;
886 diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
887 index e4ab491..040a820 100644
888 --- a/drivers/hwmon/f75375s.c
889 +++ b/drivers/hwmon/f75375s.c
890 @@ -304,8 +304,6 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
891 case 0: /* Full speed */
892 fanmode |= (3 << FAN_CTRL_MODE(nr));
893 data->pwm[nr] = 255;
894 - f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
895 - data->pwm[nr]);
896 break;
897 case 1: /* PWM */
898 fanmode |= (3 << FAN_CTRL_MODE(nr));
899 @@ -318,6 +316,9 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
900 }
901 f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
902 data->pwm_enable[nr] = val;
903 + if (val == 0)
904 + f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
905 + data->pwm[nr]);
906 return 0;
907 }
908
909 diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
910 index f20d997..8c3df04 100644
911 --- a/drivers/hwmon/max6639.c
912 +++ b/drivers/hwmon/max6639.c
913 @@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
914
915 static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
916
917 -#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
918 - (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
919 +#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
920 + 0 : (rpm_ranges[rpm_range] * 30) / (val))
921 #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
922
923 /*
924 @@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev,
925 return PTR_ERR(data);
926
927 return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
928 - data->ppr, data->rpm_range));
929 + data->rpm_range));
930 }
931
932 static ssize_t show_alarm(struct device *dev,
933 @@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client)
934 struct max6639_data *data = i2c_get_clientdata(client);
935 struct max6639_platform_data *max6639_info =
936 client->dev.platform_data;
937 - int i = 0;
938 + int i;
939 int rpm_range = 1; /* default: 4000 RPM */
940 - int err = 0;
941 + int err;
942
943 /* Reset chip to default values, see below for GCONFIG setup */
944 err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
945 @@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client)
946 else
947 data->ppr = 2;
948 data->ppr -= 1;
949 - err = i2c_smbus_write_byte_data(client,
950 - MAX6639_REG_FAN_PPR(i),
951 - data->ppr << 5);
952 - if (err)
953 - goto exit;
954
955 if (max6639_info)
956 rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
957 @@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client)
958
959 for (i = 0; i < 2; i++) {
960
961 + /* Set Fan pulse per revolution */
962 + err = i2c_smbus_write_byte_data(client,
963 + MAX6639_REG_FAN_PPR(i),
964 + data->ppr << 6);
965 + if (err)
966 + goto exit;
967 +
968 /* Fans config PWM, RPM */
969 err = i2c_smbus_write_byte_data(client,
970 MAX6639_REG_FAN_CONFIG1(i),
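
A worked example for the new FAN_FROM_REG(), using the driver's default
rpm_range of 4000 RPM (index 1 in rpm_ranges[]); illustrative only:

	/* tach reading val = 60:
	 *     rpm = rpm_ranges[1] * 30 / 60 = 4000 * 30 / 60 = 2000 RPM
	 * val == 0 (no pulses counted) and val == 255 (fan stalled or too
	 * slow) both report 0 now, and the old (div + 1) term is gone
	 * because the pulses-per-revolution setting is programmed into the
	 * chip itself by the MAX6639_REG_FAN_PPR write that moved into the
	 * per-fan loop above.
	 */
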
971 diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
972 index 7b6985a..936804e 100644
973 --- a/drivers/infiniband/ulp/ipoib/ipoib.h
974 +++ b/drivers/infiniband/ulp/ipoib/ipoib.h
975 @@ -44,6 +44,7 @@
976 #include <linux/mutex.h>
977
978 #include <net/neighbour.h>
979 +#include <net/sch_generic.h>
980
981 #include <asm/atomic.h>
982
983 @@ -117,8 +118,9 @@ struct ipoib_header {
984 u16 reserved;
985 };
986
987 -struct ipoib_pseudoheader {
988 - u8 hwaddr[INFINIBAND_ALEN];
989 +struct ipoib_cb {
990 + struct qdisc_skb_cb qdisc_cb;
991 + u8 hwaddr[INFINIBAND_ALEN];
992 };
993
994 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
995 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
996 index a98c414..b811444 100644
997 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
998 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
999 @@ -658,7 +658,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
1000 }
1001
1002 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1003 - struct ipoib_pseudoheader *phdr)
1004 + struct ipoib_cb *cb)
1005 {
1006 struct ipoib_dev_priv *priv = netdev_priv(dev);
1007 struct ipoib_path *path;
1008 @@ -666,17 +666,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1009
1010 spin_lock_irqsave(&priv->lock, flags);
1011
1012 - path = __path_find(dev, phdr->hwaddr + 4);
1013 + path = __path_find(dev, cb->hwaddr + 4);
1014 if (!path || !path->valid) {
1015 int new_path = 0;
1016
1017 if (!path) {
1018 - path = path_rec_create(dev, phdr->hwaddr + 4);
1019 + path = path_rec_create(dev, cb->hwaddr + 4);
1020 new_path = 1;
1021 }
1022 if (path) {
1023 - /* put pseudoheader back on for next time */
1024 - skb_push(skb, sizeof *phdr);
1025 __skb_queue_tail(&path->queue, skb);
1026
1027 if (!path->query && path_rec_start(dev, path)) {
1028 @@ -700,12 +698,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1029 be16_to_cpu(path->pathrec.dlid));
1030
1031 spin_unlock_irqrestore(&priv->lock, flags);
1032 - ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
1033 + ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
1034 return;
1035 } else if ((path->query || !path_rec_start(dev, path)) &&
1036 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1037 - /* put pseudoheader back on for next time */
1038 - skb_push(skb, sizeof *phdr);
1039 __skb_queue_tail(&path->queue, skb);
1040 } else {
1041 ++dev->stats.tx_dropped;
1042 @@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1043 dev_kfree_skb_any(skb);
1044 }
1045 } else {
1046 - struct ipoib_pseudoheader *phdr =
1047 - (struct ipoib_pseudoheader *) skb->data;
1048 - skb_pull(skb, sizeof *phdr);
1049 + struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
1050
1051 - if (phdr->hwaddr[4] == 0xff) {
1052 + if (cb->hwaddr[4] == 0xff) {
1053 /* Add in the P_Key for multicast*/
1054 - phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1055 - phdr->hwaddr[9] = priv->pkey & 0xff;
1056 + cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1057 + cb->hwaddr[9] = priv->pkey & 0xff;
1058
1059 - ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
1060 + ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
1061 } else {
1062 /* unicast GID -- should be ARP or RARP reply */
1063
1064 @@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1065 ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
1066 skb_dst(skb) ? "neigh" : "dst",
1067 be16_to_cpup((__be16 *) skb->data),
1068 - IPOIB_QPN(phdr->hwaddr),
1069 - phdr->hwaddr + 4);
1070 + IPOIB_QPN(cb->hwaddr),
1071 + cb->hwaddr + 4);
1072 dev_kfree_skb_any(skb);
1073 ++dev->stats.tx_dropped;
1074 goto unlock;
1075 }
1076
1077 - unicast_arp_send(skb, dev, phdr);
1078 + unicast_arp_send(skb, dev, cb);
1079 }
1080 }
1081 unlock:
1082 @@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
1083 const void *daddr, const void *saddr, unsigned len)
1084 {
1085 struct ipoib_header *header;
1086 - struct dst_entry *dst;
1087 - struct neighbour *n;
1088
1089 header = (struct ipoib_header *) skb_push(skb, sizeof *header);
1090
1091 @@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
1092 header->reserved = 0;
1093
1094 /*
1095 - * If we don't have a neighbour structure, stuff the
1096 - * destination address onto the front of the skb so we can
1097 - * figure out where to send the packet later.
1098 + * If we don't have a dst_entry structure, stuff the
1099 + * destination address into skb->cb so we can figure out where
1100 + * to send the packet later.
1101 */
1102 - dst = skb_dst(skb);
1103 - n = NULL;
1104 - if (dst)
1105 - n = dst_get_neighbour_raw(dst);
1106 - if ((!dst || !n) && daddr) {
1107 - struct ipoib_pseudoheader *phdr =
1108 - (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
1109 - memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
1110 + if (!skb_dst(skb)) {
1111 + struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
1112 + memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
1113 }
1114
1115 return 0;
1116 @@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
1117
1118 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1119
1120 - /*
1121 - * We add in INFINIBAND_ALEN to allow for the destination
1122 - * address "pseudoheader" for skbs without neighbour struct.
1123 - */
1124 - dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
1125 + dev->hard_header_len = IPOIB_ENCAP_LEN;
1126 dev->addr_len = INFINIBAND_ALEN;
1127 dev->type = ARPHRD_INFINIBAND;
1128 dev->tx_queue_len = ipoib_sendq_size * 2;
1129 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1130 index a8d2a89..8b63506 100644
1131 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1132 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1133 @@ -258,21 +258,14 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
1134 netif_tx_lock_bh(dev);
1135 while (!skb_queue_empty(&mcast->pkt_queue)) {
1136 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
1137 - struct dst_entry *dst = skb_dst(skb);
1138 - struct neighbour *n = NULL;
1139
1140 netif_tx_unlock_bh(dev);
1141
1142 skb->dev = dev;
1143 - if (dst)
1144 - n = dst_get_neighbour_raw(dst);
1145 - if (!dst || !n) {
1146 - /* put pseudoheader back on for next time */
1147 - skb_push(skb, sizeof (struct ipoib_pseudoheader));
1148 - }
1149
1150 if (dev_queue_xmit(skb))
1151 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
1152 +
1153 netif_tx_lock_bh(dev);
1154 }
1155 netif_tx_unlock_bh(dev);
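
The three IPoIB diffs implement one change: the destination address moves
from a pseudoheader pushed onto skb->data into skb->cb, the 48-byte scratch
area that travels with the skb.  struct ipoib_cb embeds qdisc_skb_cb first
because queueing disciplines scribble on the front of cb; the hwaddr behind
it then survives the trip through dev_queue_xmit().  In sketch form
(illustrative only):

	/* on the way in - ipoib_hard_header(): */
	memcpy(((struct ipoib_cb *)skb->cb)->hwaddr, daddr, INFINIBAND_ALEN);

	/* on the way out - ipoib_start_xmit(), after the qdisc: */
	struct ipoib_cb *cb = (struct ipoib_cb *)skb->cb;
	if (cb->hwaddr[4] == 0xff)
		/* multicast GID */;
	else
		/* unicast: ARP/RARP path lookup */;

Since nothing extra rides on the packet data any more, hard_header_len
shrinks back to IPOIB_ENCAP_LEN and the "put pseudoheader back" dance on
the requeue paths disappears.
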
1156 diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
1157 index 514aea7..4c0394a 100644
1158 --- a/drivers/media/video/hdpvr/hdpvr-video.c
1159 +++ b/drivers/media/video/hdpvr/hdpvr-video.c
1160 @@ -284,12 +284,13 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
1161
1162 hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00);
1163
1164 + dev->status = STATUS_STREAMING;
1165 +
1166 INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
1167 queue_work(dev->workqueue, &dev->worker);
1168
1169 v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
1170 "streaming started\n");
1171 - dev->status = STATUS_STREAMING;
1172
1173 return 0;
1174 }
1175 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1176 index f85e422..c0839d4 100644
1177 --- a/drivers/mmc/card/block.c
1178 +++ b/drivers/mmc/card/block.c
1179 @@ -251,6 +251,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
1180 goto idata_err;
1181 }
1182
1183 + if (!idata->buf_bytes)
1184 + return idata;
1185 +
1186 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
1187 if (!idata->buf) {
1188 err = -ENOMEM;
1189 @@ -297,25 +300,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
1190 if (IS_ERR(idata))
1191 return PTR_ERR(idata);
1192
1193 - cmd.opcode = idata->ic.opcode;
1194 - cmd.arg = idata->ic.arg;
1195 - cmd.flags = idata->ic.flags;
1196 -
1197 - data.sg = &sg;
1198 - data.sg_len = 1;
1199 - data.blksz = idata->ic.blksz;
1200 - data.blocks = idata->ic.blocks;
1201 -
1202 - sg_init_one(data.sg, idata->buf, idata->buf_bytes);
1203 -
1204 - if (idata->ic.write_flag)
1205 - data.flags = MMC_DATA_WRITE;
1206 - else
1207 - data.flags = MMC_DATA_READ;
1208 -
1209 - mrq.cmd = &cmd;
1210 - mrq.data = &data;
1211 -
1212 md = mmc_blk_get(bdev->bd_disk);
1213 if (!md) {
1214 err = -EINVAL;
1215 @@ -328,6 +312,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
1216 goto cmd_done;
1217 }
1218
1219 + cmd.opcode = idata->ic.opcode;
1220 + cmd.arg = idata->ic.arg;
1221 + cmd.flags = idata->ic.flags;
1222 +
1223 + if (idata->buf_bytes) {
1224 + data.sg = &sg;
1225 + data.sg_len = 1;
1226 + data.blksz = idata->ic.blksz;
1227 + data.blocks = idata->ic.blocks;
1228 +
1229 + sg_init_one(data.sg, idata->buf, idata->buf_bytes);
1230 +
1231 + if (idata->ic.write_flag)
1232 + data.flags = MMC_DATA_WRITE;
1233 + else
1234 + data.flags = MMC_DATA_READ;
1235 +
1236 + /* data.flags must already be set before doing this. */
1237 + mmc_set_data_timeout(&data, card);
1238 +
1239 + /* Allow overriding the timeout_ns for empirical tuning. */
1240 + if (idata->ic.data_timeout_ns)
1241 + data.timeout_ns = idata->ic.data_timeout_ns;
1242 +
1243 + if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
1244 + /*
1245 + * Pretend this is a data transfer and rely on the
1246 + * host driver to compute timeout. When all host
1247 + * drivers support cmd.cmd_timeout for R1B, this
1248 + * can be changed to:
1249 + *
1250 + * mrq.data = NULL;
1251 + * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
1252 + */
1253 + data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
1254 + }
1255 +
1256 + mrq.data = &data;
1257 + }
1258 +
1259 + mrq.cmd = &cmd;
1260 +
1261 mmc_claim_host(card->host);
1262
1263 if (idata->ic.is_acmd) {
1264 @@ -336,24 +362,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
1265 goto cmd_rel_host;
1266 }
1267
1268 - /* data.flags must already be set before doing this. */
1269 - mmc_set_data_timeout(&data, card);
1270 - /* Allow overriding the timeout_ns for empirical tuning. */
1271 - if (idata->ic.data_timeout_ns)
1272 - data.timeout_ns = idata->ic.data_timeout_ns;
1273 -
1274 - if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
1275 - /*
1276 - * Pretend this is a data transfer and rely on the host driver
1277 - * to compute timeout. When all host drivers support
1278 - * cmd.cmd_timeout for R1B, this can be changed to:
1279 - *
1280 - * mrq.data = NULL;
1281 - * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
1282 - */
1283 - data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
1284 - }
1285 -
1286 mmc_wait_for_req(card->host, &mrq);
1287
1288 if (cmd.error) {
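
The mmc_blk_ioctl_cmd() reshuffle exists so that a data-less command works:
with blksz and blocks left at 0, buf_bytes is 0, the copy helper returns
early without allocating a buffer, and mrq.data stays NULL - the request
goes out as a bare command with no data stage.  A rough userspace sketch
(rca is a hypothetical card address; MMC_* flags as in the kernel's mmc
headers):

	#include <linux/mmc/ioctl.h>

	struct mmc_ioc_cmd ic = {
		.opcode	= 13,			/* MMC_SEND_STATUS: no data stage */
		.arg	= rca << 16,
		.flags	= MMC_RSP_R1 | MMC_CMD_AC,
		/* .blksz = 0, .blocks = 0  ->  buf_bytes == 0 in the driver */
	};
	ioctl(fd, MMC_IOC_CMD, &ic);
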
1289 diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
1290 index 8cc2256..41afc40 100644
1291 --- a/drivers/net/3c59x.c
1292 +++ b/drivers/net/3c59x.c
1293 @@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data)
1294 ok = 1;
1295 }
1296
1297 - if (!netif_carrier_ok(dev))
1298 + if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
1299 next_tick = 5*HZ;
1300
1301 if (vp->medialock)
1302 diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
1303 index dcc4a17..e5efe3a 100644
1304 --- a/drivers/net/davinci_emac.c
1305 +++ b/drivers/net/davinci_emac.c
1306 @@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
1307 int ret;
1308
1309 /* free and bail if we are shutting down */
1310 - if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
1311 + if (unlikely(!netif_running(ndev))) {
1312 dev_kfree_skb_any(skb);
1313 return;
1314 }
1315 @@ -1037,7 +1037,9 @@ static void emac_rx_handler(void *token, int len, int status)
1316 recycle:
1317 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1318 skb_tailroom(skb), GFP_KERNEL);
1319 - if (WARN_ON(ret < 0))
1320 +
1321 + WARN_ON(ret == -ENOMEM);
1322 + if (unlikely(ret < 0))
1323 dev_kfree_skb_any(skb);
1324 }
1325
1326 diff --git a/drivers/net/jme.c b/drivers/net/jme.c
1327 index 1973814..1d1ccec 100644
1328 --- a/drivers/net/jme.c
1329 +++ b/drivers/net/jme.c
1330 @@ -2228,19 +2228,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
1331 ((new_mtu) < IPV6_MIN_MTU))
1332 return -EINVAL;
1333
1334 - if (new_mtu > 4000) {
1335 - jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1336 - jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
1337 - jme_restart_rx_engine(jme);
1338 - } else {
1339 - jme->reg_rxcs &= ~RXCS_FIFOTHNP;
1340 - jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
1341 - jme_restart_rx_engine(jme);
1342 - }
1343
1344 netdev->mtu = new_mtu;
1345 netdev_update_features(netdev);
1346
1347 + jme_restart_rx_engine(jme);
1348 jme_reset_link(jme);
1349
1350 return 0;
1351 diff --git a/drivers/net/jme.h b/drivers/net/jme.h
1352 index e9aaeca..fff885e 100644
1353 --- a/drivers/net/jme.h
1354 +++ b/drivers/net/jme.h
1355 @@ -734,7 +734,7 @@ enum jme_rxcs_values {
1356 RXCS_RETRYCNT_60 = 0x00000F00,
1357
1358 RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
1359 - RXCS_FIFOTHNP_128QW |
1360 + RXCS_FIFOTHNP_16QW |
1361 RXCS_DMAREQSZ_128B |
1362 RXCS_RETRYGAP_256ns |
1363 RXCS_RETRYCNT_32,
1364 diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
1365 index 8f9b7f7..9cf4e47 100644
1366 --- a/drivers/net/usb/ipheth.c
1367 +++ b/drivers/net/usb/ipheth.c
1368 @@ -60,6 +60,7 @@
1369 #define USB_PRODUCT_IPHONE_3GS 0x1294
1370 #define USB_PRODUCT_IPHONE_4 0x1297
1371 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
1372 +#define USB_PRODUCT_IPHONE_4S 0x12a0
1373
1374 #define IPHETH_USBINTF_CLASS 255
1375 #define IPHETH_USBINTF_SUBCLASS 253
1376 @@ -103,6 +104,10 @@ static struct usb_device_id ipheth_table[] = {
1377 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
1378 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
1379 IPHETH_USBINTF_PROTO) },
1380 + { USB_DEVICE_AND_INTERFACE_INFO(
1381 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
1382 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
1383 + IPHETH_USBINTF_PROTO) },
1384 { }
1385 };
1386 MODULE_DEVICE_TABLE(usb, ipheth_table);
1387 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
1388 index 4bf7c6d..6c0a3b0 100644
1389 --- a/drivers/net/veth.c
1390 +++ b/drivers/net/veth.c
1391 @@ -421,7 +421,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
1392 unregister_netdevice_queue(peer, head);
1393 }
1394
1395 -static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
1396 +static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1397 + [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1398 +};
1399
1400 static struct rtnl_link_ops veth_link_ops = {
1401 .kind = DRV_NAME,
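
The veth change is a hardening fix: an all-zeroes nla_policy entry means
"no validation", so the previously uninitialised table let a VETH_INFO_PEER
attribute shorter than struct ifinfomsg through to veth_newlink(), which
then read past the end of the attribute.  With the minimum length declared
(sketch of the effect, not code from this patch):

	/* a truncated VETH_INFO_PEER now fails validation up front:
	 *     nla_parse(..., veth_policy)  ->  -EINVAL
	 * instead of reaching the peer-creation path with a short
	 * ifinfomsg.
	 */
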
1402 diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
1403 index 06daa9d..c7e4934 100644
1404 --- a/drivers/net/via-velocity.c
1405 +++ b/drivers/net/via-velocity.c
1406 @@ -2513,9 +2513,6 @@ static int velocity_close(struct net_device *dev)
1407 if (dev->irq != 0)
1408 free_irq(dev->irq, dev);
1409
1410 - /* Power down the chip */
1411 - pci_set_power_state(vptr->pdev, PCI_D3hot);
1412 -
1413 velocity_free_rings(vptr);
1414
1415 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
1416 diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
1417 index ea35843..9d965e3 100644
1418 --- a/drivers/net/wireless/ath/ath9k/rc.c
1419 +++ b/drivers/net/wireless/ath/ath9k/rc.c
1420 @@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1421 fc = hdr->frame_control;
1422 for (i = 0; i < sc->hw->max_rates; i++) {
1423 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
1424 - if (!rate->count)
1425 + if (rate->idx < 0 || !rate->count)
1426 break;
1427
1428 final_ts_idx = i;
1429 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
1430 index bafb3c3..5b3771a 100644
1431 --- a/drivers/pci/probe.c
1432 +++ b/drivers/pci/probe.c
1433 @@ -657,6 +657,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1434 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1435 secondary, subordinate, pass);
1436
1437 + if (!primary && (primary != bus->number) && secondary && subordinate) {
1438 + dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
1439 + primary = bus->number;
1440 + }
1441 +
1442 /* Check if setup is sensible at all */
1443 if (!pass &&
1444 (primary != bus->number || secondary <= bus->number)) {
1445 diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
1446 index d70e91a..122a5a2 100644
1447 --- a/drivers/scsi/scsi_pm.c
1448 +++ b/drivers/scsi/scsi_pm.c
1449 @@ -6,6 +6,7 @@
1450 */
1451
1452 #include <linux/pm_runtime.h>
1453 +#include <linux/async.h>
1454
1455 #include <scsi/scsi.h>
1456 #include <scsi/scsi_device.h>
1457 @@ -68,6 +69,19 @@ static int scsi_bus_resume_common(struct device *dev)
1458 return err;
1459 }
1460
1461 +static int scsi_bus_prepare(struct device *dev)
1462 +{
1463 + if (scsi_is_sdev_device(dev)) {
1464 + /* sd probing uses async_schedule. Wait until it finishes. */
1465 + async_synchronize_full();
1466 +
1467 + } else if (scsi_is_host_device(dev)) {
1468 + /* Wait until async scanning is finished */
1469 + scsi_complete_async_scans();
1470 + }
1471 + return 0;
1472 +}
1473 +
1474 static int scsi_bus_suspend(struct device *dev)
1475 {
1476 return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
1477 @@ -86,6 +100,7 @@ static int scsi_bus_poweroff(struct device *dev)
1478 #else /* CONFIG_PM_SLEEP */
1479
1480 #define scsi_bus_resume_common NULL
1481 +#define scsi_bus_prepare NULL
1482 #define scsi_bus_suspend NULL
1483 #define scsi_bus_freeze NULL
1484 #define scsi_bus_poweroff NULL
1485 @@ -194,6 +209,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
1486 #endif /* CONFIG_PM_RUNTIME */
1487
1488 const struct dev_pm_ops scsi_bus_pm_ops = {
1489 + .prepare = scsi_bus_prepare,
1490 .suspend = scsi_bus_suspend,
1491 .resume = scsi_bus_resume_common,
1492 .freeze = scsi_bus_freeze,
1493 diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
1494 index 2a58895..5b475d0 100644
1495 --- a/drivers/scsi/scsi_priv.h
1496 +++ b/drivers/scsi/scsi_priv.h
1497 @@ -110,6 +110,7 @@ extern void scsi_exit_procfs(void);
1498 #endif /* CONFIG_PROC_FS */
1499
1500 /* scsi_scan.c */
1501 +extern int scsi_complete_async_scans(void);
1502 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
1503 unsigned int, unsigned int, int);
1504 extern void scsi_forget_host(struct Scsi_Host *);
1505 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1506 index b3c6d95..6e7ea4a 100644
1507 --- a/drivers/scsi/scsi_scan.c
1508 +++ b/drivers/scsi/scsi_scan.c
1509 @@ -1815,6 +1815,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
1510 }
1511 spin_unlock(&async_scan_lock);
1512
1513 + scsi_autopm_put_host(shost);
1514 scsi_host_put(shost);
1515 kfree(data);
1516 }
1517 @@ -1841,7 +1842,6 @@ static int do_scan_async(void *_data)
1518
1519 do_scsi_scan_host(shost);
1520 scsi_finish_async_scan(data);
1521 - scsi_autopm_put_host(shost);
1522 return 0;
1523 }
1524
1525 @@ -1869,7 +1869,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
1526 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1527 if (IS_ERR(p))
1528 do_scan_async(data);
1529 - /* scsi_autopm_put_host(shost) is called in do_scan_async() */
1530 + /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
1531 }
1532 EXPORT_SYMBOL(scsi_scan_host);
1533
1534 diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
1535 index ce22f4a..6c1642b 100644
1536 --- a/drivers/usb/core/hcd-pci.c
1537 +++ b/drivers/usb/core/hcd-pci.c
1538 @@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1539 return -ENODEV;
1540 dev->current_state = PCI_D0;
1541
1542 - if (!dev->irq) {
1543 + /* The xHCI driver supports MSI and MSI-X,
1544 + * so don't fail if the BIOS doesn't provide a legacy IRQ.
1545 + */
1546 + if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
1547 dev_err(&dev->dev,
1548 "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
1549 pci_name(dev));
1550 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
1551 index 691d212..45e0908 100644
1552 --- a/drivers/usb/core/hcd.c
1553 +++ b/drivers/usb/core/hcd.c
1554 @@ -2435,8 +2435,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
1555 && device_can_wakeup(&hcd->self.root_hub->dev))
1556 dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
1557
1558 - /* enable irqs just before we start the controller */
1559 - if (usb_hcd_is_primary_hcd(hcd)) {
1560 + /* enable irqs just before we start the controller,
1561 + * if the BIOS provides legacy PCI irqs.
1562 + */
1563 + if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
1564 retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
1565 if (retval)
1566 goto err_request_irq;
1567 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1568 index 210e359..3776ddf 100644
1569 --- a/drivers/usb/core/hub.c
1570 +++ b/drivers/usb/core/hub.c
1571 @@ -705,10 +705,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1572 if (type == HUB_INIT3)
1573 goto init3;
1574
1575 - /* After a resume, port power should still be on.
1576 + /* The superspeed hub except for root hub has to use Hub Depth
1577 + * value as an offset into the route string to locate the bits
1578 + * it uses to determine the downstream port number. So hub driver
1579 + * should send a set hub depth request to superspeed hub after
1580 + * the superspeed hub is set configuration in initialization or
1581 + * reset procedure.
1582 + *
1583 + * After a resume, port power should still be on.
1584 * For any other type of activation, turn it on.
1585 */
1586 if (type != HUB_RESUME) {
1587 + if (hdev->parent && hub_is_superspeed(hdev)) {
1588 + ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
1589 + HUB_SET_DEPTH, USB_RT_HUB,
1590 + hdev->level - 1, 0, NULL, 0,
1591 + USB_CTRL_SET_TIMEOUT);
1592 + if (ret < 0)
1593 + dev_err(hub->intfdev,
1594 + "set hub depth failed\n");
1595 + }
1596
1597 /* Speed up system boot by using a delayed_work for the
1598 * hub's initial power-up delays. This is pretty awkward
1599 @@ -987,18 +1003,6 @@ static int hub_configure(struct usb_hub *hub,
1600 goto fail;
1601 }
1602
1603 - if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) {
1604 - ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
1605 - HUB_SET_DEPTH, USB_RT_HUB,
1606 - hdev->level - 1, 0, NULL, 0,
1607 - USB_CTRL_SET_TIMEOUT);
1608 -
1609 - if (ret < 0) {
1610 - message = "can't set hub depth";
1611 - goto fail;
1612 - }
1613 - }
1614 -
1615 /* Request the entire hub descriptor.
1616 * hub->descriptor can handle USB_MAXCHILDREN ports,
1617 * but the hub can/will return fewer bytes here.
1618 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
1619 index 20f2f21..3f387b8 100644
1620 --- a/drivers/usb/host/pci-quirks.c
1621 +++ b/drivers/usb/host/pci-quirks.c
1622 @@ -871,7 +871,17 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
1623 */
1624 if (pdev->vendor == 0x184e) /* vendor Netlogic */
1625 return;
1626 + if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1627 + pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1628 + pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1629 + pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1630 + return;
1631
1632 + if (pci_enable_device(pdev) < 0) {
1633 + dev_warn(&pdev->dev, "Can't enable PCI device, "
1634 + "BIOS handoff failed.\n");
1635 + return;
1636 + }
1637 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1638 quirk_usb_handoff_uhci(pdev);
1639 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1640 @@ -880,5 +890,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
1641 quirk_usb_disable_ehci(pdev);
1642 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1643 quirk_usb_handoff_xhci(pdev);
1644 + pci_disable_device(pdev);
1645 }
1646 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
1647 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1648 index ce9f974..7520ebb 100644
1649 --- a/drivers/usb/host/xhci-hub.c
1650 +++ b/drivers/usb/host/xhci-hub.c
1651 @@ -75,7 +75,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
1652 */
1653 memset(port_removable, 0, sizeof(port_removable));
1654 for (i = 0; i < ports; i++) {
1655 - portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
1656 + portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
1657 /* If a device is removable, PORTSC reports a 0, same as in the
1658 * hub descriptor DeviceRemovable bits.
1659 */
1660 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1661 index ffeee57..64fbf6f 100644
1662 --- a/drivers/usb/host/xhci-mem.c
1663 +++ b/drivers/usb/host/xhci-mem.c
1664 @@ -1002,26 +1002,42 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1665 }
1666
1667 /*
1668 - * Convert bInterval expressed in frames (in 1-255 range) to exponent of
1669 + * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1670 * microframes, rounded down to nearest power of 2.
1671 */
1672 -static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1673 - struct usb_host_endpoint *ep)
1674 +static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1675 + struct usb_host_endpoint *ep, unsigned int desc_interval,
1676 + unsigned int min_exponent, unsigned int max_exponent)
1677 {
1678 unsigned int interval;
1679
1680 - interval = fls(8 * ep->desc.bInterval) - 1;
1681 - interval = clamp_val(interval, 3, 10);
1682 - if ((1 << interval) != 8 * ep->desc.bInterval)
1683 + interval = fls(desc_interval) - 1;
1684 + interval = clamp_val(interval, min_exponent, max_exponent);
1685 + if ((1 << interval) != desc_interval)
1686 dev_warn(&udev->dev,
1687 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1688 ep->desc.bEndpointAddress,
1689 1 << interval,
1690 - 8 * ep->desc.bInterval);
1691 + desc_interval);
1692
1693 return interval;
1694 }
1695
1696 +static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1697 + struct usb_host_endpoint *ep)
1698 +{
1699 + return xhci_microframes_to_exponent(udev, ep,
1700 + ep->desc.bInterval, 0, 15);
1701 +}
1702 +
1703 +
1704 +static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1705 + struct usb_host_endpoint *ep)
1706 +{
1707 + return xhci_microframes_to_exponent(udev, ep,
1708 + ep->desc.bInterval * 8, 3, 10);
1709 +}
1710 +
1711 /* Return the polling or NAK interval.
1712 *
1713 * The polling interval is expressed in "microframes". If xHCI's Interval field
1714 @@ -1040,7 +1056,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1715 /* Max NAK rate */
1716 if (usb_endpoint_xfer_control(&ep->desc) ||
1717 usb_endpoint_xfer_bulk(&ep->desc)) {
1718 - interval = ep->desc.bInterval;
1719 + interval = xhci_parse_microframe_interval(udev, ep);
1720 break;
1721 }
1722 /* Fall through - SS and HS isoc/int have same decoding */
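The refactored helper above rounds an endpoint interval down to a power of two with fls() and clamps the exponent to what the controller accepts: 0-15 when bInterval is already in microframes (control/bulk NAK rate), 3-10 when it is in frames. A small standalone illustration of the same arithmetic, substituting GCC's __builtin_clz() for the kernel's fls():

#include <stdio.h>

/* fls(x): 1-based position of the most significant set bit; 0 for x == 0. */
static unsigned int fls_u(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int to_exponent(unsigned int desc_interval,
				unsigned int min_exp, unsigned int max_exp)
{
	unsigned int exp = fls_u(desc_interval) - 1;	/* round down to 2^exp */

	if (exp < min_exp)
		exp = min_exp;
	if (exp > max_exp)
		exp = max_exp;
	return exp;
}

int main(void)
{
	/* A full-speed interrupt endpoint with bInterval = 10 frames is
	 * 10 * 8 = 80 microframes, which rounds down to 2^6 = 64.
	 */
	printf("exponent = %u\n", to_exponent(10 * 8, 3, 10));
	return 0;
}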
1723 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1724 index 107438e..b4416d8 100644
1725 --- a/drivers/usb/host/xhci.c
1726 +++ b/drivers/usb/host/xhci.c
1727 @@ -444,6 +444,11 @@ int xhci_run(struct usb_hcd *hcd)
1728
1729 if (ret) {
1730 legacy_irq:
1731 + if (!pdev->irq) {
1732 + xhci_err(xhci, "No msi-x/msi found and "
1733 + "no IRQ in BIOS\n");
1734 + return -EINVAL;
1735 + }
1736 /* fall back to legacy interrupt*/
1737 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
1738 hcd->irq_descr, hcd);
1739 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1740 index a515237..33d25d4 100644
1741 --- a/drivers/usb/serial/cp210x.c
1742 +++ b/drivers/usb/serial/cp210x.c
1743 @@ -136,6 +136,8 @@ static const struct usb_device_id id_table[] = {
1744 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
1745 { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
1746 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
1747 + { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
1748 + { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
1749 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
1750 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1751 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1752 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1753 index 338d082..68fa8c7 100644
1754 --- a/drivers/usb/serial/option.c
1755 +++ b/drivers/usb/serial/option.c
1756 @@ -788,7 +788,6 @@ static const struct usb_device_id option_ids[] = {
1757 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
1758 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1759 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
1760 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
1761 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
1762 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
1763 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
1764 @@ -803,7 +802,6 @@ static const struct usb_device_id option_ids[] = {
1765 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
1766 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
1767 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1768 - /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
1769 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
1770 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
1771 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
1772 @@ -828,7 +826,6 @@ static const struct usb_device_id option_ids[] = {
1773 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
1774 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
1775 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1776 - /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
1777 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
1778 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
1779 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1780 @@ -836,7 +833,6 @@ static const struct usb_device_id option_ids[] = {
1781 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
1782 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
1783 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1784 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
1785 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
1786 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
1787 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
1788 @@ -846,7 +842,6 @@ static const struct usb_device_id option_ids[] = {
1789 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
1790 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
1791 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
1792 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
1793 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
1794 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
1795 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
1796 @@ -865,8 +860,6 @@ static const struct usb_device_id option_ids[] = {
1797 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
1798 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
1799 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
1800 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0098, 0xff, 0xff, 0xff) },
1801 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0099, 0xff, 0xff, 0xff) },
1802 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
1803 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1804 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
1805 @@ -887,28 +880,18 @@ static const struct usb_device_id option_ids[] = {
1806 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
1807 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
1808 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
1809 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
1810 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
1811 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
1812 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
1813 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
1814 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
1815 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
1816 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
1817 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
1818 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
1819 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
1820 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
1821 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
1822 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
1823 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
1824 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
1825 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
1826 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
1827 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
1828 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
1829 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
1830 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
1831 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
1832 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
1833 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
1834 @@ -1083,127 +1066,27 @@ static const struct usb_device_id option_ids[] = {
1835 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
1836 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
1837 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
1838 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff) },
1839 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff) },
1840 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1403, 0xff, 0xff, 0xff) },
1841 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1404, 0xff, 0xff, 0xff) },
1842 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1405, 0xff, 0xff, 0xff) },
1843 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1406, 0xff, 0xff, 0xff) },
1844 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1407, 0xff, 0xff, 0xff) },
1845 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1408, 0xff, 0xff, 0xff) },
1846 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1409, 0xff, 0xff, 0xff) },
1847 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1410, 0xff, 0xff, 0xff) },
1848 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1411, 0xff, 0xff, 0xff) },
1849 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1412, 0xff, 0xff, 0xff) },
1850 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1413, 0xff, 0xff, 0xff) },
1851 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1414, 0xff, 0xff, 0xff) },
1852 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1415, 0xff, 0xff, 0xff) },
1853 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1416, 0xff, 0xff, 0xff) },
1854 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1417, 0xff, 0xff, 0xff) },
1855 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1418, 0xff, 0xff, 0xff) },
1856 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1419, 0xff, 0xff, 0xff) },
1857 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1420, 0xff, 0xff, 0xff) },
1858 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1421, 0xff, 0xff, 0xff) },
1859 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1422, 0xff, 0xff, 0xff) },
1860 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1423, 0xff, 0xff, 0xff) },
1861 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff) },
1862 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff) },
1863 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff) },
1864 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1427, 0xff, 0xff, 0xff) },
1865 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff) },
1866 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1429, 0xff, 0xff, 0xff) },
1867 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1430, 0xff, 0xff, 0xff) },
1868 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1431, 0xff, 0xff, 0xff) },
1869 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1432, 0xff, 0xff, 0xff) },
1870 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1433, 0xff, 0xff, 0xff) },
1871 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1434, 0xff, 0xff, 0xff) },
1872 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1435, 0xff, 0xff, 0xff) },
1873 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1436, 0xff, 0xff, 0xff) },
1874 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1437, 0xff, 0xff, 0xff) },
1875 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1438, 0xff, 0xff, 0xff) },
1876 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1439, 0xff, 0xff, 0xff) },
1877 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1440, 0xff, 0xff, 0xff) },
1878 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1441, 0xff, 0xff, 0xff) },
1879 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1442, 0xff, 0xff, 0xff) },
1880 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1443, 0xff, 0xff, 0xff) },
1881 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1444, 0xff, 0xff, 0xff) },
1882 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1445, 0xff, 0xff, 0xff) },
1883 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1446, 0xff, 0xff, 0xff) },
1884 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1447, 0xff, 0xff, 0xff) },
1885 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1448, 0xff, 0xff, 0xff) },
1886 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1449, 0xff, 0xff, 0xff) },
1887 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1450, 0xff, 0xff, 0xff) },
1888 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1451, 0xff, 0xff, 0xff) },
1889 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1452, 0xff, 0xff, 0xff) },
1890 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1453, 0xff, 0xff, 0xff) },
1891 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1454, 0xff, 0xff, 0xff) },
1892 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1455, 0xff, 0xff, 0xff) },
1893 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1456, 0xff, 0xff, 0xff) },
1894 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1457, 0xff, 0xff, 0xff) },
1895 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1458, 0xff, 0xff, 0xff) },
1896 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1459, 0xff, 0xff, 0xff) },
1897 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1460, 0xff, 0xff, 0xff) },
1898 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1461, 0xff, 0xff, 0xff) },
1899 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1462, 0xff, 0xff, 0xff) },
1900 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1463, 0xff, 0xff, 0xff) },
1901 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1464, 0xff, 0xff, 0xff) },
1902 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1465, 0xff, 0xff, 0xff) },
1903 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1466, 0xff, 0xff, 0xff) },
1904 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1467, 0xff, 0xff, 0xff) },
1905 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1468, 0xff, 0xff, 0xff) },
1906 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1469, 0xff, 0xff, 0xff) },
1907 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1470, 0xff, 0xff, 0xff) },
1908 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1471, 0xff, 0xff, 0xff) },
1909 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1472, 0xff, 0xff, 0xff) },
1910 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1473, 0xff, 0xff, 0xff) },
1911 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1474, 0xff, 0xff, 0xff) },
1912 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1475, 0xff, 0xff, 0xff) },
1913 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1476, 0xff, 0xff, 0xff) },
1914 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1477, 0xff, 0xff, 0xff) },
1915 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1478, 0xff, 0xff, 0xff) },
1916 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1479, 0xff, 0xff, 0xff) },
1917 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1480, 0xff, 0xff, 0xff) },
1918 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0xff, 0xff) },
1919 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1482, 0xff, 0xff, 0xff) },
1920 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1483, 0xff, 0xff, 0xff) },
1921 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1484, 0xff, 0xff, 0xff) },
1922 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff) },
1923 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1486, 0xff, 0xff, 0xff) },
1924 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1487, 0xff, 0xff, 0xff) },
1925 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1488, 0xff, 0xff, 0xff) },
1926 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1489, 0xff, 0xff, 0xff) },
1927 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1490, 0xff, 0xff, 0xff) },
1928 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1491, 0xff, 0xff, 0xff) },
1929 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1492, 0xff, 0xff, 0xff) },
1930 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1493, 0xff, 0xff, 0xff) },
1931 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1494, 0xff, 0xff, 0xff) },
1932 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1495, 0xff, 0xff, 0xff) },
1933 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1496, 0xff, 0xff, 0xff) },
1934 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1497, 0xff, 0xff, 0xff) },
1935 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1498, 0xff, 0xff, 0xff) },
1936 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1499, 0xff, 0xff, 0xff) },
1937 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1500, 0xff, 0xff, 0xff) },
1938 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1501, 0xff, 0xff, 0xff) },
1939 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1502, 0xff, 0xff, 0xff) },
1940 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1503, 0xff, 0xff, 0xff) },
1941 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1504, 0xff, 0xff, 0xff) },
1942 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1505, 0xff, 0xff, 0xff) },
1943 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1506, 0xff, 0xff, 0xff) },
1944 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1507, 0xff, 0xff, 0xff) },
1945 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1508, 0xff, 0xff, 0xff) },
1946 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1509, 0xff, 0xff, 0xff) },
1947 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1510, 0xff, 0xff, 0xff) },
1948 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
1949 + 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
1950 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
1951 +
1952 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
1953 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
1954 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
1955 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
1956 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
1957 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
1958 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
1959 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
1960 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
1961 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
1962 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
1963 - 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
1964 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
1965 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
1966 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
1967 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
1968 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
1969 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
1970 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
1971 +
1972 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
1973 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
1974 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
1975 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
1976 index ea84456..21c82b0 100644
1977 --- a/drivers/usb/serial/ti_usb_3410_5052.c
1978 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
1979 @@ -165,7 +165,7 @@ static unsigned int product_5052_count;
1980 /* the array dimension is the number of default entries plus */
1981 /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
1982 /* null entry */
1983 -static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
1984 +static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = {
1985 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
1986 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
1987 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
1988 @@ -179,6 +179,7 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
1989 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
1990 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
1991 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
1992 + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
1993 };
1994
1995 static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
1996 @@ -188,7 +189,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
1997 { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
1998 };
1999
2000 -static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
2001 +static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1] = {
2002 { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
2003 { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
2004 { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
2005 @@ -206,6 +207,7 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1]
2006 { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
2007 { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
2008 { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
2009 + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
2010 { }
2011 };
2012
2013 diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
2014 index 2aac195..f140f1b 100644
2015 --- a/drivers/usb/serial/ti_usb_3410_5052.h
2016 +++ b/drivers/usb/serial/ti_usb_3410_5052.h
2017 @@ -49,6 +49,10 @@
2018 #define MTS_MT9234ZBA_PRODUCT_ID 0xF115
2019 #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
2020
2021 +/* Abbott Diabetics vendor and product ids */
2022 +#define ABBOTT_VENDOR_ID 0x1a61
2023 +#define ABBOTT_PRODUCT_ID 0x3410
2024 +
2025 /* Commands */
2026 #define TI_GET_VERSION 0x01
2027 #define TI_GET_PORT_STATUS 0x02
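The 13-to-14 and 17-to-18 bumps in the table dimensions above follow the sizing convention documented in the driver: each table holds its static entries, TI_EXTRA_VID_PID_COUNT slots that can be filled from module parameters, and one terminating null entry. A minimal sketch of that convention with made-up IDs:

#include <linux/usb.h>

#define EXTRA_COUNT 5	/* slots reserved for user-supplied ids */

/* 2 static entries + EXTRA_COUNT user slots + 1 terminating null entry.
 * The dimension must be bumped whenever a static entry is added, or the
 * reserved user slots silently shrink.
 */
static struct usb_device_id example_id_table[2 + EXTRA_COUNT + 1] = {
	{ USB_DEVICE(0x1234, 0x0001) },	/* hypothetical device ids */
	{ USB_DEVICE(0x1234, 0x0002) },
};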
2028 diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
2029 index 9e069ef..db51ba1 100644
2030 --- a/drivers/usb/storage/usb.c
2031 +++ b/drivers/usb/storage/usb.c
2032 @@ -788,15 +788,19 @@ static void quiesce_and_remove_host(struct us_data *us)
2033 struct Scsi_Host *host = us_to_host(us);
2034
2035 /* If the device is really gone, cut short reset delays */
2036 - if (us->pusb_dev->state == USB_STATE_NOTATTACHED)
2037 + if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
2038 set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
2039 + wake_up(&us->delay_wait);
2040 + }
2041
2042 - /* Prevent SCSI-scanning (if it hasn't started yet)
2043 - * and wait for the SCSI-scanning thread to stop.
2044 + /* Prevent SCSI scanning (if it hasn't started yet)
2045 + * or wait for the SCSI-scanning routine to stop.
2046 */
2047 - set_bit(US_FLIDX_DONT_SCAN, &us->dflags);
2048 - wake_up(&us->delay_wait);
2049 - wait_for_completion(&us->scanning_done);
2050 + cancel_delayed_work_sync(&us->scan_dwork);
2051 +
2052 + /* Balance autopm calls if scanning was cancelled */
2053 + if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
2054 + usb_autopm_put_interface_no_suspend(us->pusb_intf);
2055
2056 /* Removing the host will perform an orderly shutdown: caches
2057 * synchronized, disks spun down, etc.
2058 @@ -823,52 +827,28 @@ static void release_everything(struct us_data *us)
2059 scsi_host_put(us_to_host(us));
2060 }
2061
2062 -/* Thread to carry out delayed SCSI-device scanning */
2063 -static int usb_stor_scan_thread(void * __us)
2064 +/* Delayed-work routine to carry out SCSI-device scanning */
2065 +static void usb_stor_scan_dwork(struct work_struct *work)
2066 {
2067 - struct us_data *us = (struct us_data *)__us;
2068 + struct us_data *us = container_of(work, struct us_data,
2069 + scan_dwork.work);
2070 struct device *dev = &us->pusb_intf->dev;
2071
2072 - dev_dbg(dev, "device found\n");
2073 + dev_dbg(dev, "starting scan\n");
2074
2075 - set_freezable_with_signal();
2076 - /*
2077 - * Wait for the timeout to expire or for a disconnect
2078 - *
2079 - * We can't freeze in this thread or we risk causing khubd to
2080 - * fail to freeze, but we can't be non-freezable either. Nor can
2081 - * khubd freeze while waiting for scanning to complete as it may
2082 - * hold the device lock, causing a hang when suspending devices.
2083 - * So we request a fake signal when freezing and use
2084 - * interruptible sleep to kick us out of our wait early when
2085 - * freezing happens.
2086 - */
2087 - if (delay_use > 0) {
2088 - dev_dbg(dev, "waiting for device to settle "
2089 - "before scanning\n");
2090 - wait_event_interruptible_timeout(us->delay_wait,
2091 - test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
2092 - delay_use * HZ);
2093 + /* For bulk-only devices, determine the max LUN value */
2094 + if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
2095 + mutex_lock(&us->dev_mutex);
2096 + us->max_lun = usb_stor_Bulk_max_lun(us);
2097 + mutex_unlock(&us->dev_mutex);
2098 }
2099 + scsi_scan_host(us_to_host(us));
2100 + dev_dbg(dev, "scan complete\n");
2101
2102 - /* If the device is still connected, perform the scanning */
2103 - if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
2104 -
2105 - /* For bulk-only devices, determine the max LUN value */
2106 - if (us->protocol == USB_PR_BULK &&
2107 - !(us->fflags & US_FL_SINGLE_LUN)) {
2108 - mutex_lock(&us->dev_mutex);
2109 - us->max_lun = usb_stor_Bulk_max_lun(us);
2110 - mutex_unlock(&us->dev_mutex);
2111 - }
2112 - scsi_scan_host(us_to_host(us));
2113 - dev_dbg(dev, "scan complete\n");
2114 -
2115 - /* Should we unbind if no devices were detected? */
2116 - }
2117 + /* Should we unbind if no devices were detected? */
2118
2119 usb_autopm_put_interface(us->pusb_intf);
2120 - complete_and_exit(&us->scanning_done, 0);
2121 + clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
2122 }
2123
2124 static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
2125 @@ -915,7 +895,7 @@ int usb_stor_probe1(struct us_data **pus,
2126 init_completion(&us->cmnd_ready);
2127 init_completion(&(us->notify));
2128 init_waitqueue_head(&us->delay_wait);
2129 - init_completion(&us->scanning_done);
2130 + INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
2131
2132 /* Associate the us_data structure with the USB device */
2133 result = associate_dev(us, intf);
2134 @@ -946,7 +926,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1);
2135 /* Second part of general USB mass-storage probing */
2136 int usb_stor_probe2(struct us_data *us)
2137 {
2138 - struct task_struct *th;
2139 int result;
2140 struct device *dev = &us->pusb_intf->dev;
2141
2142 @@ -987,20 +966,14 @@ int usb_stor_probe2(struct us_data *us)
2143 goto BadDevice;
2144 }
2145
2146 - /* Start up the thread for delayed SCSI-device scanning */
2147 - th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
2148 - if (IS_ERR(th)) {
2149 - dev_warn(dev,
2150 - "Unable to start the device-scanning thread\n");
2151 - complete(&us->scanning_done);
2152 - quiesce_and_remove_host(us);
2153 - result = PTR_ERR(th);
2154 - goto BadDevice;
2155 - }
2156 -
2157 + /* Submit the delayed_work for SCSI-device scanning */
2158 usb_autopm_get_interface_no_resume(us->pusb_intf);
2159 - wake_up_process(th);
2160 + set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
2161
2162 + if (delay_use > 0)
2163 + dev_dbg(dev, "waiting for device to settle before scanning\n");
2164 + queue_delayed_work(system_freezable_wq, &us->scan_dwork,
2165 + delay_use * HZ);
2166 return 0;
2167
2168 /* We come here if there are any problems */
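The conversion above replaces the usb-stor-scan kthread with a delayed_work item on system_freezable_wq, which gives the freezer-safe behaviour the deleted comment had to emulate with fake signals. A hedged sketch of the general lifecycle (struct example_dev and the callbacks are illustrative, not the driver's real names):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work scan_dwork;
};

static void example_scan(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       scan_dwork.work);

	pr_debug("deferred scan for %p\n", dev);
	/* ... the deferred scanning work runs here ... */
}

static void example_probe(struct example_dev *dev, unsigned long delay_secs)
{
	INIT_DELAYED_WORK(&dev->scan_dwork, example_scan);
	/* On a freezable workqueue the work simply does not run while
	 * tasks are frozen for suspend, unlike a hand-rolled kthread.
	 */
	queue_delayed_work(system_freezable_wq, &dev->scan_dwork,
			   delay_secs * HZ);
}

static void example_remove(struct example_dev *dev)
{
	/* Cancels a pending scan or waits for a running one to finish. */
	cancel_delayed_work_sync(&dev->scan_dwork);
}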
2169 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
2170 index 7b0f211..75f70f0 100644
2171 --- a/drivers/usb/storage/usb.h
2172 +++ b/drivers/usb/storage/usb.h
2173 @@ -47,6 +47,7 @@
2174 #include <linux/blkdev.h>
2175 #include <linux/completion.h>
2176 #include <linux/mutex.h>
2177 +#include <linux/workqueue.h>
2178 #include <scsi/scsi_host.h>
2179
2180 struct us_data;
2181 @@ -72,7 +73,7 @@ struct us_unusual_dev {
2182 #define US_FLIDX_DISCONNECTING 3 /* disconnect in progress */
2183 #define US_FLIDX_RESETTING 4 /* device reset in progress */
2184 #define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
2185 -#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
2186 +#define US_FLIDX_SCAN_PENDING 6 /* scanning not yet done */
2187 #define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */
2188 #define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */
2189
2190 @@ -147,8 +148,8 @@ struct us_data {
2191 /* mutual exclusion and synchronization structures */
2192 struct completion cmnd_ready; /* to sleep thread on */
2193 struct completion notify; /* thread begin/end */
2194 - wait_queue_head_t delay_wait; /* wait during scan, reset */
2195 - struct completion scanning_done; /* wait for scan thread */
2196 + wait_queue_head_t delay_wait; /* wait during reset */
2197 + struct delayed_work scan_dwork; /* for async scanning */
2198
2199 /* subdriver information */
2200 void *extra; /* Any extra data */
2201 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
2202 index e3562f2..2717329 100644
2203 --- a/fs/ecryptfs/inode.c
2204 +++ b/fs/ecryptfs/inode.c
2205 @@ -1119,6 +1119,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
2206 }
2207
2208 rc = vfs_setxattr(lower_dentry, name, value, size, flags);
2209 + if (!rc)
2210 + fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
2211 out:
2212 return rc;
2213 }
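The fix above copies the lower inode's attributes back up after a successful vfs_setxattr(), so timestamp changes made by the lower filesystem become visible on the eCryptfs inode. The same copy-up pattern applies to any stacking filesystem; a hedged sketch (the function is illustrative, while vfs_setxattr() and fsstack_copy_attr_all() are the real kernel APIs):

#include <linux/fs.h>
#include <linux/fs_stack.h>
#include <linux/xattr.h>

static int example_stacked_setxattr(struct dentry *upper,
				    struct dentry *lower, const char *name,
				    const void *value, size_t size, int flags)
{
	int rc = vfs_setxattr(lower, name, value, size, flags);

	/* Mirror size/times/mode from the lower inode so stat() on the
	 * upper file stays coherent with what just changed below it.
	 */
	if (!rc)
		fsstack_copy_attr_all(upper->d_inode, lower->d_inode);
	return rc;
}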
2214 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2215 index 2acaf60..6879d0c 100644
2216 --- a/fs/eventpoll.c
2217 +++ b/fs/eventpoll.c
2218 @@ -197,6 +197,12 @@ struct eventpoll {
2219
2220 /* The user that created the eventpoll descriptor */
2221 struct user_struct *user;
2222 +
2223 + struct file *file;
2224 +
2225 + /* used to optimize loop detection check */
2226 + int visited;
2227 + struct list_head visited_list_link;
2228 };
2229
2230 /* Wait structure used by the poll hooks */
2231 @@ -255,6 +261,15 @@ static struct kmem_cache *epi_cache __read_mostly;
2232 /* Slab cache used to allocate "struct eppoll_entry" */
2233 static struct kmem_cache *pwq_cache __read_mostly;
2234
2235 +/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
2236 +static LIST_HEAD(visited_list);
2237 +
2238 +/*
2239 + * List of files with newly added links, where we may need to limit the number
2240 + * of emanating paths. Protected by the epmutex.
2241 + */
2242 +static LIST_HEAD(tfile_check_list);
2243 +
2244 #ifdef CONFIG_SYSCTL
2245
2246 #include <linux/sysctl.h>
2247 @@ -276,6 +291,12 @@ ctl_table epoll_table[] = {
2248 };
2249 #endif /* CONFIG_SYSCTL */
2250
2251 +static const struct file_operations eventpoll_fops;
2252 +
2253 +static inline int is_file_epoll(struct file *f)
2254 +{
2255 + return f->f_op == &eventpoll_fops;
2256 +}
2257
2258 /* Setup the structure that is used as key for the RB tree */
2259 static inline void ep_set_ffd(struct epoll_filefd *ffd,
2260 @@ -299,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p)
2261 return !list_empty(p);
2262 }
2263
2264 +static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
2265 +{
2266 + return container_of(p, struct eppoll_entry, wait);
2267 +}
2268 +
2269 /* Get the "struct epitem" from a wait queue pointer */
2270 static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
2271 {
2272 @@ -446,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
2273 put_cpu();
2274 }
2275
2276 +static void ep_remove_wait_queue(struct eppoll_entry *pwq)
2277 +{
2278 + wait_queue_head_t *whead;
2279 +
2280 + rcu_read_lock();
2281 + /* If it is cleared by POLLFREE, it should be rcu-safe */
2282 + whead = rcu_dereference(pwq->whead);
2283 + if (whead)
2284 + remove_wait_queue(whead, &pwq->wait);
2285 + rcu_read_unlock();
2286 +}
2287 +
2288 /*
2289 * This function unregisters poll callbacks from the associated file
2290 * descriptor. Must be called with "mtx" held (or "epmutex" if called from
2291 @@ -460,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
2292 pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
2293
2294 list_del(&pwq->llink);
2295 - remove_wait_queue(pwq->whead, &pwq->wait);
2296 + ep_remove_wait_queue(pwq);
2297 kmem_cache_free(pwq_cache, pwq);
2298 }
2299 }
2300 @@ -711,12 +749,6 @@ static const struct file_operations eventpoll_fops = {
2301 .llseek = noop_llseek,
2302 };
2303
2304 -/* Fast test to see if the file is an evenpoll file */
2305 -static inline int is_file_epoll(struct file *f)
2306 -{
2307 - return f->f_op == &eventpoll_fops;
2308 -}
2309 -
2310 /*
2311 * This is called from eventpoll_release() to unlink files from the eventpoll
2312 * interface. We need to have this facility to cleanup correctly files that are
2313 @@ -827,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
2314 struct epitem *epi = ep_item_from_wait(wait);
2315 struct eventpoll *ep = epi->ep;
2316
2317 + if ((unsigned long)key & POLLFREE) {
2318 + ep_pwq_from_wait(wait)->whead = NULL;
2319 + /*
2320 + * whead = NULL above can race with ep_remove_wait_queue()
2321 + * which can do another remove_wait_queue() after us, so we
2322 + * can't use __remove_wait_queue(). whead->lock is held by
2323 + * the caller.
2324 + */
2325 + list_del_init(&wait->task_list);
2326 + }
2327 +
2328 spin_lock_irqsave(&ep->lock, flags);
2329
2330 /*
2331 @@ -926,6 +969,99 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
2332 rb_insert_color(&epi->rbn, &ep->rbr);
2333 }
2334
2335 +
2336 +
2337 +#define PATH_ARR_SIZE 5
2338 +/*
2339 + * These are the numbers of paths of length 1 to 5 that we allow to emanate
2340 + * from a single file of interest. For example, we allow 1000 paths of length
2341 + * 1 to emanate from each file of interest. This essentially represents the
2342 + * potential wakeup paths, which need to be limited in order to avoid massive
2343 + * uncontrolled wakeup storms. The common use case should be a single ep which
2344 + * is connected to n file sources. In this case each file source has 1 path
2345 + * of length 1. Thus, the numbers below should be more than sufficient. These
2346 + * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
2347 + * and delete can't add additional paths. Protected by the epmutex.
2348 + */
2349 +static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
2350 +static int path_count[PATH_ARR_SIZE];
2351 +
2352 +static int path_count_inc(int nests)
2353 +{
2354 + if (++path_count[nests] > path_limits[nests])
2355 + return -1;
2356 + return 0;
2357 +}
2358 +
2359 +static void path_count_init(void)
2360 +{
2361 + int i;
2362 +
2363 + for (i = 0; i < PATH_ARR_SIZE; i++)
2364 + path_count[i] = 0;
2365 +}
2366 +
2367 +static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
2368 +{
2369 + int error = 0;
2370 + struct file *file = priv;
2371 + struct file *child_file;
2372 + struct epitem *epi;
2373 +
2374 + list_for_each_entry(epi, &file->f_ep_links, fllink) {
2375 + child_file = epi->ep->file;
2376 + if (is_file_epoll(child_file)) {
2377 + if (list_empty(&child_file->f_ep_links)) {
2378 + if (path_count_inc(call_nests)) {
2379 + error = -1;
2380 + break;
2381 + }
2382 + } else {
2383 + error = ep_call_nested(&poll_loop_ncalls,
2384 + EP_MAX_NESTS,
2385 + reverse_path_check_proc,
2386 + child_file, child_file,
2387 + current);
2388 + }
2389 + if (error != 0)
2390 + break;
2391 + } else {
2392 + printk(KERN_ERR "reverse_path_check_proc: "
2393 + "file is not an ep!\n");
2394 + }
2395 + }
2396 + return error;
2397 +}
2398 +
2399 +/**
2400 + * reverse_path_check - The tfile_check_list is a list of file *, which have
2401 + * links that are proposed to be newly added. We need to
2402 + * make sure that those added links don't add too many
2403 + * paths such that we will spend all our time waking up
2404 + * eventpoll objects.
2405 + *
2406 + * Returns: Zero if the proposed links don't create too many paths,
2407 + * -1 otherwise.
2408 + */
2409 +static int reverse_path_check(void)
2410 +{
2411 + int length = 0;
2412 + int error = 0;
2413 + struct file *current_file;
2414 +
2415 + /* let's call this for all tfiles */
2416 + list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
2417 + length++;
2418 + path_count_init();
2419 + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2420 + reverse_path_check_proc, current_file,
2421 + current_file, current);
2422 + if (error)
2423 + break;
2424 + }
2425 + return error;
2426 +}
2427 +
2428 /*
2429 * Must be called with "mtx" held.
2430 */
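The table above caps how many wakeup paths of each length may reach one watched file: 1000 direct watchers, down to 10 chains of five nested epoll sets. A tiny standalone model of the bookkeeping, using the same limits:

#include <stdio.h>

#define PATH_ARR_SIZE 5
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	/* Fails once more than path_limits[nests] paths of this depth exist. */
	return ++path_count[nests] > path_limits[nests] ? -1 : 0;
}

int main(void)
{
	int i, err = 0;

	/* Eleven length-5 paths to one file: the eleventh trips the limit. */
	for (i = 0; i < 11 && !err; i++)
		err = path_count_inc(4);
	printf("err = %d after %d paths\n", err, i);
	return 0;
}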
2431 @@ -987,6 +1123,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
2432 */
2433 ep_rbtree_insert(ep, epi);
2434
2435 + /* now check if we've created too many backpaths */
2436 + error = -EINVAL;
2437 + if (reverse_path_check())
2438 + goto error_remove_epi;
2439 +
2440 /* We have to drop the new item inside our item list to keep track of it */
2441 spin_lock_irqsave(&ep->lock, flags);
2442
2443 @@ -1011,6 +1152,14 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
2444
2445 return 0;
2446
2447 +error_remove_epi:
2448 + spin_lock(&tfile->f_lock);
2449 + if (ep_is_linked(&epi->fllink))
2450 + list_del_init(&epi->fllink);
2451 + spin_unlock(&tfile->f_lock);
2452 +
2453 + rb_erase(&epi->rbn, &ep->rbr);
2454 +
2455 error_unregister:
2456 ep_unregister_pollwait(ep, epi);
2457
2458 @@ -1275,18 +1424,36 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2459 int error = 0;
2460 struct file *file = priv;
2461 struct eventpoll *ep = file->private_data;
2462 + struct eventpoll *ep_tovisit;
2463 struct rb_node *rbp;
2464 struct epitem *epi;
2465
2466 mutex_lock_nested(&ep->mtx, call_nests + 1);
2467 + ep->visited = 1;
2468 + list_add(&ep->visited_list_link, &visited_list);
2469 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
2470 epi = rb_entry(rbp, struct epitem, rbn);
2471 if (unlikely(is_file_epoll(epi->ffd.file))) {
2472 + ep_tovisit = epi->ffd.file->private_data;
2473 + if (ep_tovisit->visited)
2474 + continue;
2475 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2476 - ep_loop_check_proc, epi->ffd.file,
2477 - epi->ffd.file->private_data, current);
2478 + ep_loop_check_proc, epi->ffd.file,
2479 + ep_tovisit, current);
2480 if (error != 0)
2481 break;
2482 + } else {
2483 + /*
2484 + * If we've reached a file that is not associated with
2485 + * an ep, then we need to check if the newly added
2486 + * links are going to add too many wakeup paths. We do
2487 + * this by adding it to the tfile_check_list, if it's
2488 + * not already there, and calling reverse_path_check()
2489 + * during ep_insert().
2490 + */
2491 + if (list_empty(&epi->ffd.file->f_tfile_llink))
2492 + list_add(&epi->ffd.file->f_tfile_llink,
2493 + &tfile_check_list);
2494 }
2495 }
2496 mutex_unlock(&ep->mtx);
2497 @@ -1307,8 +1474,31 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2498 */
2499 static int ep_loop_check(struct eventpoll *ep, struct file *file)
2500 {
2501 - return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2502 + int ret;
2503 + struct eventpoll *ep_cur, *ep_next;
2504 +
2505 + ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
2506 ep_loop_check_proc, file, ep, current);
2507 + /* clear visited list */
2508 + list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
2509 + visited_list_link) {
2510 + ep_cur->visited = 0;
2511 + list_del(&ep_cur->visited_list_link);
2512 + }
2513 + return ret;
2514 +}
2515 +
2516 +static void clear_tfile_check_list(void)
2517 +{
2518 + struct file *file;
2519 +
2520 + /* first clear the tfile_check_list */
2521 + while (!list_empty(&tfile_check_list)) {
2522 + file = list_first_entry(&tfile_check_list, struct file,
2523 + f_tfile_llink);
2524 + list_del_init(&file->f_tfile_llink);
2525 + }
2526 + INIT_LIST_HEAD(&tfile_check_list);
2527 }
2528
2529 /*
2530 @@ -1316,8 +1506,9 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file)
2531 */
2532 SYSCALL_DEFINE1(epoll_create1, int, flags)
2533 {
2534 - int error;
2535 + int error, fd;
2536 struct eventpoll *ep = NULL;
2537 + struct file *file;
2538
2539 /* Check the EPOLL_* constant for consistency. */
2540 BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2541 @@ -1334,11 +1525,25 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
2542 * Creates all the items needed to setup an eventpoll file. That is,
2543 * a file structure and a free file descriptor.
2544 */
2545 - error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
2546 + fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2547 + if (fd < 0) {
2548 + error = fd;
2549 + goto out_free_ep;
2550 + }
2551 + file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2552 O_RDWR | (flags & O_CLOEXEC));
2553 - if (error < 0)
2554 - ep_free(ep);
2555 -
2556 + if (IS_ERR(file)) {
2557 + error = PTR_ERR(file);
2558 + goto out_free_fd;
2559 + }
2560 + fd_install(fd, file);
2561 + ep->file = file;
2562 + return fd;
2563 +
2564 +out_free_fd:
2565 + put_unused_fd(fd);
2566 +out_free_ep:
2567 + ep_free(ep);
2568 return error;
2569 }
2570
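epoll_create1() above splits what anon_inode_getfd() used to do into its three parts, because the eventpoll object must record its struct file in ep->file before the descriptor becomes visible to user space. A hedged sketch of the reserve/create/install pattern (struct example and example_fops are placeholders):

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>

struct example {
	struct file *file;
};

static const struct file_operations example_fops;	/* placeholder fops */

static int example_create(struct example *priv, int flags)
{
	struct file *file;
	int fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));

	if (fd < 0)
		return fd;
	file = anon_inode_getfile("[example]", &example_fops, priv,
				  O_RDWR | (flags & O_CLOEXEC));
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* fd was only reserved, not installed */
		return PTR_ERR(file);
	}
	priv->file = file;		/* record before publishing the fd */
	fd_install(fd, file);		/* user space can reach it from here */
	return fd;
}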
2571 @@ -1404,21 +1609,27 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2572 /*
2573 * When we insert an epoll file descriptor, inside another epoll file
2574 * descriptor, there is the change of creating closed loops, which are
2575 - * better be handled here, than in more critical paths.
2576 + * better handled here than in more critical paths. While we are
2577 + * checking for loops we also determine the list of files reachable
2578 + * and hang them on the tfile_check_list, so we can check that we
2579 + * haven't created too many possible wakeup paths.
2580 *
2581 - * We hold epmutex across the loop check and the insert in this case, in
2582 - * order to prevent two separate inserts from racing and each doing the
2583 - * insert "at the same time" such that ep_loop_check passes on both
2584 - * before either one does the insert, thereby creating a cycle.
2585 + * We need to hold the epmutex across both ep_insert and ep_remove
2586 + * because we want to make sure we are looking at a coherent view
2587 + * of the epoll network.
2588 */
2589 - if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
2590 + if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
2591 mutex_lock(&epmutex);
2592 did_lock_epmutex = 1;
2593 - error = -ELOOP;
2594 - if (ep_loop_check(ep, tfile) != 0)
2595 - goto error_tgt_fput;
2596 }
2597 -
2598 + if (op == EPOLL_CTL_ADD) {
2599 + if (is_file_epoll(tfile)) {
2600 + error = -ELOOP;
2601 + if (ep_loop_check(ep, tfile) != 0)
2602 + goto error_tgt_fput;
2603 + } else
2604 + list_add(&tfile->f_tfile_llink, &tfile_check_list);
2605 + }
2606
2607 mutex_lock_nested(&ep->mtx, 0);
2608
2609 @@ -1437,6 +1648,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2610 error = ep_insert(ep, &epds, tfile, fd);
2611 } else
2612 error = -EEXIST;
2613 + clear_tfile_check_list();
2614 break;
2615 case EPOLL_CTL_DEL:
2616 if (epi)
2617 @@ -1455,7 +1667,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2618 mutex_unlock(&ep->mtx);
2619
2620 error_tgt_fput:
2621 - if (unlikely(did_lock_epmutex))
2622 + if (did_lock_epmutex)
2623 mutex_unlock(&epmutex);
2624
2625 fput(tfile);
2626 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2627 index 87822a3..9e7e9a5 100644
2628 --- a/fs/nfs/nfs4state.c
2629 +++ b/fs/nfs/nfs4state.c
2630 @@ -1065,6 +1065,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
2631 {
2632 struct nfs_client *clp = server->nfs_client;
2633
2634 + if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
2635 + nfs_async_inode_return_delegation(state->inode, &state->stateid);
2636 nfs4_state_mark_reclaim_nograce(clp, state);
2637 nfs4_schedule_state_manager(clp);
2638 }
2639 diff --git a/fs/signalfd.c b/fs/signalfd.c
2640 index 492465b..7ae2a57 100644
2641 --- a/fs/signalfd.c
2642 +++ b/fs/signalfd.c
2643 @@ -30,6 +30,21 @@
2644 #include <linux/signalfd.h>
2645 #include <linux/syscalls.h>
2646
2647 +void signalfd_cleanup(struct sighand_struct *sighand)
2648 +{
2649 + wait_queue_head_t *wqh = &sighand->signalfd_wqh;
2650 + /*
2651 + * The lockless check can race with remove_wait_queue() in progress,
2652 + * but in this case its caller should run under rcu_read_lock(), and
2653 + * since sighand_cachep is SLAB_DESTROY_BY_RCU we can safely return.
2654 + */
2655 + if (likely(!waitqueue_active(wqh)))
2656 + return;
2657 +
2658 + /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
2659 + wake_up_poll(wqh, POLLHUP | POLLFREE);
2660 +}
2661 +
2662 struct signalfd_ctx {
2663 sigset_t sigmask;
2664 };
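signalfd_cleanup() above closes a use-after-free: an epoll watcher may still be queued on sighand->signalfd_wqh when the last user of the sighand_struct exits, so the final wakeup carries POLLFREE and the waiter's callback (see the ep_poll_callback() hunk earlier) unhooks itself instead of leaving a dangling wait-queue entry behind. Both halves of the handshake, reduced to a hedged sketch:

#include <linux/poll.h>
#include <linux/wait.h>

/* Waker side: the wait queue head is about to be freed.  wake_up_poll()
 * runs the callbacks under wqh->lock, so the list walk itself is safe.
 */
static void example_cleanup(wait_queue_head_t *wqh)
{
	if (waitqueue_active(wqh))
		wake_up_poll(wqh, POLLHUP | POLLFREE);
}

/* Waiter-side callback: on POLLFREE, unlink with list_del_init() -- not
 * __remove_wait_queue() -- so a racing remove_wait_queue() elsewhere
 * still finds a valid, self-linked entry.
 */
static int example_wake_callback(wait_queue_t *wait, unsigned mode,
				 int sync, void *key)
{
	if ((unsigned long)key & POLLFREE)
		list_del_init(&wait->task_list);
	return 0;
}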
2665 diff --git a/include/asm-generic/poll.h b/include/asm-generic/poll.h
2666 index 44bce83..9ce7f44 100644
2667 --- a/include/asm-generic/poll.h
2668 +++ b/include/asm-generic/poll.h
2669 @@ -28,6 +28,8 @@
2670 #define POLLRDHUP 0x2000
2671 #endif
2672
2673 +#define POLLFREE 0x4000 /* currently only for epoll */
2674 +
2675 struct pollfd {
2676 int fd;
2677 short events;
2678 diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
2679 index f362733..657ab55 100644
2680 --- a/include/linux/eventpoll.h
2681 +++ b/include/linux/eventpoll.h
2682 @@ -61,6 +61,7 @@ struct file;
2683 static inline void eventpoll_init_file(struct file *file)
2684 {
2685 INIT_LIST_HEAD(&file->f_ep_links);
2686 + INIT_LIST_HEAD(&file->f_tfile_llink);
2687 }
2688
2689
2690 diff --git a/include/linux/fs.h b/include/linux/fs.h
2691 index 7b17db7..d8ecb01 100644
2692 --- a/include/linux/fs.h
2693 +++ b/include/linux/fs.h
2694 @@ -969,6 +969,7 @@ struct file {
2695 #ifdef CONFIG_EPOLL
2696 /* Used by fs/eventpoll.c to link all the hooks to this file */
2697 struct list_head f_ep_links;
2698 + struct list_head f_tfile_llink;
2699 #endif /* #ifdef CONFIG_EPOLL */
2700 struct address_space *f_mapping;
2701 #ifdef CONFIG_DEBUG_WRITECOUNT
2702 diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
2703 index 3ff4961..247399b 100644
2704 --- a/include/linux/signalfd.h
2705 +++ b/include/linux/signalfd.h
2706 @@ -61,13 +61,16 @@ static inline void signalfd_notify(struct task_struct *tsk, int sig)
2707 wake_up(&tsk->sighand->signalfd_wqh);
2708 }
2709
2710 +extern void signalfd_cleanup(struct sighand_struct *sighand);
2711 +
2712 #else /* CONFIG_SIGNALFD */
2713
2714 static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
2715
2716 +static inline void signalfd_cleanup(struct sighand_struct *sighand) { }
2717 +
2718 #endif /* CONFIG_SIGNALFD */
2719
2720 #endif /* __KERNEL__ */
2721
2722 #endif /* _LINUX_SIGNALFD_H */
2723 -
2724 diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
2725 index 4ebaf08..1eb735b 100644
2726 --- a/include/linux/usb/ch11.h
2727 +++ b/include/linux/usb/ch11.h
2728 @@ -62,12 +62,6 @@
2729 #define USB_PORT_FEAT_TEST 21
2730 #define USB_PORT_FEAT_INDICATOR 22
2731 #define USB_PORT_FEAT_C_PORT_L1 23
2732 -#define USB_PORT_FEAT_C_PORT_LINK_STATE 25
2733 -#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
2734 -#define USB_PORT_FEAT_PORT_REMOTE_WAKE_MASK 27
2735 -#define USB_PORT_FEAT_BH_PORT_RESET 28
2736 -#define USB_PORT_FEAT_C_BH_PORT_RESET 29
2737 -#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30
2738
2739 /*
2740 * Port feature selectors added by USB 3.0 spec.
2741 @@ -76,8 +70,8 @@
2742 #define USB_PORT_FEAT_LINK_STATE 5
2743 #define USB_PORT_FEAT_U1_TIMEOUT 23
2744 #define USB_PORT_FEAT_U2_TIMEOUT 24
2745 -#define USB_PORT_FEAT_C_LINK_STATE 25
2746 -#define USB_PORT_FEAT_C_CONFIG_ERR 26
2747 +#define USB_PORT_FEAT_C_PORT_LINK_STATE 25
2748 +#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
2749 #define USB_PORT_FEAT_REMOTE_WAKE_MASK 27
2750 #define USB_PORT_FEAT_BH_PORT_RESET 28
2751 #define USB_PORT_FEAT_C_BH_PORT_RESET 29
2752 diff --git a/include/net/flow.h b/include/net/flow.h
2753 index 32359fd..e37cfda 100644
2754 --- a/include/net/flow.h
2755 +++ b/include/net/flow.h
2756 @@ -90,6 +90,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
2757 fl4->fl4_dport = dport;
2758 fl4->fl4_sport = sport;
2759 }
2760 +
2761 +/* Reset some input parameters after a previous lookup */
2762 +static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
2763 + __be32 daddr, __be32 saddr)
2764 +{
2765 + fl4->flowi4_oif = oif;
2766 + fl4->flowi4_tos = tos;
2767 + fl4->daddr = daddr;
2768 + fl4->saddr = saddr;
2769 +}
2770
2771
2772 struct flowi6 {
2773 diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
2774 index caaff5f..14dd9c7 100644
2775 --- a/include/net/inet_sock.h
2776 +++ b/include/net/inet_sock.h
2777 @@ -31,6 +31,7 @@
2778 /** struct ip_options - IP Options
2779 *
2780 * @faddr - Saved first hop address
2781 + * @nexthop - Saved nexthop address in LSRR and SSRR
2782 * @is_data - Options in __data, rather than skb
2783 * @is_strictroute - Strict source route
2784 * @srr_is_hit - Packet destination addr was our one
2785 @@ -41,6 +42,7 @@
2786 */
2787 struct ip_options {
2788 __be32 faddr;
2789 + __be32 nexthop;
2790 unsigned char optlen;
2791 unsigned char srr;
2792 unsigned char rr;
2793 diff --git a/include/net/route.h b/include/net/route.h
2794 index db7b343..5d7aae4 100644
2795 --- a/include/net/route.h
2796 +++ b/include/net/route.h
2797 @@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
2798 if (IS_ERR(rt))
2799 return rt;
2800 ip_rt_put(rt);
2801 + flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
2802 }
2803 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
2804 return ip_route_output_flow(net, fl4, sk);
2805 @@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
2806 fl4->fl4_dport = dport;
2807 fl4->fl4_sport = sport;
2808 ip_rt_put(rt);
2809 + flowi4_update_output(fl4, sk->sk_bound_dev_if,
2810 + RT_CONN_FLAGS(sk), fl4->daddr,
2811 + fl4->saddr);
2812 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
2813 return ip_route_output_flow(sock_net(sk), fl4, sk);
2814 }
2815 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
2816 index b931f02..f1fbe2d 100644
2817 --- a/include/net/sch_generic.h
2818 +++ b/include/net/sch_generic.h
2819 @@ -219,9 +219,16 @@ struct tcf_proto {
2820
2821 struct qdisc_skb_cb {
2822 unsigned int pkt_len;
2823 - long data[];
2824 + unsigned char data[24];
2825 };
2826
2827 +static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
2828 +{
2829 + struct qdisc_skb_cb *qcb;
2830 + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
2831 + BUILD_BUG_ON(sizeof(qcb->data) < sz);
2832 +}
2833 +
2834 static inline int qdisc_qlen(struct Qdisc *q)
2835 {
2836 return q->q.qlen;
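qdisc_skb_cb above trades its flexible array member for a fixed 24-byte private area, and the new validator lets a qdisc assert at compile time that its per-packet state fits both there and in skb->cb. A hedged sketch of the intended usage (the struct and names are illustrative; netem follows this exact pattern for its timestamp):

#include <linux/types.h>
#include <net/sch_generic.h>

struct example_skb_cb {
	u64 time_to_send;	/* hypothetical per-packet state */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	/* Both BUILD_BUG_ON()s in the validator fire at compile time if
	 * this struct ever outgrows cb->data or skb->cb itself.
	 */
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}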
2837 diff --git a/kernel/fork.c b/kernel/fork.c
2838 index 0276c30..a4e453b 100644
2839 --- a/kernel/fork.c
2840 +++ b/kernel/fork.c
2841 @@ -67,6 +67,7 @@
2842 #include <linux/user-return-notifier.h>
2843 #include <linux/oom.h>
2844 #include <linux/khugepaged.h>
2845 +#include <linux/signalfd.h>
2846
2847 #include <asm/pgtable.h>
2848 #include <asm/pgalloc.h>
2849 @@ -917,8 +918,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
2850
2851 void __cleanup_sighand(struct sighand_struct *sighand)
2852 {
2853 - if (atomic_dec_and_test(&sighand->count))
2854 + if (atomic_dec_and_test(&sighand->count)) {
2855 + signalfd_cleanup(sighand);
2856 kmem_cache_free(sighand_cachep, sighand);
2857 + }
2858 }
2859
2860
2861 diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
2862 index 342d8f4..0119b9d 100644
2863 --- a/kernel/irq/autoprobe.c
2864 +++ b/kernel/irq/autoprobe.c
2865 @@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
2866 if (desc->irq_data.chip->irq_set_type)
2867 desc->irq_data.chip->irq_set_type(&desc->irq_data,
2868 IRQ_TYPE_PROBE);
2869 - irq_startup(desc);
2870 + irq_startup(desc, false);
2871 }
2872 raw_spin_unlock_irq(&desc->lock);
2873 }
2874 @@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
2875 raw_spin_lock_irq(&desc->lock);
2876 if (!desc->action && irq_settings_can_probe(desc)) {
2877 desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
2878 - if (irq_startup(desc))
2879 + if (irq_startup(desc, false))
2880 desc->istate |= IRQS_PENDING;
2881 }
2882 raw_spin_unlock_irq(&desc->lock);
2883 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
2884 index dc5114b..ca14f5d 100644
2885 --- a/kernel/irq/chip.c
2886 +++ b/kernel/irq/chip.c
2887 @@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
2888 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
2889 }
2890
2891 -int irq_startup(struct irq_desc *desc)
2892 +int irq_startup(struct irq_desc *desc, bool resend)
2893 {
2894 + int ret = 0;
2895 +
2896 irq_state_clr_disabled(desc);
2897 desc->depth = 0;
2898
2899 if (desc->irq_data.chip->irq_startup) {
2900 - int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
2901 + ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
2902 irq_state_clr_masked(desc);
2903 - return ret;
2904 + } else {
2905 + irq_enable(desc);
2906 }
2907 -
2908 - irq_enable(desc);
2909 - return 0;
2910 + if (resend)
2911 + check_irq_resend(desc, desc->irq_data.irq);
2912 + return ret;
2913 }
2914
2915 void irq_shutdown(struct irq_desc *desc)
2916 @@ -312,6 +315,24 @@ out_unlock:
2917 }
2918 EXPORT_SYMBOL_GPL(handle_simple_irq);
2919
2920 +/*
2921 + * Called unconditionally from handle_level_irq() and only for oneshot
2922 + * interrupts from handle_fasteoi_irq()
2923 + */
2924 +static void cond_unmask_irq(struct irq_desc *desc)
2925 +{
2926 + /*
2927 + * We need to unmask in the following cases:
2928 + * - Standard level irq (IRQF_ONESHOT is not set)
2929 + * - Oneshot irq which did not wake the thread (caused by a
2930 + * spurious interrupt or a primary handler handling it
2931 + * completely).
2932 + */
2933 + if (!irqd_irq_disabled(&desc->irq_data) &&
2934 + irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
2935 + unmask_irq(desc);
2936 +}
2937 +
2938 /**
2939 * handle_level_irq - Level type irq handler
2940 * @irq: the interrupt number
2941 @@ -344,8 +365,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
2942
2943 handle_irq_event(desc);
2944
2945 - if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
2946 - unmask_irq(desc);
2947 + cond_unmask_irq(desc);
2948 +
2949 out_unlock:
2950 raw_spin_unlock(&desc->lock);
2951 }
2952 @@ -399,6 +420,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
2953 preflow_handler(desc);
2954 handle_irq_event(desc);
2955
2956 + if (desc->istate & IRQS_ONESHOT)
2957 + cond_unmask_irq(desc);
2958 +
2959 out_eoi:
2960 desc->irq_data.chip->irq_eoi(&desc->irq_data);
2961 out_unlock:
2962 @@ -575,7 +599,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
2963 irq_settings_set_noprobe(desc);
2964 irq_settings_set_norequest(desc);
2965 irq_settings_set_nothread(desc);
2966 - irq_startup(desc);
2967 + irq_startup(desc, true);
2968 }
2969 out:
2970 irq_put_desc_busunlock(desc, flags);
2971 diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
2972 index 6546431..62efdc4 100644
2973 --- a/kernel/irq/internals.h
2974 +++ b/kernel/irq/internals.h
2975 @@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
2976 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
2977 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
2978
2979 -extern int irq_startup(struct irq_desc *desc);
2980 +extern int irq_startup(struct irq_desc *desc, bool resend);
2981 extern void irq_shutdown(struct irq_desc *desc);
2982 extern void irq_enable(struct irq_desc *desc);
2983 extern void irq_disable(struct irq_desc *desc);
2984 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
2985 index a1aadab..def3406 100644
2986 --- a/kernel/irq/manage.c
2987 +++ b/kernel/irq/manage.c
2988 @@ -1018,7 +1018,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
2989 desc->istate |= IRQS_ONESHOT;
2990
2991 if (irq_settings_can_autoenable(desc))
2992 - irq_startup(desc);
2993 + irq_startup(desc, true);
2994 else
2995 /* Undo nested disables: */
2996 desc->depth = 1;
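
Taken together, the chip.c/internals.h/manage.c hunks make irq_startup() replay a pending interrupt when a handler is (re)installed, and teach the level/fasteoi flows to unmask a oneshot line whose thread was never woken. A kernel-module-style sketch of the oneshot setup this protects follows; the mydev_* names and the mydev_pending() stub are hypothetical, while request_threaded_irq() itself is the stock API.

    #include <linux/interrupt.h>

    /* hypothetical device poke: reports whether our device raised the line */
    static bool mydev_pending(void *dev_id)
    {
        return true;
    }

    static irqreturn_t mydev_quick_check(int irq, void *dev_id)
    {
        if (!mydev_pending(dev_id))
            return IRQ_NONE;        /* spurious: the thread is NOT woken */
        return IRQ_WAKE_THREAD;     /* defer the real work to the thread */
    }

    static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
    {
        /* sleepable handling; the line is unmasked once this returns */
        return IRQ_HANDLED;
    }

    static int mydev_setup_irq(unsigned int irq, void *dev)
    {
        return request_threaded_irq(irq, mydev_quick_check, mydev_thread_fn,
                                    IRQF_ONESHOT, "mydev", dev);
    }

With IRQF_ONESHOT the core keeps the line masked until mydev_thread_fn() returns; the IRQ_NONE path is exactly the "thread not woken" case that cond_unmask_irq() now unmasks.
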
2997 diff --git a/mm/nommu.c b/mm/nommu.c
2998 index 9edc897..8397758 100644
2999 --- a/mm/nommu.c
3000 +++ b/mm/nommu.c
3001 @@ -697,9 +697,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
3002 if (vma->vm_file) {
3003 mapping = vma->vm_file->f_mapping;
3004
3005 + mutex_lock(&mapping->i_mmap_mutex);
3006 flush_dcache_mmap_lock(mapping);
3007 vma_prio_tree_insert(vma, &mapping->i_mmap);
3008 flush_dcache_mmap_unlock(mapping);
3009 + mutex_unlock(&mapping->i_mmap_mutex);
3010 }
3011
3012 /* add the VMA to the tree */
3013 @@ -761,9 +763,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
3014 if (vma->vm_file) {
3015 mapping = vma->vm_file->f_mapping;
3016
3017 + mutex_lock(&mapping->i_mmap_mutex);
3018 flush_dcache_mmap_lock(mapping);
3019 vma_prio_tree_remove(vma, &mapping->i_mmap);
3020 flush_dcache_mmap_unlock(mapping);
3021 + mutex_unlock(&mapping->i_mmap_mutex);
3022 }
3023
3024 /* remove from the MM's tree and list */
3025 @@ -2061,6 +2065,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
3026 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
3027
3028 down_write(&nommu_region_sem);
3029 + mutex_lock(&inode->i_mapping->i_mmap_mutex);
3030
3031 /* search for VMAs that fall within the dead zone */
3032 vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
3033 @@ -2068,6 +2073,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
3034 /* found one - only interested if it's shared out of the page
3035 * cache */
3036 if (vma->vm_flags & VM_SHARED) {
3037 + mutex_unlock(&inode->i_mapping->i_mmap_mutex);
3038 up_write(&nommu_region_sem);
3039 return -ETXTBSY; /* not quite true, but near enough */
3040 }
3041 @@ -2095,6 +2101,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
3042 }
3043 }
3044
3045 + mutex_unlock(&inode->i_mapping->i_mmap_mutex);
3046 up_write(&nommu_region_sem);
3047 return 0;
3048 }
3049 diff --git a/net/core/dev.c b/net/core/dev.c
3050 index f14f601..17fdbf8 100644
3051 --- a/net/core/dev.c
3052 +++ b/net/core/dev.c
3053 @@ -3434,14 +3434,20 @@ static inline gro_result_t
3054 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3055 {
3056 struct sk_buff *p;
3057 + unsigned int maclen = skb->dev->hard_header_len;
3058
3059 for (p = napi->gro_list; p; p = p->next) {
3060 unsigned long diffs;
3061
3062 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3063 diffs |= p->vlan_tci ^ skb->vlan_tci;
3064 - diffs |= compare_ether_header(skb_mac_header(p),
3065 - skb_gro_mac_header(skb));
3066 + if (maclen == ETH_HLEN)
3067 + diffs |= compare_ether_header(skb_mac_header(p),
3068 + skb_gro_mac_header(skb));
3069 + else if (!diffs)
3070 + diffs = memcmp(skb_mac_header(p),
3071 + skb_gro_mac_header(skb),
3072 + maclen);
3073 NAPI_GRO_CB(p)->same_flow = !diffs;
3074 NAPI_GRO_CB(p)->flush = 0;
3075 }
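
The GRO hunk stops assuming a 14-byte Ethernet header when matching flows: for any other hard_header_len it compares the full link-layer header with memcmp(). A user-space sketch of the two comparison paths is below; the XOR-fold fast path follows the spirit of compare_ether_header(), but the buffer layout and function names are illustrative.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_HLEN 14

    /* XOR-fold over dst(6) + src(6) + proto(2): nonzero iff they differ;
     * memcpy keeps the loads alignment-safe in portable code */
    static unsigned long eth_header_differs(const unsigned char *a,
                                            const unsigned char *b)
    {
        uint16_t a16, b16;
        uint32_t a32[3], b32[3];

        memcpy(&a16, a, 2);
        memcpy(&b16, b, 2);
        memcpy(a32, a + 2, 12);
        memcpy(b32, b + 2, 12);
        return (a16 ^ b16) | (a32[0] ^ b32[0]) |
               (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
    }

    static unsigned long header_differs(const unsigned char *a,
                                        const unsigned char *b,
                                        unsigned int maclen)
    {
        if (maclen == ETH_HLEN)                /* common Ethernet fast path */
            return eth_header_differs(a, b);
        return memcmp(a, b, maclen) != 0;      /* longer link-layer headers */
    }

    int main(void)
    {
        unsigned char h1[32] = { 0 }, h2[32] = { 0 };

        h2[20] = 0xff;   /* differs only beyond the first 14 bytes */
        printf("eth-only compare: %lu\n", header_differs(h1, h2, ETH_HLEN));
        printf("full-len compare: %lu\n", header_differs(h1, h2, 22));
        return 0;
    }
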
3076 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
3077 index 18d9cbd..05db410 100644
3078 --- a/net/core/netpoll.c
3079 +++ b/net/core/netpoll.c
3080 @@ -193,7 +193,7 @@ void netpoll_poll_dev(struct net_device *dev)
3081
3082 poll_napi(dev);
3083
3084 - if (dev->priv_flags & IFF_SLAVE) {
3085 + if (dev->flags & IFF_SLAVE) {
3086 if (dev->npinfo) {
3087 struct net_device *bond_dev = dev->master;
3088 struct sk_buff *skb;
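
The netpoll fix works because IFF_SLAVE is a bit in the public dev->flags word, not in dev->priv_flags, so the old test could never match on a bonding slave. A toy illustration (toy_dev is not struct net_device; the flag values match <linux/if.h>):

    #include <stdio.h>

    #define IFF_UP    0x1     /* values as in <linux/if.h> */
    #define IFF_SLAVE 0x800   /* bonding/equalizer slave */

    struct toy_dev {
        unsigned int flags;      /* public flags (the SIOCGIFFLAGS word) */
        unsigned int priv_flags; /* kernel-private flags: a different bit space */
    };

    int main(void)
    {
        struct toy_dev d = { .flags = IFF_UP | IFF_SLAVE, .priv_flags = 0 };

        printf("dev->flags & IFF_SLAVE      -> %d (matches)\n",
               !!(d.flags & IFF_SLAVE));
        printf("dev->priv_flags & IFF_SLAVE -> %d (never matches)\n",
               !!(d.priv_flags & IFF_SLAVE));
        return 0;
    }
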
3089 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
3090 index 1d5675e..d8f852d 100644
3091 --- a/net/ipv4/arp.c
3092 +++ b/net/ipv4/arp.c
3093 @@ -906,7 +906,8 @@ static int arp_process(struct sk_buff *skb)
3094 if (addr_type == RTN_UNICAST &&
3095 (arp_fwd_proxy(in_dev, dev, rt) ||
3096 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
3097 - pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
3098 + (rt->dst.dev != dev &&
3099 + pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
3100 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
3101 if (n)
3102 neigh_release(n);
3103 diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
3104 index 3b34d1c..29a07b6 100644
3105 --- a/net/ipv4/ip_forward.c
3106 +++ b/net/ipv4/ip_forward.c
3107 @@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
3108
3109 rt = skb_rtable(skb);
3110
3111 - if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
3112 + if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
3113 goto sr_failed;
3114
3115 if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
3116 diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
3117 index ec93335..42dd1a9 100644
3118 --- a/net/ipv4/ip_options.c
3119 +++ b/net/ipv4/ip_options.c
3120 @@ -568,11 +568,12 @@ void ip_forward_options(struct sk_buff *skb)
3121 ) {
3122 if (srrptr + 3 > srrspace)
3123 break;
3124 - if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
3125 + if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
3126 break;
3127 }
3128 if (srrptr + 3 <= srrspace) {
3129 opt->is_changed = 1;
3130 + ip_hdr(skb)->daddr = opt->nexthop;
3131 ip_rt_get_source(&optptr[srrptr-1], skb, rt);
3132 optptr[2] = srrptr+4;
3133 } else if (net_ratelimit())
3134 @@ -640,6 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
3135 }
3136 if (srrptr <= srrspace) {
3137 opt->srr_is_hit = 1;
3138 + opt->nexthop = nexthop;
3139 opt->is_changed = 1;
3140 }
3141 return 0;
3142 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3143 index 65ff2e5..6b95f74 100644
3144 --- a/net/ipv4/route.c
3145 +++ b/net/ipv4/route.c
3146 @@ -1369,11 +1369,41 @@ static void rt_del(unsigned hash, struct rtable *rt)
3147 spin_unlock_bh(rt_hash_lock_addr(hash));
3148 }
3149
3150 +static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
3151 +{
3152 + struct rtable *rt = (struct rtable *) dst;
3153 + __be32 orig_gw = rt->rt_gateway;
3154 + struct neighbour *n, *old_n;
3155 +
3156 + dst_confirm(&rt->dst);
3157 +
3158 + rt->rt_gateway = peer->redirect_learned.a4;
3159 + n = __arp_bind_neighbour(&rt->dst, rt->rt_gateway);
3160 + if (IS_ERR(n))
3161 + return PTR_ERR(n);
3162 + old_n = xchg(&rt->dst._neighbour, n);
3163 + if (old_n)
3164 + neigh_release(old_n);
3165 + if (!n || !(n->nud_state & NUD_VALID)) {
3166 + if (n)
3167 + neigh_event_send(n, NULL);
3168 + rt->rt_gateway = orig_gw;
3169 + return -EAGAIN;
3170 + } else {
3171 + rt->rt_flags |= RTCF_REDIRECTED;
3172 + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
3173 + }
3174 + return 0;
3175 +}
3176 +
3177 /* called in rcu_read_lock() section */
3178 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
3179 __be32 saddr, struct net_device *dev)
3180 {
3181 + int s, i;
3182 struct in_device *in_dev = __in_dev_get_rcu(dev);
3183 + __be32 skeys[2] = { saddr, 0 };
3184 + int ikeys[2] = { dev->ifindex, 0 };
3185 struct inet_peer *peer;
3186 struct net *net;
3187
3188 @@ -1396,13 +1426,43 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
3189 goto reject_redirect;
3190 }
3191
3192 - peer = inet_getpeer_v4(daddr, 1);
3193 - if (peer) {
3194 - peer->redirect_learned.a4 = new_gw;
3195 + for (s = 0; s < 2; s++) {
3196 + for (i = 0; i < 2; i++) {
3197 + unsigned int hash;
3198 + struct rtable __rcu **rthp;
3199 + struct rtable *rt;
3200
3201 - inet_putpeer(peer);
3202 + hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
3203
3204 - atomic_inc(&__rt_peer_genid);
3205 + rthp = &rt_hash_table[hash].chain;
3206 +
3207 + while ((rt = rcu_dereference(*rthp)) != NULL) {
3208 + rthp = &rt->dst.rt_next;
3209 +
3210 + if (rt->rt_key_dst != daddr ||
3211 + rt->rt_key_src != skeys[s] ||
3212 + rt->rt_oif != ikeys[i] ||
3213 + rt_is_input_route(rt) ||
3214 + rt_is_expired(rt) ||
3215 + !net_eq(dev_net(rt->dst.dev), net) ||
3216 + rt->dst.error ||
3217 + rt->dst.dev != dev ||
3218 + rt->rt_gateway != old_gw)
3219 + continue;
3220 +
3221 + if (!rt->peer)
3222 + rt_bind_peer(rt, rt->rt_dst, 1);
3223 +
3224 + peer = rt->peer;
3225 + if (peer) {
3226 + if (peer->redirect_learned.a4 != new_gw) {
3227 + peer->redirect_learned.a4 = new_gw;
3228 + atomic_inc(&__rt_peer_genid);
3229 + }
3230 + check_peer_redir(&rt->dst, peer);
3231 + }
3232 + }
3233 + }
3234 }
3235 return;
3236
3237 @@ -1689,33 +1749,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
3238 }
3239 }
3240
3241 -static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
3242 -{
3243 - struct rtable *rt = (struct rtable *) dst;
3244 - __be32 orig_gw = rt->rt_gateway;
3245 - struct neighbour *n, *old_n;
3246 -
3247 - dst_confirm(&rt->dst);
3248 -
3249 - rt->rt_gateway = peer->redirect_learned.a4;
3250 - n = __arp_bind_neighbour(&rt->dst, rt->rt_gateway);
3251 - if (IS_ERR(n))
3252 - return PTR_ERR(n);
3253 - old_n = xchg(&rt->dst._neighbour, n);
3254 - if (old_n)
3255 - neigh_release(old_n);
3256 - if (!n || !(n->nud_state & NUD_VALID)) {
3257 - if (n)
3258 - neigh_event_send(n, NULL);
3259 - rt->rt_gateway = orig_gw;
3260 - return -EAGAIN;
3261 - } else {
3262 - rt->rt_flags |= RTCF_REDIRECTED;
3263 - call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
3264 - }
3265 - return 0;
3266 -}
3267 -
3268 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3269 {
3270 struct rtable *rt = (struct rtable *) dst;
3271 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3272 index c68040f..ee08f11f 100644
3273 --- a/net/ipv4/tcp_input.c
3274 +++ b/net/ipv4/tcp_input.c
3275 @@ -1289,25 +1289,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
3276 return in_sack;
3277 }
3278
3279 -static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
3280 - struct tcp_sacktag_state *state,
3281 +/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
3282 +static u8 tcp_sacktag_one(struct sock *sk,
3283 + struct tcp_sacktag_state *state, u8 sacked,
3284 + u32 start_seq, u32 end_seq,
3285 int dup_sack, int pcount)
3286 {
3287 struct tcp_sock *tp = tcp_sk(sk);
3288 - u8 sacked = TCP_SKB_CB(skb)->sacked;
3289 int fack_count = state->fack_count;
3290
3291 /* Account D-SACK for retransmitted packet. */
3292 if (dup_sack && (sacked & TCPCB_RETRANS)) {
3293 if (tp->undo_marker && tp->undo_retrans &&
3294 - after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
3295 + after(end_seq, tp->undo_marker))
3296 tp->undo_retrans--;
3297 if (sacked & TCPCB_SACKED_ACKED)
3298 state->reord = min(fack_count, state->reord);
3299 }
3300
3301 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
3302 - if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
3303 + if (!after(end_seq, tp->snd_una))
3304 return sacked;
3305
3306 if (!(sacked & TCPCB_SACKED_ACKED)) {
3307 @@ -1326,13 +1327,13 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
3308 /* New sack for not retransmitted frame,
3309 * which was in hole. It is reordering.
3310 */
3311 - if (before(TCP_SKB_CB(skb)->seq,
3312 + if (before(start_seq,
3313 tcp_highest_sack_seq(tp)))
3314 state->reord = min(fack_count,
3315 state->reord);
3316
3317 /* SACK enhanced F-RTO (RFC4138; Appendix B) */
3318 - if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
3319 + if (!after(end_seq, tp->frto_highmark))
3320 state->flag |= FLAG_ONLY_ORIG_SACKED;
3321 }
3322
3323 @@ -1350,8 +1351,7 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
3324
3325 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
3326 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
3327 - before(TCP_SKB_CB(skb)->seq,
3328 - TCP_SKB_CB(tp->lost_skb_hint)->seq))
3329 + before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
3330 tp->lost_cnt_hint += pcount;
3331
3332 if (fack_count > tp->fackets_out)
3333 @@ -1370,6 +1370,9 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
3334 return sacked;
3335 }
3336
3337 +/* Shift newly-SACKed bytes from this skb to the immediately previous
3338 + * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
3339 + */
3340 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
3341 struct tcp_sacktag_state *state,
3342 unsigned int pcount, int shifted, int mss,
3343 @@ -1377,10 +1380,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
3344 {
3345 struct tcp_sock *tp = tcp_sk(sk);
3346 struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
3347 + u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
3348 + u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
3349
3350 BUG_ON(!pcount);
3351
3352 - if (skb == tp->lost_skb_hint)
3353 + /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
3354 + if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
3355 tp->lost_cnt_hint += pcount;
3356
3357 TCP_SKB_CB(prev)->end_seq += shifted;
3358 @@ -1406,8 +1412,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
3359 skb_shinfo(skb)->gso_type = 0;
3360 }
3361
3362 - /* We discard results */
3363 - tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
3364 + /* Adjust counters and hints for the newly sacked sequence range but
3365 + * discard the return value since prev is already marked.
3366 + */
3367 + tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
3368 + start_seq, end_seq, dup_sack, pcount);
3369
3370 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
3371 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
3372 @@ -1646,10 +1655,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
3373 break;
3374
3375 if (in_sack) {
3376 - TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
3377 - state,
3378 - dup_sack,
3379 - tcp_skb_pcount(skb));
3380 + TCP_SKB_CB(skb)->sacked =
3381 + tcp_sacktag_one(sk,
3382 + state,
3383 + TCP_SKB_CB(skb)->sacked,
3384 + TCP_SKB_CB(skb)->seq,
3385 + TCP_SKB_CB(skb)->end_seq,
3386 + dup_sack,
3387 + tcp_skb_pcount(skb));
3388
3389 if (!before(TCP_SKB_CB(skb)->seq,
3390 tcp_highest_sack_seq(tp)))
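
The SACK hunks pass the newly-SACKed start_seq/end_seq explicitly because, by the time tcp_shifted_skb() runs, those bytes already live in the previous skb and skb's own control block no longer describes them. All of the range tests rest on wraparound-safe 32-bit comparisons; a small self-contained sketch is below (before()/after() follow the kernel's net/tcp.h definitions, the sequence values are arbitrary).

    #include <assert.h>
    #include <stdint.h>

    /* nonzero iff seq1 comes strictly before seq2, modulo 2^32 */
    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }
    #define after(seq2, seq1) before(seq1, seq2)

    int main(void)
    {
        uint32_t start_seq = 0xffffff00u;          /* near the 2^32 wrap */
        uint32_t shifted   = 0x200;
        uint32_t end_seq   = start_seq + shifted;  /* wraps to 0x100 */

        /* the shifted range still ends "after" its start despite the wrap */
        assert(after(end_seq, start_seq));
        assert(before(start_seq, end_seq));
        return 0;
    }

Casting the unsigned difference to a signed 32-bit value keeps the comparison valid across the wrap, which is why end_seq = start_seq + shifted needs no special-casing.
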
3391 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
3392 index 53b0125..04c6592 100644
3393 --- a/net/ipv4/tcp_ipv4.c
3394 +++ b/net/ipv4/tcp_ipv4.c
3395 @@ -650,6 +650,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
3396 arg.iov[0].iov_len, IPPROTO_TCP, 0);
3397 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
3398 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
3399 + /* When the socket is gone, all binding information is lost.
3400 + * Routing might fail in this case, so use iif for oif to
3401 + * make sure we can deliver it.
3402 + */
3403 + arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
3404
3405 net = dev_net(skb_dst(skb)->dev);
3406 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
3407 diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
3408 index 82a8099..86e3cc1 100644
3409 --- a/net/ipv6/ip6mr.c
3410 +++ b/net/ipv6/ip6mr.c
3411 @@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
3412 int err;
3413
3414 err = ip6mr_fib_lookup(net, &fl6, &mrt);
3415 - if (err < 0)
3416 + if (err < 0) {
3417 + kfree_skb(skb);
3418 return err;
3419 + }
3420
3421 read_lock(&mrt_lock);
3422 dev->stats.tx_bytes += skb->len;
3423 @@ -2051,8 +2053,10 @@ int ip6_mr_input(struct sk_buff *skb)
3424 int err;
3425
3426 err = ip6mr_fib_lookup(net, &fl6, &mrt);
3427 - if (err < 0)
3428 + if (err < 0) {
3429 + kfree_skb(skb);
3430 return err;
3431 + }
3432
3433 read_lock(&mrt_lock);
3434 cache = ip6mr_cache_find(mrt,
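
Both ip6mr hunks plug the same leak: once these functions own the skb, an early error return must kfree_skb() it. A user-space analogue of the ownership rule (buf, lookup and deliver are illustrative stand-ins for skb, ip6mr_fib_lookup and the delivery path):

    #include <stdlib.h>

    struct buf { char data[64]; };

    static int lookup(const struct buf *b) { return b ? 0 : -1; }

    /* takes ownership: frees the buffer itself */
    static int deliver(struct buf *b) { free(b); return 0; }

    static int consume_packet(struct buf *b)
    {
        int err = lookup(b);

        if (err < 0) {
            free(b);       /* the fix: release on the error path too */
            return err;
        }
        return deliver(b); /* ownership handed on */
    }

    int main(void)
    {
        struct buf *b = calloc(1, sizeof(*b));
        return b ? consume_packet(b) : 1;
    }
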
3435 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
3436 index 866f269..1e36fb3 100644
3437 --- a/net/mac80211/main.c
3438 +++ b/net/mac80211/main.c
3439 @@ -910,6 +910,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
3440 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
3441 result);
3442
3443 + ieee80211_led_init(local);
3444 +
3445 rtnl_lock();
3446
3447 result = ieee80211_init_rate_ctrl_alg(local,
3448 @@ -931,8 +933,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
3449
3450 rtnl_unlock();
3451
3452 - ieee80211_led_init(local);
3453 -
3454 local->network_latency_notifier.notifier_call =
3455 ieee80211_max_network_latency;
3456 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
3457 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
3458 index 24c28d2..0787bed 100644
3459 --- a/net/netfilter/ipvs/ip_vs_core.c
3460 +++ b/net/netfilter/ipvs/ip_vs_core.c
3461 @@ -233,6 +233,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
3462 __be16 dport = 0; /* destination port to forward */
3463 unsigned int flags;
3464 struct ip_vs_conn_param param;
3465 + const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
3466 union nf_inet_addr snet; /* source network of the client,
3467 after masking */
3468
3469 @@ -268,7 +269,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
3470 {
3471 int protocol = iph.protocol;
3472 const union nf_inet_addr *vaddr = &iph.daddr;
3473 - const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
3474 __be16 vport = 0;
3475
3476 if (dst_port == svc->port) {
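
The ip_vs hunk hoists the fwmark union out of an inner block because a pointer to it is captured into param and used after that block has ended, leaving a dangling stack pointer. A minimal demonstration of that lifetime bug (all names here are illustrative; GCC 12+ flags the bad pattern with -Wdangling-pointer):

    #include <stdio.h>

    struct param { const unsigned int *addr; };

    int main(void)
    {
        struct param p;

        {
            unsigned int fwmark = 42;   /* storage dies at the closing brace */
            p.addr = &fwmark;           /* BUG: dangling once the block exits */
        }
        /* printf("%u\n", *p.addr);        undefined behaviour here */

        unsigned int fwmark = 42;       /* fix: declare at function scope */
        p.addr = &fwmark;
        printf("%u\n", *p.addr);        /* fine: still in scope */
        return 0;
    }
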
3477 diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
3478 index 06afbae..178ee83 100644
3479 --- a/net/sched/sch_choke.c
3480 +++ b/net/sched/sch_choke.c
3481 @@ -225,8 +225,7 @@ struct choke_skb_cb {
3482
3483 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
3484 {
3485 - BUILD_BUG_ON(sizeof(skb->cb) <
3486 - sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
3487 + qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
3488 return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
3489 }
3490
3491 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
3492 index 69c35f6..2f68459 100644
3493 --- a/net/sched/sch_netem.c
3494 +++ b/net/sched/sch_netem.c
3495 @@ -117,8 +117,7 @@ struct netem_skb_cb {
3496
3497 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
3498 {
3499 - BUILD_BUG_ON(sizeof(skb->cb) <
3500 - sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
3501 + qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
3502 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
3503 }
3504
3505 @@ -382,8 +381,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
3506 q->counter = 0;
3507
3508 __skb_queue_head(&q->qdisc->q, skb);
3509 - q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
3510 - q->qdisc->qstats.requeues++;
3511 + sch->qstats.backlog += qdisc_pkt_len(skb);
3512 + sch->qstats.requeues++;
3513 ret = NET_XMIT_SUCCESS;
3514 }
3515
3516 diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
3517 index 0a833d0..47ee29f 100644
3518 --- a/net/sched/sch_sfb.c
3519 +++ b/net/sched/sch_sfb.c
3520 @@ -93,8 +93,7 @@ struct sfb_skb_cb {
3521
3522 static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
3523 {
3524 - BUILD_BUG_ON(sizeof(skb->cb) <
3525 - sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
3526 + qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
3527 return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
3528 }
3529
3530 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
3531 index f6cbc3d..3c6c0b1 100644
3532 --- a/scripts/package/builddeb
3533 +++ b/scripts/package/builddeb
3534 @@ -238,14 +238,14 @@ EOF
3535 fi
3536
3537 # Build header package
3538 -(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
3539 -(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
3540 -(cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
3541 +(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
3542 +(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
3543 +(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
3544 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
3545 mkdir -p "$destdir"
3546 -(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
3547 -(cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
3548 -rm -f /tmp/files$$ /tmp/objfiles$$
3549 +(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
3550 +(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
3551 +rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
3552 arch=$(dpkg --print-architecture)
3553
3554 cat <<EOF >> debian/control
3555 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3556 index 3c2381c..81ecd6c 100644
3557 --- a/sound/pci/hda/patch_conexant.c
3558 +++ b/sound/pci/hda/patch_conexant.c
3559 @@ -1917,6 +1917,10 @@ static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid,
3560 snd_hda_codec_write(codec, nid, 0,
3561 AC_VERB_SET_UNSOLICITED_ENABLE,
3562 AC_USRSP_EN | event);
3563 +}
3564 +
3565 +static void cxt5051_init_mic_jack(struct hda_codec *codec, hda_nid_t nid)
3566 +{
3567 snd_hda_input_jack_add(codec, nid, SND_JACK_MICROPHONE, NULL);
3568 snd_hda_input_jack_report(codec, nid);
3569 }
3570 @@ -1934,7 +1938,6 @@ static int cxt5051_init(struct hda_codec *codec)
3571 struct conexant_spec *spec = codec->spec;
3572
3573 conexant_init(codec);
3574 - conexant_init_jacks(codec);
3575
3576 if (spec->auto_mic & AUTO_MIC_PORTB)
3577 cxt5051_init_mic_port(codec, 0x17, CXT5051_PORTB_EVENT);
3578 @@ -2067,6 +2070,12 @@ static int patch_cxt5051(struct hda_codec *codec)
3579 if (spec->beep_amp)
3580 snd_hda_attach_beep_device(codec, spec->beep_amp);
3581
3582 + conexant_init_jacks(codec);
3583 + if (spec->auto_mic & AUTO_MIC_PORTB)
3584 + cxt5051_init_mic_jack(codec, 0x17);
3585 + if (spec->auto_mic & AUTO_MIC_PORTC)
3586 + cxt5051_init_mic_jack(codec, 0x18);
3587 +
3588 return 0;
3589 }
3590
3591 diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
3592 index 6d0cae4..c850e3d 100644
3593 --- a/sound/soc/codecs/wm8962.c
3594 +++ b/sound/soc/codecs/wm8962.c
3595 @@ -2373,7 +2373,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
3596 }
3597 }
3598
3599 -static const char *st_text[] = { "None", "Right", "Left" };
3600 +static const char *st_text[] = { "None", "Left", "Right" };
3601
3602 static const struct soc_enum str_enum =
3603 SOC_ENUM_SINGLE(WM8962_DAC_DSP_MIXING_1, 2, 3, st_text);
