/[linux-patches]/genpatches-2.6/tags/3.4-10/1008_linux-3.4.9.patch
Gentoo

Contents of /genpatches-2.6/tags/3.4-10/1008_linux-3.4.9.patch

Parent Directory | Revision Log


Revision 2192 - (show annotations) (download)
Fri Aug 17 23:46:53 2012 UTC (5 years, 11 months ago) by mpagano
File size: 83863 byte(s)
3.4-10 release
1 diff --git a/MAINTAINERS b/MAINTAINERS
2 index b362709..a60009d 100644
3 --- a/MAINTAINERS
4 +++ b/MAINTAINERS
5 @@ -5566,7 +5566,7 @@ F: Documentation/blockdev/ramdisk.txt
6 F: drivers/block/brd.c
7
8 RANDOM NUMBER DRIVER
9 -M: Matt Mackall <mpm@selenic.com>
10 +M: Theodore Ts'o" <tytso@mit.edu>
11 S: Maintained
12 F: drivers/char/random.c
13
14 diff --git a/Makefile b/Makefile
15 index a3c0c43..9549547 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 3
20 PATCHLEVEL = 4
21 -SUBLEVEL = 8
22 +SUBLEVEL = 9
23 EXTRAVERSION =
24 NAME = Saber-toothed Squirrel
25
26 diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
27 index 2dccce4..7541a91 100644
28 --- a/arch/arm/boot/dts/imx53-ard.dts
29 +++ b/arch/arm/boot/dts/imx53-ard.dts
30 @@ -70,10 +70,30 @@
31 interrupt-parent = <&gpio2>;
32 interrupts = <31>;
33 reg-io-width = <4>;
34 + /*
35 + * VDD33A and VDDVARIO of LAN9220 are supplied by
36 + * SW4_3V3 of LTC3589. Before the regulator driver
37 + * for this PMIC is available, we use a fixed dummy
38 + * 3V3 regulator to get LAN9220 driver probing work.
39 + */
40 + vdd33a-supply = <&reg_3p3v>;
41 + vddvario-supply = <&reg_3p3v>;
42 smsc,irq-push-pull;
43 };
44 };
45
46 + regulators {
47 + compatible = "simple-bus";
48 +
49 + reg_3p3v: 3p3v {
50 + compatible = "regulator-fixed";
51 + regulator-name = "3P3V";
52 + regulator-min-microvolt = <3300000>;
53 + regulator-max-microvolt = <3300000>;
54 + regulator-always-on;
55 + };
56 + };
57 +
58 gpio-keys {
59 compatible = "gpio-keys";
60
61 diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
62 index 1ebbf45..70d0bf4 100644
63 --- a/arch/arm/configs/mxs_defconfig
64 +++ b/arch/arm/configs/mxs_defconfig
65 @@ -32,7 +32,6 @@ CONFIG_NO_HZ=y
66 CONFIG_HIGH_RES_TIMERS=y
67 CONFIG_PREEMPT_VOLUNTARY=y
68 CONFIG_AEABI=y
69 -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
70 CONFIG_AUTO_ZRELADDR=y
71 CONFIG_FPE_NWFPE=y
72 CONFIG_NET=y
73 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
74 index 1252a26..42dec04 100644
75 --- a/arch/arm/include/asm/cacheflush.h
76 +++ b/arch/arm/include/asm/cacheflush.h
77 @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
78 static inline void
79 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
80 {
81 - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
82 + struct mm_struct *mm = vma->vm_mm;
83 +
84 + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
85 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
86 vma->vm_flags);
87 }
88 @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
89 static inline void
90 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
91 {
92 - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
93 + struct mm_struct *mm = vma->vm_mm;
94 +
95 + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
96 unsigned long addr = user_addr & PAGE_MASK;
97 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
98 }
99 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
100 index 7fd3ad0..8f29865 100644
101 --- a/arch/arm/kernel/entry-armv.S
102 +++ b/arch/arm/kernel/entry-armv.S
103 @@ -244,6 +244,19 @@ svc_preempt:
104 b 1b
105 #endif
106
107 +__und_fault:
108 + @ Correct the PC such that it is pointing at the instruction
109 + @ which caused the fault. If the faulting instruction was ARM
110 + @ the PC will be pointing at the next instruction, and have to
111 + @ subtract 4. Otherwise, it is Thumb, and the PC will be
112 + @ pointing at the second half of the Thumb instruction. We
113 + @ have to subtract 2.
114 + ldr r2, [r0, #S_PC]
115 + sub r2, r2, r1
116 + str r2, [r0, #S_PC]
117 + b do_undefinstr
118 +ENDPROC(__und_fault)
119 +
120 .align 5
121 __und_svc:
122 #ifdef CONFIG_KPROBES
123 @@ -261,25 +274,32 @@ __und_svc:
124 @
125 @ r0 - instruction
126 @
127 -#ifndef CONFIG_THUMB2_KERNEL
128 +#ifndef CONFIG_THUMB2_KERNEL
129 ldr r0, [r4, #-4]
130 #else
131 + mov r1, #2
132 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
133 cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
134 - ldrhhs r9, [r4] @ bottom 16 bits
135 - orrhs r0, r9, r0, lsl #16
136 + blo __und_svc_fault
137 + ldrh r9, [r4] @ bottom 16 bits
138 + add r4, r4, #2
139 + str r4, [sp, #S_PC]
140 + orr r0, r9, r0, lsl #16
141 #endif
142 - adr r9, BSYM(1f)
143 + adr r9, BSYM(__und_svc_finish)
144 mov r2, r4
145 bl call_fpe
146
147 + mov r1, #4 @ PC correction to apply
148 +__und_svc_fault:
149 mov r0, sp @ struct pt_regs *regs
150 - bl do_undefinstr
151 + bl __und_fault
152
153 @
154 @ IRQs off again before pulling preserved data off the stack
155 @
156 -1: disable_irq_notrace
157 +__und_svc_finish:
158 + disable_irq_notrace
159
160 @
161 @ restore SPSR and restart the instruction
162 @@ -423,25 +443,33 @@ __und_usr:
163 mov r2, r4
164 mov r3, r5
165
166 + @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
167 + @ faulting instruction depending on Thumb mode.
168 + @ r3 = regs->ARM_cpsr
169 @
170 - @ fall through to the emulation code, which returns using r9 if
171 - @ it has emulated the instruction, or the more conventional lr
172 - @ if we are to treat this as a real undefined instruction
173 - @
174 - @ r0 - instruction
175 + @ The emulation code returns using r9 if it has emulated the
176 + @ instruction, or the more conventional lr if we are to treat
177 + @ this as a real undefined instruction
178 @
179 adr r9, BSYM(ret_from_exception)
180 - adr lr, BSYM(__und_usr_unknown)
181 +
182 tst r3, #PSR_T_BIT @ Thumb mode?
183 - itet eq @ explicit IT needed for the 1f label
184 - subeq r4, r2, #4 @ ARM instr at LR - 4
185 - subne r4, r2, #2 @ Thumb instr at LR - 2
186 -1: ldreqt r0, [r4]
187 + bne __und_usr_thumb
188 + sub r4, r2, #4 @ ARM instr at LR - 4
189 +1: ldrt r0, [r4]
190 #ifdef CONFIG_CPU_ENDIAN_BE8
191 - reveq r0, r0 @ little endian instruction
192 + rev r0, r0 @ little endian instruction
193 #endif
194 - beq call_fpe
195 + @ r0 = 32-bit ARM instruction which caused the exception
196 + @ r2 = PC value for the following instruction (:= regs->ARM_pc)
197 + @ r4 = PC value for the faulting instruction
198 + @ lr = 32-bit undefined instruction function
199 + adr lr, BSYM(__und_usr_fault_32)
200 + b call_fpe
201 +
202 +__und_usr_thumb:
203 @ Thumb instruction
204 + sub r4, r2, #2 @ First half of thumb instr at LR - 2
205 #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
206 /*
207 * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
208 @@ -455,7 +483,7 @@ __und_usr:
209 ldr r5, .LCcpu_architecture
210 ldr r5, [r5]
211 cmp r5, #CPU_ARCH_ARMv7
212 - blo __und_usr_unknown
213 + blo __und_usr_fault_16 @ 16bit undefined instruction
214 /*
215 * The following code won't get run unless the running CPU really is v7, so
216 * coding round the lack of ldrht on older arches is pointless. Temporarily
217 @@ -463,15 +491,18 @@ __und_usr:
218 */
219 .arch armv6t2
220 #endif
221 -2:
222 - ARM( ldrht r5, [r4], #2 )
223 - THUMB( ldrht r5, [r4] )
224 - THUMB( add r4, r4, #2 )
225 +2: ldrht r5, [r4]
226 cmp r5, #0xe800 @ 32bit instruction if xx != 0
227 - blo __und_usr_unknown
228 -3: ldrht r0, [r4]
229 + blo __und_usr_fault_16 @ 16bit undefined instruction
230 +3: ldrht r0, [r2]
231 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
232 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
233 orr r0, r0, r5, lsl #16
234 + adr lr, BSYM(__und_usr_fault_32)
235 + @ r0 = the two 16-bit Thumb instructions which caused the exception
236 + @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
237 + @ r4 = PC value for the first 16-bit Thumb instruction
238 + @ lr = 32bit undefined instruction function
239
240 #if __LINUX_ARM_ARCH__ < 7
241 /* If the target arch was overridden, change it back: */
242 @@ -482,17 +513,13 @@ __und_usr:
243 #endif
244 #endif /* __LINUX_ARM_ARCH__ < 7 */
245 #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
246 - b __und_usr_unknown
247 + b __und_usr_fault_16
248 #endif
249 - UNWIND(.fnend )
250 + UNWIND(.fnend)
251 ENDPROC(__und_usr)
252
253 - @
254 - @ fallthrough to call_fpe
255 - @
256 -
257 /*
258 - * The out of line fixup for the ldrt above.
259 + * The out of line fixup for the ldrt instructions above.
260 */
261 .pushsection .fixup, "ax"
262 4: mov pc, r9
263 @@ -523,11 +550,12 @@ ENDPROC(__und_usr)
264 * NEON handler code.
265 *
266 * Emulators may wish to make use of the following registers:
267 - * r0 = instruction opcode.
268 - * r2 = PC+4
269 + * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
270 + * r2 = PC value to resume execution after successful emulation
271 * r9 = normal "successful" return address
272 - * r10 = this threads thread_info structure.
273 + * r10 = this threads thread_info structure
274 * lr = unrecognised instruction return address
275 + * IRQs disabled, FIQs enabled.
276 */
277 @
278 @ Fall-through from Thumb-2 __und_usr
279 @@ -662,12 +690,17 @@ ENTRY(no_fp)
280 mov pc, lr
281 ENDPROC(no_fp)
282
283 -__und_usr_unknown:
284 - enable_irq
285 +__und_usr_fault_32:
286 + mov r1, #4
287 + b 1f
288 +__und_usr_fault_16:
289 + mov r1, #2
290 +1: enable_irq
291 mov r0, sp
292 adr lr, BSYM(ret_from_exception)
293 - b do_undefinstr
294 -ENDPROC(__und_usr_unknown)
295 + b __und_fault
296 +ENDPROC(__und_usr_fault_32)
297 +ENDPROC(__und_usr_fault_16)
298
299 .align 5
300 __pabt_usr:
301 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
302 index 2b7b017..48f3624 100644
303 --- a/arch/arm/kernel/process.c
304 +++ b/arch/arm/kernel/process.c
305 @@ -267,6 +267,7 @@ void machine_shutdown(void)
306 void machine_halt(void)
307 {
308 machine_shutdown();
309 + local_irq_disable();
310 while (1);
311 }
312
313 @@ -288,6 +289,7 @@ void machine_restart(char *cmd)
314
315 /* Whoops - the platform was unable to reboot. Tell the user! */
316 printk("Reboot failed -- System halted\n");
317 + local_irq_disable();
318 while (1);
319 }
320
321 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
322 index 8f46446..7babc3f 100644
323 --- a/arch/arm/kernel/smp.c
324 +++ b/arch/arm/kernel/smp.c
325 @@ -590,7 +590,8 @@ void smp_send_stop(void)
326
327 cpumask_copy(&mask, cpu_online_mask);
328 cpumask_clear_cpu(smp_processor_id(), &mask);
329 - smp_cross_call(&mask, IPI_CPU_STOP);
330 + if (!cpumask_empty(&mask))
331 + smp_cross_call(&mask, IPI_CPU_STOP);
332
333 /* Wait up to one second for other CPUs to stop */
334 timeout = USEC_PER_SEC;
335 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
336 index 63d402f..a8ad1e3 100644
337 --- a/arch/arm/kernel/traps.c
338 +++ b/arch/arm/kernel/traps.c
339 @@ -370,18 +370,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
340
341 asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
342 {
343 - unsigned int correction = thumb_mode(regs) ? 2 : 4;
344 unsigned int instr;
345 siginfo_t info;
346 void __user *pc;
347
348 - /*
349 - * According to the ARM ARM, PC is 2 or 4 bytes ahead,
350 - * depending whether we're in Thumb mode or not.
351 - * Correct this offset.
352 - */
353 - regs->ARM_pc -= correction;
354 -
355 pc = (void __user *)instruction_pointer(regs);
356
357 if (processor_mode(regs) == SVC_MODE) {
358 diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
359 index 5905ed1..d89d87ae 100644
360 --- a/arch/arm/mach-pxa/raumfeld.c
361 +++ b/arch/arm/mach-pxa/raumfeld.c
362 @@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
363
364 static struct eeti_ts_platform_data eeti_ts_pdata = {
365 .irq_active_high = 1,
366 + .irq_gpio = GPIO_TOUCH_IRQ,
367 };
368
369 static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
370 .type = "eeti_ts",
371 .addr = 0x0a,
372 - .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
373 .platform_data = &eeti_ts_pdata,
374 };
375
376 diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
377 index 845f461..c202113 100644
378 --- a/arch/arm/mm/tlb-v7.S
379 +++ b/arch/arm/mm/tlb-v7.S
380 @@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
381 dsb
382 mov r0, r0, lsr #PAGE_SHIFT @ align address
383 mov r1, r1, lsr #PAGE_SHIFT
384 +#ifdef CONFIG_ARM_ERRATA_720789
385 + mov r3, #0
386 +#else
387 asid r3, r3 @ mask ASID
388 +#endif
389 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
390 mov r1, r1, lsl #PAGE_SHIFT
391 1:
392 +#ifdef CONFIG_ARM_ERRATA_720789
393 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
394 +#else
395 ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
396 +#endif
397 ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
398
399 add r0, r0, #PAGE_SZ
400 @@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
401 mov r0, r0, lsl #PAGE_SHIFT
402 mov r1, r1, lsl #PAGE_SHIFT
403 1:
404 +#ifdef CONFIG_ARM_ERRATA_720789
405 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
406 +#else
407 ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
408 +#endif
409 ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
410 add r0, r0, #PAGE_SZ
411 cmp r0, r1
412 diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
413 index 4fa9903..cc926c9 100644
414 --- a/arch/arm/vfp/entry.S
415 +++ b/arch/arm/vfp/entry.S
416 @@ -7,18 +7,20 @@
417 * This program is free software; you can redistribute it and/or modify
418 * it under the terms of the GNU General Public License version 2 as
419 * published by the Free Software Foundation.
420 - *
421 - * Basic entry code, called from the kernel's undefined instruction trap.
422 - * r0 = faulted instruction
423 - * r5 = faulted PC+4
424 - * r9 = successful return
425 - * r10 = thread_info structure
426 - * lr = failure return
427 */
428 #include <asm/thread_info.h>
429 #include <asm/vfpmacros.h>
430 #include "../kernel/entry-header.S"
431
432 +@ VFP entry point.
433 +@
434 +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
435 +@ r2 = PC value to resume execution after successful emulation
436 +@ r9 = normal "successful" return address
437 +@ r10 = this threads thread_info structure
438 +@ lr = unrecognised instruction return address
439 +@ IRQs disabled.
440 +@
441 ENTRY(do_vfp)
442 #ifdef CONFIG_PREEMPT
443 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
444 diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
445 index 2d30c7f..3a0efaa 100644
446 --- a/arch/arm/vfp/vfphw.S
447 +++ b/arch/arm/vfp/vfphw.S
448 @@ -61,13 +61,13 @@
449
450 @ VFP hardware support entry point.
451 @
452 -@ r0 = faulted instruction
453 -@ r2 = faulted PC+4
454 -@ r9 = successful return
455 +@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
456 +@ r2 = PC value to resume execution after successful emulation
457 +@ r9 = normal "successful" return address
458 @ r10 = vfp_state union
459 @ r11 = CPU number
460 -@ lr = failure return
461 -
462 +@ lr = unrecognised instruction return address
463 +@ IRQs enabled.
464 ENTRY(vfp_support_entry)
465 DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
466
467 @@ -161,9 +161,12 @@ vfp_hw_state_valid:
468 @ exception before retrying branch
469 @ out before setting an FPEXC that
470 @ stops us reading stuff
471 - VFPFMXR FPEXC, r1 @ restore FPEXC last
472 - sub r2, r2, #4
473 - str r2, [sp, #S_PC] @ retry the instruction
474 + VFPFMXR FPEXC, r1 @ Restore FPEXC last
475 + sub r2, r2, #4 @ Retry current instruction - if Thumb
476 + str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
477 + @ else it's one 32-bit instruction, so
478 + @ always subtract 4 from the following
479 + @ instruction address.
480 #ifdef CONFIG_PREEMPT
481 get_thread_info r10
482 ldr r4, [r10, #TI_PREEMPT] @ get preempt count
483 diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
484 index b0197b2..1ef803a 100644
485 --- a/arch/arm/vfp/vfpmodule.c
486 +++ b/arch/arm/vfp/vfpmodule.c
487 @@ -457,10 +457,16 @@ static int vfp_pm_suspend(void)
488
489 /* disable, just in case */
490 fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
491 + } else if (vfp_current_hw_state[ti->cpu]) {
492 +#ifndef CONFIG_SMP
493 + fmxr(FPEXC, fpexc | FPEXC_EN);
494 + vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
495 + fmxr(FPEXC, fpexc);
496 +#endif
497 }
498
499 /* clear any information we had about last context state */
500 - memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
501 + vfp_current_hw_state[ti->cpu] = NULL;
502
503 return 0;
504 }
505 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
506 index 7d91166..6e6fe18 100644
507 --- a/arch/ia64/include/asm/atomic.h
508 +++ b/arch/ia64/include/asm/atomic.h
509 @@ -17,8 +17,8 @@
510 #include <asm/intrinsics.h>
511
512
513 -#define ATOMIC_INIT(i) ((atomic_t) { (i) })
514 -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
515 +#define ATOMIC_INIT(i) { (i) }
516 +#define ATOMIC64_INIT(i) { (i) }
517
518 #define atomic_read(v) (*(volatile int *)&(v)->counter)
519 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
520 diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
521 index 5c3e088..1034884 100644
522 --- a/arch/ia64/kernel/irq_ia64.c
523 +++ b/arch/ia64/kernel/irq_ia64.c
524 @@ -23,7 +23,6 @@
525 #include <linux/ioport.h>
526 #include <linux/kernel_stat.h>
527 #include <linux/ptrace.h>
528 -#include <linux/random.h> /* for rand_initialize_irq() */
529 #include <linux/signal.h>
530 #include <linux/smp.h>
531 #include <linux/threads.h>
532 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
533 index 1f84794..73ef56c 100644
534 --- a/arch/x86/kernel/alternative.c
535 +++ b/arch/x86/kernel/alternative.c
536 @@ -219,7 +219,7 @@ void __init arch_init_ideal_nops(void)
537 ideal_nops = intel_nops;
538 #endif
539 }
540 -
541 + break;
542 default:
543 #ifdef CONFIG_X86_64
544 ideal_nops = k8_nops;
545 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
546 index c9bda6d..24b852b 100644
547 --- a/arch/x86/kernel/microcode_core.c
548 +++ b/arch/x86/kernel/microcode_core.c
549 @@ -298,20 +298,31 @@ static ssize_t reload_store(struct device *dev,
550 const char *buf, size_t size)
551 {
552 unsigned long val;
553 - int cpu = dev->id;
554 - int ret = 0;
555 - char *end;
556 + int cpu;
557 + ssize_t ret = 0, tmp_ret;
558
559 - val = simple_strtoul(buf, &end, 0);
560 - if (end == buf)
561 + /* allow reload only from the BSP */
562 + if (boot_cpu_data.cpu_index != dev->id)
563 return -EINVAL;
564
565 - if (val == 1) {
566 - get_online_cpus();
567 - if (cpu_online(cpu))
568 - ret = reload_for_cpu(cpu);
569 - put_online_cpus();
570 + ret = kstrtoul(buf, 0, &val);
571 + if (ret)
572 + return ret;
573 +
574 + if (val != 1)
575 + return size;
576 +
577 + get_online_cpus();
578 + for_each_online_cpu(cpu) {
579 + tmp_ret = reload_for_cpu(cpu);
580 + if (tmp_ret != 0)
581 + pr_warn("Error reloading microcode on CPU %d\n", cpu);
582 +
583 + /* save retval of the first encountered reload error */
584 + if (!ret)
585 + ret = tmp_ret;
586 }
587 + put_online_cpus();
588
589 if (!ret)
590 ret = size;
591 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
592 index 0734086..bbac51e 100644
593 --- a/drivers/acpi/processor_driver.c
594 +++ b/drivers/acpi/processor_driver.c
595 @@ -442,7 +442,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
596 /* Normal CPU soft online event */
597 } else {
598 acpi_processor_ppc_has_changed(pr, 0);
599 - acpi_processor_cst_has_changed(pr);
600 + acpi_processor_hotplug(pr);
601 acpi_processor_reevaluate_tstate(pr, action);
602 acpi_processor_tstate_has_changed(pr);
603 }
604 diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
605 index 8b78750..845f97f 100644
606 --- a/drivers/char/mspec.c
607 +++ b/drivers/char/mspec.c
608 @@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
609 vdata->flags = flags;
610 vdata->type = type;
611 spin_lock_init(&vdata->lock);
612 - vdata->refcnt = ATOMIC_INIT(1);
613 + atomic_set(&vdata->refcnt, 1);
614 vma->vm_private_data = vdata;
615
616 vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
617 diff --git a/drivers/char/random.c b/drivers/char/random.c
618 index 4ec04a7..d98b2a6 100644
619 --- a/drivers/char/random.c
620 +++ b/drivers/char/random.c
621 @@ -125,21 +125,26 @@
622 * The current exported interfaces for gathering environmental noise
623 * from the devices are:
624 *
625 + * void add_device_randomness(const void *buf, unsigned int size);
626 * void add_input_randomness(unsigned int type, unsigned int code,
627 * unsigned int value);
628 - * void add_interrupt_randomness(int irq);
629 + * void add_interrupt_randomness(int irq, int irq_flags);
630 * void add_disk_randomness(struct gendisk *disk);
631 *
632 + * add_device_randomness() is for adding data to the random pool that
633 + * is likely to differ between two devices (or possibly even per boot).
634 + * This would be things like MAC addresses or serial numbers, or the
635 + * read-out of the RTC. This does *not* add any actual entropy to the
636 + * pool, but it initializes the pool to different values for devices
637 + * that might otherwise be identical and have very little entropy
638 + * available to them (particularly common in the embedded world).
639 + *
640 * add_input_randomness() uses the input layer interrupt timing, as well as
641 * the event type information from the hardware.
642 *
643 - * add_interrupt_randomness() uses the inter-interrupt timing as random
644 - * inputs to the entropy pool. Note that not all interrupts are good
645 - * sources of randomness! For example, the timer interrupts is not a
646 - * good choice, because the periodicity of the interrupts is too
647 - * regular, and hence predictable to an attacker. Network Interface
648 - * Controller interrupts are a better measure, since the timing of the
649 - * NIC interrupts are more unpredictable.
650 + * add_interrupt_randomness() uses the interrupt timing as random
651 + * inputs to the entropy pool. Using the cycle counters and the irq source
652 + * as inputs, it feeds the randomness roughly once a second.
653 *
654 * add_disk_randomness() uses what amounts to the seek time of block
655 * layer request events, on a per-disk_devt basis, as input to the
656 @@ -248,6 +253,8 @@
657 #include <linux/percpu.h>
658 #include <linux/cryptohash.h>
659 #include <linux/fips.h>
660 +#include <linux/ptrace.h>
661 +#include <linux/kmemcheck.h>
662
663 #ifdef CONFIG_GENERIC_HARDIRQS
664 # include <linux/irq.h>
665 @@ -256,8 +263,12 @@
666 #include <asm/processor.h>
667 #include <asm/uaccess.h>
668 #include <asm/irq.h>
669 +#include <asm/irq_regs.h>
670 #include <asm/io.h>
671
672 +#define CREATE_TRACE_POINTS
673 +#include <trace/events/random.h>
674 +
675 /*
676 * Configuration information
677 */
678 @@ -266,6 +277,8 @@
679 #define SEC_XFER_SIZE 512
680 #define EXTRACT_SIZE 10
681
682 +#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
683 +
684 /*
685 * The minimum number of bits of entropy before we wake up a read on
686 * /dev/random. Should be enough to do a significant reseed.
687 @@ -420,8 +433,10 @@ struct entropy_store {
688 /* read-write data: */
689 spinlock_t lock;
690 unsigned add_ptr;
691 + unsigned input_rotate;
692 int entropy_count;
693 - int input_rotate;
694 + int entropy_total;
695 + unsigned int initialized:1;
696 __u8 last_data[EXTRACT_SIZE];
697 };
698
699 @@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
700 .pool = nonblocking_pool_data
701 };
702
703 +static __u32 const twist_table[8] = {
704 + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
705 + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
706 +
707 /*
708 * This function adds bytes into the entropy "pool". It does not
709 * update the entropy estimate. The caller should call
710 @@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
711 * it's cheap to do so and helps slightly in the expected case where
712 * the entropy is concentrated in the low-order bits.
713 */
714 -static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
715 - int nbytes, __u8 out[64])
716 +static void _mix_pool_bytes(struct entropy_store *r, const void *in,
717 + int nbytes, __u8 out[64])
718 {
719 - static __u32 const twist_table[8] = {
720 - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
721 - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
722 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
723 int input_rotate;
724 int wordmask = r->poolinfo->poolwords - 1;
725 const char *bytes = in;
726 __u32 w;
727 - unsigned long flags;
728
729 - /* Taps are constant, so we can load them without holding r->lock. */
730 tap1 = r->poolinfo->tap1;
731 tap2 = r->poolinfo->tap2;
732 tap3 = r->poolinfo->tap3;
733 tap4 = r->poolinfo->tap4;
734 tap5 = r->poolinfo->tap5;
735
736 - spin_lock_irqsave(&r->lock, flags);
737 - input_rotate = r->input_rotate;
738 - i = r->add_ptr;
739 + smp_rmb();
740 + input_rotate = ACCESS_ONCE(r->input_rotate);
741 + i = ACCESS_ONCE(r->add_ptr);
742
743 /* mix one byte at a time to simplify size handling and churn faster */
744 while (nbytes--) {
745 @@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
746 input_rotate += i ? 7 : 14;
747 }
748
749 - r->input_rotate = input_rotate;
750 - r->add_ptr = i;
751 + ACCESS_ONCE(r->input_rotate) = input_rotate;
752 + ACCESS_ONCE(r->add_ptr) = i;
753 + smp_wmb();
754
755 if (out)
756 for (j = 0; j < 16; j++)
757 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
758 +}
759 +
760 +static void __mix_pool_bytes(struct entropy_store *r, const void *in,
761 + int nbytes, __u8 out[64])
762 +{
763 + trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
764 + _mix_pool_bytes(r, in, nbytes, out);
765 +}
766 +
767 +static void mix_pool_bytes(struct entropy_store *r, const void *in,
768 + int nbytes, __u8 out[64])
769 +{
770 + unsigned long flags;
771
772 + trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
773 + spin_lock_irqsave(&r->lock, flags);
774 + _mix_pool_bytes(r, in, nbytes, out);
775 spin_unlock_irqrestore(&r->lock, flags);
776 }
777
778 -static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
779 +struct fast_pool {
780 + __u32 pool[4];
781 + unsigned long last;
782 + unsigned short count;
783 + unsigned char rotate;
784 + unsigned char last_timer_intr;
785 +};
786 +
787 +/*
788 + * This is a fast mixing routine used by the interrupt randomness
789 + * collector. It's hardcoded for an 128 bit pool and assumes that any
790 + * locks that might be needed are taken by the caller.
791 + */
792 +static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
793 {
794 - mix_pool_bytes_extract(r, in, bytes, NULL);
795 + const char *bytes = in;
796 + __u32 w;
797 + unsigned i = f->count;
798 + unsigned input_rotate = f->rotate;
799 +
800 + while (nbytes--) {
801 + w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
802 + f->pool[(i + 1) & 3];
803 + f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
804 + input_rotate += (i++ & 3) ? 7 : 14;
805 + }
806 + f->count = i;
807 + f->rotate = input_rotate;
808 }
809
810 /*
811 @@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
812 */
813 static void credit_entropy_bits(struct entropy_store *r, int nbits)
814 {
815 - unsigned long flags;
816 - int entropy_count;
817 + int entropy_count, orig;
818
819 if (!nbits)
820 return;
821
822 - spin_lock_irqsave(&r->lock, flags);
823 -
824 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
825 - entropy_count = r->entropy_count;
826 +retry:
827 + entropy_count = orig = ACCESS_ONCE(r->entropy_count);
828 entropy_count += nbits;
829 +
830 if (entropy_count < 0) {
831 DEBUG_ENT("negative entropy/overflow\n");
832 entropy_count = 0;
833 } else if (entropy_count > r->poolinfo->POOLBITS)
834 entropy_count = r->poolinfo->POOLBITS;
835 - r->entropy_count = entropy_count;
836 + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
837 + goto retry;
838 +
839 + if (!r->initialized && nbits > 0) {
840 + r->entropy_total += nbits;
841 + if (r->entropy_total > 128)
842 + r->initialized = 1;
843 + }
844 +
845 + trace_credit_entropy_bits(r->name, nbits, entropy_count,
846 + r->entropy_total, _RET_IP_);
847
848 /* should we wake readers? */
849 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
850 wake_up_interruptible(&random_read_wait);
851 kill_fasync(&fasync, SIGIO, POLL_IN);
852 }
853 - spin_unlock_irqrestore(&r->lock, flags);
854 }
855
856 /*********************************************************************
857 @@ -572,42 +636,24 @@ struct timer_rand_state {
858 unsigned dont_count_entropy:1;
859 };
860
861 -#ifndef CONFIG_GENERIC_HARDIRQS
862 -
863 -static struct timer_rand_state *irq_timer_state[NR_IRQS];
864 -
865 -static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
866 -{
867 - return irq_timer_state[irq];
868 -}
869 -
870 -static void set_timer_rand_state(unsigned int irq,
871 - struct timer_rand_state *state)
872 -{
873 - irq_timer_state[irq] = state;
874 -}
875 -
876 -#else
877 -
878 -static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
879 -{
880 - struct irq_desc *desc;
881 -
882 - desc = irq_to_desc(irq);
883 -
884 - return desc->timer_rand_state;
885 -}
886 -
887 -static void set_timer_rand_state(unsigned int irq,
888 - struct timer_rand_state *state)
889 +/*
890 + * Add device- or boot-specific data to the input and nonblocking
891 + * pools to help initialize them to unique values.
892 + *
893 + * None of this adds any entropy, it is meant to avoid the
894 + * problem of the nonblocking pool having similar initial state
895 + * across largely identical devices.
896 + */
897 +void add_device_randomness(const void *buf, unsigned int size)
898 {
899 - struct irq_desc *desc;
900 + unsigned long time = get_cycles() ^ jiffies;
901
902 - desc = irq_to_desc(irq);
903 -
904 - desc->timer_rand_state = state;
905 + mix_pool_bytes(&input_pool, buf, size, NULL);
906 + mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
907 + mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
908 + mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
909 }
910 -#endif
911 +EXPORT_SYMBOL(add_device_randomness);
912
913 static struct timer_rand_state input_timer_state;
914
915 @@ -637,13 +683,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
916 goto out;
917
918 sample.jiffies = jiffies;
919 -
920 - /* Use arch random value, fall back to cycles */
921 - if (!arch_get_random_int(&sample.cycles))
922 - sample.cycles = get_cycles();
923 -
924 + sample.cycles = get_cycles();
925 sample.num = num;
926 - mix_pool_bytes(&input_pool, &sample, sizeof(sample));
927 + mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
928
929 /*
930 * Calculate number of bits of randomness we probably added.
931 @@ -700,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
932 }
933 EXPORT_SYMBOL_GPL(add_input_randomness);
934
935 -void add_interrupt_randomness(int irq)
936 +static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
937 +
938 +void add_interrupt_randomness(int irq, int irq_flags)
939 {
940 - struct timer_rand_state *state;
941 + struct entropy_store *r;
942 + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
943 + struct pt_regs *regs = get_irq_regs();
944 + unsigned long now = jiffies;
945 + __u32 input[4], cycles = get_cycles();
946 +
947 + input[0] = cycles ^ jiffies;
948 + input[1] = irq;
949 + if (regs) {
950 + __u64 ip = instruction_pointer(regs);
951 + input[2] = ip;
952 + input[3] = ip >> 32;
953 + }
954
955 - state = get_timer_rand_state(irq);
956 + fast_mix(fast_pool, input, sizeof(input));
957
958 - if (state == NULL)
959 + if ((fast_pool->count & 1023) &&
960 + !time_after(now, fast_pool->last + HZ))
961 return;
962
963 - DEBUG_ENT("irq event %d\n", irq);
964 - add_timer_randomness(state, 0x100 + irq);
965 + fast_pool->last = now;
966 +
967 + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
968 + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
969 + /*
970 + * If we don't have a valid cycle counter, and we see
971 + * back-to-back timer interrupts, then skip giving credit for
972 + * any entropy.
973 + */
974 + if (cycles == 0) {
975 + if (irq_flags & __IRQF_TIMER) {
976 + if (fast_pool->last_timer_intr)
977 + return;
978 + fast_pool->last_timer_intr = 1;
979 + } else
980 + fast_pool->last_timer_intr = 0;
981 + }
982 + credit_entropy_bits(r, 1);
983 }
984
985 #ifdef CONFIG_BLOCK
986 @@ -742,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
987 */
988 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
989 {
990 - __u32 tmp[OUTPUT_POOL_WORDS];
991 + __u32 tmp[OUTPUT_POOL_WORDS];
992
993 if (r->pull && r->entropy_count < nbytes * 8 &&
994 r->entropy_count < r->poolinfo->POOLBITS) {
995 @@ -761,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
996
997 bytes = extract_entropy(r->pull, tmp, bytes,
998 random_read_wakeup_thresh / 8, rsvd);
999 - mix_pool_bytes(r, tmp, bytes);
1000 + mix_pool_bytes(r, tmp, bytes, NULL);
1001 credit_entropy_bits(r, bytes*8);
1002 }
1003 }
1004 @@ -820,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
1005 static void extract_buf(struct entropy_store *r, __u8 *out)
1006 {
1007 int i;
1008 - __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
1009 + union {
1010 + __u32 w[5];
1011 + unsigned long l[LONGS(EXTRACT_SIZE)];
1012 + } hash;
1013 + __u32 workspace[SHA_WORKSPACE_WORDS];
1014 __u8 extract[64];
1015 + unsigned long flags;
1016
1017 /* Generate a hash across the pool, 16 words (512 bits) at a time */
1018 - sha_init(hash);
1019 + sha_init(hash.w);
1020 + spin_lock_irqsave(&r->lock, flags);
1021 for (i = 0; i < r->poolinfo->poolwords; i += 16)
1022 - sha_transform(hash, (__u8 *)(r->pool + i), workspace);
1023 + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
1024
1025 /*
1026 * We mix the hash back into the pool to prevent backtracking
1027 @@ -837,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1028 * brute-forcing the feedback as hard as brute-forcing the
1029 * hash.
1030 */
1031 - mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
1032 + __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
1033 + spin_unlock_irqrestore(&r->lock, flags);
1034
1035 /*
1036 * To avoid duplicates, we atomically extract a portion of the
1037 * pool while mixing, and hash one final time.
1038 */
1039 - sha_transform(hash, extract, workspace);
1040 + sha_transform(hash.w, extract, workspace);
1041 memset(extract, 0, sizeof(extract));
1042 memset(workspace, 0, sizeof(workspace));
1043
1044 @@ -852,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1045 * pattern, we fold it in half. Thus, we always feed back
1046 * twice as much data as we output.
1047 */
1048 - hash[0] ^= hash[3];
1049 - hash[1] ^= hash[4];
1050 - hash[2] ^= rol32(hash[2], 16);
1051 - memcpy(out, hash, EXTRACT_SIZE);
1052 - memset(hash, 0, sizeof(hash));
1053 + hash.w[0] ^= hash.w[3];
1054 + hash.w[1] ^= hash.w[4];
1055 + hash.w[2] ^= rol32(hash.w[2], 16);
1056 +
1057 + /*
1058 + * If we have a architectural hardware random number
1059 + * generator, mix that in, too.
1060 + */
1061 + for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
1062 + unsigned long v;
1063 + if (!arch_get_random_long(&v))
1064 + break;
1065 + hash.l[i] ^= v;
1066 + }
1067 +
1068 + memcpy(out, &hash, EXTRACT_SIZE);
1069 + memset(&hash, 0, sizeof(hash));
1070 }
1071
1072 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1073 - size_t nbytes, int min, int reserved)
1074 + size_t nbytes, int min, int reserved)
1075 {
1076 ssize_t ret = 0, i;
1077 __u8 tmp[EXTRACT_SIZE];
1078 - unsigned long flags;
1079
1080 + trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
1081 xfer_secondary_pool(r, nbytes);
1082 nbytes = account(r, nbytes, min, reserved);
1083
1084 @@ -873,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1085 extract_buf(r, tmp);
1086
1087 if (fips_enabled) {
1088 + unsigned long flags;
1089 +
1090 spin_lock_irqsave(&r->lock, flags);
1091 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
1092 panic("Hardware RNG duplicated output!\n");
1093 @@ -898,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1094 ssize_t ret = 0, i;
1095 __u8 tmp[EXTRACT_SIZE];
1096
1097 + trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
1098 xfer_secondary_pool(r, nbytes);
1099 nbytes = account(r, nbytes, 0, 0);
1100
1101 @@ -931,17 +1026,35 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1102
1103 /*
1104 * This function is the exported kernel interface. It returns some
1105 - * number of good random numbers, suitable for seeding TCP sequence
1106 - * numbers, etc.
1107 + * number of good random numbers, suitable for key generation, seeding
1108 + * TCP sequence numbers, etc. It does not use the hw random number
1109 + * generator, if available; use get_random_bytes_arch() for that.
1110 */
1111 void get_random_bytes(void *buf, int nbytes)
1112 {
1113 + extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
1114 +}
1115 +EXPORT_SYMBOL(get_random_bytes);
1116 +
1117 +/*
1118 + * This function will use the architecture-specific hardware random
1119 + * number generator if it is available. The arch-specific hw RNG will
1120 + * almost certainly be faster than what we can do in software, but it
1121 + * is impossible to verify that it is implemented securely (as
1122 + * opposed, to, say, the AES encryption of a sequence number using a
1123 + * key known by the NSA). So it's useful if we need the speed, but
1124 + * only if we're willing to trust the hardware manufacturer not to
1125 + * have put in a back door.
1126 + */
1127 +void get_random_bytes_arch(void *buf, int nbytes)
1128 +{
1129 char *p = buf;
1130
1131 + trace_get_random_bytes(nbytes, _RET_IP_);
1132 while (nbytes) {
1133 unsigned long v;
1134 int chunk = min(nbytes, (int)sizeof(unsigned long));
1135 -
1136 +
1137 if (!arch_get_random_long(&v))
1138 break;
1139
1140 @@ -950,9 +1063,11 @@ void get_random_bytes(void *buf, int nbytes)
1141 nbytes -= chunk;
1142 }
1143
1144 - extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1145 + if (nbytes)
1146 + extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1147 }
1148 -EXPORT_SYMBOL(get_random_bytes);
1149 +EXPORT_SYMBOL(get_random_bytes_arch);
1150 +
1151
1152 /*
1153 * init_std_data - initialize pool with system data
1154 @@ -966,23 +1081,30 @@ EXPORT_SYMBOL(get_random_bytes);
1155 static void init_std_data(struct entropy_store *r)
1156 {
1157 int i;
1158 - ktime_t now;
1159 - unsigned long flags;
1160 + ktime_t now = ktime_get_real();
1161 + unsigned long rv;
1162
1163 - spin_lock_irqsave(&r->lock, flags);
1164 r->entropy_count = 0;
1165 - spin_unlock_irqrestore(&r->lock, flags);
1166 -
1167 - now = ktime_get_real();
1168 - mix_pool_bytes(r, &now, sizeof(now));
1169 - for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
1170 - if (!arch_get_random_long(&flags))
1171 + r->entropy_total = 0;
1172 + mix_pool_bytes(r, &now, sizeof(now), NULL);
1173 + for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
1174 + if (!arch_get_random_long(&rv))
1175 break;
1176 - mix_pool_bytes(r, &flags, sizeof(flags));
1177 + mix_pool_bytes(r, &rv, sizeof(rv), NULL);
1178 }
1179 - mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
1180 + mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
1181 }
1182
1183 +/*
1184 + * Note that setup_arch() may call add_device_randomness()
1185 + * long before we get here. This allows seeding of the pools
1186 + * with some platform dependent data very early in the boot
1187 + * process. But it limits our options here. We must use
1188 + * statically allocated structures that already have all
1189 + * initializations complete at compile time. We should also
1190 + * take care not to overwrite the precious per platform data
1191 + * we were given.
1192 + */
1193 static int rand_initialize(void)
1194 {
1195 init_std_data(&input_pool);
1196 @@ -992,24 +1114,6 @@ static int rand_initialize(void)
1197 }
1198 module_init(rand_initialize);
1199
1200 -void rand_initialize_irq(int irq)
1201 -{
1202 - struct timer_rand_state *state;
1203 -
1204 - state = get_timer_rand_state(irq);
1205 -
1206 - if (state)
1207 - return;
1208 -
1209 - /*
1210 - * If kzalloc returns null, we just won't use that entropy
1211 - * source.
1212 - */
1213 - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1214 - if (state)
1215 - set_timer_rand_state(irq, state);
1216 -}
1217 -
1218 #ifdef CONFIG_BLOCK
1219 void rand_initialize_disk(struct gendisk *disk)
1220 {
1221 @@ -1117,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1222 count -= bytes;
1223 p += bytes;
1224
1225 - mix_pool_bytes(r, buf, bytes);
1226 + mix_pool_bytes(r, buf, bytes, NULL);
1227 cond_resched();
1228 }
1229
1230 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1231 index 153980b..b298158 100644
1232 --- a/drivers/firmware/dmi_scan.c
1233 +++ b/drivers/firmware/dmi_scan.c
1234 @@ -6,6 +6,7 @@
1235 #include <linux/dmi.h>
1236 #include <linux/efi.h>
1237 #include <linux/bootmem.h>
1238 +#include <linux/random.h>
1239 #include <asm/dmi.h>
1240
1241 /*
1242 @@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
1243
1244 dmi_table(buf, dmi_len, dmi_num, decode, NULL);
1245
1246 + add_device_randomness(buf, dmi_len);
1247 +
1248 dmi_iounmap(buf, dmi_len);
1249 return 0;
1250 }
1251 diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
1252 index 51e0e2d..a330492 100644
1253 --- a/drivers/firmware/pcdp.c
1254 +++ b/drivers/firmware/pcdp.c
1255 @@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
1256 if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
1257 return -ENODEV;
1258
1259 - pcdp = ioremap(efi.hcdp, 4096);
1260 + pcdp = early_ioremap(efi.hcdp, 4096);
1261 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
1262
1263 if (strstr(cmdline, "console=hcdp")) {
1264 @@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
1265 }
1266
1267 out:
1268 - iounmap(pcdp);
1269 + early_iounmap(pcdp, 4096);
1270 return rc;
1271 }
1272 diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
1273 index b99af34..a2abb8e 100644
1274 --- a/drivers/hid/hid-chicony.c
1275 +++ b/drivers/hid/hid-chicony.c
1276 @@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
1277 static const struct hid_device_id ch_devices[] = {
1278 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1279 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
1280 + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1281 { }
1282 };
1283 MODULE_DEVICE_TABLE(hid, ch_devices);
1284 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1285 index 973c238..41d4437 100644
1286 --- a/drivers/hid/hid-core.c
1287 +++ b/drivers/hid/hid-core.c
1288 @@ -1404,12 +1404,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
1289 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1290 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
1291 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
1292 + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1293 { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1294 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
1295 { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
1296 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
1297 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
1298 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
1299 + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
1300 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
1301 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1302 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1303 diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
1304 index 2f0be4c..9e43aac 100644
1305 --- a/drivers/hid/hid-cypress.c
1306 +++ b/drivers/hid/hid-cypress.c
1307 @@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
1308 .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
1309 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
1310 .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
1311 + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
1312 + .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
1313 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
1314 .driver_data = CP_2WHEEL_MOUSE_HACK },
1315 { }
1316 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1317 index bb1abf8..41ad6ff 100644
1318 --- a/drivers/hid/hid-ids.h
1319 +++ b/drivers/hid/hid-ids.h
1320 @@ -202,6 +202,7 @@
1321 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
1322 #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
1323 #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
1324 +#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
1325
1326 #define USB_VENDOR_ID_CHUNGHWAT 0x2247
1327 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
1328 @@ -231,6 +232,7 @@
1329 #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
1330 #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
1331 #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
1332 +#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
1333 #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
1334
1335 #define USB_VENDOR_ID_DEALEXTREAME 0x10c5
1336 @@ -567,6 +569,9 @@
1337 #define USB_VENDOR_ID_NINTENDO 0x057e
1338 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
1339
1340 +#define USB_VENDOR_ID_NOVATEK 0x0603
1341 +#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
1342 +
1343 #define USB_VENDOR_ID_NTRIG 0x1b96
1344 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
1345 #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
1346 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1347 index a6197f5..e754dff 100644
1348 --- a/drivers/hid/hid-multitouch.c
1349 +++ b/drivers/hid/hid-multitouch.c
1350 @@ -940,6 +940,11 @@ static const struct hid_device_id mt_devices[] = {
1351 HID_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
1352 USB_DEVICE_ID_PANABOARD_UBT880) },
1353
1354 + /* Novatek Panel */
1355 + { .driver_data = MT_CLS_DEFAULT,
1356 + HID_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
1357 + USB_DEVICE_ID_NOVATEK_PCT) },
1358 +
1359 /* PenMount panels */
1360 { .driver_data = MT_CLS_CONFIDENCE,
1361 HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
1362 diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
1363 index cecd35c..c77032c 100644
1364 --- a/drivers/input/tablet/wacom_wac.c
1365 +++ b/drivers/input/tablet/wacom_wac.c
1366 @@ -243,7 +243,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
1367 input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
1368 input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
1369 if (wacom->tool[0] != BTN_TOOL_MOUSE) {
1370 - input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
1371 + input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8));
1372 input_report_key(input, BTN_TOUCH, data[1] & 0x01);
1373 input_report_key(input, BTN_STYLUS, data[1] & 0x02);
1374 input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
1375 diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
1376 index 503c709..908407e 100644
1377 --- a/drivers/input/touchscreen/eeti_ts.c
1378 +++ b/drivers/input/touchscreen/eeti_ts.c
1379 @@ -48,7 +48,7 @@ struct eeti_ts_priv {
1380 struct input_dev *input;
1381 struct work_struct work;
1382 struct mutex mutex;
1383 - int irq, irq_active_high;
1384 + int irq_gpio, irq, irq_active_high;
1385 };
1386
1387 #define EETI_TS_BITDEPTH (11)
1388 @@ -62,7 +62,7 @@ struct eeti_ts_priv {
1389
1390 static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
1391 {
1392 - return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
1393 + return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
1394 }
1395
1396 static void eeti_ts_read(struct work_struct *work)
1397 @@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
1398 static int __devinit eeti_ts_probe(struct i2c_client *client,
1399 const struct i2c_device_id *idp)
1400 {
1401 - struct eeti_ts_platform_data *pdata;
1402 + struct eeti_ts_platform_data *pdata = client->dev.platform_data;
1403 struct eeti_ts_priv *priv;
1404 struct input_dev *input;
1405 unsigned int irq_flags;
1406 @@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
1407
1408 priv->client = client;
1409 priv->input = input;
1410 - priv->irq = client->irq;
1411 + priv->irq_gpio = pdata->irq_gpio;
1412 + priv->irq = gpio_to_irq(pdata->irq_gpio);
1413
1414 - pdata = client->dev.platform_data;
1415 + err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
1416 + if (err < 0)
1417 + goto err1;
1418
1419 if (pdata)
1420 priv->irq_active_high = pdata->irq_active_high;
1421 @@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
1422
1423 err = input_register_device(input);
1424 if (err)
1425 - goto err1;
1426 + goto err2;
1427
1428 err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
1429 client->name, priv);
1430 if (err) {
1431 dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
1432 - goto err2;
1433 + goto err3;
1434 }
1435
1436 /*
1437 @@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
1438 device_init_wakeup(&client->dev, 0);
1439 return 0;
1440
1441 -err2:
1442 +err3:
1443 input_unregister_device(input);
1444 input = NULL; /* so we dont try to free it below */
1445 +err2:
1446 + gpio_free(pdata->irq_gpio);
1447 err1:
1448 input_free_device(input);
1449 kfree(priv);
1450 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1451 index d7add9d..23904d2 100644
1452 --- a/drivers/md/raid1.c
1453 +++ b/drivers/md/raid1.c
1454 @@ -2429,7 +2429,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
1455 /* There is nowhere to write, so all non-sync
1456 * drives must be failed - so we are finished
1457 */
1458 - sector_t rv = max_sector - sector_nr;
1459 + sector_t rv;
1460 + if (min_bad > 0)
1461 + max_sector = sector_nr + min_bad;
1462 + rv = max_sector - sector_nr;
1463 *skipped = 1;
1464 put_buf(r1_bio);
1465 return rv;
1466 diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
1467 index bef5296..647dd95 100644
1468 --- a/drivers/media/rc/ene_ir.c
1469 +++ b/drivers/media/rc/ene_ir.c
1470 @@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1471
1472 spin_lock_init(&dev->hw_lock);
1473
1474 + dev->hw_io = pnp_port_start(pnp_dev, 0);
1475 +
1476 pnp_set_drvdata(pnp_dev, dev);
1477 dev->pnp_dev = pnp_dev;
1478
1479 @@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
1480
1481 /* claim the resources */
1482 error = -EBUSY;
1483 - dev->hw_io = pnp_port_start(pnp_dev, 0);
1484 if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
1485 dev->hw_io = -1;
1486 dev->irq = -1;
1487 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
1488 index 1efad20..1287645 100644
1489 --- a/drivers/mfd/ab3100-core.c
1490 +++ b/drivers/mfd/ab3100-core.c
1491 @@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
1492 u32 fatevent;
1493 int err;
1494
1495 - add_interrupt_randomness(irq);
1496 -
1497 err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
1498 event_regs, 3);
1499 if (err)
1500 @@ -933,9 +931,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
1501
1502 err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
1503 IRQF_ONESHOT, "ab3100-core", ab3100);
1504 - /* This real unpredictable IRQ is of course sampled for entropy */
1505 - rand_initialize_irq(client->irq);
1506 -
1507 if (err)
1508 goto exit_no_irq;
1509
1510 diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
1511 index 43a76c4..db662e2 100644
1512 --- a/drivers/mfd/ezx-pcap.c
1513 +++ b/drivers/mfd/ezx-pcap.c
1514 @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
1515 }
1516 local_irq_enable();
1517 ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
1518 - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
1519 + } while (gpio_get_value(pdata->gpio));
1520 }
1521
1522 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
1523 diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
1524 index f742745..b90f3e0 100644
1525 --- a/drivers/mfd/wm831x-otp.c
1526 +++ b/drivers/mfd/wm831x-otp.c
1527 @@ -18,6 +18,7 @@
1528 #include <linux/bcd.h>
1529 #include <linux/delay.h>
1530 #include <linux/mfd/core.h>
1531 +#include <linux/random.h>
1532
1533 #include <linux/mfd/wm831x/core.h>
1534 #include <linux/mfd/wm831x/otp.h>
1535 @@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
1536
1537 int wm831x_otp_init(struct wm831x *wm831x)
1538 {
1539 + char uuid[WM831X_UNIQUE_ID_LEN];
1540 int ret;
1541
1542 ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
1543 @@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
1544 dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
1545 ret);
1546
1547 + ret = wm831x_unique_id_read(wm831x, uuid);
1548 + if (ret == 0)
1549 + add_device_randomness(uuid, sizeof(uuid));
1550 + else
1551 + dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
1552 +
1553 return ret;
1554 }
1555
1556 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
1557 index a9dd6a9..c098b24 100644
1558 --- a/drivers/net/ethernet/intel/e1000e/82571.c
1559 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
1560 @@ -1582,10 +1582,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1561 * auto-negotiation in the TXCW register and disable
1562 * forced link in the Device Control register in an
1563 * attempt to auto-negotiate with our link partner.
1564 - * If the partner code word is null, stop forcing
1565 - * and restart auto negotiation.
1566 */
1567 - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1568 + if (rxcw & E1000_RXCW_C) {
1569 /* Enable autoneg, and unforce link up */
1570 ew32(TXCW, mac->txcw);
1571 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1572 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1573 index 8f13420..147b628 100644
1574 --- a/drivers/net/tun.c
1575 +++ b/drivers/net/tun.c
1576 @@ -185,7 +185,6 @@ static void __tun_detach(struct tun_struct *tun)
1577 netif_tx_lock_bh(tun->dev);
1578 netif_carrier_off(tun->dev);
1579 tun->tfile = NULL;
1580 - tun->socket.file = NULL;
1581 netif_tx_unlock_bh(tun->dev);
1582
1583 /* Drop read queue */
1584 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1585 index 6dfd964..28a0edd 100644
1586 --- a/drivers/net/wireless/ath/ath9k/hw.c
1587 +++ b/drivers/net/wireless/ath/ath9k/hw.c
1588 @@ -676,6 +676,7 @@ int ath9k_hw_init(struct ath_hw *ah)
1589 case AR9300_DEVID_AR9340:
1590 case AR9300_DEVID_AR9580:
1591 case AR9300_DEVID_AR9462:
1592 + case AR9485_DEVID_AR1111:
1593 break;
1594 default:
1595 if (common->bus_ops->ath_bus_type == ATH_USB)
1596 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
1597 index e88f182..f8e1fbb 100644
1598 --- a/drivers/net/wireless/ath/ath9k/hw.h
1599 +++ b/drivers/net/wireless/ath/ath9k/hw.h
1600 @@ -48,6 +48,7 @@
1601 #define AR9300_DEVID_AR9580 0x0033
1602 #define AR9300_DEVID_AR9462 0x0034
1603 #define AR9300_DEVID_AR9330 0x0035
1604 +#define AR9485_DEVID_AR1111 0x0037
1605
1606 #define AR5416_AR9100_DEVID 0x000b
1607
1608 diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
1609 index 77dc327..e44097a 100644
1610 --- a/drivers/net/wireless/ath/ath9k/pci.c
1611 +++ b/drivers/net/wireless/ath/ath9k/pci.c
1612 @@ -35,6 +35,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
1613 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
1614 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
1615 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
1616 + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
1617 { 0 }
1618 };
1619
1620 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1621 index da2be3e..7db5d45 100644
1622 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1623 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1624 @@ -709,11 +709,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
1625 */
1626 static bool rs_use_green(struct ieee80211_sta *sta)
1627 {
1628 - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1629 - struct iwl_rxon_context *ctx = sta_priv->ctx;
1630 -
1631 - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
1632 - !(ctx->ht.non_gf_sta_present);
1633 + /*
1634 + * There's a bug somewhere in this code that causes the
1635 + * scaling to get stuck because GF+SGI can't be combined
1636 + * in SISO rates. Until we find that bug, disable GF, it
1637 + * has only limited benefit and we still interoperate with
1638 + * GF APs since we can always receive GF transmissions.
1639 + */
1640 + return false;
1641 }
1642
1643 /**
1644 diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
1645 index e0c6d11..0f4bf8c 100644
1646 --- a/drivers/net/wireless/rt2x00/rt61pci.c
1647 +++ b/drivers/net/wireless/rt2x00/rt61pci.c
1648 @@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1649
1650 static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
1651 {
1652 - struct ieee80211_conf conf = { .flags = 0 };
1653 - struct rt2x00lib_conf libconf = { .conf = &conf };
1654 + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
1655
1656 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
1657 }
1658 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
1659 index 77aadde..556cbb4 100644
1660 --- a/drivers/platform/x86/asus-wmi.c
1661 +++ b/drivers/platform/x86/asus-wmi.c
1662 @@ -1467,14 +1467,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
1663 */
1664 if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
1665 asus->dsts_id = ASUS_WMI_METHODID_DSTS;
1666 - else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
1667 + else
1668 asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
1669
1670 - if (!asus->dsts_id) {
1671 - pr_err("Can't find DSTS");
1672 - return -ENODEV;
1673 - }
1674 -
1675 /* CWAP allow to define the behavior of the Fn+F2 key,
1676 * this method doesn't seems to be present on Eee PCs */
1677 if (asus->driver->quirks->wapf >= 0)
1678 diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
1679 index 3b6e6a6..41c06fe 100644
1680 --- a/drivers/rtc/rtc-wm831x.c
1681 +++ b/drivers/rtc/rtc-wm831x.c
1682 @@ -24,7 +24,7 @@
1683 #include <linux/mfd/wm831x/core.h>
1684 #include <linux/delay.h>
1685 #include <linux/platform_device.h>
1686 -
1687 +#include <linux/random.h>
1688
1689 /*
1690 * R16416 (0x4020) - RTC Write Counter
1691 @@ -96,6 +96,26 @@ struct wm831x_rtc {
1692 unsigned int alarm_enabled:1;
1693 };
1694
1695 +static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
1696 +{
1697 + int ret;
1698 + u16 reg;
1699 +
1700 + /*
1701 + * The write counter contains a pseudo-random number which is
1702 + * regenerated every time we set the RTC so it should be a
1703 + * useful per-system source of entropy.
1704 + */
1705 + ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
1706 + if (ret >= 0) {
1707 + reg = ret;
1708 + add_device_randomness(&reg, sizeof(reg));
1709 + } else {
1710 + dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
1711 + ret);
1712 + }
1713 +}
1714 +
1715 /*
1716 * Read current time and date in RTC
1717 */
1718 @@ -431,6 +451,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
1719 alm_irq, ret);
1720 }
1721
1722 + wm831x_rtc_add_randomness(wm831x);
1723 +
1724 return 0;
1725
1726 err:
1727 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
1728 index fa323f8..52a5f62 100644
1729 --- a/drivers/target/target_core_cdb.c
1730 +++ b/drivers/target/target_core_cdb.c
1731 @@ -1022,11 +1022,11 @@ int target_emulate_unmap(struct se_task *task)
1732 struct se_cmd *cmd = task->task_se_cmd;
1733 struct se_device *dev = cmd->se_dev;
1734 unsigned char *buf, *ptr = NULL;
1735 - unsigned char *cdb = &cmd->t_task_cdb[0];
1736 sector_t lba;
1737 - unsigned int size = cmd->data_length, range;
1738 - int ret = 0, offset;
1739 - unsigned short dl, bd_dl;
1740 + int size = cmd->data_length;
1741 + u32 range;
1742 + int ret = 0;
1743 + int dl, bd_dl;
1744
1745 if (!dev->transport->do_discard) {
1746 pr_err("UNMAP emulation not supported for: %s\n",
1747 @@ -1035,24 +1035,41 @@ int target_emulate_unmap(struct se_task *task)
1748 return -ENOSYS;
1749 }
1750
1751 - /* First UNMAP block descriptor starts at 8 byte offset */
1752 - offset = 8;
1753 - size -= 8;
1754 - dl = get_unaligned_be16(&cdb[0]);
1755 - bd_dl = get_unaligned_be16(&cdb[2]);
1756 -
1757 buf = transport_kmap_data_sg(cmd);
1758
1759 - ptr = &buf[offset];
1760 - pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
1761 + dl = get_unaligned_be16(&buf[0]);
1762 + bd_dl = get_unaligned_be16(&buf[2]);
1763 +
1764 + size = min(size - 8, bd_dl);
1765 + if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
1766 + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1767 + ret = -EINVAL;
1768 + goto err;
1769 + }
1770 +
1771 + /* First UNMAP block descriptor starts at 8 byte offset */
1772 + ptr = &buf[8];
1773 + pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
1774 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
1775
1776 - while (size) {
1777 + while (size >= 16) {
1778 lba = get_unaligned_be64(&ptr[0]);
1779 range = get_unaligned_be32(&ptr[8]);
1780 pr_debug("UNMAP: Using lba: %llu and range: %u\n",
1781 (unsigned long long)lba, range);
1782
1783 + if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
1784 + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
1785 + ret = -EINVAL;
1786 + goto err;
1787 + }
1788 +
1789 + if (lba + range > dev->transport->get_blocks(dev) + 1) {
1790 + cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
1791 + ret = -EINVAL;
1792 + goto err;
1793 + }
1794 +
1795 ret = dev->transport->do_discard(dev, lba, range);
1796 if (ret < 0) {
1797 pr_err("blkdev_issue_discard() failed: %d\n",
1798 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1799 index 6241b71..e727b87 100644
1800 --- a/drivers/usb/core/hub.c
1801 +++ b/drivers/usb/core/hub.c
1802 @@ -24,6 +24,7 @@
1803 #include <linux/kthread.h>
1804 #include <linux/mutex.h>
1805 #include <linux/freezer.h>
1806 +#include <linux/random.h>
1807
1808 #include <asm/uaccess.h>
1809 #include <asm/byteorder.h>
1810 @@ -1951,6 +1952,14 @@ int usb_new_device(struct usb_device *udev)
1811 /* Tell the world! */
1812 announce_device(udev);
1813
1814 + if (udev->serial)
1815 + add_device_randomness(udev->serial, strlen(udev->serial));
1816 + if (udev->product)
1817 + add_device_randomness(udev->product, strlen(udev->product));
1818 + if (udev->manufacturer)
1819 + add_device_randomness(udev->manufacturer,
1820 + strlen(udev->manufacturer));
1821 +
1822 device_enable_async_suspend(&udev->dev);
1823
1824 /*
1825 diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
1826 index ccbfef5..1e1e2d2 100644
1827 --- a/drivers/video/smscufx.c
1828 +++ b/drivers/video/smscufx.c
1829 @@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
1830 result = fb_sys_write(info, buf, count, ppos);
1831
1832 if (result > 0) {
1833 - int start = max((int)(offset / info->fix.line_length) - 1, 0);
1834 + int start = max((int)(offset / info->fix.line_length), 0);
1835 int lines = min((u32)((result / info->fix.line_length) + 1),
1836 (u32)info->var.yres);
1837
1838 diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
1839 index 24a49d4..1585db1 100644
1840 --- a/fs/exofs/ore.c
1841 +++ b/fs/exofs/ore.c
1842 @@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
1843 bio->bi_rw |= REQ_WRITE;
1844 }
1845
1846 - osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
1847 - bio, per_dev->length);
1848 + osd_req_write(or, _ios_obj(ios, cur_comp),
1849 + per_dev->offset, bio, per_dev->length);
1850 ORE_DBGMSG("write(0x%llx) offset=0x%llx "
1851 "length=0x%llx dev=%d\n",
1852 - _LLU(_ios_obj(ios, dev)->id),
1853 + _LLU(_ios_obj(ios, cur_comp)->id),
1854 _LLU(per_dev->offset),
1855 _LLU(per_dev->length), dev);
1856 } else if (ios->kern_buff) {
1857 @@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
1858 (ios->si.unit_off + ios->length >
1859 ios->layout->stripe_unit));
1860
1861 - ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
1862 + ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
1863 per_dev->offset,
1864 ios->kern_buff, ios->length);
1865 if (unlikely(ret))
1866 goto out;
1867 ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
1868 "length=0x%llx dev=%d\n",
1869 - _LLU(_ios_obj(ios, dev)->id),
1870 + _LLU(_ios_obj(ios, cur_comp)->id),
1871 _LLU(per_dev->offset),
1872 _LLU(ios->length), per_dev->dev);
1873 } else {
1874 - osd_req_set_attributes(or, _ios_obj(ios, dev));
1875 + osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
1876 ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
1877 - _LLU(_ios_obj(ios, dev)->id),
1878 + _LLU(_ios_obj(ios, cur_comp)->id),
1879 ios->out_attr_len, dev);
1880 }
1881
1882 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
1883 index 2a70fce..6fe98ed 100644
1884 --- a/fs/nilfs2/ioctl.c
1885 +++ b/fs/nilfs2/ioctl.c
1886 @@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
1887 if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
1888 goto out;
1889
1890 - down_read(&inode->i_sb->s_umount);
1891 + mutex_lock(&nilfs->ns_snapshot_mount_mutex);
1892
1893 nilfs_transaction_begin(inode->i_sb, &ti, 0);
1894 ret = nilfs_cpfile_change_cpmode(
1895 @@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
1896 else
1897 nilfs_transaction_commit(inode->i_sb); /* never fails */
1898
1899 - up_read(&inode->i_sb->s_umount);
1900 + mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
1901 out:
1902 mnt_drop_write_file(filp);
1903 return ret;
1904 diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
1905 index 1099a76..496904b 100644
1906 --- a/fs/nilfs2/super.c
1907 +++ b/fs/nilfs2/super.c
1908 @@ -948,6 +948,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
1909 struct nilfs_root *root;
1910 int ret;
1911
1912 + mutex_lock(&nilfs->ns_snapshot_mount_mutex);
1913 +
1914 down_read(&nilfs->ns_segctor_sem);
1915 ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
1916 up_read(&nilfs->ns_segctor_sem);
1917 @@ -972,6 +974,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
1918 ret = nilfs_get_root_dentry(s, root, root_dentry);
1919 nilfs_put_root(root);
1920 out:
1921 + mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
1922 return ret;
1923 }
1924
1925 diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
1926 index 501b7f8..41e6a04 100644
1927 --- a/fs/nilfs2/the_nilfs.c
1928 +++ b/fs/nilfs2/the_nilfs.c
1929 @@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
1930 nilfs->ns_bdev = bdev;
1931 atomic_set(&nilfs->ns_ndirtyblks, 0);
1932 init_rwsem(&nilfs->ns_sem);
1933 + mutex_init(&nilfs->ns_snapshot_mount_mutex);
1934 INIT_LIST_HEAD(&nilfs->ns_dirty_files);
1935 INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
1936 spin_lock_init(&nilfs->ns_inode_lock);
1937 diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
1938 index 9992b11..de7435f 100644
1939 --- a/fs/nilfs2/the_nilfs.h
1940 +++ b/fs/nilfs2/the_nilfs.h
1941 @@ -47,6 +47,7 @@ enum {
1942 * @ns_flags: flags
1943 * @ns_bdev: block device
1944 * @ns_sem: semaphore for shared states
1945 + * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
1946 * @ns_sbh: buffer heads of on-disk super blocks
1947 * @ns_sbp: pointers to super block data
1948 * @ns_sbwtime: previous write time of super block
1949 @@ -99,6 +100,7 @@ struct the_nilfs {
1950
1951 struct block_device *ns_bdev;
1952 struct rw_semaphore ns_sem;
1953 + struct mutex ns_snapshot_mount_mutex;
1954
1955 /*
1956 * used for
1957 diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
1958 index f875b31..16625d7 100644
1959 --- a/include/linux/input/eeti_ts.h
1960 +++ b/include/linux/input/eeti_ts.h
1961 @@ -2,6 +2,7 @@
1962 #define LINUX_INPUT_EETI_TS_H
1963
1964 struct eeti_ts_platform_data {
1965 + int irq_gpio;
1966 unsigned int irq_active_high;
1967 };
1968
1969 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
1970 index f1e2527..9a323d1 100644
1971 --- a/include/linux/irqdesc.h
1972 +++ b/include/linux/irqdesc.h
1973 @@ -39,7 +39,6 @@ struct module;
1974 */
1975 struct irq_desc {
1976 struct irq_data irq_data;
1977 - struct timer_rand_state *timer_rand_state;
1978 unsigned int __percpu *kstat_irqs;
1979 irq_flow_handler_t handle_irq;
1980 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
1981 diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
1982 index 40c37216..32a1b5c 100644
1983 --- a/include/linux/mfd/ezx-pcap.h
1984 +++ b/include/linux/mfd/ezx-pcap.h
1985 @@ -16,6 +16,7 @@ struct pcap_subdev {
1986 struct pcap_platform_data {
1987 unsigned int irq_base;
1988 unsigned int config;
1989 + int gpio;
1990 void (*init) (void *); /* board specific init */
1991 int num_subdevs;
1992 struct pcap_subdev *subdevs;
1993 diff --git a/include/linux/random.h b/include/linux/random.h
1994 index 8f74538..ac621ce 100644
1995 --- a/include/linux/random.h
1996 +++ b/include/linux/random.h
1997 @@ -48,13 +48,13 @@ struct rnd_state {
1998
1999 #ifdef __KERNEL__
2000
2001 -extern void rand_initialize_irq(int irq);
2002 -
2003 +extern void add_device_randomness(const void *, unsigned int);
2004 extern void add_input_randomness(unsigned int type, unsigned int code,
2005 unsigned int value);
2006 -extern void add_interrupt_randomness(int irq);
2007 +extern void add_interrupt_randomness(int irq, int irq_flags);
2008
2009 extern void get_random_bytes(void *buf, int nbytes);
2010 +extern void get_random_bytes_arch(void *buf, int nbytes);
2011 void generate_random_uuid(unsigned char uuid_out[16]);
2012
2013 #ifndef MODULE
2014 diff --git a/include/trace/events/random.h b/include/trace/events/random.h
2015 new file mode 100644
2016 index 0000000..422df19
2017 --- /dev/null
2018 +++ b/include/trace/events/random.h
2019 @@ -0,0 +1,134 @@
2020 +#undef TRACE_SYSTEM
2021 +#define TRACE_SYSTEM random
2022 +
2023 +#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
2024 +#define _TRACE_RANDOM_H
2025 +
2026 +#include <linux/writeback.h>
2027 +#include <linux/tracepoint.h>
2028 +
2029 +DECLARE_EVENT_CLASS(random__mix_pool_bytes,
2030 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
2031 +
2032 + TP_ARGS(pool_name, bytes, IP),
2033 +
2034 + TP_STRUCT__entry(
2035 + __field( const char *, pool_name )
2036 + __field( int, bytes )
2037 + __field(unsigned long, IP )
2038 + ),
2039 +
2040 + TP_fast_assign(
2041 + __entry->pool_name = pool_name;
2042 + __entry->bytes = bytes;
2043 + __entry->IP = IP;
2044 + ),
2045 +
2046 + TP_printk("%s pool: bytes %d caller %pF",
2047 + __entry->pool_name, __entry->bytes, (void *)__entry->IP)
2048 +);
2049 +
2050 +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
2051 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
2052 +
2053 + TP_ARGS(pool_name, bytes, IP)
2054 +);
2055 +
2056 +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
2057 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
2058 +
2059 + TP_ARGS(pool_name, bytes, IP)
2060 +);
2061 +
2062 +TRACE_EVENT(credit_entropy_bits,
2063 + TP_PROTO(const char *pool_name, int bits, int entropy_count,
2064 + int entropy_total, unsigned long IP),
2065 +
2066 + TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
2067 +
2068 + TP_STRUCT__entry(
2069 + __field( const char *, pool_name )
2070 + __field( int, bits )
2071 + __field( int, entropy_count )
2072 + __field( int, entropy_total )
2073 + __field(unsigned long, IP )
2074 + ),
2075 +
2076 + TP_fast_assign(
2077 + __entry->pool_name = pool_name;
2078 + __entry->bits = bits;
2079 + __entry->entropy_count = entropy_count;
2080 + __entry->entropy_total = entropy_total;
2081 + __entry->IP = IP;
2082 + ),
2083 +
2084 + TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
2085 + "caller %pF", __entry->pool_name, __entry->bits,
2086 + __entry->entropy_count, __entry->entropy_total,
2087 + (void *)__entry->IP)
2088 +);
2089 +
2090 +TRACE_EVENT(get_random_bytes,
2091 + TP_PROTO(int nbytes, unsigned long IP),
2092 +
2093 + TP_ARGS(nbytes, IP),
2094 +
2095 + TP_STRUCT__entry(
2096 + __field( int, nbytes )
2097 + __field(unsigned long, IP )
2098 + ),
2099 +
2100 + TP_fast_assign(
2101 + __entry->nbytes = nbytes;
2102 + __entry->IP = IP;
2103 + ),
2104 +
2105 + TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
2106 +);
2107 +
2108 +DECLARE_EVENT_CLASS(random__extract_entropy,
2109 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
2110 + unsigned long IP),
2111 +
2112 + TP_ARGS(pool_name, nbytes, entropy_count, IP),
2113 +
2114 + TP_STRUCT__entry(
2115 + __field( const char *, pool_name )
2116 + __field( int, nbytes )
2117 + __field( int, entropy_count )
2118 + __field(unsigned long, IP )
2119 + ),
2120 +
2121 + TP_fast_assign(
2122 + __entry->pool_name = pool_name;
2123 + __entry->nbytes = nbytes;
2124 + __entry->entropy_count = entropy_count;
2125 + __entry->IP = IP;
2126 + ),
2127 +
2128 + TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
2129 + __entry->pool_name, __entry->nbytes, __entry->entropy_count,
2130 + (void *)__entry->IP)
2131 +);
2132 +
2133 +
2134 +DEFINE_EVENT(random__extract_entropy, extract_entropy,
2135 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
2136 + unsigned long IP),
2137 +
2138 + TP_ARGS(pool_name, nbytes, entropy_count, IP)
2139 +);
2140 +
2141 +DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
2142 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
2143 + unsigned long IP),
2144 +
2145 + TP_ARGS(pool_name, nbytes, entropy_count, IP)
2146 +);
2147 +
2148 +
2149 +
2150 +#endif /* _TRACE_RANDOM_H */
2151 +
2152 +/* This part must be outside protection */
2153 +#include <trace/define_trace.h>
2154 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
2155 index bdb1803..131ca17 100644
2156 --- a/kernel/irq/handle.c
2157 +++ b/kernel/irq/handle.c
2158 @@ -133,7 +133,7 @@ irqreturn_t
2159 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
2160 {
2161 irqreturn_t retval = IRQ_NONE;
2162 - unsigned int random = 0, irq = desc->irq_data.irq;
2163 + unsigned int flags = 0, irq = desc->irq_data.irq;
2164
2165 do {
2166 irqreturn_t res;
2167 @@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
2168
2169 /* Fall through to add to randomness */
2170 case IRQ_HANDLED:
2171 - random |= action->flags;
2172 + flags |= action->flags;
2173 break;
2174
2175 default:
2176 @@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
2177 action = action->next;
2178 } while (action);
2179
2180 - if (random & IRQF_SAMPLE_RANDOM)
2181 - add_interrupt_randomness(irq);
2182 + add_interrupt_randomness(irq, flags);
2183
2184 if (!noirqdebug)
2185 note_interrupt(irq, desc, retval);
2186 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
2187 index 89a3ea8..b9d1d83 100644
2188 --- a/kernel/irq/manage.c
2189 +++ b/kernel/irq/manage.c
2190 @@ -890,22 +890,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
2191 return -ENOSYS;
2192 if (!try_module_get(desc->owner))
2193 return -ENODEV;
2194 - /*
2195 - * Some drivers like serial.c use request_irq() heavily,
2196 - * so we have to be careful not to interfere with a
2197 - * running system.
2198 - */
2199 - if (new->flags & IRQF_SAMPLE_RANDOM) {
2200 - /*
2201 - * This function might sleep, we want to call it first,
2202 - * outside of the atomic block.
2203 - * Yes, this might clear the entropy pool if the wrong
2204 - * driver is attempted to be loaded, without actually
2205 - * installing a new handler, but is this really a problem,
2206 - * only the sysadmin is able to do this.
2207 - */
2208 - rand_initialize_irq(irq);
2209 - }
2210
2211 /*
2212 * Check whether the interrupt nests into another interrupt
2213 @@ -1339,7 +1323,6 @@ EXPORT_SYMBOL(free_irq);
2214 * Flags:
2215 *
2216 * IRQF_SHARED Interrupt is shared
2217 - * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
2218 * IRQF_TRIGGER_* Specify active edge(s) or level
2219 *
2220 */
2221 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2222 index 263e177..a799df5 100644
2223 --- a/mm/hugetlb.c
2224 +++ b/mm/hugetlb.c
2225 @@ -2392,6 +2392,22 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2226 {
2227 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2228 __unmap_hugepage_range(vma, start, end, ref_page);
2229 + /*
2230 + * Clear this flag so that x86's huge_pmd_share page_table_shareable
2231 + * test will fail on a vma being torn down, and not grab a page table
2232 + * on its way out. We're lucky that the flag has such an appropriate
2233 + * name, and can in fact be safely cleared here. We could clear it
2234 + * before the __unmap_hugepage_range above, but all that's necessary
2235 + * is to clear it before releasing the i_mmap_mutex below.
2236 + *
2237 + * This works because in the contexts this is called, the VMA is
2238 + * going to be destroyed. It is not vunerable to madvise(DONTNEED)
2239 + * because madvise is not supported on hugetlbfs. The same applies
2240 + * for direct IO. unmap_hugepage_range() is only being called just
2241 + * before free_pgtables() so clearing VM_MAYSHARE will not cause
2242 + * surprises later.
2243 + */
2244 + vma->vm_flags &= ~VM_MAYSHARE;
2245 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2246 }
2247
2248 @@ -2958,9 +2974,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2249 }
2250 }
2251 spin_unlock(&mm->page_table_lock);
2252 - mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2253 -
2254 + /*
2255 + * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
2256 + * may have cleared our pud entry and done put_page on the page table:
2257 + * once we release i_mmap_mutex, another task can do the final put_page
2258 + * and that page table be reused and filled with junk.
2259 + */
2260 flush_tlb_range(vma, start, end);
2261 + mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2262 }
2263
2264 int hugetlb_reserve_pages(struct inode *inode,
2265 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2266 index 0de20d7..274c3cc 100644
2267 --- a/mm/memory-failure.c
2268 +++ b/mm/memory-failure.c
2269 @@ -1433,8 +1433,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
2270 /* Keep page count to indicate a given hugepage is isolated. */
2271
2272 list_add(&hpage->lru, &pagelist);
2273 - ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
2274 - true);
2275 + ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
2276 + MIGRATE_SYNC);
2277 if (ret) {
2278 struct page *page1, *page2;
2279 list_for_each_entry_safe(page1, page2, &pagelist, lru)
2280 @@ -1563,7 +1563,7 @@ int soft_offline_page(struct page *page, int flags)
2281 page_is_file_cache(page));
2282 list_add(&page->lru, &pagelist);
2283 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
2284 - 0, MIGRATE_SYNC);
2285 + false, MIGRATE_SYNC);
2286 if (ret) {
2287 putback_lru_pages(&pagelist);
2288 pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
2289 diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
2290 index 9a611d3..862b608 100644
2291 --- a/mm/mmu_notifier.c
2292 +++ b/mm/mmu_notifier.c
2293 @@ -33,6 +33,24 @@
2294 void __mmu_notifier_release(struct mm_struct *mm)
2295 {
2296 struct mmu_notifier *mn;
2297 + struct hlist_node *n;
2298 +
2299 + /*
2300 + * RCU here will block mmu_notifier_unregister until
2301 + * ->release returns.
2302 + */
2303 + rcu_read_lock();
2304 + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
2305 + /*
2306 + * if ->release runs before mmu_notifier_unregister it
2307 + * must be handled as it's the only way for the driver
2308 + * to flush all existing sptes and stop the driver
2309 + * from establishing any more sptes before all the
2310 + * pages in the mm are freed.
2311 + */
2312 + if (mn->ops->release)
2313 + mn->ops->release(mn, mm);
2314 + rcu_read_unlock();
2315
2316 spin_lock(&mm->mmu_notifier_mm->lock);
2317 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
2318 @@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
2319 * mmu_notifier_unregister to return.
2320 */
2321 hlist_del_init_rcu(&mn->hlist);
2322 - /*
2323 - * RCU here will block mmu_notifier_unregister until
2324 - * ->release returns.
2325 - */
2326 - rcu_read_lock();
2327 - spin_unlock(&mm->mmu_notifier_mm->lock);
2328 - /*
2329 - * if ->release runs before mmu_notifier_unregister it
2330 - * must be handled as it's the only way for the driver
2331 - * to flush all existing sptes and stop the driver
2332 - * from establishing any more sptes before all the
2333 - * pages in the mm are freed.
2334 - */
2335 - if (mn->ops->release)
2336 - mn->ops->release(mn, mm);
2337 - rcu_read_unlock();
2338 - spin_lock(&mm->mmu_notifier_mm->lock);
2339 }
2340 spin_unlock(&mm->mmu_notifier_mm->lock);
2341
2342 @@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
2343 {
2344 BUG_ON(atomic_read(&mm->mm_count) <= 0);
2345
2346 - spin_lock(&mm->mmu_notifier_mm->lock);
2347 if (!hlist_unhashed(&mn->hlist)) {
2348 - hlist_del_rcu(&mn->hlist);
2349 -
2350 /*
2351 * RCU here will force exit_mmap to wait ->release to finish
2352 * before freeing the pages.
2353 */
2354 rcu_read_lock();
2355 - spin_unlock(&mm->mmu_notifier_mm->lock);
2356 +
2357 /*
2358 * exit_mmap will block in mmu_notifier_release to
2359 * guarantee ->release is called before freeing the
2360 @@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
2361 if (mn->ops->release)
2362 mn->ops->release(mn, mm);
2363 rcu_read_unlock();
2364 - } else
2365 +
2366 + spin_lock(&mm->mmu_notifier_mm->lock);
2367 + hlist_del_rcu(&mn->hlist);
2368 spin_unlock(&mm->mmu_notifier_mm->lock);
2369 + }
2370
2371 /*
2372 * Wait any running method to finish, of course including
2373 diff --git a/net/core/dev.c b/net/core/dev.c
2374 index 533c586..c299416 100644
2375 --- a/net/core/dev.c
2376 +++ b/net/core/dev.c
2377 @@ -1173,6 +1173,7 @@ static int __dev_open(struct net_device *dev)
2378 net_dmaengine_get();
2379 dev_set_rx_mode(dev);
2380 dev_activate(dev);
2381 + add_device_randomness(dev->dev_addr, dev->addr_len);
2382 }
2383
2384 return ret;
2385 @@ -4765,6 +4766,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
2386 err = ops->ndo_set_mac_address(dev, sa);
2387 if (!err)
2388 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
2389 + add_device_randomness(dev->dev_addr, dev->addr_len);
2390 return err;
2391 }
2392 EXPORT_SYMBOL(dev_set_mac_address);
2393 @@ -5543,6 +5545,7 @@ int register_netdevice(struct net_device *dev)
2394 dev_init_scheduler(dev);
2395 dev_hold(dev);
2396 list_netdevice(dev);
2397 + add_device_randomness(dev->dev_addr, dev->addr_len);
2398
2399 /* Notify protocols, that a new device appeared. */
2400 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
2401 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2402 index b8052ba..900fc61 100644
2403 --- a/net/core/rtnetlink.c
2404 +++ b/net/core/rtnetlink.c
2405 @@ -1376,6 +1376,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
2406 goto errout;
2407 send_addr_notify = 1;
2408 modified = 1;
2409 + add_device_randomness(dev->dev_addr, dev->addr_len);
2410 }
2411
2412 if (tb[IFLA_MTU]) {
2413 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
2414 index e5fbb7c..e80fa33 100644
2415 --- a/net/mac80211/mesh.c
2416 +++ b/net/mac80211/mesh.c
2417 @@ -595,6 +595,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
2418
2419 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
2420 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
2421 + del_timer_sync(&sdata->u.mesh.mesh_path_timer);
2422 /*
2423 * If the timer fired while we waited for it, it will have
2424 * requeued the work. Now the work will be running again
2425 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2426 index 25302c8..57f2731 100644
2427 --- a/net/sunrpc/clnt.c
2428 +++ b/net/sunrpc/clnt.c
2429 @@ -1846,12 +1846,13 @@ call_timeout(struct rpc_task *task)
2430 return;
2431 }
2432 if (RPC_IS_SOFT(task)) {
2433 - if (clnt->cl_chatty)
2434 + if (clnt->cl_chatty) {
2435 rcu_read_lock();
2436 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
2437 clnt->cl_protname,
2438 rcu_dereference(clnt->cl_xprt)->servername);
2439 rcu_read_unlock();
2440 + }
2441 if (task->tk_flags & RPC_TASK_TIMEOUT)
2442 rpc_exit(task, -ETIMEDOUT);
2443 else
2444 diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
2445 index 4c38b33..4d53ad5 100644
2446 --- a/net/sunrpc/rpcb_clnt.c
2447 +++ b/net/sunrpc/rpcb_clnt.c
2448 @@ -251,7 +251,7 @@ static int rpcb_create_local_unix(struct net *net)
2449 if (IS_ERR(clnt)) {
2450 dprintk("RPC: failed to create AF_LOCAL rpcbind "
2451 "client (errno %ld).\n", PTR_ERR(clnt));
2452 - result = -PTR_ERR(clnt);
2453 + result = PTR_ERR(clnt);
2454 goto out;
2455 }
2456
2457 @@ -298,7 +298,7 @@ static int rpcb_create_local_net(struct net *net)
2458 if (IS_ERR(clnt)) {
2459 dprintk("RPC: failed to create local rpcbind "
2460 "client (errno %ld).\n", PTR_ERR(clnt));
2461 - result = -PTR_ERR(clnt);
2462 + result = PTR_ERR(clnt);
2463 goto out;
2464 }
2465
2466 diff --git a/net/wireless/core.c b/net/wireless/core.c
2467 index ccdfed8..bb5302d 100644
2468 --- a/net/wireless/core.c
2469 +++ b/net/wireless/core.c
2470 @@ -975,6 +975,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
2471 */
2472 synchronize_rcu();
2473 INIT_LIST_HEAD(&wdev->list);
2474 + /*
2475 + * Ensure that all events have been processed and
2476 + * freed.
2477 + */
2478 + cfg80211_process_wdev_events(wdev);
2479 break;
2480 case NETDEV_PRE_UP:
2481 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
2482 diff --git a/net/wireless/core.h b/net/wireless/core.h
2483 index 3ac2dd0..ce5597c 100644
2484 --- a/net/wireless/core.h
2485 +++ b/net/wireless/core.h
2486 @@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
2487 struct net_device *dev, enum nl80211_iftype ntype,
2488 u32 *flags, struct vif_params *params);
2489 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
2490 +void cfg80211_process_wdev_events(struct wireless_dev *wdev);
2491
2492 int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
2493 struct wireless_dev *wdev,
2494 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2495 index baf5704..460af03 100644
2496 --- a/net/wireless/reg.c
2497 +++ b/net/wireless/reg.c
2498 @@ -891,7 +891,21 @@ static void handle_channel(struct wiphy *wiphy,
2499 chan->max_antenna_gain = min(chan->orig_mag,
2500 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
2501 chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
2502 - chan->max_power = min(chan->max_power, chan->max_reg_power);
2503 + if (chan->orig_mpwr) {
2504 + /*
2505 + * Devices that have their own custom regulatory domain
2506 + * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
2507 + * passed country IE power settings.
2508 + */
2509 + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
2510 + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
2511 + wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
2512 + chan->max_power = chan->max_reg_power;
2513 + else
2514 + chan->max_power = min(chan->orig_mpwr,
2515 + chan->max_reg_power);
2516 + } else
2517 + chan->max_power = chan->max_reg_power;
2518 }
2519
2520 static void handle_band(struct wiphy *wiphy,
2521 diff --git a/net/wireless/util.c b/net/wireless/util.c
2522 index 0eb6cc0..d835377 100644
2523 --- a/net/wireless/util.c
2524 +++ b/net/wireless/util.c
2525 @@ -717,7 +717,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
2526 wdev->connect_keys = NULL;
2527 }
2528
2529 -static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
2530 +void cfg80211_process_wdev_events(struct wireless_dev *wdev)
2531 {
2532 struct cfg80211_event *ev;
2533 unsigned long flags;
2534 @@ -974,6 +974,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
2535 }
2536 mutex_unlock(&rdev->devlist_mtx);
2537
2538 + if (total == 1)
2539 + return 0;
2540 +
2541 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
2542 const struct ieee80211_iface_combination *c;
2543 struct ieee80211_iface_limit *limits;
2544 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2545 index d906c5b..3897027 100644
2546 --- a/sound/pci/hda/patch_conexant.c
2547 +++ b/sound/pci/hda/patch_conexant.c
2548 @@ -2975,7 +2975,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2549 SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
2550 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
2551 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
2552 - SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
2553 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
2554 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
2555 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
2556 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2557 index 62e1627..152d91b 100644
2558 --- a/sound/pci/hda/patch_realtek.c
2559 +++ b/sound/pci/hda/patch_realtek.c
2560 @@ -6056,6 +6056,8 @@ static const struct alc_fixup alc269_fixups[] = {
2561 [ALC269_FIXUP_PCM_44K] = {
2562 .type = ALC_FIXUP_FUNC,
2563 .v.func = alc269_fixup_pcm_44k,
2564 + .chained = true,
2565 + .chain_id = ALC269_FIXUP_QUANTA_MUTE
2566 },
2567 [ALC269_FIXUP_STEREO_DMIC] = {
2568 .type = ALC_FIXUP_FUNC,
2569 @@ -6157,9 +6159,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2570 SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
2571 SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
2572 SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
2573 + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
2574 + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
2575 SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
2576 - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
2577 - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
2578 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
2579 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
2580
2581 #if 0

  ViewVC Help
Powered by ViewVC 1.1.20