Contents of /genpatches-2.6/tags/3.0-30/1040_linux-3.0.41.patch

Revision 2206
Mon Sep 17 18:58:14 2012 UTC by mpagano
File size: 60381 bytes
3.0-30 release
1 diff --git a/MAINTAINERS b/MAINTAINERS
2 index de85391..c8c0874 100644
3 --- a/MAINTAINERS
4 +++ b/MAINTAINERS
5 @@ -5247,7 +5247,7 @@ F: Documentation/blockdev/ramdisk.txt
6 F: drivers/block/brd.c
7
8 RANDOM NUMBER DRIVER
9 -M: Matt Mackall <mpm@selenic.com>
10 +M: Theodore Ts'o" <tytso@mit.edu>
11 S: Maintained
12 F: drivers/char/random.c
13
14 diff --git a/Makefile b/Makefile
15 index ec4fee5..2cbfd97 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 3
20 PATCHLEVEL = 0
21 -SUBLEVEL = 40
22 +SUBLEVEL = 41
23 EXTRAVERSION =
24 NAME = Sneaky Weasel
25
26 diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
27 index 2bf2243..166d6aa 100644
28 --- a/arch/arm/configs/mxs_defconfig
29 +++ b/arch/arm/configs/mxs_defconfig
30 @@ -29,7 +29,6 @@ CONFIG_NO_HZ=y
31 CONFIG_HIGH_RES_TIMERS=y
32 CONFIG_PREEMPT_VOLUNTARY=y
33 CONFIG_AEABI=y
34 -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
35 CONFIG_AUTO_ZRELADDR=y
36 CONFIG_FPE_NWFPE=y
37 CONFIG_NET=y
38 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
39 index 1252a26..42dec04 100644
40 --- a/arch/arm/include/asm/cacheflush.h
41 +++ b/arch/arm/include/asm/cacheflush.h
42 @@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
43 static inline void
44 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
45 {
46 - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
47 + struct mm_struct *mm = vma->vm_mm;
48 +
49 + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
50 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
51 vma->vm_flags);
52 }
53 @@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
54 static inline void
55 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
56 {
57 - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
58 + struct mm_struct *mm = vma->vm_mm;
59 +
60 + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
61 unsigned long addr = user_addr & PAGE_MASK;
62 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
63 }
64 diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
65 index 53cd5b4..875634a 100644
66 --- a/arch/arm/mm/tlb-v7.S
67 +++ b/arch/arm/mm/tlb-v7.S
68 @@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
69 dsb
70 mov r0, r0, lsr #PAGE_SHIFT @ align address
71 mov r1, r1, lsr #PAGE_SHIFT
72 +#ifdef CONFIG_ARM_ERRATA_720789
73 + mov r3, #0
74 +#else
75 asid r3, r3 @ mask ASID
76 +#endif
77 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
78 mov r1, r1, lsl #PAGE_SHIFT
79 1:
80 +#ifdef CONFIG_ARM_ERRATA_720789
81 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
82 +#else
83 ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
84 +#endif
85 ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
86
87 add r0, r0, #PAGE_SZ
88 @@ -70,7 +78,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
89 mov r0, r0, lsl #PAGE_SHIFT
90 mov r1, r1, lsl #PAGE_SHIFT
91 1:
92 +#ifdef CONFIG_ARM_ERRATA_720789
93 + ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
94 +#else
95 ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
96 +#endif
97 ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
98 add r0, r0, #PAGE_SZ
99 cmp r0, r1
100 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
101 index 4468814..6fcc9a0 100644
102 --- a/arch/ia64/include/asm/atomic.h
103 +++ b/arch/ia64/include/asm/atomic.h
104 @@ -18,8 +18,8 @@
105 #include <asm/system.h>
106
107
108 -#define ATOMIC_INIT(i) ((atomic_t) { (i) })
109 -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
110 +#define ATOMIC_INIT(i) { (i) }
111 +#define ATOMIC64_INIT(i) { (i) }
112
113 #define atomic_read(v) (*(volatile int *)&(v)->counter)
114 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
115 diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
116 index 782c3a35..3540c5e 100644
117 --- a/arch/ia64/kernel/irq_ia64.c
118 +++ b/arch/ia64/kernel/irq_ia64.c
119 @@ -23,7 +23,6 @@
120 #include <linux/ioport.h>
121 #include <linux/kernel_stat.h>
122 #include <linux/ptrace.h>
123 -#include <linux/random.h> /* for rand_initialize_irq() */
124 #include <linux/signal.h>
125 #include <linux/smp.h>
126 #include <linux/threads.h>
127 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
128 index 5d9c61d..e5f7248 100644
129 --- a/arch/x86/include/asm/processor.h
130 +++ b/arch/x86/include/asm/processor.h
131 @@ -99,7 +99,6 @@ struct cpuinfo_x86 {
132 u16 apicid;
133 u16 initial_apicid;
134 u16 x86_clflush_size;
135 -#ifdef CONFIG_SMP
136 /* number of cores as seen by the OS: */
137 u16 booted_cores;
138 /* Physical processor id: */
139 @@ -110,7 +109,6 @@ struct cpuinfo_x86 {
140 u8 compute_unit_id;
141 /* Index into per_cpu list: */
142 u16 cpu_index;
143 -#endif
144 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
145
146 #define X86_VENDOR_INTEL 0
147 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
148 index a81f2d5..dfabea4 100644
149 --- a/arch/x86/kernel/alternative.c
150 +++ b/arch/x86/kernel/alternative.c
151 @@ -220,7 +220,7 @@ void __init arch_init_ideal_nops(void)
152 ideal_nops = intel_nops;
153 #endif
154 }
155 -
156 + break;
157 default:
158 #ifdef CONFIG_X86_64
159 ideal_nops = k8_nops;
160 diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
161 index bae1efe..be16854 100644
162 --- a/arch/x86/kernel/amd_nb.c
163 +++ b/arch/x86/kernel/amd_nb.c
164 @@ -154,16 +154,14 @@ int amd_get_subcaches(int cpu)
165 {
166 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
167 unsigned int mask;
168 - int cuid = 0;
169 + int cuid;
170
171 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
172 return 0;
173
174 pci_read_config_dword(link, 0x1d4, &mask);
175
176 -#ifdef CONFIG_SMP
177 cuid = cpu_data(cpu).compute_unit_id;
178 -#endif
179 return (mask >> (4 * cuid)) & 0xf;
180 }
181
182 @@ -172,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask)
183 static unsigned int reset, ban;
184 struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
185 unsigned int reg;
186 - int cuid = 0;
187 + int cuid;
188
189 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
190 return -EINVAL;
191 @@ -190,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask)
192 pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
193 }
194
195 -#ifdef CONFIG_SMP
196 cuid = cpu_data(cpu).compute_unit_id;
197 -#endif
198 mask <<= 4 * cuid;
199 mask |= (0xf ^ (1 << cuid)) << 26;
200
201 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
202 index b13ed39..8115040 100644
203 --- a/arch/x86/kernel/cpu/amd.c
204 +++ b/arch/x86/kernel/cpu/amd.c
205 @@ -146,7 +146,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
206
207 static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
208 {
209 -#ifdef CONFIG_SMP
210 /* calling is from identify_secondary_cpu() ? */
211 if (!c->cpu_index)
212 return;
213 @@ -190,7 +189,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
214
215 valid_k7:
216 ;
217 -#endif
218 }
219
220 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
221 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
222 index 22a073d..0cb2883 100644
223 --- a/arch/x86/kernel/cpu/common.c
224 +++ b/arch/x86/kernel/cpu/common.c
225 @@ -675,9 +675,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
226 if (this_cpu->c_early_init)
227 this_cpu->c_early_init(c);
228
229 -#ifdef CONFIG_SMP
230 c->cpu_index = 0;
231 -#endif
232 filter_cpuid_features(c, false);
233
234 setup_smep(c);
235 @@ -760,10 +758,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
236 c->apicid = c->initial_apicid;
237 # endif
238 #endif
239 -
240 -#ifdef CONFIG_X86_HT
241 c->phys_proc_id = c->initial_apicid;
242 -#endif
243 }
244
245 setup_smep(c);
246 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
247 index ed6086e..e0dc000 100644
248 --- a/arch/x86/kernel/cpu/intel.c
249 +++ b/arch/x86/kernel/cpu/intel.c
250 @@ -179,7 +179,6 @@ static void __cpuinit trap_init_f00f_bug(void)
251
252 static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
253 {
254 -#ifdef CONFIG_SMP
255 /* calling is from identify_secondary_cpu() ? */
256 if (!c->cpu_index)
257 return;
258 @@ -196,7 +195,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
259 WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
260 "with B stepping processors.\n");
261 }
262 -#endif
263 }
264
265 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
266 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
267 index ff1ae9b..942bda2 100644
268 --- a/arch/x86/kernel/cpu/mcheck/mce.c
269 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
270 @@ -122,9 +122,7 @@ void mce_setup(struct mce *m)
271 m->time = get_seconds();
272 m->cpuvendor = boot_cpu_data.x86_vendor;
273 m->cpuid = cpuid_eax(1);
274 -#ifdef CONFIG_SMP
275 m->socketid = cpu_data(m->extcpu).phys_proc_id;
276 -#endif
277 m->apicid = cpu_data(m->extcpu).initial_apicid;
278 rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
279 }
280 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
281 index dc4fb77..b97aa72 100644
282 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
283 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
284 @@ -65,11 +65,9 @@ struct threshold_bank {
285 };
286 static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
287
288 -#ifdef CONFIG_SMP
289 static unsigned char shared_bank[NR_BANKS] = {
290 0, 0, 0, 0, 1
291 };
292 -#endif
293
294 static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
295
296 @@ -227,10 +225,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
297
298 if (!block)
299 per_cpu(bank_map, cpu) |= (1 << bank);
300 -#ifdef CONFIG_SMP
301 +
302 if (shared_bank[bank] && c->cpu_core_id)
303 break;
304 -#endif
305
306 memset(&b, 0, sizeof(b));
307 b.cpu = cpu;
308 diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
309 index 62ac8cb..72c365a 100644
310 --- a/arch/x86/kernel/cpu/proc.c
311 +++ b/arch/x86/kernel/cpu/proc.c
312 @@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
313 static int show_cpuinfo(struct seq_file *m, void *v)
314 {
315 struct cpuinfo_x86 *c = v;
316 - unsigned int cpu = 0;
317 + unsigned int cpu;
318 int i;
319
320 -#ifdef CONFIG_SMP
321 cpu = c->cpu_index;
322 -#endif
323 seq_printf(m, "processor\t: %u\n"
324 "vendor_id\t: %s\n"
325 "cpu family\t: %d\n"
326 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
327 index f924280..c4e2465 100644
328 --- a/arch/x86/kernel/microcode_core.c
329 +++ b/arch/x86/kernel/microcode_core.c
330 @@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev,
331 const char *buf, size_t size)
332 {
333 unsigned long val;
334 - int cpu = dev->id;
335 - int ret = 0;
336 - char *end;
337 + int cpu;
338 + ssize_t ret = 0, tmp_ret;
339
340 - val = simple_strtoul(buf, &end, 0);
341 - if (end == buf)
342 + /* allow reload only from the BSP */
343 + if (boot_cpu_data.cpu_index != dev->id)
344 return -EINVAL;
345
346 - if (val == 1) {
347 - get_online_cpus();
348 - if (cpu_online(cpu))
349 - ret = reload_for_cpu(cpu);
350 - put_online_cpus();
351 + ret = kstrtoul(buf, 0, &val);
352 + if (ret)
353 + return ret;
354 +
355 + if (val != 1)
356 + return size;
357 +
358 + get_online_cpus();
359 + for_each_online_cpu(cpu) {
360 + tmp_ret = reload_for_cpu(cpu);
361 + if (tmp_ret != 0)
362 + pr_warn("Error reloading microcode on CPU %d\n", cpu);
363 +
364 + /* save retval of the first encountered reload error */
365 + if (!ret)
366 + ret = tmp_ret;
367 }
368 + put_online_cpus();
369
370 if (!ret)
371 ret = size;
372 diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
373 index 25d139c..579051c 100644
374 --- a/drivers/char/mspec.c
375 +++ b/drivers/char/mspec.c
376 @@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
377 vdata->flags = flags;
378 vdata->type = type;
379 spin_lock_init(&vdata->lock);
380 - vdata->refcnt = ATOMIC_INIT(1);
381 + atomic_set(&vdata->refcnt, 1);
382 vma->vm_private_data = vdata;
383
384 vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
385 diff --git a/drivers/char/random.c b/drivers/char/random.c
386 index c35a785..fceac95 100644
387 --- a/drivers/char/random.c
388 +++ b/drivers/char/random.c
389 @@ -125,21 +125,26 @@
390 * The current exported interfaces for gathering environmental noise
391 * from the devices are:
392 *
393 + * void add_device_randomness(const void *buf, unsigned int size);
394 * void add_input_randomness(unsigned int type, unsigned int code,
395 * unsigned int value);
396 - * void add_interrupt_randomness(int irq);
397 + * void add_interrupt_randomness(int irq, int irq_flags);
398 * void add_disk_randomness(struct gendisk *disk);
399 *
400 + * add_device_randomness() is for adding data to the random pool that
401 + * is likely to differ between two devices (or possibly even per boot).
402 + * This would be things like MAC addresses or serial numbers, or the
403 + * read-out of the RTC. This does *not* add any actual entropy to the
404 + * pool, but it initializes the pool to different values for devices
405 + * that might otherwise be identical and have very little entropy
406 + * available to them (particularly common in the embedded world).
407 + *
408 * add_input_randomness() uses the input layer interrupt timing, as well as
409 * the event type information from the hardware.
410 *
411 - * add_interrupt_randomness() uses the inter-interrupt timing as random
412 - * inputs to the entropy pool. Note that not all interrupts are good
413 - * sources of randomness! For example, the timer interrupts is not a
414 - * good choice, because the periodicity of the interrupts is too
415 - * regular, and hence predictable to an attacker. Network Interface
416 - * Controller interrupts are a better measure, since the timing of the
417 - * NIC interrupts are more unpredictable.
418 + * add_interrupt_randomness() uses the interrupt timing as random
419 + * inputs to the entropy pool. Using the cycle counters and the irq source
420 + * as inputs, it feeds the randomness roughly once a second.
421 *
422 * add_disk_randomness() uses what amounts to the seek time of block
423 * layer request events, on a per-disk_devt basis, as input to the
424 @@ -248,6 +253,8 @@
425 #include <linux/percpu.h>
426 #include <linux/cryptohash.h>
427 #include <linux/fips.h>
428 +#include <linux/ptrace.h>
429 +#include <linux/kmemcheck.h>
430
431 #ifdef CONFIG_GENERIC_HARDIRQS
432 # include <linux/irq.h>
433 @@ -256,8 +263,12 @@
434 #include <asm/processor.h>
435 #include <asm/uaccess.h>
436 #include <asm/irq.h>
437 +#include <asm/irq_regs.h>
438 #include <asm/io.h>
439
440 +#define CREATE_TRACE_POINTS
441 +#include <trace/events/random.h>
442 +
443 /*
444 * Configuration information
445 */
446 @@ -266,6 +277,8 @@
447 #define SEC_XFER_SIZE 512
448 #define EXTRACT_SIZE 10
449
450 +#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
451 +
452 /*
453 * The minimum number of bits of entropy before we wake up a read on
454 * /dev/random. Should be enough to do a significant reseed.
455 @@ -420,8 +433,10 @@ struct entropy_store {
456 /* read-write data: */
457 spinlock_t lock;
458 unsigned add_ptr;
459 + unsigned input_rotate;
460 int entropy_count;
461 - int input_rotate;
462 + int entropy_total;
463 + unsigned int initialized:1;
464 __u8 last_data[EXTRACT_SIZE];
465 };
466
467 @@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
468 .pool = nonblocking_pool_data
469 };
470
471 +static __u32 const twist_table[8] = {
472 + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
473 + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
474 +
475 /*
476 * This function adds bytes into the entropy "pool". It does not
477 * update the entropy estimate. The caller should call
478 @@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
479 * it's cheap to do so and helps slightly in the expected case where
480 * the entropy is concentrated in the low-order bits.
481 */
482 -static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
483 - int nbytes, __u8 out[64])
484 +static void _mix_pool_bytes(struct entropy_store *r, const void *in,
485 + int nbytes, __u8 out[64])
486 {
487 - static __u32 const twist_table[8] = {
488 - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
489 - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
490 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
491 int input_rotate;
492 int wordmask = r->poolinfo->poolwords - 1;
493 const char *bytes = in;
494 __u32 w;
495 - unsigned long flags;
496
497 - /* Taps are constant, so we can load them without holding r->lock. */
498 tap1 = r->poolinfo->tap1;
499 tap2 = r->poolinfo->tap2;
500 tap3 = r->poolinfo->tap3;
501 tap4 = r->poolinfo->tap4;
502 tap5 = r->poolinfo->tap5;
503
504 - spin_lock_irqsave(&r->lock, flags);
505 - input_rotate = r->input_rotate;
506 - i = r->add_ptr;
507 + smp_rmb();
508 + input_rotate = ACCESS_ONCE(r->input_rotate);
509 + i = ACCESS_ONCE(r->add_ptr);
510
511 /* mix one byte at a time to simplify size handling and churn faster */
512 while (nbytes--) {
513 @@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
514 input_rotate += i ? 7 : 14;
515 }
516
517 - r->input_rotate = input_rotate;
518 - r->add_ptr = i;
519 + ACCESS_ONCE(r->input_rotate) = input_rotate;
520 + ACCESS_ONCE(r->add_ptr) = i;
521 + smp_wmb();
522
523 if (out)
524 for (j = 0; j < 16; j++)
525 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
526 +}
527
528 +static void __mix_pool_bytes(struct entropy_store *r, const void *in,
529 + int nbytes, __u8 out[64])
530 +{
531 + trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
532 + _mix_pool_bytes(r, in, nbytes, out);
533 +}
534 +
535 +static void mix_pool_bytes(struct entropy_store *r, const void *in,
536 + int nbytes, __u8 out[64])
537 +{
538 + unsigned long flags;
539 +
540 + trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
541 + spin_lock_irqsave(&r->lock, flags);
542 + _mix_pool_bytes(r, in, nbytes, out);
543 spin_unlock_irqrestore(&r->lock, flags);
544 }
545
546 -static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
547 +struct fast_pool {
548 + __u32 pool[4];
549 + unsigned long last;
550 + unsigned short count;
551 + unsigned char rotate;
552 + unsigned char last_timer_intr;
553 +};
554 +
555 +/*
556 + * This is a fast mixing routine used by the interrupt randomness
557 + * collector. It's hardcoded for an 128 bit pool and assumes that any
558 + * locks that might be needed are taken by the caller.
559 + */
560 +static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
561 {
562 - mix_pool_bytes_extract(r, in, bytes, NULL);
563 + const char *bytes = in;
564 + __u32 w;
565 + unsigned i = f->count;
566 + unsigned input_rotate = f->rotate;
567 +
568 + while (nbytes--) {
569 + w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
570 + f->pool[(i + 1) & 3];
571 + f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
572 + input_rotate += (i++ & 3) ? 7 : 14;
573 + }
574 + f->count = i;
575 + f->rotate = input_rotate;
576 }
577
578 /*
579 @@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
580 */
581 static void credit_entropy_bits(struct entropy_store *r, int nbits)
582 {
583 - unsigned long flags;
584 - int entropy_count;
585 + int entropy_count, orig;
586
587 if (!nbits)
588 return;
589
590 - spin_lock_irqsave(&r->lock, flags);
591 -
592 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
593 - entropy_count = r->entropy_count;
594 +retry:
595 + entropy_count = orig = ACCESS_ONCE(r->entropy_count);
596 entropy_count += nbits;
597 +
598 if (entropy_count < 0) {
599 DEBUG_ENT("negative entropy/overflow\n");
600 entropy_count = 0;
601 } else if (entropy_count > r->poolinfo->POOLBITS)
602 entropy_count = r->poolinfo->POOLBITS;
603 - r->entropy_count = entropy_count;
604 + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
605 + goto retry;
606 +
607 + if (!r->initialized && nbits > 0) {
608 + r->entropy_total += nbits;
609 + if (r->entropy_total > 128)
610 + r->initialized = 1;
611 + }
612 +
613 + trace_credit_entropy_bits(r->name, nbits, entropy_count,
614 + r->entropy_total, _RET_IP_);
615
616 /* should we wake readers? */
617 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
618 wake_up_interruptible(&random_read_wait);
619 kill_fasync(&fasync, SIGIO, POLL_IN);
620 }
621 - spin_unlock_irqrestore(&r->lock, flags);
622 }
623
624 /*********************************************************************
625 @@ -572,42 +636,24 @@ struct timer_rand_state {
626 unsigned dont_count_entropy:1;
627 };
628
629 -#ifndef CONFIG_GENERIC_HARDIRQS
630 -
631 -static struct timer_rand_state *irq_timer_state[NR_IRQS];
632 -
633 -static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
634 -{
635 - return irq_timer_state[irq];
636 -}
637 -
638 -static void set_timer_rand_state(unsigned int irq,
639 - struct timer_rand_state *state)
640 -{
641 - irq_timer_state[irq] = state;
642 -}
643 -
644 -#else
645 -
646 -static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
647 -{
648 - struct irq_desc *desc;
649 -
650 - desc = irq_to_desc(irq);
651 -
652 - return desc->timer_rand_state;
653 -}
654 -
655 -static void set_timer_rand_state(unsigned int irq,
656 - struct timer_rand_state *state)
657 +/*
658 + * Add device- or boot-specific data to the input and nonblocking
659 + * pools to help initialize them to unique values.
660 + *
661 + * None of this adds any entropy, it is meant to avoid the
662 + * problem of the nonblocking pool having similar initial state
663 + * across largely identical devices.
664 + */
665 +void add_device_randomness(const void *buf, unsigned int size)
666 {
667 - struct irq_desc *desc;
668 -
669 - desc = irq_to_desc(irq);
670 + unsigned long time = get_cycles() ^ jiffies;
671
672 - desc->timer_rand_state = state;
673 + mix_pool_bytes(&input_pool, buf, size, NULL);
674 + mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
675 + mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
676 + mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
677 }
678 -#endif
679 +EXPORT_SYMBOL(add_device_randomness);
680
681 static struct timer_rand_state input_timer_state;
682
683 @@ -624,8 +670,8 @@ static struct timer_rand_state input_timer_state;
684 static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
685 {
686 struct {
687 - cycles_t cycles;
688 long jiffies;
689 + unsigned cycles;
690 unsigned num;
691 } sample;
692 long delta, delta2, delta3;
693 @@ -639,7 +685,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
694 sample.jiffies = jiffies;
695 sample.cycles = get_cycles();
696 sample.num = num;
697 - mix_pool_bytes(&input_pool, &sample, sizeof(sample));
698 + mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
699
700 /*
701 * Calculate number of bits of randomness we probably added.
702 @@ -696,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
703 }
704 EXPORT_SYMBOL_GPL(add_input_randomness);
705
706 -void add_interrupt_randomness(int irq)
707 +static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
708 +
709 +void add_interrupt_randomness(int irq, int irq_flags)
710 {
711 - struct timer_rand_state *state;
712 + struct entropy_store *r;
713 + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
714 + struct pt_regs *regs = get_irq_regs();
715 + unsigned long now = jiffies;
716 + __u32 input[4], cycles = get_cycles();
717 +
718 + input[0] = cycles ^ jiffies;
719 + input[1] = irq;
720 + if (regs) {
721 + __u64 ip = instruction_pointer(regs);
722 + input[2] = ip;
723 + input[3] = ip >> 32;
724 + }
725
726 - state = get_timer_rand_state(irq);
727 + fast_mix(fast_pool, input, sizeof(input));
728
729 - if (state == NULL)
730 + if ((fast_pool->count & 1023) &&
731 + !time_after(now, fast_pool->last + HZ))
732 return;
733
734 - DEBUG_ENT("irq event %d\n", irq);
735 - add_timer_randomness(state, 0x100 + irq);
736 + fast_pool->last = now;
737 +
738 + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
739 + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
740 + /*
741 + * If we don't have a valid cycle counter, and we see
742 + * back-to-back timer interrupts, then skip giving credit for
743 + * any entropy.
744 + */
745 + if (cycles == 0) {
746 + if (irq_flags & __IRQF_TIMER) {
747 + if (fast_pool->last_timer_intr)
748 + return;
749 + fast_pool->last_timer_intr = 1;
750 + } else
751 + fast_pool->last_timer_intr = 0;
752 + }
753 + credit_entropy_bits(r, 1);
754 }
755
756 #ifdef CONFIG_BLOCK
757 @@ -738,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
758 */
759 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
760 {
761 - __u32 tmp[OUTPUT_POOL_WORDS];
762 + __u32 tmp[OUTPUT_POOL_WORDS];
763
764 if (r->pull && r->entropy_count < nbytes * 8 &&
765 r->entropy_count < r->poolinfo->POOLBITS) {
766 @@ -757,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
767
768 bytes = extract_entropy(r->pull, tmp, bytes,
769 random_read_wakeup_thresh / 8, rsvd);
770 - mix_pool_bytes(r, tmp, bytes);
771 + mix_pool_bytes(r, tmp, bytes, NULL);
772 credit_entropy_bits(r, bytes*8);
773 }
774 }
775 @@ -816,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
776 static void extract_buf(struct entropy_store *r, __u8 *out)
777 {
778 int i;
779 - __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
780 + union {
781 + __u32 w[5];
782 + unsigned long l[LONGS(EXTRACT_SIZE)];
783 + } hash;
784 + __u32 workspace[SHA_WORKSPACE_WORDS];
785 __u8 extract[64];
786 + unsigned long flags;
787
788 /* Generate a hash across the pool, 16 words (512 bits) at a time */
789 - sha_init(hash);
790 + sha_init(hash.w);
791 + spin_lock_irqsave(&r->lock, flags);
792 for (i = 0; i < r->poolinfo->poolwords; i += 16)
793 - sha_transform(hash, (__u8 *)(r->pool + i), workspace);
794 + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
795
796 /*
797 * We mix the hash back into the pool to prevent backtracking
798 @@ -833,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
799 * brute-forcing the feedback as hard as brute-forcing the
800 * hash.
801 */
802 - mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
803 + __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
804 + spin_unlock_irqrestore(&r->lock, flags);
805
806 /*
807 * To avoid duplicates, we atomically extract a portion of the
808 * pool while mixing, and hash one final time.
809 */
810 - sha_transform(hash, extract, workspace);
811 + sha_transform(hash.w, extract, workspace);
812 memset(extract, 0, sizeof(extract));
813 memset(workspace, 0, sizeof(workspace));
814
815 @@ -848,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
816 * pattern, we fold it in half. Thus, we always feed back
817 * twice as much data as we output.
818 */
819 - hash[0] ^= hash[3];
820 - hash[1] ^= hash[4];
821 - hash[2] ^= rol32(hash[2], 16);
822 - memcpy(out, hash, EXTRACT_SIZE);
823 - memset(hash, 0, sizeof(hash));
824 + hash.w[0] ^= hash.w[3];
825 + hash.w[1] ^= hash.w[4];
826 + hash.w[2] ^= rol32(hash.w[2], 16);
827 +
828 + /*
829 + * If we have a architectural hardware random number
830 + * generator, mix that in, too.
831 + */
832 + for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
833 + unsigned long v;
834 + if (!arch_get_random_long(&v))
835 + break;
836 + hash.l[i] ^= v;
837 + }
838 +
839 + memcpy(out, &hash, EXTRACT_SIZE);
840 + memset(&hash, 0, sizeof(hash));
841 }
842
843 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
844 - size_t nbytes, int min, int reserved)
845 + size_t nbytes, int min, int reserved)
846 {
847 ssize_t ret = 0, i;
848 __u8 tmp[EXTRACT_SIZE];
849 - unsigned long flags;
850
851 + trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
852 xfer_secondary_pool(r, nbytes);
853 nbytes = account(r, nbytes, min, reserved);
854
855 @@ -869,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
856 extract_buf(r, tmp);
857
858 if (fips_enabled) {
859 + unsigned long flags;
860 +
861 spin_lock_irqsave(&r->lock, flags);
862 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
863 panic("Hardware RNG duplicated output!\n");
864 @@ -894,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
865 ssize_t ret = 0, i;
866 __u8 tmp[EXTRACT_SIZE];
867
868 + trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
869 xfer_secondary_pool(r, nbytes);
870 nbytes = account(r, nbytes, 0, 0);
871
872 @@ -927,8 +1026,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
873
874 /*
875 * This function is the exported kernel interface. It returns some
876 - * number of good random numbers, suitable for seeding TCP sequence
877 - * numbers, etc.
878 + * number of good random numbers, suitable for key generation, seeding
879 + * TCP sequence numbers, etc. It does not use the hw random number
880 + * generator, if available; use get_random_bytes_arch() for that.
881 */
882 void get_random_bytes(void *buf, int nbytes)
883 {
884 @@ -937,6 +1037,39 @@ void get_random_bytes(void *buf, int nbytes)
885 EXPORT_SYMBOL(get_random_bytes);
886
887 /*
888 + * This function will use the architecture-specific hardware random
889 + * number generator if it is available. The arch-specific hw RNG will
890 + * almost certainly be faster than what we can do in software, but it
891 + * is impossible to verify that it is implemented securely (as
892 + * opposed, to, say, the AES encryption of a sequence number using a
893 + * key known by the NSA). So it's useful if we need the speed, but
894 + * only if we're willing to trust the hardware manufacturer not to
895 + * have put in a back door.
896 + */
897 +void get_random_bytes_arch(void *buf, int nbytes)
898 +{
899 + char *p = buf;
900 +
901 + trace_get_random_bytes(nbytes, _RET_IP_);
902 + while (nbytes) {
903 + unsigned long v;
904 + int chunk = min(nbytes, (int)sizeof(unsigned long));
905 +
906 + if (!arch_get_random_long(&v))
907 + break;
908 +
909 + memcpy(p, &v, chunk);
910 + p += chunk;
911 + nbytes -= chunk;
912 + }
913 +
914 + if (nbytes)
915 + extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
916 +}
917 +EXPORT_SYMBOL(get_random_bytes_arch);
918 +
919 +
920 +/*
921 * init_std_data - initialize pool with system data
922 *
923 * @r: pool to initialize
924 @@ -947,18 +1080,31 @@ EXPORT_SYMBOL(get_random_bytes);
925 */
926 static void init_std_data(struct entropy_store *r)
927 {
928 - ktime_t now;
929 - unsigned long flags;
930 + int i;
931 + ktime_t now = ktime_get_real();
932 + unsigned long rv;
933
934 - spin_lock_irqsave(&r->lock, flags);
935 r->entropy_count = 0;
936 - spin_unlock_irqrestore(&r->lock, flags);
937 -
938 - now = ktime_get_real();
939 - mix_pool_bytes(r, &now, sizeof(now));
940 - mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
941 + r->entropy_total = 0;
942 + mix_pool_bytes(r, &now, sizeof(now), NULL);
943 + for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
944 + if (!arch_get_random_long(&rv))
945 + break;
946 + mix_pool_bytes(r, &rv, sizeof(rv), NULL);
947 + }
948 + mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
949 }
950
951 +/*
952 + * Note that setup_arch() may call add_device_randomness()
953 + * long before we get here. This allows seeding of the pools
954 + * with some platform dependent data very early in the boot
955 + * process. But it limits our options here. We must use
956 + * statically allocated structures that already have all
957 + * initializations complete at compile time. We should also
958 + * take care not to overwrite the precious per platform data
959 + * we were given.
960 + */
961 static int rand_initialize(void)
962 {
963 init_std_data(&input_pool);
964 @@ -968,24 +1114,6 @@ static int rand_initialize(void)
965 }
966 module_init(rand_initialize);
967
968 -void rand_initialize_irq(int irq)
969 -{
970 - struct timer_rand_state *state;
971 -
972 - state = get_timer_rand_state(irq);
973 -
974 - if (state)
975 - return;
976 -
977 - /*
978 - * If kzalloc returns null, we just won't use that entropy
979 - * source.
980 - */
981 - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
982 - if (state)
983 - set_timer_rand_state(irq, state);
984 -}
985 -
986 #ifdef CONFIG_BLOCK
987 void rand_initialize_disk(struct gendisk *disk)
988 {
989 @@ -1093,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
990 count -= bytes;
991 p += bytes;
992
993 - mix_pool_bytes(r, buf, bytes);
994 + mix_pool_bytes(r, buf, bytes, NULL);
995 cond_resched();
996 }
997
998 @@ -1236,10 +1364,15 @@ static int proc_do_uuid(ctl_table *table, int write,
999 uuid = table->data;
1000 if (!uuid) {
1001 uuid = tmp_uuid;
1002 - uuid[8] = 0;
1003 - }
1004 - if (uuid[8] == 0)
1005 generate_random_uuid(uuid);
1006 + } else {
1007 + static DEFINE_SPINLOCK(bootid_spinlock);
1008 +
1009 + spin_lock(&bootid_spinlock);
1010 + if (!uuid[8])
1011 + generate_random_uuid(uuid);
1012 + spin_unlock(&bootid_spinlock);
1013 + }
1014
1015 sprintf(buf, "%pU", uuid);
1016
1017 @@ -1318,9 +1451,14 @@ late_initcall(random_int_secret_init);
1018 DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
1019 unsigned int get_random_int(void)
1020 {
1021 - __u32 *hash = get_cpu_var(get_random_int_hash);
1022 + __u32 *hash;
1023 unsigned int ret;
1024
1025 + if (arch_get_random_int(&ret))
1026 + return ret;
1027 +
1028 + hash = get_cpu_var(get_random_int_hash);
1029 +
1030 hash[0] += current->pid + jiffies + get_cycles();
1031 md5_transform(hash, random_int_secret);
1032 ret = hash[0];
1033 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1034 index f6cf448..240966b 100644
1035 --- a/drivers/edac/i7core_edac.c
1036 +++ b/drivers/edac/i7core_edac.c
1037 @@ -1842,11 +1842,9 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
1038 if (mce->bank != 8)
1039 return 0;
1040
1041 -#ifdef CONFIG_SMP
1042 /* Only handle if it is the right mc controller */
1043 if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
1044 return 0;
1045 -#endif
1046
1047 smp_rmb();
1048 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1049 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1050 index bcb1126..02a52d1 100644
1051 --- a/drivers/firmware/dmi_scan.c
1052 +++ b/drivers/firmware/dmi_scan.c
1053 @@ -6,6 +6,7 @@
1054 #include <linux/dmi.h>
1055 #include <linux/efi.h>
1056 #include <linux/bootmem.h>
1057 +#include <linux/random.h>
1058 #include <asm/dmi.h>
1059
1060 /*
1061 @@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
1062
1063 dmi_table(buf, dmi_len, dmi_num, decode, NULL);
1064
1065 + add_device_randomness(buf, dmi_len);
1066 +
1067 dmi_iounmap(buf, dmi_len);
1068 return 0;
1069 }
1070 diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
1071 index 51e0e2d..a330492 100644
1072 --- a/drivers/firmware/pcdp.c
1073 +++ b/drivers/firmware/pcdp.c
1074 @@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
1075 if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
1076 return -ENODEV;
1077
1078 - pcdp = ioremap(efi.hcdp, 4096);
1079 + pcdp = early_ioremap(efi.hcdp, 4096);
1080 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
1081
1082 if (strstr(cmdline, "console=hcdp")) {
1083 @@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
1084 }
1085
1086 out:
1087 - iounmap(pcdp);
1088 + early_iounmap(pcdp, 4096);
1089 return rc;
1090 }
1091 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
1092 index 252defd..87fd034 100644
1093 --- a/drivers/hwmon/coretemp.c
1094 +++ b/drivers/hwmon/coretemp.c
1095 @@ -47,16 +47,15 @@
1096 #define MAX_ATTRS 5 /* Maximum no of per-core attrs */
1097 #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
1098
1099 -#ifdef CONFIG_SMP
1100 #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
1101 #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
1102 +#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
1103 +
1104 +#ifdef CONFIG_SMP
1105 #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
1106 #else
1107 -#define TO_PHYS_ID(cpu) (cpu)
1108 -#define TO_CORE_ID(cpu) (cpu)
1109 #define for_each_sibling(i, cpu) for (i = 0; false; )
1110 #endif
1111 -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
1112
1113 /*
1114 * Per-Core Temperature Data
1115 diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
1116 index 08ba5ad..a28ebf0 100644
1117 --- a/drivers/input/tablet/wacom_wac.c
1118 +++ b/drivers/input/tablet/wacom_wac.c
1119 @@ -242,7 +242,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
1120 input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
1121 input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
1122 if (wacom->tool[0] != BTN_TOOL_MOUSE) {
1123 - input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
1124 + input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8));
1125 input_report_key(input, BTN_TOUCH, data[1] & 0x01);
1126 input_report_key(input, BTN_STYLUS, data[1] & 0x02);
1127 input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
1128 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
1129 index a20e1c4..ccd81b1 100644
1130 --- a/drivers/mfd/ab3100-core.c
1131 +++ b/drivers/mfd/ab3100-core.c
1132 @@ -408,8 +408,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
1133 u32 fatevent;
1134 int err;
1135
1136 - add_interrupt_randomness(irq);
1137 -
1138 err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
1139 event_regs, 3);
1140 if (err)
1141 @@ -938,9 +936,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
1142
1143 err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
1144 IRQF_ONESHOT, "ab3100-core", ab3100);
1145 - /* This real unpredictable IRQ is of course sampled for entropy */
1146 - rand_initialize_irq(client->irq);
1147 -
1148 if (err)
1149 goto exit_no_irq;
1150
1151 diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
1152 index 3d7dce6..d69dc4b 100644
1153 --- a/drivers/mfd/ab3550-core.c
1154 +++ b/drivers/mfd/ab3550-core.c
1155 @@ -1309,8 +1309,6 @@ static int __init ab3550_probe(struct i2c_client *client,
1156
1157 err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
1158 IRQF_ONESHOT, "ab3550-core", ab);
1159 - /* This real unpredictable IRQ is of course sampled for entropy */
1160 - rand_initialize_irq(client->irq);
1161
1162 if (err)
1163 goto exit_no_irq;
1164 diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
1165 index 43a76c4..db662e2 100644
1166 --- a/drivers/mfd/ezx-pcap.c
1167 +++ b/drivers/mfd/ezx-pcap.c
1168 @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
1169 }
1170 local_irq_enable();
1171 ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
1172 - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
1173 + } while (gpio_get_value(pdata->gpio));
1174 }
1175
1176 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
1177 diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
1178 index f742745..b90f3e0 100644
1179 --- a/drivers/mfd/wm831x-otp.c
1180 +++ b/drivers/mfd/wm831x-otp.c
1181 @@ -18,6 +18,7 @@
1182 #include <linux/bcd.h>
1183 #include <linux/delay.h>
1184 #include <linux/mfd/core.h>
1185 +#include <linux/random.h>
1186
1187 #include <linux/mfd/wm831x/core.h>
1188 #include <linux/mfd/wm831x/otp.h>
1189 @@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
1190
1191 int wm831x_otp_init(struct wm831x *wm831x)
1192 {
1193 + char uuid[WM831X_UNIQUE_ID_LEN];
1194 int ret;
1195
1196 ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
1197 @@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
1198 dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
1199 ret);
1200
1201 + ret = wm831x_unique_id_read(wm831x, uuid);
1202 + if (ret == 0)
1203 + add_device_randomness(uuid, sizeof(uuid));
1204 + else
1205 + dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
1206 +
1207 return ret;
1208 }
1209
1210 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
1211 index 5278e84..0d0ee55 100644
1212 --- a/drivers/net/e1000e/82571.c
1213 +++ b/drivers/net/e1000e/82571.c
1214 @@ -1602,10 +1602,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1215 * auto-negotiation in the TXCW register and disable
1216 * forced link in the Device Control register in an
1217 * attempt to auto-negotiate with our link partner.
1218 - * If the partner code word is null, stop forcing
1219 - * and restart auto negotiation.
1220 */
1221 - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1222 + if (rxcw & E1000_RXCW_C) {
1223 /* Enable autoneg, and unforce link up */
1224 ew32(TXCW, mac->txcw);
1225 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1226 diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
1227 index 9d35ec1..9e5fd45 100644
1228 --- a/drivers/net/wireless/rt2x00/rt61pci.c
1229 +++ b/drivers/net/wireless/rt2x00/rt61pci.c
1230 @@ -2254,8 +2254,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1231
1232 static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
1233 {
1234 - struct ieee80211_conf conf = { .flags = 0 };
1235 - struct rt2x00lib_conf libconf = { .conf = &conf };
1236 + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
1237
1238 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
1239 }
1240 diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
1241 index bdc909b..f3c2110 100644
1242 --- a/drivers/rtc/rtc-wm831x.c
1243 +++ b/drivers/rtc/rtc-wm831x.c
1244 @@ -24,7 +24,7 @@
1245 #include <linux/mfd/wm831x/core.h>
1246 #include <linux/delay.h>
1247 #include <linux/platform_device.h>
1248 -
1249 +#include <linux/random.h>
1250
1251 /*
1252 * R16416 (0x4020) - RTC Write Counter
1253 @@ -96,6 +96,26 @@ struct wm831x_rtc {
1254 unsigned int alarm_enabled:1;
1255 };
1256
1257 +static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
1258 +{
1259 + int ret;
1260 + u16 reg;
1261 +
1262 + /*
1263 + * The write counter contains a pseudo-random number which is
1264 + * regenerated every time we set the RTC so it should be a
1265 + * useful per-system source of entropy.
1266 + */
1267 + ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
1268 + if (ret >= 0) {
1269 + reg = ret;
1270 + add_device_randomness(&reg, sizeof(reg));
1271 + } else {
1272 + dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
1273 + ret);
1274 + }
1275 +}
1276 +
1277 /*
1278 * Read current time and date in RTC
1279 */
1280 @@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
1281 alm_irq, ret);
1282 }
1283
1284 + wm831x_rtc_add_randomness(wm831x);
1285 +
1286 return 0;
1287
1288 err:
1289 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1290 index 34bb059..3c0aa02 100644
1291 --- a/drivers/usb/core/hub.c
1292 +++ b/drivers/usb/core/hub.c
1293 @@ -24,6 +24,7 @@
1294 #include <linux/kthread.h>
1295 #include <linux/mutex.h>
1296 #include <linux/freezer.h>
1297 +#include <linux/random.h>
1298
1299 #include <asm/uaccess.h>
1300 #include <asm/byteorder.h>
1301 @@ -1902,6 +1903,14 @@ int usb_new_device(struct usb_device *udev)
1302 /* Tell the world! */
1303 announce_device(udev);
1304
1305 + if (udev->serial)
1306 + add_device_randomness(udev->serial, strlen(udev->serial));
1307 + if (udev->product)
1308 + add_device_randomness(udev->product, strlen(udev->product));
1309 + if (udev->manufacturer)
1310 + add_device_randomness(udev->manufacturer,
1311 + strlen(udev->manufacturer));
1312 +
1313 device_enable_async_suspend(&udev->dev);
1314 /* Register the device. The device driver is responsible
1315 * for configuring the device and invoking the add-device
1316 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
1317 index 3e65427..0d1c9bd 100644
1318 --- a/fs/nilfs2/ioctl.c
1319 +++ b/fs/nilfs2/ioctl.c
1320 @@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
1321 if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
1322 goto out;
1323
1324 - down_read(&inode->i_sb->s_umount);
1325 + mutex_lock(&nilfs->ns_snapshot_mount_mutex);
1326
1327 nilfs_transaction_begin(inode->i_sb, &ti, 0);
1328 ret = nilfs_cpfile_change_cpmode(
1329 @@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
1330 else
1331 nilfs_transaction_commit(inode->i_sb); /* never fails */
1332
1333 - up_read(&inode->i_sb->s_umount);
1334 + mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
1335 out:
1336 mnt_drop_write(filp->f_path.mnt);
1337 return ret;
1338 diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
1339 index 8351c44..97bfbdd 100644
1340 --- a/fs/nilfs2/super.c
1341 +++ b/fs/nilfs2/super.c
1342 @@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
1343 struct nilfs_root *root;
1344 int ret;
1345
1346 + mutex_lock(&nilfs->ns_snapshot_mount_mutex);
1347 +
1348 down_read(&nilfs->ns_segctor_sem);
1349 ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
1350 up_read(&nilfs->ns_segctor_sem);
1351 @@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
1352 ret = nilfs_get_root_dentry(s, root, root_dentry);
1353 nilfs_put_root(root);
1354 out:
1355 + mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
1356 return ret;
1357 }
1358
1359 diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
1360 index 35a8970..1c98f53 100644
1361 --- a/fs/nilfs2/the_nilfs.c
1362 +++ b/fs/nilfs2/the_nilfs.c
1363 @@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
1364 nilfs->ns_bdev = bdev;
1365 atomic_set(&nilfs->ns_ndirtyblks, 0);
1366 init_rwsem(&nilfs->ns_sem);
1367 + mutex_init(&nilfs->ns_snapshot_mount_mutex);
1368 INIT_LIST_HEAD(&nilfs->ns_dirty_files);
1369 INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
1370 spin_lock_init(&nilfs->ns_inode_lock);
1371 diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
1372 index 9992b11..de7435f 100644
1373 --- a/fs/nilfs2/the_nilfs.h
1374 +++ b/fs/nilfs2/the_nilfs.h
1375 @@ -47,6 +47,7 @@ enum {
1376 * @ns_flags: flags
1377 * @ns_bdev: block device
1378 * @ns_sem: semaphore for shared states
1379 + * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
1380 * @ns_sbh: buffer heads of on-disk super blocks
1381 * @ns_sbp: pointers to super block data
1382 * @ns_sbwtime: previous write time of super block
1383 @@ -99,6 +100,7 @@ struct the_nilfs {
1384
1385 struct block_device *ns_bdev;
1386 struct rw_semaphore ns_sem;
1387 + struct mutex ns_snapshot_mount_mutex;
1388
1389 /*
1390 * used for
1391 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
1392 index 2d921b3..d0a3100 100644
1393 --- a/include/linux/irqdesc.h
1394 +++ b/include/linux/irqdesc.h
1395 @@ -38,7 +38,6 @@ struct timer_rand_state;
1396 */
1397 struct irq_desc {
1398 struct irq_data irq_data;
1399 - struct timer_rand_state *timer_rand_state;
1400 unsigned int __percpu *kstat_irqs;
1401 irq_flow_handler_t handle_irq;
1402 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
1403 diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
1404 index 40c37216..32a1b5c 100644
1405 --- a/include/linux/mfd/ezx-pcap.h
1406 +++ b/include/linux/mfd/ezx-pcap.h
1407 @@ -16,6 +16,7 @@ struct pcap_subdev {
1408 struct pcap_platform_data {
1409 unsigned int irq_base;
1410 unsigned int config;
1411 + int gpio;
1412 void (*init) (void *); /* board specific init */
1413 int num_subdevs;
1414 struct pcap_subdev *subdevs;
1415 diff --git a/include/linux/random.h b/include/linux/random.h
1416 index d13059f..ac621ce 100644
1417 --- a/include/linux/random.h
1418 +++ b/include/linux/random.h
1419 @@ -48,13 +48,13 @@ struct rnd_state {
1420
1421 #ifdef __KERNEL__
1422
1423 -extern void rand_initialize_irq(int irq);
1424 -
1425 +extern void add_device_randomness(const void *, unsigned int);
1426 extern void add_input_randomness(unsigned int type, unsigned int code,
1427 unsigned int value);
1428 -extern void add_interrupt_randomness(int irq);
1429 +extern void add_interrupt_randomness(int irq, int irq_flags);
1430
1431 extern void get_random_bytes(void *buf, int nbytes);
1432 +extern void get_random_bytes_arch(void *buf, int nbytes);
1433 void generate_random_uuid(unsigned char uuid_out[16]);
1434
1435 #ifndef MODULE
1436 @@ -91,6 +91,19 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
1437 state->s3 = __seed(i, 15);
1438 }
1439
1440 +#ifdef CONFIG_ARCH_RANDOM
1441 +# include <asm/archrandom.h>
1442 +#else
1443 +static inline int arch_get_random_long(unsigned long *v)
1444 +{
1445 + return 0;
1446 +}
1447 +static inline int arch_get_random_int(unsigned int *v)
1448 +{
1449 + return 0;
1450 +}
1451 +#endif
1452 +
1453 #endif /* __KERNEL___ */
1454
1455 #endif /* _LINUX_RANDOM_H */
1456 diff --git a/include/trace/events/random.h b/include/trace/events/random.h
1457 new file mode 100644
1458 index 0000000..422df19
1459 --- /dev/null
1460 +++ b/include/trace/events/random.h
1461 @@ -0,0 +1,134 @@
1462 +#undef TRACE_SYSTEM
1463 +#define TRACE_SYSTEM random
1464 +
1465 +#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
1466 +#define _TRACE_RANDOM_H
1467 +
1468 +#include <linux/writeback.h>
1469 +#include <linux/tracepoint.h>
1470 +
1471 +DECLARE_EVENT_CLASS(random__mix_pool_bytes,
1472 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
1473 +
1474 + TP_ARGS(pool_name, bytes, IP),
1475 +
1476 + TP_STRUCT__entry(
1477 + __field( const char *, pool_name )
1478 + __field( int, bytes )
1479 + __field(unsigned long, IP )
1480 + ),
1481 +
1482 + TP_fast_assign(
1483 + __entry->pool_name = pool_name;
1484 + __entry->bytes = bytes;
1485 + __entry->IP = IP;
1486 + ),
1487 +
1488 + TP_printk("%s pool: bytes %d caller %pF",
1489 + __entry->pool_name, __entry->bytes, (void *)__entry->IP)
1490 +);
1491 +
1492 +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
1493 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
1494 +
1495 + TP_ARGS(pool_name, bytes, IP)
1496 +);
1497 +
1498 +DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
1499 + TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
1500 +
1501 + TP_ARGS(pool_name, bytes, IP)
1502 +);
1503 +
1504 +TRACE_EVENT(credit_entropy_bits,
1505 + TP_PROTO(const char *pool_name, int bits, int entropy_count,
1506 + int entropy_total, unsigned long IP),
1507 +
1508 + TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
1509 +
1510 + TP_STRUCT__entry(
1511 + __field( const char *, pool_name )
1512 + __field( int, bits )
1513 + __field( int, entropy_count )
1514 + __field( int, entropy_total )
1515 + __field(unsigned long, IP )
1516 + ),
1517 +
1518 + TP_fast_assign(
1519 + __entry->pool_name = pool_name;
1520 + __entry->bits = bits;
1521 + __entry->entropy_count = entropy_count;
1522 + __entry->entropy_total = entropy_total;
1523 + __entry->IP = IP;
1524 + ),
1525 +
1526 + TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
1527 + "caller %pF", __entry->pool_name, __entry->bits,
1528 + __entry->entropy_count, __entry->entropy_total,
1529 + (void *)__entry->IP)
1530 +);
1531 +
1532 +TRACE_EVENT(get_random_bytes,
1533 + TP_PROTO(int nbytes, unsigned long IP),
1534 +
1535 + TP_ARGS(nbytes, IP),
1536 +
1537 + TP_STRUCT__entry(
1538 + __field( int, nbytes )
1539 + __field(unsigned long, IP )
1540 + ),
1541 +
1542 + TP_fast_assign(
1543 + __entry->nbytes = nbytes;
1544 + __entry->IP = IP;
1545 + ),
1546 +
1547 + TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
1548 +);
1549 +
1550 +DECLARE_EVENT_CLASS(random__extract_entropy,
1551 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
1552 + unsigned long IP),
1553 +
1554 + TP_ARGS(pool_name, nbytes, entropy_count, IP),
1555 +
1556 + TP_STRUCT__entry(
1557 + __field( const char *, pool_name )
1558 + __field( int, nbytes )
1559 + __field( int, entropy_count )
1560 + __field(unsigned long, IP )
1561 + ),
1562 +
1563 + TP_fast_assign(
1564 + __entry->pool_name = pool_name;
1565 + __entry->nbytes = nbytes;
1566 + __entry->entropy_count = entropy_count;
1567 + __entry->IP = IP;
1568 + ),
1569 +
1570 + TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
1571 + __entry->pool_name, __entry->nbytes, __entry->entropy_count,
1572 + (void *)__entry->IP)
1573 +);
1574 +
1575 +
1576 +DEFINE_EVENT(random__extract_entropy, extract_entropy,
1577 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
1578 + unsigned long IP),
1579 +
1580 + TP_ARGS(pool_name, nbytes, entropy_count, IP)
1581 +);
1582 +
1583 +DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
1584 + TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
1585 + unsigned long IP),
1586 +
1587 + TP_ARGS(pool_name, nbytes, entropy_count, IP)
1588 +);
1589 +
1590 +
1591 +
1592 +#endif /* _TRACE_RANDOM_H */
1593 +
1594 +/* This part must be outside protection */
1595 +#include <trace/define_trace.h>
1596 diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
1597 index 470d08c..10e0772 100644
1598 --- a/kernel/irq/handle.c
1599 +++ b/kernel/irq/handle.c
1600 @@ -117,7 +117,7 @@ irqreturn_t
1601 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
1602 {
1603 irqreturn_t retval = IRQ_NONE;
1604 - unsigned int random = 0, irq = desc->irq_data.irq;
1605 + unsigned int flags = 0, irq = desc->irq_data.irq;
1606
1607 do {
1608 irqreturn_t res;
1609 @@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
1610
1611 /* Fall through to add to randomness */
1612 case IRQ_HANDLED:
1613 - random |= action->flags;
1614 + flags |= action->flags;
1615 break;
1616
1617 default:
1618 @@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
1619 action = action->next;
1620 } while (action);
1621
1622 - if (random & IRQF_SAMPLE_RANDOM)
1623 - add_interrupt_randomness(irq);
1624 + add_interrupt_randomness(irq, flags);
1625
1626 if (!noirqdebug)
1627 note_interrupt(irq, desc, retval);
1628 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
1629 index df8136f..fa4a70e 100644
1630 --- a/kernel/irq/manage.c
1631 +++ b/kernel/irq/manage.c
1632 @@ -886,22 +886,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1633
1634 if (desc->irq_data.chip == &no_irq_chip)
1635 return -ENOSYS;
1636 - /*
1637 - * Some drivers like serial.c use request_irq() heavily,
1638 - * so we have to be careful not to interfere with a
1639 - * running system.
1640 - */
1641 - if (new->flags & IRQF_SAMPLE_RANDOM) {
1642 - /*
1643 - * This function might sleep, we want to call it first,
1644 - * outside of the atomic block.
1645 - * Yes, this might clear the entropy pool if the wrong
1646 - * driver is attempted to be loaded, without actually
1647 - * installing a new handler, but is this really a problem,
1648 - * only the sysadmin is able to do this.
1649 - */
1650 - rand_initialize_irq(irq);
1651 - }
1652
1653 /*
1654 * Check whether the interrupt nests into another interrupt
1655 @@ -1325,7 +1309,6 @@ EXPORT_SYMBOL(free_irq);
1656 * Flags:
1657 *
1658 * IRQF_SHARED Interrupt is shared
1659 - * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
1660 * IRQF_TRIGGER_* Specify active edge(s) or level
1661 *
1662 */
1663 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1664 index ae60a53..037f077 100644
1665 --- a/mm/hugetlb.c
1666 +++ b/mm/hugetlb.c
1667 @@ -2301,6 +2301,22 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1668 {
1669 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
1670 __unmap_hugepage_range(vma, start, end, ref_page);
1671 + /*
1672 + * Clear this flag so that x86's huge_pmd_share page_table_shareable
1673 + * test will fail on a vma being torn down, and not grab a page table
1674 + * on its way out. We're lucky that the flag has such an appropriate
1675 + * name, and can in fact be safely cleared here. We could clear it
1676 + * before the __unmap_hugepage_range above, but all that's necessary
1677 + * is to clear it before releasing the i_mmap_mutex below.
1678 + *
1679 + * This works because in the contexts this is called, the VMA is
1680 + * going to be destroyed. It is not vulnerable to madvise(DONTNEED)
1681 + * because madvise is not supported on hugetlbfs. The same applies
1682 + * for direct IO. unmap_hugepage_range() is only being called just
1683 + * before free_pgtables() so clearing VM_MAYSHARE will not cause
1684 + * surprises later.
1685 + */
1686 + vma->vm_flags &= ~VM_MAYSHARE;
1687 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
1688 }
1689
1690 @@ -2853,9 +2869,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
1691 }
1692 }
1693 spin_unlock(&mm->page_table_lock);
1694 - mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
1695 -
1696 + /*
1697 + * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
1698 + * may have cleared our pud entry and done put_page on the page table:
1699 + * once we release i_mmap_mutex, another task can do the final put_page
1700 + * and that page table be reused and filled with junk.
1701 + */
1702 flush_tlb_range(vma, start, end);
1703 + mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
1704 }
1705
1706 int hugetlb_reserve_pages(struct inode *inode,
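
The hugetlb_change_protection() hunk is purely an ordering fix: the TLB flush must happen while i_mmap_mutex is still held, because once the mutex drops, huge_pmd_unshare's final put_page can free the page table for reuse. A skeletal pthread analogue of the fixed ordering (the lock and the flush are stand-ins, not kernel primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_mmap_mutex = PTHREAD_MUTEX_INITIALIZER;

static void flush_tlb_range_stub(void)
{
        puts("stale translations flushed");
}

static void change_protection_fixed(void)
{
        pthread_mutex_lock(&i_mmap_mutex);
        /* ... clear/modify page table entries ... */
        flush_tlb_range_stub();              /* flush before anyone can
                                                recycle the page table */
        pthread_mutex_unlock(&i_mmap_mutex); /* only now may another task
                                                do the final put_page */
}

int main(void)
{
        change_protection_fixed();
        return 0;
}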
1707 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
1708 index 6496748..2f49dcf 100644
1709 --- a/mm/memory-failure.c
1710 +++ b/mm/memory-failure.c
1711 @@ -1334,8 +1334,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
1712 /* Keep page count to indicate a given hugepage is isolated. */
1713
1714 list_add(&hpage->lru, &pagelist);
1715 - ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
1716 - true);
1717 + ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
1718 + MIGRATE_SYNC);
1719 if (ret) {
1720 struct page *page1, *page2;
1721 list_for_each_entry_safe(page1, page2, &pagelist, lru)
1722 @@ -1464,7 +1464,7 @@ int soft_offline_page(struct page *page, int flags)
1723 page_is_file_cache(page));
1724 list_add(&page->lru, &pagelist);
1725 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
1726 - 0, MIGRATE_SYNC);
1727 + false, MIGRATE_SYNC);
1728 if (ret) {
1729 putback_lru_pages(&pagelist);
1730 pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
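
The first memory-failure hunk is more than style: the old call put a bool into the enum migrate_mode slot, so `true` selected mode 1 rather than MIGRATE_SYNC. A self-contained illustration of the mixup (the enum values follow 3.x-era include/linux/migrate_mode.h; treat them as an assumption here):

#include <stdio.h>
#include <stdbool.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

/* Trimmed-down stand-in for the migrate_huge_pages() tail arguments. */
static void migrate(bool offlining, enum migrate_mode mode)
{
        printf("offlining=%d mode=%d (MIGRATE_SYNC is %d)\n",
               offlining, mode, MIGRATE_SYNC);
}

int main(void)
{
        migrate(0, true);             /* old call: "true" lands in the mode
                                         slot and means SYNC_LIGHT, not SYNC */
        migrate(false, MIGRATE_SYNC); /* fixed call */
        return 0;
}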
1731 diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
1732 index 8d032de..71c7811 100644
1733 --- a/mm/mmu_notifier.c
1734 +++ b/mm/mmu_notifier.c
1735 @@ -33,6 +33,24 @@
1736 void __mmu_notifier_release(struct mm_struct *mm)
1737 {
1738 struct mmu_notifier *mn;
1739 + struct hlist_node *n;
1740 +
1741 + /*
1742 + * RCU here will block mmu_notifier_unregister until
1743 + * ->release returns.
1744 + */
1745 + rcu_read_lock();
1746 + hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
1747 + /*
1748 + * if ->release runs before mmu_notifier_unregister it
1749 + * must be handled as it's the only way for the driver
1750 + * to flush all existing sptes and stop the driver
1751 + * from establishing any more sptes before all the
1752 + * pages in the mm are freed.
1753 + */
1754 + if (mn->ops->release)
1755 + mn->ops->release(mn, mm);
1756 + rcu_read_unlock();
1757
1758 spin_lock(&mm->mmu_notifier_mm->lock);
1759 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
1760 @@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
1761 * mmu_notifier_unregister to return.
1762 */
1763 hlist_del_init_rcu(&mn->hlist);
1764 - /*
1765 - * RCU here will block mmu_notifier_unregister until
1766 - * ->release returns.
1767 - */
1768 - rcu_read_lock();
1769 - spin_unlock(&mm->mmu_notifier_mm->lock);
1770 - /*
1771 - * if ->release runs before mmu_notifier_unregister it
1772 - * must be handled as it's the only way for the driver
1773 - * to flush all existing sptes and stop the driver
1774 - * from establishing any more sptes before all the
1775 - * pages in the mm are freed.
1776 - */
1777 - if (mn->ops->release)
1778 - mn->ops->release(mn, mm);
1779 - rcu_read_unlock();
1780 - spin_lock(&mm->mmu_notifier_mm->lock);
1781 }
1782 spin_unlock(&mm->mmu_notifier_mm->lock);
1783
1784 @@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
1785 {
1786 BUG_ON(atomic_read(&mm->mm_count) <= 0);
1787
1788 - spin_lock(&mm->mmu_notifier_mm->lock);
1789 if (!hlist_unhashed(&mn->hlist)) {
1790 - hlist_del_rcu(&mn->hlist);
1791 -
1792 /*
1793 * RCU here will force exit_mmap to wait ->release to finish
1794 * before freeing the pages.
1795 */
1796 rcu_read_lock();
1797 - spin_unlock(&mm->mmu_notifier_mm->lock);
1798 +
1799 /*
1800 * exit_mmap will block in mmu_notifier_release to
1801 * guarantee ->release is called before freeing the
1802 @@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
1803 if (mn->ops->release)
1804 mn->ops->release(mn, mm);
1805 rcu_read_unlock();
1806 - } else
1807 +
1808 + spin_lock(&mm->mmu_notifier_mm->lock);
1809 + hlist_del_rcu(&mn->hlist);
1810 spin_unlock(&mm->mmu_notifier_mm->lock);
1811 + }
1812
1813 /*
1814 * Wait any running method to finish, of course including
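
The reordering in __mmu_notifier_release()/mmu_notifier_unregister() leans on one invariant: ->release runs entirely inside an RCU read-side section, so the teardown path cannot complete while any callback is still executing. A crude pthread sketch of that blocking relationship, with a pthread_rwlock_t standing in for RCU (an analogue only; kernel RCU is not implemented this way). Build with -pthread:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t rcu_analogue = PTHREAD_RWLOCK_INITIALIZER;

static void *release_path(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&rcu_analogue);   /* ~ rcu_read_lock() */
        puts("->release running");
        sleep(1);                               /* driver flushes its sptes */
        puts("->release done");
        pthread_rwlock_unlock(&rcu_analogue);   /* ~ rcu_read_unlock() */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, release_path, NULL);
        usleep(100 * 1000);                     /* let the reader get in first */
        pthread_rwlock_wrlock(&rcu_analogue);   /* ~ waiting for readers, as
                                                   synchronize_rcu() would */
        puts("unregister may now free the notifier");
        pthread_rwlock_unlock(&rcu_analogue);
        pthread_join(t, NULL);
        return 0;
}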
1815 diff --git a/net/core/dev.c b/net/core/dev.c
1816 index a71eafc..8235b81 100644
1817 --- a/net/core/dev.c
1818 +++ b/net/core/dev.c
1819 @@ -1163,6 +1163,7 @@ static int __dev_open(struct net_device *dev)
1820 net_dmaengine_get();
1821 dev_set_rx_mode(dev);
1822 dev_activate(dev);
1823 + add_device_randomness(dev->dev_addr, dev->addr_len);
1824 }
1825
1826 return ret;
1827 @@ -4730,6 +4731,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
1828 err = ops->ndo_set_mac_address(dev, sa);
1829 if (!err)
1830 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1831 + add_device_randomness(dev->dev_addr, dev->addr_len);
1832 return err;
1833 }
1834 EXPORT_SYMBOL(dev_set_mac_address);
1835 @@ -5507,6 +5509,7 @@ int register_netdevice(struct net_device *dev)
1836 dev_init_scheduler(dev);
1837 dev_hold(dev);
1838 list_netdevice(dev);
1839 + add_device_randomness(dev->dev_addr, dev->addr_len);
1840
1841 /* Notify protocols, that a new device appeared. */
1842 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
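
These net/core/dev.c hunks feed device-identifying bytes (the MAC address) into the entropy pool at open, address change, and registration, which helps seed early boot on machines with few other entropy sources. A user-space analogue of the call pattern, where the pool mixing is a toy FNV-1a fold rather than the kernel's real mixer:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint64_t pool = 0xcbf29ce484222325ULL;      /* FNV offset basis */

/* Toy analogue of add_device_randomness(): fold arbitrary device
 * bytes into the pool state without crediting any entropy. */
static void add_device_randomness_stub(const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len--) {
                pool ^= *p++;
                pool *= 0x100000001b3ULL;           /* FNV prime */
        }
}

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x1b, 0x44, 0x11, 0x3a, 0xb7 };

        add_device_randomness_stub(mac, sizeof(mac));  /* as in __dev_open */
        mac[5] ^= 0x01;                                /* address changed */
        add_device_randomness_stub(mac, sizeof(mac));  /* as in dev_set_mac_address */
        printf("pool state: %016llx\n", (unsigned long long)pool);
        return 0;
}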
1843 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1844 index 861d53f..ac49ad5 100644
1845 --- a/net/core/rtnetlink.c
1846 +++ b/net/core/rtnetlink.c
1847 @@ -1304,6 +1304,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1848 goto errout;
1849 send_addr_notify = 1;
1850 modified = 1;
1851 + add_device_randomness(dev->dev_addr, dev->addr_len);
1852 }
1853
1854 if (tb[IFLA_MTU]) {
1855 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
1856 index 29e9980..370aa94 100644
1857 --- a/net/mac80211/mesh.c
1858 +++ b/net/mac80211/mesh.c
1859 @@ -490,6 +490,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
1860
1861 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
1862 del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
1863 + del_timer_sync(&sdata->u.mesh.mesh_path_timer);
1864 /*
1865 * If the timer fired while we waited for it, it will have
1866 * requeued the work. Now the work will be running again
1867 diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
1868 index e45d2fb..bf0a7f6 100644
1869 --- a/net/sunrpc/rpcb_clnt.c
1870 +++ b/net/sunrpc/rpcb_clnt.c
1871 @@ -193,7 +193,7 @@ static int rpcb_create_local_unix(void)
1872 if (IS_ERR(clnt)) {
1873 dprintk("RPC: failed to create AF_LOCAL rpcbind "
1874 "client (errno %ld).\n", PTR_ERR(clnt));
1875 - result = -PTR_ERR(clnt);
1876 + result = PTR_ERR(clnt);
1877 goto out;
1878 }
1879
1880 @@ -242,7 +242,7 @@ static int rpcb_create_local_net(void)
1881 if (IS_ERR(clnt)) {
1882 dprintk("RPC: failed to create local rpcbind "
1883 "client (errno %ld).\n", PTR_ERR(clnt));
1884 - result = -PTR_ERR(clnt);
1885 + result = PTR_ERR(clnt);
1886 goto out;
1887 }
1888
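
Both sunrpc hunks fix the same sign bug: PTR_ERR() on an error pointer already yields a negative errno, so negating it handed callers a positive value. A stand-alone model of the ERR_PTR/PTR_ERR convention showing the flip (macros condensed from include/linux/err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
        void *clnt = ERR_PTR(-ENOMEM);      /* rpc_create() failure path */

        if (IS_ERR(clnt)) {
                long wrong = -PTR_ERR(clnt);    /* +12: sign flipped */
                long right = PTR_ERR(clnt);     /* -12: proper -ENOMEM */
                printf("old: %ld, fixed: %ld\n", wrong, right);
        }
        return 0;
}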
1889 diff --git a/net/wireless/core.c b/net/wireless/core.c
1890 index 880dbe2..498c760 100644
1891 --- a/net/wireless/core.c
1892 +++ b/net/wireless/core.c
1893 @@ -959,6 +959,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
1894 */
1895 synchronize_rcu();
1896 INIT_LIST_HEAD(&wdev->list);
1897 + /*
1898 + * Ensure that all events have been processed and
1899 + * freed.
1900 + */
1901 + cfg80211_process_wdev_events(wdev);
1902 break;
1903 case NETDEV_PRE_UP:
1904 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
1905 diff --git a/net/wireless/core.h b/net/wireless/core.h
1906 index a570ff9..8351645 100644
1907 --- a/net/wireless/core.h
1908 +++ b/net/wireless/core.h
1909 @@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
1910 struct net_device *dev, enum nl80211_iftype ntype,
1911 u32 *flags, struct vif_params *params);
1912 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
1913 +void cfg80211_process_wdev_events(struct wireless_dev *wdev);
1914
1915 int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
1916 struct wireless_dev *wdev,
1917 diff --git a/net/wireless/util.c b/net/wireless/util.c
1918 index bbcb58e..18e22be 100644
1919 --- a/net/wireless/util.c
1920 +++ b/net/wireless/util.c
1921 @@ -719,7 +719,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
1922 wdev->connect_keys = NULL;
1923 }
1924
1925 -static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
1926 +void cfg80211_process_wdev_events(struct wireless_dev *wdev)
1927 {
1928 struct cfg80211_event *ev;
1929 unsigned long flags;
1930 @@ -975,6 +975,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
1931 }
1932 mutex_unlock(&rdev->devlist_mtx);
1933
1934 + if (total == 1)
1935 + return 0;
1936 +
1937 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
1938 const struct ieee80211_iface_combination *c;
1939 struct ieee80211_iface_limit *limits;
