/[linux-patches]/genpatches-2.6/tags/2.6.32-15/1010_linux-2.6.32.11.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.32-15/1010_linux-2.6.32.11.patch

Parent Directory | Revision Log


Revision 1735 - (show annotations) (download)
Wed Aug 4 11:25:09 2010 UTC (4 years, 1 month ago) by mpagano
File size: 156521 byte(s)
2.6.32-15 release
1 diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
2 index 3015da0..fe09a2c 100644
3 --- a/Documentation/filesystems/tmpfs.txt
4 +++ b/Documentation/filesystems/tmpfs.txt
5 @@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
6 all files in that instance (if CONFIG_NUMA is enabled) - which can be
7 adjusted on the fly via 'mount -o remount ...'
8
9 -mpol=default prefers to allocate memory from the local node
10 +mpol=default use the process allocation policy
11 + (see set_mempolicy(2))
12 mpol=prefer:Node prefers to allocate memory from the given Node
13 mpol=bind:NodeList allocates memory only from nodes in NodeList
14 mpol=interleave prefers to allocate from each node in turn
15 mpol=interleave:NodeList allocates from each node of NodeList in turn
16 +mpol=local prefers to allocate memory from the local node
17
18 NodeList format is a comma-separated list of decimal numbers and ranges,
19 a range being two hyphen-separated decimal numbers, the smallest and
20 @@ -134,3 +136,5 @@ Author:
21 Christoph Rohland <cr@sap.com>, 1.12.01
22 Updated:
23 Hugh Dickins, 4 June 2007
24 +Updated:
25 + KOSAKI Motohiro, 16 Mar 2010
26 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
27 index fa6fbf4..3cb8fa3 100644
28 --- a/arch/arm/boot/compressed/head.S
29 +++ b/arch/arm/boot/compressed/head.S
30 @@ -162,8 +162,8 @@ not_angel:
31
32 .text
33 adr r0, LC0
34 - ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp} )
35 - THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, ip} )
36 + ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
37 + THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
38 THUMB( ldr sp, [r0, #28] )
39 subs r0, r0, r1 @ calculate the delta offset
40
41 @@ -174,12 +174,13 @@ not_angel:
42 /*
43 * We're running at a different address. We need to fix
44 * up various pointers:
45 - * r5 - zImage base address
46 - * r6 - GOT start
47 + * r5 - zImage base address (_start)
48 + * r6 - size of decompressed image
49 + * r11 - GOT start
50 * ip - GOT end
51 */
52 add r5, r5, r0
53 - add r6, r6, r0
54 + add r11, r11, r0
55 add ip, ip, r0
56
57 #ifndef CONFIG_ZBOOT_ROM
58 @@ -197,10 +198,10 @@ not_angel:
59 /*
60 * Relocate all entries in the GOT table.
61 */
62 -1: ldr r1, [r6, #0] @ relocate entries in the GOT
63 +1: ldr r1, [r11, #0] @ relocate entries in the GOT
64 add r1, r1, r0 @ table. This fixes up the
65 - str r1, [r6], #4 @ C references.
66 - cmp r6, ip
67 + str r1, [r11], #4 @ C references.
68 + cmp r11, ip
69 blo 1b
70 #else
71
72 @@ -208,12 +209,12 @@ not_angel:
73 * Relocate entries in the GOT table. We only relocate
74 * the entries that are outside the (relocated) BSS region.
75 */
76 -1: ldr r1, [r6, #0] @ relocate entries in the GOT
77 +1: ldr r1, [r11, #0] @ relocate entries in the GOT
78 cmp r1, r2 @ entry < bss_start ||
79 cmphs r3, r1 @ _end < entry
80 addlo r1, r1, r0 @ table. This fixes up the
81 - str r1, [r6], #4 @ C references.
82 - cmp r6, ip
83 + str r1, [r11], #4 @ C references.
84 + cmp r11, ip
85 blo 1b
86 #endif
87
88 @@ -239,6 +240,7 @@ not_relocated: mov r0, #0
89 * Check to see if we will overwrite ourselves.
90 * r4 = final kernel address
91 * r5 = start of this image
92 + * r6 = size of decompressed image
93 * r2 = end of malloc space (and therefore this image)
94 * We basically want:
95 * r4 >= r2 -> OK
96 @@ -246,8 +248,7 @@ not_relocated: mov r0, #0
97 */
98 cmp r4, r2
99 bhs wont_overwrite
100 - sub r3, sp, r5 @ > compressed kernel size
101 - add r0, r4, r3, lsl #2 @ allow for 4x expansion
102 + add r0, r4, r6
103 cmp r0, r5
104 bls wont_overwrite
105
106 @@ -263,7 +264,6 @@ not_relocated: mov r0, #0
107 * r1-r3 = unused
108 * r4 = kernel execution address
109 * r5 = decompressed kernel start
110 - * r6 = processor ID
111 * r7 = architecture ID
112 * r8 = atags pointer
113 * r9-r12,r14 = corrupted
114 @@ -304,7 +304,8 @@ LC0: .word LC0 @ r1
115 .word _end @ r3
116 .word zreladdr @ r4
117 .word _start @ r5
118 - .word _got_start @ r6
119 + .word _image_size @ r6
120 + .word _got_start @ r11
121 .word _got_end @ ip
122 .word user_stack+4096 @ sp
123 LC1: .word reloc_end - reloc_start
124 @@ -328,7 +329,6 @@ params: ldr r0, =params_phys
125 *
126 * On entry,
127 * r4 = kernel execution address
128 - * r6 = processor ID
129 * r7 = architecture number
130 * r8 = atags pointer
131 * r9 = run-time address of "start" (???)
132 @@ -534,7 +534,6 @@ __common_mmu_cache_on:
133 * r1-r3 = unused
134 * r4 = kernel execution address
135 * r5 = decompressed kernel start
136 - * r6 = processor ID
137 * r7 = architecture ID
138 * r8 = atags pointer
139 * r9-r12,r14 = corrupted
140 @@ -573,19 +572,19 @@ call_kernel: bl cache_clean_flush
141 * r1 = corrupted
142 * r2 = corrupted
143 * r3 = block offset
144 - * r6 = corrupted
145 + * r9 = corrupted
146 * r12 = corrupted
147 */
148
149 call_cache_fn: adr r12, proc_types
150 #ifdef CONFIG_CPU_CP15
151 - mrc p15, 0, r6, c0, c0 @ get processor ID
152 + mrc p15, 0, r9, c0, c0 @ get processor ID
153 #else
154 - ldr r6, =CONFIG_PROCESSOR_ID
155 + ldr r9, =CONFIG_PROCESSOR_ID
156 #endif
157 1: ldr r1, [r12, #0] @ get value
158 ldr r2, [r12, #4] @ get mask
159 - eor r1, r1, r6 @ (real ^ match)
160 + eor r1, r1, r9 @ (real ^ match)
161 tst r1, r2 @ & mask
162 ARM( addeq pc, r12, r3 ) @ call cache function
163 THUMB( addeq r12, r3 )
164 @@ -764,8 +763,7 @@ proc_types:
165 * Turn off the Cache and MMU. ARMv3 does not support
166 * reading the control register, but ARMv4 does.
167 *
168 - * On entry, r6 = processor ID
169 - * On exit, r0, r1, r2, r3, r12 corrupted
170 + * On exit, r0, r1, r2, r3, r9, r12 corrupted
171 * This routine must preserve: r4, r6, r7
172 */
173 .align 5
174 @@ -838,10 +836,8 @@ __armv3_mmu_cache_off:
175 /*
176 * Clean and flush the cache to maintain consistency.
177 *
178 - * On entry,
179 - * r6 = processor ID
180 * On exit,
181 - * r1, r2, r3, r11, r12 corrupted
182 + * r1, r2, r3, r9, r11, r12 corrupted
183 * This routine must preserve:
184 * r0, r4, r5, r6, r7
185 */
186 @@ -953,7 +949,7 @@ __armv4_mmu_cache_flush:
187 mov r2, #64*1024 @ default: 32K dcache size (*2)
188 mov r11, #32 @ default: 32 byte line size
189 mrc p15, 0, r3, c0, c0, 1 @ read cache type
190 - teq r3, r6 @ cache ID register present?
191 + teq r3, r9 @ cache ID register present?
192 beq no_cache_id
193 mov r1, r3, lsr #18
194 and r1, r1, #7
195 diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
196 index a5924b9..cbed030 100644
197 --- a/arch/arm/boot/compressed/vmlinux.lds.in
198 +++ b/arch/arm/boot/compressed/vmlinux.lds.in
199 @@ -36,6 +36,9 @@ SECTIONS
200
201 _etext = .;
202
203 + /* Assume size of decompressed image is 4x the compressed image */
204 + _image_size = (_etext - _text) * 4;
205 +
206 _got_start = .;
207 .got : { *(.got) }
208 _got_end = .;
209 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
210 index bb1719a..5adba4f 100644
211 --- a/arch/mips/mm/tlbex.c
212 +++ b/arch/mips/mm/tlbex.c
213 @@ -73,9 +73,6 @@ static int __cpuinit m4kc_tlbp_war(void)
214 enum label_id {
215 label_second_part = 1,
216 label_leave,
217 -#ifdef MODULE_START
218 - label_module_alloc,
219 -#endif
220 label_vmalloc,
221 label_vmalloc_done,
222 label_tlbw_hazard,
223 @@ -92,9 +89,6 @@ enum label_id {
224
225 UASM_L_LA(_second_part)
226 UASM_L_LA(_leave)
227 -#ifdef MODULE_START
228 -UASM_L_LA(_module_alloc)
229 -#endif
230 UASM_L_LA(_vmalloc)
231 UASM_L_LA(_vmalloc_done)
232 UASM_L_LA(_tlbw_hazard)
233 @@ -802,8 +796,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
234 } else {
235 #if defined(CONFIG_HUGETLB_PAGE)
236 const enum label_id ls = label_tlb_huge_update;
237 -#elif defined(MODULE_START)
238 - const enum label_id ls = label_module_alloc;
239 #else
240 const enum label_id ls = label_vmalloc;
241 #endif
242 diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
243 index fd56a71..974ba71 100644
244 --- a/arch/sh/boot/compressed/misc.c
245 +++ b/arch/sh/boot/compressed/misc.c
246 @@ -132,7 +132,7 @@ void decompress_kernel(void)
247 output_addr = (CONFIG_MEMORY_START + 0x2000);
248 #else
249 output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
250 -#ifdef CONFIG_29BIT
251 +#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
252 output_addr |= P2SEG;
253 #endif
254 #endif
255 diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
256 index 4b7c937..815cab6 100644
257 --- a/arch/sparc/prom/p1275.c
258 +++ b/arch/sparc/prom/p1275.c
259 @@ -32,8 +32,7 @@ extern void prom_cif_interface(void);
260 extern void prom_cif_callback(void);
261
262 /*
263 - * This provides SMP safety on the p1275buf. prom_callback() drops this lock
264 - * to allow recursuve acquisition.
265 + * This provides SMP safety on the p1275buf.
266 */
267 DEFINE_SPINLOCK(prom_entry_lock);
268
269 @@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long fmt, ...)
270
271 p = p1275buf.prom_buffer;
272
273 - spin_lock_irqsave(&prom_entry_lock, flags);
274 + raw_local_save_flags(flags);
275 + raw_local_irq_restore(PIL_NMI);
276 + spin_lock(&prom_entry_lock);
277
278 p1275buf.prom_args[0] = (unsigned long)p; /* service */
279 strcpy (p, service);
280 @@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long fmt, ...)
281 va_end(list);
282 x = p1275buf.prom_args [nargs + 3];
283
284 - spin_unlock_irqrestore(&prom_entry_lock, flags);
285 + spin_unlock(&prom_entry_lock);
286 + raw_local_irq_restore(flags);
287
288 return x;
289 }
290 diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
291 index 14f9890..c22a164 100644
292 --- a/arch/x86/include/asm/fixmap.h
293 +++ b/arch/x86/include/asm/fixmap.h
294 @@ -82,6 +82,9 @@ enum fixed_addresses {
295 #endif
296 FIX_DBGP_BASE,
297 FIX_EARLYCON_MEM_BASE,
298 +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
299 + FIX_OHCI1394_BASE,
300 +#endif
301 #ifdef CONFIG_X86_LOCAL_APIC
302 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
303 #endif
304 @@ -126,9 +129,6 @@ enum fixed_addresses {
305 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
306 (__end_of_permanent_fixed_addresses & 255),
307 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
308 -#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
309 - FIX_OHCI1394_BASE,
310 -#endif
311 #ifdef CONFIG_X86_32
312 FIX_WP_TEST,
313 #endif
314 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
315 index 4ffe09b..8cb8489 100644
316 --- a/arch/x86/include/asm/msr-index.h
317 +++ b/arch/x86/include/asm/msr-index.h
318 @@ -104,6 +104,8 @@
319 #define MSR_AMD64_PATCH_LEVEL 0x0000008b
320 #define MSR_AMD64_NB_CFG 0xc001001f
321 #define MSR_AMD64_PATCH_LOADER 0xc0010020
322 +#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
323 +#define MSR_AMD64_OSVW_STATUS 0xc0010141
324 #define MSR_AMD64_IBSFETCHCTL 0xc0011030
325 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
326 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
327 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
328 index 195e4b7..23c2da8 100644
329 --- a/arch/x86/kernel/acpi/boot.c
330 +++ b/arch/x86/kernel/acpi/boot.c
331 @@ -1191,9 +1191,6 @@ static void __init acpi_process_madt(void)
332 if (!error) {
333 acpi_lapic = 1;
334
335 -#ifdef CONFIG_X86_BIGSMP
336 - generic_bigsmp_probe();
337 -#endif
338 /*
339 * Parse MADT IO-APIC entries
340 */
341 @@ -1203,8 +1200,6 @@ static void __init acpi_process_madt(void)
342 acpi_ioapic = 1;
343
344 smp_found_config = 1;
345 - if (apic->setup_apic_routing)
346 - apic->setup_apic_routing();
347 }
348 }
349 if (error == -EINVAL) {
350 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
351 index c86dbcf..0e69e17 100644
352 --- a/arch/x86/kernel/apic/apic.c
353 +++ b/arch/x86/kernel/apic/apic.c
354 @@ -1665,9 +1665,7 @@ int __init APIC_init_uniprocessor(void)
355 #endif
356
357 enable_IR_x2apic();
358 -#ifdef CONFIG_X86_64
359 default_setup_apic_routing();
360 -#endif
361
362 verify_local_APIC();
363 connect_bsp_APIC();
364 @@ -1915,18 +1913,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
365 if (apicid > max_physical_apicid)
366 max_physical_apicid = apicid;
367
368 -#ifdef CONFIG_X86_32
369 - switch (boot_cpu_data.x86_vendor) {
370 - case X86_VENDOR_INTEL:
371 - if (num_processors > 8)
372 - def_to_bigsmp = 1;
373 - break;
374 - case X86_VENDOR_AMD:
375 - if (max_physical_apicid >= 8)
376 - def_to_bigsmp = 1;
377 - }
378 -#endif
379 -
380 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
381 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
382 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
383 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
384 index 0c0182c..88b9d22 100644
385 --- a/arch/x86/kernel/apic/probe_32.c
386 +++ b/arch/x86/kernel/apic/probe_32.c
387 @@ -54,6 +54,31 @@ late_initcall(print_ipi_mode);
388
389 void default_setup_apic_routing(void)
390 {
391 + int version = apic_version[boot_cpu_physical_apicid];
392 +
393 + if (num_possible_cpus() > 8) {
394 + switch (boot_cpu_data.x86_vendor) {
395 + case X86_VENDOR_INTEL:
396 + if (!APIC_XAPIC(version)) {
397 + def_to_bigsmp = 0;
398 + break;
399 + }
400 + /* If P4 and above fall through */
401 + case X86_VENDOR_AMD:
402 + def_to_bigsmp = 1;
403 + }
404 + }
405 +
406 +#ifdef CONFIG_X86_BIGSMP
407 + generic_bigsmp_probe();
408 +#endif
409 +
410 + if (apic->setup_apic_routing)
411 + apic->setup_apic_routing();
412 +}
413 +
414 +void setup_apic_flat_routing(void)
415 +{
416 #ifdef CONFIG_X86_IO_APIC
417 printk(KERN_INFO
418 "Enabling APIC mode: Flat. Using %d I/O APICs\n",
419 @@ -103,7 +128,7 @@ struct apic apic_default = {
420 .init_apic_ldr = default_init_apic_ldr,
421
422 .ioapic_phys_id_map = default_ioapic_phys_id_map,
423 - .setup_apic_routing = default_setup_apic_routing,
424 + .setup_apic_routing = setup_apic_flat_routing,
425 .multi_timer_check = NULL,
426 .apicid_to_node = default_apicid_to_node,
427 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
428 diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
429 index c4cbd30..4c56f54 100644
430 --- a/arch/x86/kernel/apic/probe_64.c
431 +++ b/arch/x86/kernel/apic/probe_64.c
432 @@ -67,17 +67,8 @@ void __init default_setup_apic_routing(void)
433 }
434 #endif
435
436 - if (apic == &apic_flat) {
437 - switch (boot_cpu_data.x86_vendor) {
438 - case X86_VENDOR_INTEL:
439 - if (num_processors > 8)
440 - apic = &apic_physflat;
441 - break;
442 - case X86_VENDOR_AMD:
443 - if (max_physical_apicid >= 8)
444 - apic = &apic_physflat;
445 - }
446 - }
447 + if (apic == &apic_flat && num_possible_cpus() > 8)
448 + apic = &apic_physflat;
449
450 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
451
452 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
453 index a2a03cf..2a94890 100644
454 --- a/arch/x86/kernel/cpu/intel.c
455 +++ b/arch/x86/kernel/cpu/intel.c
456 @@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
457 if (c->x86_power & (1 << 8)) {
458 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
459 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
460 - sched_clock_stable = 1;
461 + if (!check_tsc_unstable())
462 + sched_clock_stable = 1;
463 }
464
465 /*
466 diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
467 index 5be95ef..e07bc4e 100644
468 --- a/arch/x86/kernel/mpparse.c
469 +++ b/arch/x86/kernel/mpparse.c
470 @@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
471 x86_init.mpparse.mpc_record(1);
472 }
473
474 -#ifdef CONFIG_X86_BIGSMP
475 - generic_bigsmp_probe();
476 -#endif
477 -
478 - if (apic->setup_apic_routing)
479 - apic->setup_apic_routing();
480 -
481 if (!num_processors)
482 printk(KERN_ERR "MPTABLE: no processors registered!\n");
483 return num_processors;
484 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
485 index f010ab4..d0ba107 100644
486 --- a/arch/x86/kernel/process.c
487 +++ b/arch/x86/kernel/process.c
488 @@ -439,21 +439,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
489 }
490
491 /*
492 - * Check for AMD CPUs, which have potentially C1E support
493 + * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
494 + * For more information see
495 + * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
496 + * - Erratum #365 for family 0x11 (not affected because C1e not in use)
497 */
498 static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
499 {
500 + u64 val;
501 if (c->x86_vendor != X86_VENDOR_AMD)
502 - return 0;
503 -
504 - if (c->x86 < 0x0F)
505 - return 0;
506 + goto no_c1e_idle;
507
508 /* Family 0x0f models < rev F do not have C1E */
509 - if (c->x86 == 0x0f && c->x86_model < 0x40)
510 - return 0;
511 + if (c->x86 == 0x0F && c->x86_model >= 0x40)
512 + return 1;
513
514 - return 1;
515 + if (c->x86 == 0x10) {
516 + /*
517 + * check OSVW bit for CPUs that are not affected
518 + * by erratum #400
519 + */
520 + rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
521 + if (val >= 2) {
522 + rdmsrl(MSR_AMD64_OSVW_STATUS, val);
523 + if (!(val & BIT(1)))
524 + goto no_c1e_idle;
525 + }
526 + return 1;
527 + }
528 +
529 +no_c1e_idle:
530 + return 0;
531 }
532
533 static cpumask_var_t c1e_mask;
534 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
535 index f9ce04f..6eabe90 100644
536 --- a/arch/x86/kernel/process_64.c
537 +++ b/arch/x86/kernel/process_64.c
538 @@ -546,6 +546,7 @@ void set_personality_ia32(void)
539
540 /* Make sure to be in 32bit mode */
541 set_thread_flag(TIF_IA32);
542 + current->personality |= force_personality32;
543
544 /* Prepare the first "return" to user space */
545 current_thread_info()->status |= TS_COMPAT;
546 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
547 index 565ebc6..28e963d 100644
548 --- a/arch/x86/kernel/smpboot.c
549 +++ b/arch/x86/kernel/smpboot.c
550 @@ -1066,9 +1066,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
551 set_cpu_sibling_map(0);
552
553 enable_IR_x2apic();
554 -#ifdef CONFIG_X86_64
555 default_setup_apic_routing();
556 -#endif
557
558 if (smp_sanity_check(max_cpus) < 0) {
559 printk(KERN_INFO "SMP disabled\n");
560 diff --git a/block/blk-settings.c b/block/blk-settings.c
561 index d5aa886..9651c0a 100644
562 --- a/block/blk-settings.c
563 +++ b/block/blk-settings.c
564 @@ -8,6 +8,7 @@
565 #include <linux/blkdev.h>
566 #include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
567 #include <linux/gcd.h>
568 +#include <linux/lcm.h>
569
570 #include "blk.h"
571
572 @@ -490,18 +491,31 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
573
574 /**
575 * blk_stack_limits - adjust queue_limits for stacked devices
576 - * @t: the stacking driver limits (top)
577 - * @b: the underlying queue limits (bottom)
578 + * @t: the stacking driver limits (top device)
579 + * @b: the underlying queue limits (bottom, component device)
580 * @offset: offset to beginning of data within component device
581 *
582 * Description:
583 - * Merges two queue_limit structs. Returns 0 if alignment didn't
584 - * change. Returns -1 if adding the bottom device caused
585 - * misalignment.
586 + * This function is used by stacking drivers like MD and DM to ensure
587 + * that all component devices have compatible block sizes and
588 + * alignments. The stacking driver must provide a queue_limits
589 + * struct (top) and then iteratively call the stacking function for
590 + * all component (bottom) devices. The stacking function will
591 + * attempt to combine the values and ensure proper alignment.
592 + *
593 + * Returns 0 if the top and bottom queue_limits are compatible. The
594 + * top device's block sizes and alignment offsets may be adjusted to
595 + * ensure alignment with the bottom device. If no compatible sizes
596 + * and alignments exist, -1 is returned and the resulting top
597 + * queue_limits will have the misaligned flag set to indicate that
598 + * the alignment_offset is undefined.
599 */
600 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
601 sector_t offset)
602 {
603 + sector_t alignment;
604 + unsigned int top, bottom, ret = 0;
605 +
606 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
607 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
608 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
609 @@ -518,6 +532,26 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
610 t->max_segment_size = min_not_zero(t->max_segment_size,
611 b->max_segment_size);
612
613 + t->misaligned |= b->misaligned;
614 +
615 + alignment = queue_limit_alignment_offset(b, offset);
616 +
617 + /* Bottom device has different alignment. Check that it is
618 + * compatible with the current top alignment.
619 + */
620 + if (t->alignment_offset != alignment) {
621 +
622 + top = max(t->physical_block_size, t->io_min)
623 + + t->alignment_offset;
624 + bottom = max(b->physical_block_size, b->io_min) + alignment;
625 +
626 + /* Verify that top and bottom intervals line up */
627 + if (max(top, bottom) & (min(top, bottom) - 1)) {
628 + t->misaligned = 1;
629 + ret = -1;
630 + }
631 + }
632 +
633 t->logical_block_size = max(t->logical_block_size,
634 b->logical_block_size);
635
636 @@ -525,37 +559,46 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
637 b->physical_block_size);
638
639 t->io_min = max(t->io_min, b->io_min);
640 + t->io_opt = lcm(t->io_opt, b->io_opt);
641 +
642 t->no_cluster |= b->no_cluster;
643
644 - /* Bottom device offset aligned? */
645 - if (offset &&
646 - (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
647 + /* Physical block size a multiple of the logical block size? */
648 + if (t->physical_block_size & (t->logical_block_size - 1)) {
649 + t->physical_block_size = t->logical_block_size;
650 t->misaligned = 1;
651 - return -1;
652 + ret = -1;
653 }
654
655 - /* If top has no alignment offset, inherit from bottom */
656 - if (!t->alignment_offset)
657 - t->alignment_offset =
658 - b->alignment_offset & (b->physical_block_size - 1);
659 + /* Minimum I/O a multiple of the physical block size? */
660 + if (t->io_min & (t->physical_block_size - 1)) {
661 + t->io_min = t->physical_block_size;
662 + t->misaligned = 1;
663 + ret = -1;
664 + }
665
666 - /* Top device aligned on logical block boundary? */
667 - if (t->alignment_offset & (t->logical_block_size - 1)) {
668 + /* Optimal I/O a multiple of the physical block size? */
669 + if (t->io_opt & (t->physical_block_size - 1)) {
670 + t->io_opt = 0;
671 t->misaligned = 1;
672 - return -1;
673 + ret = -1;
674 }
675
676 - /* Find lcm() of optimal I/O size */
677 - if (t->io_opt && b->io_opt)
678 - t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
679 - else if (b->io_opt)
680 - t->io_opt = b->io_opt;
681 + /* Find lowest common alignment_offset */
682 + t->alignment_offset = lcm(t->alignment_offset, alignment)
683 + & (max(t->physical_block_size, t->io_min) - 1);
684
685 - /* Verify that optimal I/O size is a multiple of io_min */
686 - if (t->io_min && t->io_opt % t->io_min)
687 - return -1;
688 + /* Verify that new alignment_offset is on a logical block boundary */
689 + if (t->alignment_offset & (t->logical_block_size - 1)) {
690 + t->misaligned = 1;
691 + ret = -1;
692 + }
693
694 - return 0;
695 + /* Discard */
696 + t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
697 + b->max_discard_sectors);
698 +
699 + return ret;
700 }
701 EXPORT_SYMBOL(blk_stack_limits);
702
703 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
704 index a587046..2c53024 100644
705 --- a/drivers/ata/ahci.c
706 +++ b/drivers/ata/ahci.c
707 @@ -2831,6 +2831,14 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
708 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
709 * to the harddisk doesn't become online after
710 * resuming from STR. Warn and fail suspend.
711 + *
712 + * http://bugzilla.kernel.org/show_bug.cgi?id=12276
713 + *
714 + * Use dates instead of versions to match as HP is
715 + * apparently recycling both product and version
716 + * strings.
717 + *
718 + * http://bugzilla.kernel.org/show_bug.cgi?id=15462
719 */
720 {
721 .ident = "dv4",
722 @@ -2839,7 +2847,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
723 DMI_MATCH(DMI_PRODUCT_NAME,
724 "HP Pavilion dv4 Notebook PC"),
725 },
726 - .driver_data = "F.30", /* cutoff BIOS version */
727 + .driver_data = "20090105", /* F.30 */
728 },
729 {
730 .ident = "dv5",
731 @@ -2848,7 +2856,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
732 DMI_MATCH(DMI_PRODUCT_NAME,
733 "HP Pavilion dv5 Notebook PC"),
734 },
735 - .driver_data = "F.16", /* cutoff BIOS version */
736 + .driver_data = "20090506", /* F.16 */
737 },
738 {
739 .ident = "dv6",
740 @@ -2857,7 +2865,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
741 DMI_MATCH(DMI_PRODUCT_NAME,
742 "HP Pavilion dv6 Notebook PC"),
743 },
744 - .driver_data = "F.21", /* cutoff BIOS version */
745 + .driver_data = "20090423", /* F.21 */
746 },
747 {
748 .ident = "HDX18",
749 @@ -2866,7 +2874,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
750 DMI_MATCH(DMI_PRODUCT_NAME,
751 "HP HDX18 Notebook PC"),
752 },
753 - .driver_data = "F.23", /* cutoff BIOS version */
754 + .driver_data = "20090430", /* F.23 */
755 },
756 /*
757 * Acer eMachines G725 has the same problem. BIOS
758 @@ -2874,6 +2882,8 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
759 * work. Inbetween, there are V1.06, V2.06 and V3.03
760 * that we don't have much idea about. For now,
761 * blacklist anything older than V3.04.
762 + *
763 + * http://bugzilla.kernel.org/show_bug.cgi?id=15104
764 */
765 {
766 .ident = "G725",
767 @@ -2881,19 +2891,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
768 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
769 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
770 },
771 - .driver_data = "V3.04", /* cutoff BIOS version */
772 + .driver_data = "20091216", /* V3.04 */
773 },
774 { } /* terminate list */
775 };
776 const struct dmi_system_id *dmi = dmi_first_match(sysids);
777 - const char *ver;
778 + int year, month, date;
779 + char buf[9];
780
781 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
782 return false;
783
784 - ver = dmi_get_system_info(DMI_BIOS_VERSION);
785 + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
786 + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
787
788 - return !ver || strcmp(ver, dmi->driver_data) < 0;
789 + return strcmp(buf, dmi->driver_data) < 0;
790 }
791
792 static bool ahci_broken_online(struct pci_dev *pdev)
793 diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
794 index 66fa4e1..f27c4d6 100644
795 --- a/drivers/char/tty_buffer.c
796 +++ b/drivers/char/tty_buffer.c
797 @@ -247,7 +247,8 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
798 {
799 int copied = 0;
800 do {
801 - int space = tty_buffer_request_room(tty, size - copied);
802 + int goal = min(size - copied, TTY_BUFFER_PAGE);
803 + int space = tty_buffer_request_room(tty, goal);
804 struct tty_buffer *tb = tty->buf.tail;
805 /* If there is no space then tb may be NULL */
806 if (unlikely(space == 0))
807 @@ -283,7 +284,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
808 {
809 int copied = 0;
810 do {
811 - int space = tty_buffer_request_room(tty, size - copied);
812 + int goal = min(size - copied, TTY_BUFFER_PAGE);
813 + int space = tty_buffer_request_room(tty, goal);
814 struct tty_buffer *tb = tty->buf.tail;
815 /* If there is no space then tb may be NULL */
816 if (unlikely(space == 0))
817 diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
818 index 713ed7d..ac2aea8 100644
819 --- a/drivers/edac/edac_mce_amd.c
820 +++ b/drivers/edac/edac_mce_amd.c
821 @@ -311,9 +311,13 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
822 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
823 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
824 } else {
825 - pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
826 - }
827 + u8 assoc_cpus = regs->nbsh & 0xf;
828 +
829 + if (assoc_cpus > 0)
830 + pr_cont(", core: %d", fls(assoc_cpus) - 1);
831
832 + pr_cont("\n");
833 + }
834
835 pr_emerg("%s.\n", EXT_ERR_MSG(xec));
836
837 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
838 index b54ba63..d5671c3 100644
839 --- a/drivers/gpu/drm/drm_edid.c
840 +++ b/drivers/gpu/drm/drm_edid.c
841 @@ -834,8 +834,57 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
842 return modes;
843 }
844
845 +static int add_detailed_modes(struct drm_connector *connector,
846 + struct detailed_timing *timing,
847 + struct edid *edid, u32 quirks, int preferred)
848 +{
849 + int i, modes = 0;
850 + struct detailed_non_pixel *data = &timing->data.other_data;
851 + int timing_level = standard_timing_level(edid);
852 + struct drm_display_mode *newmode;
853 + struct drm_device *dev = connector->dev;
854 +
855 + if (timing->pixel_clock) {
856 + newmode = drm_mode_detailed(dev, edid, timing, quirks);
857 + if (!newmode)
858 + return 0;
859 +
860 + if (preferred)
861 + newmode->type |= DRM_MODE_TYPE_PREFERRED;
862 +
863 + drm_mode_probed_add(connector, newmode);
864 + return 1;
865 + }
866 +
867 + /* other timing types */
868 + switch (data->type) {
869 + case EDID_DETAIL_MONITOR_RANGE:
870 + /* Get monitor range data */
871 + break;
872 + case EDID_DETAIL_STD_MODES:
873 + /* Six modes per detailed section */
874 + for (i = 0; i < 6; i++) {
875 + struct std_timing *std;
876 + struct drm_display_mode *newmode;
877 +
878 + std = &data->data.timings[i];
879 + newmode = drm_mode_std(dev, std, edid->revision,
880 + timing_level);
881 + if (newmode) {
882 + drm_mode_probed_add(connector, newmode);
883 + modes++;
884 + }
885 + }
886 + break;
887 + default:
888 + break;
889 + }
890 +
891 + return modes;
892 +}
893 +
894 /**
895 - * add_detailed_modes - get detailed mode info from EDID data
896 + * add_detailed_info - get detailed mode info from EDID data
897 * @connector: attached connector
898 * @edid: EDID block to scan
899 * @quirks: quirks to apply
900 @@ -846,67 +895,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
901 static int add_detailed_info(struct drm_connector *connector,
902 struct edid *edid, u32 quirks)
903 {
904 - struct drm_device *dev = connector->dev;
905 - int i, j, modes = 0;
906 - int timing_level;
907 -
908 - timing_level = standard_timing_level(edid);
909 + int i, modes = 0;
910
911 for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
912 struct detailed_timing *timing = &edid->detailed_timings[i];
913 - struct detailed_non_pixel *data = &timing->data.other_data;
914 - struct drm_display_mode *newmode;
915 -
916 - /* X server check is version 1.1 or higher */
917 - if (edid->version == 1 && edid->revision >= 1 &&
918 - !timing->pixel_clock) {
919 - /* Other timing or info */
920 - switch (data->type) {
921 - case EDID_DETAIL_MONITOR_SERIAL:
922 - break;
923 - case EDID_DETAIL_MONITOR_STRING:
924 - break;
925 - case EDID_DETAIL_MONITOR_RANGE:
926 - /* Get monitor range data */
927 - break;
928 - case EDID_DETAIL_MONITOR_NAME:
929 - break;
930 - case EDID_DETAIL_MONITOR_CPDATA:
931 - break;
932 - case EDID_DETAIL_STD_MODES:
933 - for (j = 0; j < 6; i++) {
934 - struct std_timing *std;
935 - struct drm_display_mode *newmode;
936 -
937 - std = &data->data.timings[j];
938 - newmode = drm_mode_std(dev, std,
939 - edid->revision,
940 - timing_level);
941 - if (newmode) {
942 - drm_mode_probed_add(connector, newmode);
943 - modes++;
944 - }
945 - }
946 - break;
947 - default:
948 - break;
949 - }
950 - } else {
951 - newmode = drm_mode_detailed(dev, edid, timing, quirks);
952 - if (!newmode)
953 - continue;
954 + int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
955
956 - /* First detailed mode is preferred */
957 - if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
958 - newmode->type |= DRM_MODE_TYPE_PREFERRED;
959 - drm_mode_probed_add(connector, newmode);
960 + /* In 1.0, only timings are allowed */
961 + if (!timing->pixel_clock && edid->version == 1 &&
962 + edid->revision == 0)
963 + continue;
964
965 - modes++;
966 - }
967 + modes += add_detailed_modes(connector, timing, edid, quirks,
968 + preferred);
969 }
970
971 return modes;
972 }
973 +
974 /**
975 * add_detailed_mode_eedid - get detailed mode info from addtional timing
976 * EDID block
977 @@ -920,12 +926,9 @@ static int add_detailed_info(struct drm_connector *connector,
978 static int add_detailed_info_eedid(struct drm_connector *connector,
979 struct edid *edid, u32 quirks)
980 {
981 - struct drm_device *dev = connector->dev;
982 - int i, j, modes = 0;
983 + int i, modes = 0;
984 char *edid_ext = NULL;
985 struct detailed_timing *timing;
986 - struct detailed_non_pixel *data;
987 - struct drm_display_mode *newmode;
988 int edid_ext_num;
989 int start_offset, end_offset;
990 int timing_level;
991 @@ -976,51 +979,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
992 for (i = start_offset; i < end_offset;
993 i += sizeof(struct detailed_timing)) {
994 timing = (struct detailed_timing *)(edid_ext + i);
995 - data = &timing->data.other_data;
996 - /* Detailed mode timing */
997 - if (timing->pixel_clock) {
998 - newmode = drm_mode_detailed(dev, edid, timing, quirks);
999 - if (!newmode)
1000 - continue;
1001 -
1002 - drm_mode_probed_add(connector, newmode);
1003 -
1004 - modes++;
1005 - continue;
1006 - }
1007 -
1008 - /* Other timing or info */
1009 - switch (data->type) {
1010 - case EDID_DETAIL_MONITOR_SERIAL:
1011 - break;
1012 - case EDID_DETAIL_MONITOR_STRING:
1013 - break;
1014 - case EDID_DETAIL_MONITOR_RANGE:
1015 - /* Get monitor range data */
1016 - break;
1017 - case EDID_DETAIL_MONITOR_NAME:
1018 - break;
1019 - case EDID_DETAIL_MONITOR_CPDATA:
1020 - break;
1021 - case EDID_DETAIL_STD_MODES:
1022 - /* Five modes per detailed section */
1023 - for (j = 0; j < 5; i++) {
1024 - struct std_timing *std;
1025 - struct drm_display_mode *newmode;
1026 -
1027 - std = &data->data.timings[j];
1028 - newmode = drm_mode_std(dev, std,
1029 - edid->revision,
1030 - timing_level);
1031 - if (newmode) {
1032 - drm_mode_probed_add(connector, newmode);
1033 - modes++;
1034 - }
1035 - }
1036 - break;
1037 - default:
1038 - break;
1039 - }
1040 + modes += add_detailed_modes(connector, timing, edid, quirks, 0);
1041 }
1042
1043 return modes;
1044 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1045 index f5d49a7..aafbef7 100644
1046 --- a/drivers/gpu/drm/i915/i915_drv.h
1047 +++ b/drivers/gpu/drm/i915/i915_drv.h
1048 @@ -258,7 +258,7 @@ typedef struct drm_i915_private {
1049
1050 struct notifier_block lid_notifier;
1051
1052 - int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
1053 + int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
1054 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
1055 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1056 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1057 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1058 index 04da731..1e9c66a 100644
1059 --- a/drivers/gpu/drm/i915/i915_gem.c
1060 +++ b/drivers/gpu/drm/i915/i915_gem.c
1061 @@ -1470,9 +1470,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1062 obj_priv->dirty = 0;
1063
1064 for (i = 0; i < page_count; i++) {
1065 - if (obj_priv->pages[i] == NULL)
1066 - break;
1067 -
1068 if (obj_priv->dirty)
1069 set_page_dirty(obj_priv->pages[i]);
1070
1071 @@ -2246,7 +2243,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
1072 struct address_space *mapping;
1073 struct inode *inode;
1074 struct page *page;
1075 - int ret;
1076
1077 if (obj_priv->pages_refcount++ != 0)
1078 return 0;
1079 @@ -2269,11 +2265,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
1080 mapping_gfp_mask (mapping) |
1081 __GFP_COLD |
1082 gfpmask);
1083 - if (IS_ERR(page)) {
1084 - ret = PTR_ERR(page);
1085 - i915_gem_object_put_pages(obj);
1086 - return ret;
1087 - }
1088 + if (IS_ERR(page))
1089 + goto err_pages;
1090 +
1091 obj_priv->pages[i] = page;
1092 }
1093
1094 @@ -2281,6 +2275,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
1095 i915_gem_object_do_bit_17_swizzle(obj);
1096
1097 return 0;
1098 +
1099 +err_pages:
1100 + while (i--)
1101 + page_cache_release(obj_priv->pages[i]);
1102 +
1103 + drm_free_large(obj_priv->pages);
1104 + obj_priv->pages = NULL;
1105 + obj_priv->pages_refcount--;
1106 + return PTR_ERR(page);
1107 }
1108
1109 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
1110 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1111 index 96cd256..97169ea 100644
1112 --- a/drivers/gpu/drm/i915/intel_bios.c
1113 +++ b/drivers/gpu/drm/i915/intel_bios.c
1114 @@ -241,10 +241,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
1115 GPIOF,
1116 };
1117
1118 - /* Set sensible defaults in case we can't find the general block
1119 - or it is the wrong chipset */
1120 - dev_priv->crt_ddc_bus = -1;
1121 -
1122 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
1123 if (general) {
1124 u16 block_size = get_blocksize(general);
1125 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
1126 index 5e730e6..166a24e 100644
1127 --- a/drivers/gpu/drm/i915/intel_crt.c
1128 +++ b/drivers/gpu/drm/i915/intel_crt.c
1129 @@ -557,7 +557,7 @@ void intel_crt_init(struct drm_device *dev)
1130 else {
1131 i2c_reg = GPIOA;
1132 /* Use VBT information for CRT DDC if available */
1133 - if (dev_priv->crt_ddc_bus != -1)
1134 + if (dev_priv->crt_ddc_bus != 0)
1135 i2c_reg = dev_priv->crt_ddc_bus;
1136 }
1137 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
1138 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
1139 index 2d7bcee..cb4290a 100644
1140 --- a/drivers/hwmon/coretemp.c
1141 +++ b/drivers/hwmon/coretemp.c
1142 @@ -228,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
1143 if (err) {
1144 dev_warn(dev,
1145 "Unable to access MSR 0xEE, for Tjmax, left"
1146 - " at default");
1147 + " at default\n");
1148 } else if (eax & 0x40000000) {
1149 tjmax = tjmax_ee;
1150 }
1151 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1152 index 55edcfe..4d73fcf 100644
1153 --- a/drivers/i2c/busses/i2c-i801.c
1154 +++ b/drivers/i2c/busses/i2c-i801.c
1155 @@ -415,9 +415,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
1156 data->block[0] = 32; /* max for SMBus block reads */
1157 }
1158
1159 + /* Experience has shown that the block buffer can only be used for
1160 + SMBus (not I2C) block transactions, even though the datasheet
1161 + doesn't mention this limitation. */
1162 if ((i801_features & FEATURE_BLOCK_BUFFER)
1163 - && !(command == I2C_SMBUS_I2C_BLOCK_DATA
1164 - && read_write == I2C_SMBUS_READ)
1165 + && command != I2C_SMBUS_I2C_BLOCK_DATA
1166 && i801_set_block_buffer_mode() == 0)
1167 result = i801_block_transaction_by_block(data, read_write,
1168 hwpec);
1169 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1170 index fc8823b..0c99db0 100644
1171 --- a/drivers/input/mouse/alps.c
1172 +++ b/drivers/input/mouse/alps.c
1173 @@ -62,6 +62,8 @@ static const struct alps_model_info alps_model_data[] = {
1174 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
1175 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
1176 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */
1177 + { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
1178 + ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
1179 };
1180
1181 /*
1182 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1183 index 2a5982e..525b9b9 100644
1184 --- a/drivers/input/serio/i8042-x86ia64io.h
1185 +++ b/drivers/input/serio/i8042-x86ia64io.h
1186 @@ -442,6 +442,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
1187 },
1188 },
1189 {
1190 + /* Medion Akoya E1222 */
1191 + .matches = {
1192 + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
1193 + DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
1194 + },
1195 + },
1196 + {
1197 /* Mivvy M310 */
1198 .matches = {
1199 DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
1200 diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
1201 index 9114ae1..e6307ba 100644
1202 --- a/drivers/input/tablet/wacom.h
1203 +++ b/drivers/input/tablet/wacom.h
1204 @@ -1,7 +1,7 @@
1205 /*
1206 * drivers/input/tablet/wacom.h
1207 *
1208 - * USB Wacom Graphire and Wacom Intuos tablet support
1209 + * USB Wacom tablet support
1210 *
1211 * Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
1212 * Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk>
1213 @@ -69,6 +69,7 @@
1214 * v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
1215 * v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
1216 * v1.51 (pc) - Added support for Intuos4
1217 + * v1.52 (pc) - Query Wacom data upon system resume
1218 */
1219
1220 /*
1221 @@ -89,9 +90,9 @@
1222 /*
1223 * Version Information
1224 */
1225 -#define DRIVER_VERSION "v1.51"
1226 +#define DRIVER_VERSION "v1.52"
1227 #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
1228 -#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
1229 +#define DRIVER_DESC "USB Wacom tablet driver"
1230 #define DRIVER_LICENSE "GPL"
1231
1232 MODULE_AUTHOR(DRIVER_AUTHOR);
1233 diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
1234 index ea30c98..b5b69cc 100644
1235 --- a/drivers/input/tablet/wacom_sys.c
1236 +++ b/drivers/input/tablet/wacom_sys.c
1237 @@ -1,7 +1,7 @@
1238 /*
1239 * drivers/input/tablet/wacom_sys.c
1240 *
1241 - * USB Wacom Graphire and Wacom Intuos tablet support - system specific code
1242 + * USB Wacom tablet support - system specific code
1243 */
1244
1245 /*
1246 @@ -562,9 +562,10 @@ static int wacom_resume(struct usb_interface *intf)
1247 int rv;
1248
1249 mutex_lock(&wacom->lock);
1250 - if (wacom->open)
1251 + if (wacom->open) {
1252 rv = usb_submit_urb(wacom->irq, GFP_NOIO);
1253 - else
1254 + wacom_query_tablet_data(intf);
1255 + } else
1256 rv = 0;
1257 mutex_unlock(&wacom->lock);
1258
1259 diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
1260 index cc768ca..a0f7b99 100644
1261 --- a/drivers/isdn/gigaset/ev-layer.c
1262 +++ b/drivers/isdn/gigaset/ev-layer.c
1263 @@ -1243,14 +1243,10 @@ static void do_action(int action, struct cardstate *cs,
1264 * note that bcs may be NULL if no B channel is free
1265 */
1266 at_state2->ConState = 700;
1267 - kfree(at_state2->str_var[STR_NMBR]);
1268 - at_state2->str_var[STR_NMBR] = NULL;
1269 - kfree(at_state2->str_var[STR_ZCPN]);
1270 - at_state2->str_var[STR_ZCPN] = NULL;
1271 - kfree(at_state2->str_var[STR_ZBC]);
1272 - at_state2->str_var[STR_ZBC] = NULL;
1273 - kfree(at_state2->str_var[STR_ZHLC]);
1274 - at_state2->str_var[STR_ZHLC] = NULL;
1275 + for (i = 0; i < STR_NUM; ++i) {
1276 + kfree(at_state2->str_var[i]);
1277 + at_state2->str_var[i] = NULL;
1278 + }
1279 at_state2->int_var[VAR_ZCTP] = -1;
1280
1281 spin_lock_irqsave(&cs->lock, flags);
1282 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
1283 index 6a8e138..b3065b8 100644
1284 --- a/drivers/isdn/gigaset/interface.c
1285 +++ b/drivers/isdn/gigaset/interface.c
1286 @@ -635,7 +635,6 @@ void gigaset_if_receive(struct cardstate *cs,
1287 if ((tty = cs->tty) == NULL)
1288 gig_dbg(DEBUG_ANY, "receive on closed device");
1289 else {
1290 - tty_buffer_request_room(tty, len);
1291 tty_insert_flip_string(tty, buffer, len);
1292 tty_flip_buffer_push(tty);
1293 }
1294 diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1295 index e5225d2..0823e26 100644
1296 --- a/drivers/leds/leds-gpio.c
1297 +++ b/drivers/leds/leds-gpio.c
1298 @@ -211,7 +211,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1299 const struct of_device_id *match)
1300 {
1301 struct device_node *np = ofdev->node, *child;
1302 - struct gpio_led led;
1303 struct gpio_led_of_platform_data *pdata;
1304 int count = 0, ret;
1305
1306 @@ -226,8 +225,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1307 if (!pdata)
1308 return -ENOMEM;
1309
1310 - memset(&led, 0, sizeof(led));
1311 for_each_child_of_node(np, child) {
1312 + struct gpio_led led = {};
1313 enum of_gpio_flags flags;
1314 const char *state;
1315
1316 diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
1317 index db74946..efddf15 100644
1318 --- a/drivers/media/video/em28xx/em28xx-dvb.c
1319 +++ b/drivers/media/video/em28xx/em28xx-dvb.c
1320 @@ -610,6 +610,7 @@ static int dvb_fini(struct em28xx *dev)
1321
1322 if (dev->dvb) {
1323 unregister_dvb(dev->dvb);
1324 + kfree(dev->dvb);
1325 dev->dvb = NULL;
1326 }
1327
1328 diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
1329 index f31f05a..fba147c 100644
1330 --- a/drivers/mmc/host/s3cmci.c
1331 +++ b/drivers/mmc/host/s3cmci.c
1332 @@ -1361,6 +1361,8 @@ static struct mmc_host_ops s3cmci_ops = {
1333 static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
1334 /* This is currently here to avoid a number of if (host->pdata)
1335 * checks. Any zero fields to ensure reaonable defaults are picked. */
1336 + .no_wprotect = 1,
1337 + .no_detect = 1,
1338 };
1339
1340 #ifdef CONFIG_CPU_FREQ
1341 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
1342 index aaea41e..e8e87a7 100644
1343 --- a/drivers/net/e1000e/hw.h
1344 +++ b/drivers/net/e1000e/hw.h
1345 @@ -356,6 +356,7 @@ enum e1e_registers {
1346 #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
1347 #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
1348
1349 +#define E1000_DEV_ID_ICH8_82567V_3 0x1501
1350 #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
1351 #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
1352 #define E1000_DEV_ID_ICH8_IGP_C 0x104B
1353 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
1354 index eff3f47..c688b55 100644
1355 --- a/drivers/net/e1000e/ich8lan.c
1356 +++ b/drivers/net/e1000e/ich8lan.c
1357 @@ -3209,6 +3209,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
1358 u32 phy_ctrl;
1359
1360 switch (hw->mac.type) {
1361 + case e1000_ich8lan:
1362 case e1000_ich9lan:
1363 case e1000_ich10lan:
1364 case e1000_pchlan:
1365 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
1366 index 2154530..f590bea 100644
1367 --- a/drivers/net/e1000e/netdev.c
1368 +++ b/drivers/net/e1000e/netdev.c
1369 @@ -5360,6 +5360,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
1370 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
1371 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
1372 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
1373 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
1374
1375 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
1376 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
1377 diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
1378 index 34b0492..9c42149 100644
1379 --- a/drivers/net/ixgbe/ixgbe_82599.c
1380 +++ b/drivers/net/ixgbe/ixgbe_82599.c
1381 @@ -332,6 +332,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
1382 case IXGBE_DEV_ID_82599_KX4:
1383 case IXGBE_DEV_ID_82599_KX4_MEZZ:
1384 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1385 + case IXGBE_DEV_ID_82599_KR:
1386 case IXGBE_DEV_ID_82599_XAUI_LOM:
1387 /* Default device ID is mezzanine card KX/KX4 */
1388 media_type = ixgbe_media_type_backplane;
1389 diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
1390 index a456578..6339d65 100644
1391 --- a/drivers/net/ixgbe/ixgbe_main.c
1392 +++ b/drivers/net/ixgbe/ixgbe_main.c
1393 @@ -96,6 +96,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
1394 board_82599 },
1395 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
1396 board_82599 },
1397 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
1398 + board_82599 },
1399 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
1400 board_82599 },
1401 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
1402 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
1403 index ef4bdd5..7d66f5b 100644
1404 --- a/drivers/net/ixgbe/ixgbe_type.h
1405 +++ b/drivers/net/ixgbe/ixgbe_type.h
1406 @@ -50,6 +50,7 @@
1407 #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
1408 #define IXGBE_DEV_ID_82599_KX4 0x10F7
1409 #define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
1410 +#define IXGBE_DEV_ID_82599_KR 0x1517
1411 #define IXGBE_DEV_ID_82599_CX4 0x10F9
1412 #define IXGBE_DEV_ID_82599_SFP 0x10FB
1413 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
1414 diff --git a/drivers/net/jme.c b/drivers/net/jme.c
1415 index 1d2a325..3bb3a6d 100644
1416 --- a/drivers/net/jme.c
1417 +++ b/drivers/net/jme.c
1418 @@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1419 jme->jme_vlan_rx(skb, jme->vlgrp,
1420 le16_to_cpu(rxdesc->descwb.vlan));
1421 NET_STAT(jme).rx_bytes += 4;
1422 + } else {
1423 + dev_kfree_skb(skb);
1424 }
1425 } else {
1426 jme->jme_rx(skb);
1427 @@ -2085,12 +2087,45 @@ jme_tx_timeout(struct net_device *netdev)
1428 jme_reset_link(jme);
1429 }
1430
1431 +static inline void jme_pause_rx(struct jme_adapter *jme)
1432 +{
1433 + atomic_dec(&jme->link_changing);
1434 +
1435 + jme_set_rx_pcc(jme, PCC_OFF);
1436 + if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1437 + JME_NAPI_DISABLE(jme);
1438 + } else {
1439 + tasklet_disable(&jme->rxclean_task);
1440 + tasklet_disable(&jme->rxempty_task);
1441 + }
1442 +}
1443 +
1444 +static inline void jme_resume_rx(struct jme_adapter *jme)
1445 +{
1446 + struct dynpcc_info *dpi = &(jme->dpi);
1447 +
1448 + if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1449 + JME_NAPI_ENABLE(jme);
1450 + } else {
1451 + tasklet_hi_enable(&jme->rxclean_task);
1452 + tasklet_hi_enable(&jme->rxempty_task);
1453 + }
1454 + dpi->cur = PCC_P1;
1455 + dpi->attempt = PCC_P1;
1456 + dpi->cnt = 0;
1457 + jme_set_rx_pcc(jme, PCC_P1);
1458 +
1459 + atomic_inc(&jme->link_changing);
1460 +}
1461 +
1462 static void
1463 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1464 {
1465 struct jme_adapter *jme = netdev_priv(netdev);
1466
1467 + jme_pause_rx(jme);
1468 jme->vlgrp = grp;
1469 + jme_resume_rx(jme);
1470 }
1471
1472 static void
1473 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1474 index ba5d3fe..dcc1c23 100644
1475 --- a/drivers/net/tg3.c
1476 +++ b/drivers/net/tg3.c
1477 @@ -4995,7 +4995,7 @@ static void tg3_poll_controller(struct net_device *dev)
1478 struct tg3 *tp = netdev_priv(dev);
1479
1480 for (i = 0; i < tp->irq_cnt; i++)
1481 - tg3_interrupt(tp->napi[i].irq_vec, dev);
1482 + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1483 }
1484 #endif
1485
1486 @@ -5392,7 +5392,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1487 mss = 0;
1488 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
1489 struct iphdr *iph;
1490 - int tcp_opt_len, ip_tcp_len, hdr_len;
1491 + u32 tcp_opt_len, ip_tcp_len, hdr_len;
1492
1493 if (skb_header_cloned(skb) &&
1494 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
1495 @@ -5423,8 +5423,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1496 IPPROTO_TCP,
1497 0);
1498
1499 - if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
1500 - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
1501 + if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
1502 + mss |= hdr_len << 9;
1503 + else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
1504 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1505 if (tcp_opt_len || iph->ihl > 5) {
1506 int tsflags;
1507
1508 @@ -5459,6 +5461,9 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1509
1510 would_hit_hwbug = 0;
1511
1512 + if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
1513 + would_hit_hwbug = 1;
1514 +
1515 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
1516 would_hit_hwbug = 1;
1517 else if (tg3_4g_overflow_test(mapping, len))
1518 @@ -5482,6 +5487,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
1519
1520 tnapi->tx_buffers[entry].skb = NULL;
1521
1522 + if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
1523 + len <= 8)
1524 + would_hit_hwbug = 1;
1525 +
1526 if (tg3_4g_overflow_test(mapping, len))
1527 would_hit_hwbug = 1;
1528
1529 @@ -12608,6 +12617,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
1530 }
1531 }
1532
1533 + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1534 + tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
1535 +
1536 tp->irq_max = 1;
1537
1538 #ifdef TG3_NAPI
1539 @@ -13975,8 +13987,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
1540 goto err_out_iounmap;
1541 }
1542
1543 - if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
1544 - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1545 + if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
1546 dev->netdev_ops = &tg3_netdev_ops;
1547 else
1548 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
1549 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
1550 index bab7940..529f55a 100644
1551 --- a/drivers/net/tg3.h
1552 +++ b/drivers/net/tg3.h
1553 @@ -2759,6 +2759,9 @@ struct tg3 {
1554 #define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
1555 #define TG3_FLG3_PHY_IS_FET 0x00010000
1556 #define TG3_FLG3_ENABLE_RSS 0x00020000
1557 +#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
1558 +#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
1559 +#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
1560
1561 struct timer_list timer;
1562 u16 timer_counter;
1563 diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
1564 index aafdc1e..2c79c78 100644
1565 --- a/drivers/net/wireless/ath/ath5k/ath5k.h
1566 +++ b/drivers/net/wireless/ath/ath5k/ath5k.h
1567 @@ -540,7 +540,7 @@ struct ath5k_txq_info {
1568 u32 tqi_cbr_period; /* Constant bit rate period */
1569 u32 tqi_cbr_overflow_limit;
1570 u32 tqi_burst_time;
1571 - u32 tqi_ready_time; /* Not used */
1572 + u32 tqi_ready_time; /* Time queue waits after an event */
1573 };
1574
1575 /*
1576 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1577 index 46f913b..6313788 100644
1578 --- a/drivers/net/wireless/ath/ath5k/base.c
1579 +++ b/drivers/net/wireless/ath/ath5k/base.c
1580 @@ -1511,7 +1511,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1581
1582 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1583 if (ret)
1584 - return ret;
1585 + goto err;
1586 +
1587 if (sc->opmode == NL80211_IFTYPE_AP ||
1588 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1589 /*
1590 @@ -1538,10 +1539,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1591 if (ret) {
1592 ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1593 "hardware queue!\n", __func__);
1594 - return ret;
1595 + goto err;
1596 }
1597 + ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
1598 + if (ret)
1599 + goto err;
1600
1601 - return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
1602 + /* reconfigure cabq with ready time to 80% of beacon_interval */
1603 + ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1604 + if (ret)
1605 + goto err;
1606 +
1607 + qi.tqi_ready_time = (sc->bintval * 80) / 100;
1608 + ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1609 + if (ret)
1610 + goto err;
1611 +
1612 + ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1613 +err:
1614 + return ret;
1615 }
1616
1617 static void
1618 diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
1619 index eeebb9a..b7c5725 100644
1620 --- a/drivers/net/wireless/ath/ath5k/qcu.c
1621 +++ b/drivers/net/wireless/ath/ath5k/qcu.c
1622 @@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
1623 break;
1624
1625 case AR5K_TX_QUEUE_CAB:
1626 + /* XXX: use BCN_SENT_GT, if we can figure out how */
1627 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
1628 - AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
1629 + AR5K_QCU_MISC_FRSHED_DBA_GT |
1630 AR5K_QCU_MISC_CBREXP_DIS |
1631 AR5K_QCU_MISC_CBREXP_BCN_DIS);
1632
1633 - ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
1634 + ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
1635 (AR5K_TUNE_SW_BEACON_RESP -
1636 AR5K_TUNE_DMA_BEACON_RESP) -
1637 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
1638 diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
1639 index 34e13c7..257ea18 100644
1640 --- a/drivers/net/wireless/ath/ath5k/reset.c
1641 +++ b/drivers/net/wireless/ath/ath5k/reset.c
1642 @@ -1382,8 +1382,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1643 * Set clocks to 32KHz operation and use an
1644 * external 32KHz crystal when sleeping if one
1645 * exists */
1646 - if (ah->ah_version == AR5K_AR5212)
1647 - ath5k_hw_set_sleep_clock(ah, true);
1648 + if (ah->ah_version == AR5K_AR5212 &&
1649 + ah->ah_op_mode != NL80211_IFTYPE_AP)
1650 + ath5k_hw_set_sleep_clock(ah, true);
1651
1652 /*
1653 * Disable beacons and reset the register
1654 diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
1655 index cdb90c5..ad11969 100644
1656 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
1657 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
1658 @@ -368,6 +368,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1659 u16 tid, u16 *ssn);
1660 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1661 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1662 +void ath9k_enable_ps(struct ath_softc *sc);
1663
1664 /********/
1665 /* VIFs */
1666 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1667 index 5864eaa..15eb245 100644
1668 --- a/drivers/net/wireless/ath/ath9k/main.c
1669 +++ b/drivers/net/wireless/ath/ath9k/main.c
1670 @@ -1544,6 +1544,7 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1671 IEEE80211_HW_AMPDU_AGGREGATION |
1672 IEEE80211_HW_SUPPORTS_PS |
1673 IEEE80211_HW_PS_NULLFUNC_STACK |
1674 + IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1675 IEEE80211_HW_SPECTRUM_MGMT;
1676
1677 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1678 @@ -2305,6 +2306,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1679 mutex_unlock(&sc->mutex);
1680 }
1681
1682 +void ath9k_enable_ps(struct ath_softc *sc)
1683 +{
1684 + sc->ps_enabled = true;
1685 + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1686 + if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1687 + sc->imask |= ATH9K_INT_TIM_TIMER;
1688 + ath9k_hw_set_interrupts(sc->sc_ah,
1689 + sc->imask);
1690 + }
1691 + }
1692 + ath9k_hw_setrxabort(sc->sc_ah, 1);
1693 +}
1694 +
1695 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1696 {
1697 struct ath_wiphy *aphy = hw->priv;
1698 @@ -2336,19 +2350,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1699 if (changed & IEEE80211_CONF_CHANGE_PS) {
1700 if (conf->flags & IEEE80211_CONF_PS) {
1701 sc->sc_flags |= SC_OP_PS_ENABLED;
1702 - if (!(ah->caps.hw_caps &
1703 - ATH9K_HW_CAP_AUTOSLEEP)) {
1704 - if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1705 - sc->imask |= ATH9K_INT_TIM_TIMER;
1706 - ath9k_hw_set_interrupts(sc->sc_ah,
1707 - sc->imask);
1708 - }
1709 - }
1710 - sc->ps_enabled = true;
1711 if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
1712 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
1713 - sc->ps_enabled = true;
1714 - ath9k_hw_setrxabort(sc->sc_ah, 1);
1715 + ath9k_enable_ps(sc);
1716 }
1717 } else {
1718 sc->ps_enabled = false;
1719 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
1720 index 9009bac..a232361 100644
1721 --- a/drivers/net/wireless/ath/ath9k/xmit.c
1722 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
1723 @@ -1320,25 +1320,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1724 return htype;
1725 }
1726
1727 -static bool is_pae(struct sk_buff *skb)
1728 -{
1729 - struct ieee80211_hdr *hdr;
1730 - __le16 fc;
1731 -
1732 - hdr = (struct ieee80211_hdr *)skb->data;
1733 - fc = hdr->frame_control;
1734 -
1735 - if (ieee80211_is_data(fc)) {
1736 - if (ieee80211_is_nullfunc(fc) ||
1737 - /* Port Access Entity (IEEE 802.1X) */
1738 - (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
1739 - return true;
1740 - }
1741 - }
1742 -
1743 - return false;
1744 -}
1745 -
1746 static int get_hw_crypto_keytype(struct sk_buff *skb)
1747 {
1748 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1749 @@ -1648,7 +1629,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1750 goto tx_done;
1751 }
1752
1753 - if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1754 + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1755 /*
1756 * Try aggregation if it's a unicast data frame
1757 * and the destination is HT capable.
1758 @@ -1998,10 +1979,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1759
1760 if (bf->bf_isnullfunc &&
1761 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
1762 - if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
1763 - sc->ps_enabled = true;
1764 - ath9k_hw_setrxabort(sc->sc_ah, 1);
1765 - } else
1766 + if ((sc->sc_flags & SC_OP_PS_ENABLED))
1767 + ath9k_enable_ps(sc);
1768 + else
1769 sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
1770 }
1771
1772 @@ -2210,7 +2190,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1773 if (ATH_TXQ_SETUP(sc, i)) {
1774 txq = &sc->tx.txq[i];
1775
1776 - spin_lock(&txq->axq_lock);
1777 + spin_lock_bh(&txq->axq_lock);
1778
1779 list_for_each_entry_safe(ac,
1780 ac_tmp, &txq->axq_acq, list) {
1781 @@ -2231,7 +2211,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1782 }
1783 }
1784
1785 - spin_unlock(&txq->axq_lock);
1786 + spin_unlock_bh(&txq->axq_lock);
1787 }
1788 }
1789 }
1790 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
1791 index 2bde1a9..a8a00d2 100644
1792 --- a/drivers/net/wireless/b43/main.c
1793 +++ b/drivers/net/wireless/b43/main.c
1794 @@ -852,19 +852,16 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
1795 if (B43_WARN_ON(!modparam_hwtkip))
1796 return;
1797
1798 - mutex_lock(&wl->mutex);
1799 -
1800 + /* This is only called from the RX path through mac80211, where
1801 + * our mutex is already locked. */
1802 + B43_WARN_ON(!mutex_is_locked(&wl->mutex));
1803 dev = wl->current_dev;
1804 - if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
1805 - goto out_unlock;
1806 + B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
1807
1808 keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
1809
1810 rx_tkip_phase1_write(dev, index, iv32, phase1key);
1811 keymac_write(dev, index, addr);
1812 -
1813 -out_unlock:
1814 - mutex_unlock(&wl->mutex);
1815 }
1816
1817 static void do_key_write(struct b43_wldev *dev,
1818 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
1819 index 9d60f6c..56bfcc3 100644
1820 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
1821 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
1822 @@ -2545,11 +2545,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
1823 memset((void *)&priv->hw_params, 0,
1824 sizeof(struct iwl_hw_params));
1825
1826 - priv->shared_virt =
1827 - pci_alloc_consistent(priv->pci_dev,
1828 - sizeof(struct iwl3945_shared),
1829 - &priv->shared_phys);
1830 -
1831 + priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
1832 + sizeof(struct iwl3945_shared),
1833 + &priv->shared_phys, GFP_KERNEL);
1834 if (!priv->shared_virt) {
1835 IWL_ERR(priv, "failed to allocate pci memory\n");
1836 mutex_unlock(&priv->mutex);
1837 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
1838 index 6d6235f..4a4f7e4 100644
1839 --- a/drivers/net/wireless/iwlwifi/iwl-core.c
1840 +++ b/drivers/net/wireless/iwlwifi/iwl-core.c
1841 @@ -1598,9 +1598,9 @@ EXPORT_SYMBOL(iwl_uninit_drv);
1842 void iwl_free_isr_ict(struct iwl_priv *priv)
1843 {
1844 if (priv->ict_tbl_vir) {
1845 - pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
1846 - PAGE_SIZE, priv->ict_tbl_vir,
1847 - priv->ict_tbl_dma);
1848 + dma_free_coherent(&priv->pci_dev->dev,
1849 + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1850 + priv->ict_tbl_vir, priv->ict_tbl_dma);
1851 priv->ict_tbl_vir = NULL;
1852 }
1853 }
1854 @@ -1616,9 +1616,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
1855 if (priv->cfg->use_isr_legacy)
1856 return 0;
1857 /* allocate shrared data table */
1858 - priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
1859 - ICT_COUNT) + PAGE_SIZE,
1860 - &priv->ict_tbl_dma);
1861 + priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
1862 + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1863 + &priv->ict_tbl_dma, GFP_KERNEL);
1864 if (!priv->ict_tbl_vir)
1865 return -ENOMEM;
1866
1867 diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
1868 index bd0b12e..f8481e8 100644
1869 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
1870 +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
1871 @@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
1872 struct fw_desc *desc)
1873 {
1874 if (desc->v_addr)
1875 - pci_free_consistent(pci_dev, desc->len,
1876 - desc->v_addr, desc->p_addr);
1877 + dma_free_coherent(&pci_dev->dev, desc->len,
1878 + desc->v_addr, desc->p_addr);
1879 desc->v_addr = NULL;
1880 desc->len = 0;
1881 }
1882 @@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
1883 static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
1884 struct fw_desc *desc)
1885 {
1886 - desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
1887 + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
1888 + &desc->p_addr, GFP_KERNEL);
1889 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
1890 }
1891
1892 diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
1893 index 493626b..3198a8a 100644
1894 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c
1895 +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
1896 @@ -345,10 +345,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1897 }
1898 }
1899
1900 - pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1901 - rxq->dma_addr);
1902 - pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
1903 - rxq->rb_stts, rxq->rb_stts_dma);
1904 + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1905 + rxq->dma_addr);
1906 + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1907 + rxq->rb_stts, rxq->rb_stts_dma);
1908 rxq->bd = NULL;
1909 rxq->rb_stts = NULL;
1910 }
1911 @@ -357,7 +357,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
1912 int iwl_rx_queue_alloc(struct iwl_priv *priv)
1913 {
1914 struct iwl_rx_queue *rxq = &priv->rxq;
1915 - struct pci_dev *dev = priv->pci_dev;
1916 + struct device *dev = &priv->pci_dev->dev;
1917 int i;
1918
1919 spin_lock_init(&rxq->lock);
1920 @@ -365,12 +365,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
1921 INIT_LIST_HEAD(&rxq->rx_used);
1922
1923 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
1924 - rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
1925 + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
1926 + GFP_KERNEL);
1927 if (!rxq->bd)
1928 goto err_bd;
1929
1930 - rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
1931 - &rxq->rb_stts_dma);
1932 + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
1933 + &rxq->rb_stts_dma, GFP_KERNEL);
1934 if (!rxq->rb_stts)
1935 goto err_rb;
1936
1937 @@ -387,8 +388,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
1938 return 0;
1939
1940 err_rb:
1941 - pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1942 - rxq->dma_addr);
1943 + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1944 + rxq->dma_addr);
1945 err_bd:
1946 return -ENOMEM;
1947 }
1948 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
1949 index f449f06..e143adc 100644
1950 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
1951 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
1952 @@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
1953 static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
1954 struct iwl_dma_ptr *ptr, size_t size)
1955 {
1956 - ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
1957 + ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
1958 + GFP_KERNEL);
1959 if (!ptr->addr)
1960 return -ENOMEM;
1961 ptr->size = size;
1962 @@ -73,7 +74,7 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
1963 if (unlikely(!ptr->addr))
1964 return;
1965
1966 - pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
1967 + dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1968 memset(ptr, 0, sizeof(*ptr));
1969 }
1970
1971 @@ -125,7 +126,7 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1972 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1973 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1974 else {
1975 - IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
1976 + IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1977 priv->stations[sta_id].tid[tid].tfds_in_queue,
1978 freed);
1979 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1980 @@ -145,7 +146,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
1981 {
1982 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1983 struct iwl_queue *q = &txq->q;
1984 - struct pci_dev *dev = priv->pci_dev;
1985 + struct device *dev = &priv->pci_dev->dev;
1986 int i, len;
1987
1988 if (q->n_bd == 0)
1989 @@ -164,8 +165,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
1990
1991 /* De-alloc circular buffer of TFDs */
1992 if (txq->q.n_bd)
1993 - pci_free_consistent(dev, priv->hw_params.tfd_size *
1994 - txq->q.n_bd, txq->tfds, txq->q.dma_addr);
1995 + dma_free_coherent(dev, priv->hw_params.tfd_size *
1996 + txq->q.n_bd, txq->tfds, txq->q.dma_addr);
1997
1998 /* De-alloc array of per-TFD driver data */
1999 kfree(txq->txb);
2000 @@ -194,7 +195,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2001 {
2002 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
2003 struct iwl_queue *q = &txq->q;
2004 - struct pci_dev *dev = priv->pci_dev;
2005 + struct device *dev = &priv->pci_dev->dev;
2006 int i, len;
2007
2008 if (q->n_bd == 0)
2009 @@ -209,8 +210,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2010
2011 /* De-alloc circular buffer of TFDs */
2012 if (txq->q.n_bd)
2013 - pci_free_consistent(dev, priv->hw_params.tfd_size *
2014 - txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2015 + dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
2016 + txq->tfds, txq->q.dma_addr);
2017
2018 /* deallocate arrays */
2019 kfree(txq->cmd);
2020 @@ -301,7 +302,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
2021 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2022 struct iwl_tx_queue *txq, u32 id)
2023 {
2024 - struct pci_dev *dev = priv->pci_dev;
2025 + struct device *dev = &priv->pci_dev->dev;
2026 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2027
2028 /* Driver private data, only for Tx (not command) queues,
2029 @@ -320,8 +321,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2030
2031 /* Circular buffer of transmit frame descriptors (TFDs),
2032 * shared with device */
2033 - txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
2034 -
2035 + txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
2036 + GFP_KERNEL);
2037 if (!txq->tfds) {
2038 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
2039 goto error;
2040 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2041 index 5f26c93..064d3cd 100644
2042 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2043 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2044 @@ -356,10 +356,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
2045 static void iwl3945_unset_hw_params(struct iwl_priv *priv)
2046 {
2047 if (priv->shared_virt)
2048 - pci_free_consistent(priv->pci_dev,
2049 - sizeof(struct iwl3945_shared),
2050 - priv->shared_virt,
2051 - priv->shared_phys);
2052 + dma_free_coherent(&priv->pci_dev->dev,
2053 + sizeof(struct iwl3945_shared),
2054 + priv->shared_virt,
2055 + priv->shared_phys);
2056 }
2057
2058 static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2059 @@ -1272,10 +1272,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
2060 }
2061 }
2062
2063 - pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2064 - rxq->dma_addr);
2065 - pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
2066 - rxq->rb_stts, rxq->rb_stts_dma);
2067 + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2068 + rxq->dma_addr);
2069 + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
2070 + rxq->rb_stts, rxq->rb_stts_dma);
2071 rxq->bd = NULL;
2072 rxq->rb_stts = NULL;
2073 }
2074 diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2075 index a007230..1685c09 100644
2076 --- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2077 +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2078 @@ -443,7 +443,8 @@ out:
2079
2080 void wl1251_debugfs_reset(struct wl1251 *wl)
2081 {
2082 - memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2083 + if (wl->stats.fw_stats != NULL)
2084 + memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2085 wl->stats.retry_count = 0;
2086 wl->stats.excessive_retries = 0;
2087 }
2088 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2089 index 6477722..4493060 100644
2090 --- a/drivers/pci/pci.c
2091 +++ b/drivers/pci/pci.c
2092 @@ -2350,18 +2350,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
2093 */
2094 int pcix_get_max_mmrbc(struct pci_dev *dev)
2095 {
2096 - int err, cap;
2097 + int cap;
2098 u32 stat;
2099
2100 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2101 if (!cap)
2102 return -EINVAL;
2103
2104 - err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2105 - if (err)
2106 + if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2107 return -EINVAL;
2108
2109 - return (stat & PCI_X_STATUS_MAX_READ) >> 12;
2110 + return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2111 }
2112 EXPORT_SYMBOL(pcix_get_max_mmrbc);
2113
2114 @@ -2374,18 +2373,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
2115 */
2116 int pcix_get_mmrbc(struct pci_dev *dev)
2117 {
2118 - int ret, cap;
2119 - u32 cmd;
2120 + int cap;
2121 + u16 cmd;
2122
2123 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2124 if (!cap)
2125 return -EINVAL;
2126
2127 - ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2128 - if (!ret)
2129 - ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2130 + if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2131 + return -EINVAL;
2132
2133 - return ret;
2134 + return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2135 }
2136 EXPORT_SYMBOL(pcix_get_mmrbc);
2137
2138 @@ -2400,28 +2398,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
2139 */
2140 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2141 {
2142 - int cap, err = -EINVAL;
2143 - u32 stat, cmd, v, o;
2144 + int cap;
2145 + u32 stat, v, o;
2146 + u16 cmd;
2147
2148 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2149 - goto out;
2150 + return -EINVAL;
2151
2152 v = ffs(mmrbc) - 10;
2153
2154 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2155 if (!cap)
2156 - goto out;
2157 + return -EINVAL;
2158
2159 - err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2160 - if (err)
2161 - goto out;
2162 + if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2163 + return -EINVAL;
2164
2165 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2166 return -E2BIG;
2167
2168 - err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2169 - if (err)
2170 - goto out;
2171 + if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2172 + return -EINVAL;
2173
2174 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2175 if (o != v) {
2176 @@ -2431,10 +2428,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2177
2178 cmd &= ~PCI_X_CMD_MAX_READ;
2179 cmd |= v << 2;
2180 - err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
2181 + if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2182 + return -EIO;
2183 }
2184 -out:
2185 - return err;
2186 + return 0;
2187 }
2188 EXPORT_SYMBOL(pcix_set_mmrbc);
2189
2190 diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
2191 index 9f5ccbe..72fa87c 100644
2192 --- a/drivers/pci/pcie/aer/aerdrv_core.c
2193 +++ b/drivers/pci/pcie/aer/aerdrv_core.c
2194 @@ -78,19 +78,15 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
2195 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
2196 {
2197 int pos;
2198 - u32 status, mask;
2199 + u32 status;
2200
2201 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
2202 if (!pos)
2203 return -EIO;
2204
2205 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
2206 - pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
2207 - if (dev->error_state == pci_channel_io_normal)
2208 - status &= ~mask; /* Clear corresponding nonfatal bits */
2209 - else
2210 - status &= mask; /* Clear corresponding fatal bits */
2211 - pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
2212 + if (status)
2213 + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
2214
2215 return 0;
2216 }
2217 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2218 index 245d2cd..e2641cd 100644
2219 --- a/drivers/pci/quirks.c
2220 +++ b/drivers/pci/quirks.c
2221 @@ -2513,6 +2513,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
2222 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
2223 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
2224 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
2225 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
2226
2227 #endif /* CONFIG_PCI_IOV */
2228
2229 diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
2230 index c790d45..cae6b2c 100644
2231 --- a/drivers/scsi/mvsas/mv_init.c
2232 +++ b/drivers/scsi/mvsas/mv_init.c
2233 @@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
2234 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
2235 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
2236 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
2237 + { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
2238
2239 { } /* terminate list */
2240 };
2241 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2242 index bf52dec..64084aa 100644
2243 --- a/drivers/scsi/scsi_transport_fc.c
2244 +++ b/drivers/scsi/scsi_transport_fc.c
2245 @@ -1215,6 +1215,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
2246 {
2247 struct fc_vport *vport = transport_class_to_vport(dev);
2248 struct Scsi_Host *shost = vport_to_shost(vport);
2249 + unsigned long flags;
2250 +
2251 + spin_lock_irqsave(shost->host_lock, flags);
2252 + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
2253 + spin_unlock_irqrestore(shost->host_lock, flags);
2254 + return -EBUSY;
2255 + }
2256 + vport->flags |= FC_VPORT_DELETING;
2257 + spin_unlock_irqrestore(shost->host_lock, flags);
2258
2259 fc_queue_work(shost, &vport->vport_delete_work);
2260 return count;
2261 @@ -1804,6 +1813,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
2262 list_for_each_entry(vport, &fc_host->vports, peers) {
2263 if ((vport->channel == 0) &&
2264 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
2265 + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
2266 + break;
2267 + vport->flags |= FC_VPORT_DELETING;
2268 match = 1;
2269 break;
2270 }
2271 @@ -3328,18 +3340,6 @@ fc_vport_terminate(struct fc_vport *vport)
2272 unsigned long flags;
2273 int stat;
2274
2275 - spin_lock_irqsave(shost->host_lock, flags);
2276 - if (vport->flags & FC_VPORT_CREATING) {
2277 - spin_unlock_irqrestore(shost->host_lock, flags);
2278 - return -EBUSY;
2279 - }
2280 - if (vport->flags & (FC_VPORT_DEL)) {
2281 - spin_unlock_irqrestore(shost->host_lock, flags);
2282 - return -EALREADY;
2283 - }
2284 - vport->flags |= FC_VPORT_DELETING;
2285 - spin_unlock_irqrestore(shost->host_lock, flags);
2286 -
2287 if (i->f->vport_delete)
2288 stat = i->f->vport_delete(vport);
2289 else
2290 diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
2291 index 55b034b..3c8a024 100644
2292 --- a/drivers/scsi/ses.c
2293 +++ b/drivers/scsi/ses.c
2294 @@ -591,8 +591,6 @@ static int ses_intf_add(struct device *cdev,
2295 ses_dev->page10_len = len;
2296 buf = NULL;
2297 }
2298 - kfree(hdr_buf);
2299 -
2300 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
2301 if (!scomp)
2302 goto err_free;
2303 @@ -604,6 +602,8 @@ static int ses_intf_add(struct device *cdev,
2304 goto err_free;
2305 }
2306
2307 + kfree(hdr_buf);
2308 +
2309 edev->scratch = ses_dev;
2310 for (i = 0; i < components; i++)
2311 edev->component[i].scratch = scomp + i;
2312 diff --git a/drivers/staging/rt2860/common/2860_rtmp_init.c b/drivers/staging/rt2860/common/2860_rtmp_init.c
2313 index 0bc0fb9..98b0f8e 100644
2314 --- a/drivers/staging/rt2860/common/2860_rtmp_init.c
2315 +++ b/drivers/staging/rt2860/common/2860_rtmp_init.c
2316 @@ -716,7 +716,7 @@ VOID RTMPFreeTxRxRingMemory(
2317 {
2318 if ((pAd->RxRing.Cell[index].DmaBuf.AllocVa) && (pAd->RxRing.Cell[index].pNdisPacket))
2319 {
2320 - PCI_UNMAP_SINGLE(pObj->pci_dev, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
2321 + PCI_UNMAP_SINGLE(pAd, pAd->RxRing.Cell[index].DmaBuf.AllocPa, pAd->RxRing.Cell[index].DmaBuf.AllocSize, PCI_DMA_FROMDEVICE);
2322 RELEASE_NDIS_PACKET(pAd, pAd->RxRing.Cell[index].pNdisPacket, NDIS_STATUS_SUCCESS);
2323 }
2324 }
2325 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2326 index 24120db..2f12e2d 100644
2327 --- a/drivers/usb/core/devio.c
2328 +++ b/drivers/usb/core/devio.c
2329 @@ -1176,6 +1176,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
2330 free_async(as);
2331 return -ENOMEM;
2332 }
2333 + /* Isochronous input data may end up being discontiguous
2334 + * if some of the packets are short. Clear the buffer so
2335 + * that the gaps don't leak kernel data to userspace.
2336 + */
2337 + if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
2338 + memset(as->urb->transfer_buffer, 0,
2339 + uurb->buffer_length);
2340 }
2341 as->urb->dev = ps->dev;
2342 as->urb->pipe = (uurb->type << 30) |
2343 @@ -1312,10 +1319,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
2344 void __user *addr = as->userurb;
2345 unsigned int i;
2346
2347 - if (as->userbuffer && urb->actual_length)
2348 - if (copy_to_user(as->userbuffer, urb->transfer_buffer,
2349 - urb->actual_length))
2350 + if (as->userbuffer && urb->actual_length) {
2351 + if (urb->number_of_packets > 0) /* Isochronous */
2352 + i = urb->transfer_buffer_length;
2353 + else /* Non-Isoc */
2354 + i = urb->actual_length;
2355 + if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
2356 goto err_out;
2357 + }
2358 if (put_user(as->status, &userurb->status))
2359 goto err_out;
2360 if (put_user(urb->actual_length, &userurb->actual_length))
2361 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2362 index e18c677..35bf518 100644
2363 --- a/drivers/usb/host/ehci-hcd.c
2364 +++ b/drivers/usb/host/ehci-hcd.c
2365 @@ -993,7 +993,7 @@ rescan:
2366 /* endpoints can be iso streams. for now, we don't
2367 * accelerate iso completions ... so spin a while.
2368 */
2369 - if (qh->hw->hw_info1 == 0) {
2370 + if (qh->hw == NULL) {
2371 ehci_vdbg (ehci, "iso delay\n");
2372 goto idle_timeout;
2373 }
2374 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2375 index a5535b5..5cc3f48 100644
2376 --- a/drivers/usb/host/ehci-sched.c
2377 +++ b/drivers/usb/host/ehci-sched.c
2378 @@ -1121,8 +1121,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
2379 urb->interval);
2380 }
2381
2382 - /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
2383 - } else if (unlikely (stream->hw_info1 != 0)) {
2384 + /* if dev->ep [epnum] is a QH, hw is set */
2385 + } else if (unlikely (stream->hw != NULL)) {
2386 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
2387 urb->dev->devpath, epnum,
2388 usb_pipein(urb->pipe) ? "in" : "out");
2389 @@ -1553,13 +1553,27 @@ itd_patch(
2390 static inline void
2391 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
2392 {
2393 - /* always prepend ITD/SITD ... only QH tree is order-sensitive */
2394 - itd->itd_next = ehci->pshadow [frame];
2395 - itd->hw_next = ehci->periodic [frame];
2396 - ehci->pshadow [frame].itd = itd;
2397 + union ehci_shadow *prev = &ehci->pshadow[frame];
2398 + __hc32 *hw_p = &ehci->periodic[frame];
2399 + union ehci_shadow here = *prev;
2400 + __hc32 type = 0;
2401 +
2402 + /* skip any iso nodes which might belong to previous microframes */
2403 + while (here.ptr) {
2404 + type = Q_NEXT_TYPE(ehci, *hw_p);
2405 + if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
2406 + break;
2407 + prev = periodic_next_shadow(ehci, prev, type);
2408 + hw_p = shadow_next_periodic(ehci, &here, type);
2409 + here = *prev;
2410 + }
2411 +
2412 + itd->itd_next = here;
2413 + itd->hw_next = *hw_p;
2414 + prev->itd = itd;
2415 itd->frame = frame;
2416 wmb ();
2417 - ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2418 + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2419 }
2420
2421 /* fit urb's itds into the selected schedule slot; activate as needed */
2422 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2423 index 2d85e21..b1dce96 100644
2424 --- a/drivers/usb/host/ehci.h
2425 +++ b/drivers/usb/host/ehci.h
2426 @@ -394,9 +394,8 @@ struct ehci_iso_sched {
2427 * acts like a qh would, if EHCI had them for ISO.
2428 */
2429 struct ehci_iso_stream {
2430 - /* first two fields match QH, but info1 == 0 */
2431 - __hc32 hw_next;
2432 - __hc32 hw_info1;
2433 + /* first field matches ehci_hq, but is NULL */
2434 + struct ehci_qh_hw *hw;
2435
2436 u32 refcount;
2437 u8 bEndpointAddress;
2438 diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
2439 index 9260c74..e3548ee 100644
2440 --- a/drivers/usb/host/r8a66597-hcd.c
2441 +++ b/drivers/usb/host/r8a66597-hcd.c
2442 @@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
2443
2444 /* this function must be called with interrupt disabled */
2445 static void free_usb_address(struct r8a66597 *r8a66597,
2446 - struct r8a66597_device *dev)
2447 + struct r8a66597_device *dev, int reset)
2448 {
2449 int port;
2450
2451 @@ -430,7 +430,13 @@ static void free_usb_address(struct r8a66597 *r8a66597,
2452 dev->state = USB_STATE_DEFAULT;
2453 r8a66597->address_map &= ~(1 << dev->address);
2454 dev->address = 0;
2455 - dev_set_drvdata(&dev->udev->dev, NULL);
2456 + /*
2457 + * Only when resetting USB, it is necessary to erase drvdata. When
2458 + * a usb device with usb hub is disconnect, "dev->udev" is already
2459 + * freed on usb_desconnect(). So we cannot access the data.
2460 + */
2461 + if (reset)
2462 + dev_set_drvdata(&dev->udev->dev, NULL);
2463 list_del(&dev->device_list);
2464 kfree(dev);
2465
2466 @@ -1067,7 +1073,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
2467 struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
2468
2469 disable_r8a66597_pipe_all(r8a66597, dev);
2470 - free_usb_address(r8a66597, dev);
2471 + free_usb_address(r8a66597, dev, 0);
2472
2473 start_root_hub_sampling(r8a66597, port, 0);
2474 }
2475 @@ -2085,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
2476 spin_lock_irqsave(&r8a66597->lock, flags);
2477 dev = get_r8a66597_device(r8a66597, addr);
2478 disable_r8a66597_pipe_all(r8a66597, dev);
2479 - free_usb_address(r8a66597, dev);
2480 + free_usb_address(r8a66597, dev, 0);
2481 put_child_connect_map(r8a66597, addr);
2482 spin_unlock_irqrestore(&r8a66597->lock, flags);
2483 }
2484 @@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2485 rh->port |= (1 << USB_PORT_FEAT_RESET);
2486
2487 disable_r8a66597_pipe_all(r8a66597, dev);
2488 - free_usb_address(r8a66597, dev);
2489 + free_usb_address(r8a66597, dev, 1);
2490
2491 r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
2492 get_dvstctr_reg(port));
2493 diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
2494 index 932f999..9974f32 100644
2495 --- a/drivers/usb/host/xhci-hcd.c
2496 +++ b/drivers/usb/host/xhci-hcd.c
2497 @@ -1157,6 +1157,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2498 cmd_completion = &virt_dev->cmd_completion;
2499 cmd_status = &virt_dev->cmd_status;
2500 }
2501 + init_completion(cmd_completion);
2502
2503 if (!ctx_change)
2504 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2505 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2506 index ce937e7..9cf4652 100644
2507 --- a/drivers/usb/serial/ftdi_sio.c
2508 +++ b/drivers/usb/serial/ftdi_sio.c
2509 @@ -658,6 +658,7 @@ static struct usb_device_id id_table_combined [] = {
2510 { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
2511 { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
2512 { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
2513 + { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
2514 { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
2515 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
2516 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
2517 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2518 index d10b5a8..8f9e805 100644
2519 --- a/drivers/usb/serial/ftdi_sio_ids.h
2520 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2521 @@ -501,6 +501,13 @@
2522 #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2523
2524 /*
2525 + * Contec products (http://www.contec.com)
2526 + * Submitted by Daniel Sangorrin
2527 + */
2528 +#define CONTEC_VID 0x06CE /* Vendor ID */
2529 +#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2530 +
2531 +/*
2532 * Definitions for B&B Electronics products.
2533 */
2534 #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
2535 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2536 index be3dff1..fcf56f9 100644
2537 --- a/drivers/usb/serial/option.c
2538 +++ b/drivers/usb/serial/option.c
2539 @@ -288,7 +288,9 @@ static int option_resume(struct usb_serial *serial);
2540
2541 #define QUALCOMM_VENDOR_ID 0x05C6
2542
2543 -#define MAXON_VENDOR_ID 0x16d8
2544 +#define CMOTECH_VENDOR_ID 0x16d8
2545 +#define CMOTECH_PRODUCT_6008 0x6008
2546 +#define CMOTECH_PRODUCT_6280 0x6280
2547
2548 #define TELIT_VENDOR_ID 0x1bc7
2549 #define TELIT_PRODUCT_UC864E 0x1003
2550 @@ -520,7 +522,8 @@ static struct usb_device_id option_ids[] = {
2551 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
2552 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
2553 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2554 - { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
2555 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
2556 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
2557 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
2558 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
2559 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2560 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2561 index 7528b8d..8ab4ab2 100644
2562 --- a/drivers/usb/serial/qcserial.c
2563 +++ b/drivers/usb/serial/qcserial.c
2564 @@ -47,6 +47,35 @@ static struct usb_device_id id_table[] = {
2565 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
2566 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
2567 {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
2568 + {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
2569 + {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
2570 + {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
2571 + {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
2572 + {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
2573 + {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
2574 + {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
2575 + {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
2576 + {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
2577 + {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
2578 + {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
2579 + {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
2580 + {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
2581 + {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
2582 + {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
2583 + {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
2584 + {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
2585 + {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2586 + {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2587 + {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2588 + {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2589 + {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2590 + {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2591 + {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2592 + {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2593 + {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2594 + {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2595 + {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
2596 + {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
2597 { } /* Terminating entry */
2598 };
2599 MODULE_DEVICE_TABLE(usb, id_table);
2600 diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
2601 index eb12182..d25df51 100644
2602 --- a/drivers/video/efifb.c
2603 +++ b/drivers/video/efifb.c
2604 @@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
2605 return 0;
2606 }
2607
2608 +static void efifb_destroy(struct fb_info *info)
2609 +{
2610 + if (info->screen_base)
2611 + iounmap(info->screen_base);
2612 + release_mem_region(info->aperture_base, info->aperture_size);
2613 + framebuffer_release(info);
2614 +}
2615 +
2616 static struct fb_ops efifb_ops = {
2617 .owner = THIS_MODULE,
2618 + .fb_destroy = efifb_destroy,
2619 .fb_setcolreg = efifb_setcolreg,
2620 .fb_fillrect = cfb_fillrect,
2621 .fb_copyarea = cfb_copyarea,
2622 @@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
2623 info->par = NULL;
2624
2625 info->aperture_base = efifb_fix.smem_start;
2626 - info->aperture_size = size_total;
2627 + info->aperture_size = size_remap;
2628
2629 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
2630 if (!info->screen_base) {
2631 diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
2632 index 28d9cf7..7127bfe 100644
2633 --- a/drivers/virtio/virtio_pci.c
2634 +++ b/drivers/virtio/virtio_pci.c
2635 @@ -473,7 +473,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
2636
2637 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
2638 info = vq->priv;
2639 - if (vp_dev->per_vq_vectors)
2640 + if (vp_dev->per_vq_vectors &&
2641 + info->msix_vector != VIRTIO_MSI_NO_VECTOR)
2642 free_irq(vp_dev->msix_entries[info->msix_vector].vector,
2643 vq);
2644 vp_del_vq(vq);
2645 diff --git a/fs/exec.c b/fs/exec.c
2646 index 9b88366..a2a3944 100644
2647 --- a/fs/exec.c
2648 +++ b/fs/exec.c
2649 @@ -1913,8 +1913,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
2650 /*
2651 * Dont allow local users get cute and trick others to coredump
2652 * into their pre-created files:
2653 + * Note, this is not relevant for pipes
2654 */
2655 - if (inode->i_uid != current_fsuid())
2656 + if (!ispipe && (inode->i_uid != current_fsuid()))
2657 goto close_fail;
2658 if (!file->f_op)
2659 goto close_fail;
2660 diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
2661 index 4eb308a..a32bcd7 100644
2662 --- a/fs/gfs2/file.c
2663 +++ b/fs/gfs2/file.c
2664 @@ -606,7 +606,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
2665
2666 if (!(fl->fl_flags & FL_POSIX))
2667 return -ENOLCK;
2668 - if (__mandatory_lock(&ip->i_inode))
2669 + if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
2670 return -ENOLCK;
2671
2672 if (cmd == F_CANCELLK) {
2673 diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
2674 index 09f3837..7f237d2 100644
2675 --- a/fs/nfs/delegation.h
2676 +++ b/fs/nfs/delegation.h
2677 @@ -68,4 +68,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode)
2678 }
2679 #endif
2680
2681 +static inline int nfs_have_delegated_attributes(struct inode *inode)
2682 +{
2683 + return nfs_have_delegation(inode, FMODE_READ) &&
2684 + !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
2685 +}
2686 +
2687 #endif
2688 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2689 index 7cb2985..f360e9c 100644
2690 --- a/fs/nfs/dir.c
2691 +++ b/fs/nfs/dir.c
2692 @@ -1797,7 +1797,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
2693 cache = nfs_access_search_rbtree(inode, cred);
2694 if (cache == NULL)
2695 goto out;
2696 - if (!nfs_have_delegation(inode, FMODE_READ) &&
2697 + if (!nfs_have_delegated_attributes(inode) &&
2698 !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
2699 goto out_stale;
2700 res->jiffies = cache->jiffies;
2701 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
2702 index 393d40f..61b3bf5 100644
2703 --- a/fs/nfs/file.c
2704 +++ b/fs/nfs/file.c
2705 @@ -486,7 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
2706 {
2707 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
2708
2709 - if (gfp & __GFP_WAIT)
2710 + /* Only do I/O if gfp is a superset of GFP_KERNEL */
2711 + if ((gfp & GFP_KERNEL) == GFP_KERNEL)
2712 nfs_wb_page(page->mapping->host, page);
2713 /* If PagePrivate() is set, then the page is not freeable */
2714 if (PagePrivate(page))
2715 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
2716 index faa0918..3c80474 100644
2717 --- a/fs/nfs/inode.c
2718 +++ b/fs/nfs/inode.c
2719 @@ -759,7 +759,7 @@ int nfs_attribute_timeout(struct inode *inode)
2720 {
2721 struct nfs_inode *nfsi = NFS_I(inode);
2722
2723 - if (nfs_have_delegation(inode, FMODE_READ))
2724 + if (nfs_have_delegated_attributes(inode))
2725 return 0;
2726 return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
2727 }
2728 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
2729 index a12c45b..29d9d36 100644
2730 --- a/fs/nfs/pagelist.c
2731 +++ b/fs/nfs/pagelist.c
2732 @@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req)
2733 */
2734 int nfs_set_page_tag_locked(struct nfs_page *req)
2735 {
2736 - struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
2737 -
2738 if (!nfs_lock_request_dontget(req))
2739 return 0;
2740 if (req->wb_page != NULL)
2741 - radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
2742 + radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
2743 return 1;
2744 }
2745
2746 @@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
2747 */
2748 void nfs_clear_page_tag_locked(struct nfs_page *req)
2749 {
2750 - struct inode *inode = req->wb_context->path.dentry->d_inode;
2751 - struct nfs_inode *nfsi = NFS_I(inode);
2752 -
2753 if (req->wb_page != NULL) {
2754 + struct inode *inode = req->wb_context->path.dentry->d_inode;
2755 + struct nfs_inode *nfsi = NFS_I(inode);
2756 +
2757 spin_lock(&inode->i_lock);
2758 radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
2759 nfs_unlock_request(req);
2760 @@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
2761 * nfs_clear_request - Free up all resources allocated to the request
2762 * @req:
2763 *
2764 - * Release page resources associated with a write request after it
2765 - * has completed.
2766 + * Release page and open context resources associated with a read/write
2767 + * request after it has completed.
2768 */
2769 void nfs_clear_request(struct nfs_page *req)
2770 {
2771 struct page *page = req->wb_page;
2772 + struct nfs_open_context *ctx = req->wb_context;
2773 +
2774 if (page != NULL) {
2775 page_cache_release(page);
2776 req->wb_page = NULL;
2777 }
2778 + if (ctx != NULL) {
2779 + put_nfs_open_context(ctx);
2780 + req->wb_context = NULL;
2781 + }
2782 }
2783
2784
2785 @@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref)
2786 {
2787 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
2788
2789 - /* Release struct file or cached credential */
2790 + /* Release struct file and open context */
2791 nfs_clear_request(req);
2792 - put_nfs_open_context(req->wb_context);
2793 nfs_page_free(req);
2794 }
2795
2796 diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
2797 index 0028d2e..90be97f 100644
2798 --- a/fs/partitions/msdos.c
2799 +++ b/fs/partitions/msdos.c
2800 @@ -31,14 +31,17 @@
2801 */
2802 #include <asm/unaligned.h>
2803
2804 -#define SYS_IND(p) (get_unaligned(&p->sys_ind))
2805 -#define NR_SECTS(p) ({ __le32 __a = get_unaligned(&p->nr_sects); \
2806 - le32_to_cpu(__a); \
2807 - })
2808 +#define SYS_IND(p) get_unaligned(&p->sys_ind)
2809
2810 -#define START_SECT(p) ({ __le32 __a = get_unaligned(&p->start_sect); \
2811 - le32_to_cpu(__a); \
2812 - })
2813 +static inline sector_t nr_sects(struct partition *p)
2814 +{
2815 + return (sector_t)get_unaligned_le32(&p->nr_sects);
2816 +}
2817 +
2818 +static inline sector_t start_sect(struct partition *p)
2819 +{
2820 + return (sector_t)get_unaligned_le32(&p->start_sect);
2821 +}
2822
2823 static inline int is_extended_partition(struct partition *p)
2824 {
2825 @@ -104,13 +107,13 @@ static int aix_magic_present(unsigned char *p, struct block_device *bdev)
2826
2827 static void
2828 parse_extended(struct parsed_partitions *state, struct block_device *bdev,
2829 - u32 first_sector, u32 first_size)
2830 + sector_t first_sector, sector_t first_size)
2831 {
2832 struct partition *p;
2833 Sector sect;
2834 unsigned char *data;
2835 - u32 this_sector, this_size;
2836 - int sector_size = bdev_logical_block_size(bdev) / 512;
2837 + sector_t this_sector, this_size;
2838 + sector_t sector_size = bdev_logical_block_size(bdev) / 512;
2839 int loopct = 0; /* number of links followed
2840 without finding a data partition */
2841 int i;
2842 @@ -145,14 +148,14 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
2843 * First process the data partition(s)
2844 */
2845 for (i=0; i<4; i++, p++) {
2846 - u32 offs, size, next;
2847 - if (!NR_SECTS(p) || is_extended_partition(p))
2848 + sector_t offs, size, next;
2849 + if (!nr_sects(p) || is_extended_partition(p))
2850 continue;
2851
2852 /* Check the 3rd and 4th entries -
2853 these sometimes contain random garbage */
2854 - offs = START_SECT(p)*sector_size;
2855 - size = NR_SECTS(p)*sector_size;
2856 + offs = start_sect(p)*sector_size;
2857 + size = nr_sects(p)*sector_size;
2858 next = this_sector + offs;
2859 if (i >= 2) {
2860 if (offs + size > this_size)
2861 @@ -179,13 +182,13 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
2862 */
2863 p -= 4;
2864 for (i=0; i<4; i++, p++)
2865 - if (NR_SECTS(p) && is_extended_partition(p))
2866 + if (nr_sects(p) && is_extended_partition(p))
2867 break;
2868 if (i == 4)
2869 goto done; /* nothing left to do */
2870
2871 - this_sector = first_sector + START_SECT(p) * sector_size;
2872 - this_size = NR_SECTS(p) * sector_size;
2873 + this_sector = first_sector + start_sect(p) * sector_size;
2874 + this_size = nr_sects(p) * sector_size;
2875 put_dev_sector(sect);
2876 }
2877 done:
2878 @@ -197,7 +200,7 @@ done:
2879
2880 static void
2881 parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
2882 - u32 offset, u32 size, int origin)
2883 + sector_t offset, sector_t size, int origin)
2884 {
2885 #ifdef CONFIG_SOLARIS_X86_PARTITION
2886 Sector sect;
2887 @@ -244,7 +247,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
2888 */
2889 static void
2890 parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
2891 - u32 offset, u32 size, int origin, char *flavour,
2892 + sector_t offset, sector_t size, int origin, char *flavour,
2893 int max_partitions)
2894 {
2895 Sector sect;
2896 @@ -263,7 +266,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
2897 if (le16_to_cpu(l->d_npartitions) < max_partitions)
2898 max_partitions = le16_to_cpu(l->d_npartitions);
2899 for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
2900 - u32 bsd_start, bsd_size;
2901 + sector_t bsd_start, bsd_size;
2902
2903 if (state->next == state->limit)
2904 break;
2905 @@ -290,7 +293,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
2906
2907 static void
2908 parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
2909 - u32 offset, u32 size, int origin)
2910 + sector_t offset, sector_t size, int origin)
2911 {
2912 #ifdef CONFIG_BSD_DISKLABEL
2913 parse_bsd(state, bdev, offset, size, origin,
2914 @@ -300,7 +303,7 @@ parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
2915
2916 static void
2917 parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
2918 - u32 offset, u32 size, int origin)
2919 + sector_t offset, sector_t size, int origin)
2920 {
2921 #ifdef CONFIG_BSD_DISKLABEL
2922 parse_bsd(state, bdev, offset, size, origin,
2923 @@ -310,7 +313,7 @@ parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
2924
2925 static void
2926 parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
2927 - u32 offset, u32 size, int origin)
2928 + sector_t offset, sector_t size, int origin)
2929 {
2930 #ifdef CONFIG_BSD_DISKLABEL
2931 parse_bsd(state, bdev, offset, size, origin,
2932 @@ -324,7 +327,7 @@ parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
2933 */
2934 static void
2935 parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
2936 - u32 offset, u32 size, int origin)
2937 + sector_t offset, sector_t size, int origin)
2938 {
2939 #ifdef CONFIG_UNIXWARE_DISKLABEL
2940 Sector sect;
2941 @@ -348,7 +351,8 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
2942
2943 if (p->s_label != UNIXWARE_FS_UNUSED)
2944 put_partition(state, state->next++,
2945 - START_SECT(p), NR_SECTS(p));
2946 + le32_to_cpu(p->start_sect),
2947 + le32_to_cpu(p->nr_sects));
2948 p++;
2949 }
2950 put_dev_sector(sect);
2951 @@ -363,7 +367,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
2952 */
2953 static void
2954 parse_minix(struct parsed_partitions *state, struct block_device *bdev,
2955 - u32 offset, u32 size, int origin)
2956 + sector_t offset, sector_t size, int origin)
2957 {
2958 #ifdef CONFIG_MINIX_SUBPARTITION
2959 Sector sect;
2960 @@ -390,7 +394,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
2961 /* add each partition in use */
2962 if (SYS_IND(p) == MINIX_PARTITION)
2963 put_partition(state, state->next++,
2964 - START_SECT(p), NR_SECTS(p));
2965 + start_sect(p), nr_sects(p));
2966 }
2967 printk(" >\n");
2968 }
2969 @@ -401,7 +405,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
2970 static struct {
2971 unsigned char id;
2972 void (*parse)(struct parsed_partitions *, struct block_device *,
2973 - u32, u32, int);
2974 + sector_t, sector_t, int);
2975 } subtypes[] = {
2976 {FREEBSD_PARTITION, parse_freebsd},
2977 {NETBSD_PARTITION, parse_netbsd},
2978 @@ -415,7 +419,7 @@ static struct {
2979
2980 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
2981 {
2982 - int sector_size = bdev_logical_block_size(bdev) / 512;
2983 + sector_t sector_size = bdev_logical_block_size(bdev) / 512;
2984 Sector sect;
2985 unsigned char *data;
2986 struct partition *p;
2987 @@ -483,14 +487,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
2988
2989 state->next = 5;
2990 for (slot = 1 ; slot <= 4 ; slot++, p++) {
2991 - u32 start = START_SECT(p)*sector_size;
2992 - u32 size = NR_SECTS(p)*sector_size;
2993 + sector_t start = start_sect(p)*sector_size;
2994 + sector_t size = nr_sects(p)*sector_size;
2995 if (!size)
2996 continue;
2997 if (is_extended_partition(p)) {
2998 - /* prevent someone doing mkfs or mkswap on an
2999 - extended partition, but leave room for LILO */
3000 - put_partition(state, slot, start, size == 1 ? 1 : 2);
3001 + /*
3002 + * prevent someone doing mkfs or mkswap on an
3003 + * extended partition, but leave room for LILO
3004 + * FIXME: this uses one logical sector for > 512b
3005 + * sector, although it may not be enough/proper.
3006 + */
3007 + sector_t n = 2;
3008 + n = min(size, max(sector_size, n));
3009 + put_partition(state, slot, start, n);
3010 +
3011 printk(" <");
3012 parse_extended(state, bdev, start, size);
3013 printk(" >");
3014 @@ -513,7 +524,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3015 unsigned char id = SYS_IND(p);
3016 int n;
3017
3018 - if (!NR_SECTS(p))
3019 + if (!nr_sects(p))
3020 continue;
3021
3022 for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
3023 @@ -521,8 +532,8 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3024
3025 if (!subtypes[n].parse)
3026 continue;
3027 - subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
3028 - NR_SECTS(p)*sector_size, slot);
3029 + subtypes[n].parse(state, bdev, start_sect(p)*sector_size,
3030 + nr_sects(p)*sector_size, slot);
3031 }
3032 put_dev_sector(sect);
3033 return 1;
3034 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
3035 index 2534987..2ed79a9 100644
3036 --- a/fs/quota/dquot.c
3037 +++ b/fs/quota/dquot.c
3038 @@ -229,6 +229,8 @@ static struct hlist_head *dquot_hash;
3039 struct dqstats dqstats;
3040 EXPORT_SYMBOL(dqstats);
3041
3042 +static qsize_t inode_get_rsv_space(struct inode *inode);
3043 +
3044 static inline unsigned int
3045 hashfn(const struct super_block *sb, unsigned int id, int type)
3046 {
3047 @@ -820,11 +822,14 @@ static int dqinit_needed(struct inode *inode, int type)
3048 static void add_dquot_ref(struct super_block *sb, int type)
3049 {
3050 struct inode *inode, *old_inode = NULL;
3051 + int reserved = 0;
3052
3053 spin_lock(&inode_lock);
3054 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
3055 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
3056 continue;
3057 + if (unlikely(inode_get_rsv_space(inode) > 0))
3058 + reserved = 1;
3059 if (!atomic_read(&inode->i_writecount))
3060 continue;
3061 if (!dqinit_needed(inode, type))
3062 @@ -845,6 +850,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
3063 }
3064 spin_unlock(&inode_lock);
3065 iput(old_inode);
3066 +
3067 + if (reserved) {
3068 + printk(KERN_WARNING "VFS (%s): Writes happened before quota"
3069 + " was turned on thus quota information is probably "
3070 + "inconsistent. Please run quotacheck(8).\n", sb->s_id);
3071 + }
3072 }
3073
3074 /*
3075 @@ -958,10 +969,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
3076 /*
3077 * Claim reserved quota space
3078 */
3079 -static void dquot_claim_reserved_space(struct dquot *dquot,
3080 - qsize_t number)
3081 +static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
3082 {
3083 - WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
3084 + if (dquot->dq_dqb.dqb_rsvspace < number) {
3085 + WARN_ON_ONCE(1);
3086 + number = dquot->dq_dqb.dqb_rsvspace;
3087 + }
3088 dquot->dq_dqb.dqb_curspace += number;
3089 dquot->dq_dqb.dqb_rsvspace -= number;
3090 }
3091 @@ -969,7 +982,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot,
3092 static inline
3093 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
3094 {
3095 - dquot->dq_dqb.dqb_rsvspace -= number;
3096 + if (dquot->dq_dqb.dqb_rsvspace >= number)
3097 + dquot->dq_dqb.dqb_rsvspace -= number;
3098 + else {
3099 + WARN_ON_ONCE(1);
3100 + dquot->dq_dqb.dqb_rsvspace = 0;
3101 + }
3102 }
3103
3104 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
3105 @@ -1287,6 +1305,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
3106 return QUOTA_NL_BHARDBELOW;
3107 return QUOTA_NL_NOWARN;
3108 }
3109 +
3110 /*
3111 * Initialize quota pointers in inode
3112 * We do things in a bit complicated way but by that we avoid calling
3113 @@ -1298,6 +1317,7 @@ int dquot_initialize(struct inode *inode, int type)
3114 int cnt, ret = 0;
3115 struct dquot *got[MAXQUOTAS] = { NULL, NULL };
3116 struct super_block *sb = inode->i_sb;
3117 + qsize_t rsv;
3118
3119 /* First test before acquiring mutex - solves deadlocks when we
3120 * re-enter the quota code and are already holding the mutex */
3121 @@ -1332,6 +1352,13 @@ int dquot_initialize(struct inode *inode, int type)
3122 if (!inode->i_dquot[cnt]) {
3123 inode->i_dquot[cnt] = got[cnt];
3124 got[cnt] = NULL;
3125 + /*
3126 + * Make quota reservation system happy if someone
3127 + * did a write before quota was turned on
3128 + */
3129 + rsv = inode_get_rsv_space(inode);
3130 + if (unlikely(rsv))
3131 + dquot_resv_space(inode->i_dquot[cnt], rsv);
3132 }
3133 }
3134 out_err:
3135 @@ -1399,28 +1426,30 @@ static qsize_t *inode_reserved_space(struct inode * inode)
3136 return inode->i_sb->dq_op->get_reserved_space(inode);
3137 }
3138
3139 -static void inode_add_rsv_space(struct inode *inode, qsize_t number)
3140 +void inode_add_rsv_space(struct inode *inode, qsize_t number)
3141 {
3142 spin_lock(&inode->i_lock);
3143 *inode_reserved_space(inode) += number;
3144 spin_unlock(&inode->i_lock);
3145 }
3146 +EXPORT_SYMBOL(inode_add_rsv_space);
3147
3148 -
3149 -static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3150 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3151 {
3152 spin_lock(&inode->i_lock);
3153 *inode_reserved_space(inode) -= number;
3154 __inode_add_bytes(inode, number);
3155 spin_unlock(&inode->i_lock);
3156 }
3157 +EXPORT_SYMBOL(inode_claim_rsv_space);
3158
3159 -static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3160 +void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3161 {
3162 spin_lock(&inode->i_lock);
3163 *inode_reserved_space(inode) -= number;
3164 spin_unlock(&inode->i_lock);
3165 }
3166 +EXPORT_SYMBOL(inode_sub_rsv_space);
3167
3168 static qsize_t inode_get_rsv_space(struct inode *inode)
3169 {
3170 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
3171 index 12ff8c3..3e9bd6a 100644
3172 --- a/include/linux/decompress/mm.h
3173 +++ b/include/linux/decompress/mm.h
3174 @@ -14,11 +14,21 @@
3175
3176 /* Code active when included from pre-boot environment: */
3177
3178 +/*
3179 + * Some architectures want to ensure there is no local data in their
3180 + * pre-boot environment, so that data can arbitarily relocated (via
3181 + * GOT references). This is achieved by defining STATIC_RW_DATA to
3182 + * be null.
3183 + */
3184 +#ifndef STATIC_RW_DATA
3185 +#define STATIC_RW_DATA static
3186 +#endif
3187 +
3188 /* A trivial malloc implementation, adapted from
3189 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
3190 */
3191 -static unsigned long malloc_ptr;
3192 -static int malloc_count;
3193 +STATIC_RW_DATA unsigned long malloc_ptr;
3194 +STATIC_RW_DATA int malloc_count;
3195
3196 static void *malloc(int size)
3197 {
3198 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
3199 index 9bace4b..040b679 100644
3200 --- a/include/linux/hrtimer.h
3201 +++ b/include/linux/hrtimer.h
3202 @@ -162,10 +162,11 @@ struct hrtimer_clock_base {
3203 * @expires_next: absolute time of the next event which was scheduled
3204 * via clock_set_next_event()
3205 * @hres_active: State of high resolution mode
3206 - * @check_clocks: Indictator, when set evaluate time source and clock
3207 - * event devices whether high resolution mode can be
3208 - * activated.
3209 - * @nr_events: Total number of timer interrupt events
3210 + * @hang_detected: The last hrtimer interrupt detected a hang
3211 + * @nr_events: Total number of hrtimer interrupt events
3212 + * @nr_retries: Total number of hrtimer interrupt retries
3213 + * @nr_hangs: Total number of hrtimer interrupt hangs
3214 + * @max_hang_time: Maximum time spent in hrtimer_interrupt
3215 */
3216 struct hrtimer_cpu_base {
3217 spinlock_t lock;
3218 @@ -173,7 +174,11 @@ struct hrtimer_cpu_base {
3219 #ifdef CONFIG_HIGH_RES_TIMERS
3220 ktime_t expires_next;
3221 int hres_active;
3222 + int hang_detected;
3223 unsigned long nr_events;
3224 + unsigned long nr_retries;
3225 + unsigned long nr_hangs;
3226 + ktime_t max_hang_time;
3227 #endif
3228 };
3229
3230 diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
3231 index 5a9aae4..137130b 100644
3232 --- a/include/linux/if_tunnel.h
3233 +++ b/include/linux/if_tunnel.h
3234 @@ -2,6 +2,7 @@
3235 #define _IF_TUNNEL_H_
3236
3237 #include <linux/types.h>
3238 +#include <asm/byteorder.h>
3239
3240 #ifdef __KERNEL__
3241 #include <linux/ip.h>
3242 diff --git a/include/linux/lcm.h b/include/linux/lcm.h
3243 new file mode 100644
3244 index 0000000..7bf01d7
3245 --- /dev/null
3246 +++ b/include/linux/lcm.h
3247 @@ -0,0 +1,8 @@
3248 +#ifndef _LCM_H
3249 +#define _LCM_H
3250 +
3251 +#include <linux/compiler.h>
3252 +
3253 +unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
3254 +
3255 +#endif /* _LCM_H */
3256 diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
3257 index 3ebb231..a529d86 100644
3258 --- a/include/linux/quotaops.h
3259 +++ b/include/linux/quotaops.h
3260 @@ -26,6 +26,10 @@ static inline void writeout_quota_sb(struct super_block *sb, int type)
3261 sb->s_qcop->quota_sync(sb, type);
3262 }
3263
3264 +void inode_add_rsv_space(struct inode *inode, qsize_t number);
3265 +void inode_claim_rsv_space(struct inode *inode, qsize_t number);
3266 +void inode_sub_rsv_space(struct inode *inode, qsize_t number);
3267 +
3268 int dquot_initialize(struct inode *inode, int type);
3269 int dquot_drop(struct inode *inode);
3270 struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
3271 @@ -42,7 +46,6 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number);
3272 int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
3273 int dquot_claim_space(struct inode *inode, qsize_t number);
3274 void dquot_release_reserved_space(struct inode *inode, qsize_t number);
3275 -qsize_t dquot_get_reserved_space(struct inode *inode);
3276
3277 int dquot_free_space(struct inode *inode, qsize_t number);
3278 int dquot_free_inode(const struct inode *inode, qsize_t number);
3279 @@ -199,6 +202,8 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
3280 if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
3281 return 1;
3282 }
3283 + else
3284 + inode_add_rsv_space(inode, nr);
3285 return 0;
3286 }
3287
3288 @@ -221,7 +226,7 @@ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
3289 if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
3290 return 1;
3291 } else
3292 - inode_add_bytes(inode, nr);
3293 + inode_claim_rsv_space(inode, nr);
3294
3295 mark_inode_dirty(inode);
3296 return 0;
3297 @@ -235,6 +240,8 @@ void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
3298 {
3299 if (sb_any_quota_active(inode->i_sb))
3300 inode->i_sb->dq_op->release_rsv(inode, nr);
3301 + else
3302 + inode_sub_rsv_space(inode, nr);
3303 }
3304
3305 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
3306 diff --git a/include/linux/tty.h b/include/linux/tty.h
3307 index f0f43d0..e9c57e9 100644
3308 --- a/include/linux/tty.h
3309 +++ b/include/linux/tty.h
3310 @@ -68,6 +68,17 @@ struct tty_buffer {
3311 unsigned long data[0];
3312 };
3313
3314 +/*
3315 + * We default to dicing tty buffer allocations to this many characters
3316 + * in order to avoid multiple page allocations. We know the size of
3317 + * tty_buffer itself but it must also be taken into account that the
3318 + * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
3319 + * logic this must match
3320 + */
3321 +
3322 +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
3323 +
3324 +
3325 struct tty_bufhead {
3326 struct delayed_work work;
3327 spinlock_t lock;
3328 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3329 index 998c30f..c39ed07 100644
3330 --- a/include/net/mac80211.h
3331 +++ b/include/net/mac80211.h
3332 @@ -908,6 +908,9 @@ enum ieee80211_tkip_key_type {
3333 * @IEEE80211_HW_BEACON_FILTER:
3334 * Hardware supports dropping of irrelevant beacon frames to
3335 * avoid waking up cpu.
3336 + * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
3337 + * Hardware can provide ack status reports of Tx frames to
3338 + * the stack.
3339 */
3340 enum ieee80211_hw_flags {
3341 IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
3342 @@ -924,6 +927,7 @@ enum ieee80211_hw_flags {
3343 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
3344 IEEE80211_HW_MFP_CAPABLE = 1<<13,
3345 IEEE80211_HW_BEACON_FILTER = 1<<14,
3346 + IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<15,
3347 };
3348
3349 /**
3350 diff --git a/init/main.c b/init/main.c
3351 index 4051d75..bc109c7 100644
3352 --- a/init/main.c
3353 +++ b/init/main.c
3354 @@ -369,12 +369,6 @@ static void __init smp_init(void)
3355 {
3356 unsigned int cpu;
3357
3358 - /*
3359 - * Set up the current CPU as possible to migrate to.
3360 - * The other ones will be done by cpu_up/cpu_down()
3361 - */
3362 - set_cpu_active(smp_processor_id(), true);
3363 -
3364 /* FIXME: This should be done in userspace --RR */
3365 for_each_present_cpu(cpu) {
3366 if (num_online_cpus() >= setup_max_cpus)
3367 @@ -486,6 +480,7 @@ static void __init boot_cpu_init(void)
3368 int cpu = smp_processor_id();
3369 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
3370 set_cpu_online(cpu, true);
3371 + set_cpu_active(cpu, true);
3372 set_cpu_present(cpu, true);
3373 set_cpu_possible(cpu, true);
3374 }
3375 @@ -851,7 +846,7 @@ static int __init kernel_init(void * unused)
3376 /*
3377 * init can allocate pages on any node
3378 */
3379 - set_mems_allowed(node_possible_map);
3380 + set_mems_allowed(node_states[N_HIGH_MEMORY]);
3381 /*
3382 * init can run on any cpu.
3383 */
3384 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
3385 index ee9d697..d01bc14 100644
3386 --- a/ipc/mqueue.c
3387 +++ b/ipc/mqueue.c
3388 @@ -706,7 +706,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
3389 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
3390 if (IS_ERR(dentry)) {
3391 error = PTR_ERR(dentry);
3392 - goto out_err;
3393 + goto out_putfd;
3394 }
3395 mntget(ipc_ns->mq_mnt);
3396
3397 @@ -744,7 +744,6 @@ out:
3398 mntput(ipc_ns->mq_mnt);
3399 out_putfd:
3400 put_unused_fd(fd);
3401 -out_err:
3402 fd = error;
3403 out_upsem:
3404 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
3405 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
3406 index 39e5121..a81a910 100644
3407 --- a/kernel/cpuset.c
3408 +++ b/kernel/cpuset.c
3409 @@ -921,9 +921,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
3410 * call to guarantee_online_mems(), as we know no one is changing
3411 * our task's cpuset.
3412 *
3413 - * Hold callback_mutex around the two modifications of our tasks
3414 - * mems_allowed to synchronize with cpuset_mems_allowed().
3415 - *
3416 * While the mm_struct we are migrating is typically from some
3417 * other task, the task_struct mems_allowed that we are hacking
3418 * is for our current task, which must allocate new pages for that
3419 @@ -1392,11 +1389,10 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
3420
3421 if (cs == &top_cpuset) {
3422 cpumask_copy(cpus_attach, cpu_possible_mask);
3423 - to = node_possible_map;
3424 } else {
3425 guarantee_online_cpus(cs, cpus_attach);
3426 - guarantee_online_mems(cs, &to);
3427 }
3428 + guarantee_online_mems(cs, &to);
3429
3430 /* do per-task migration stuff possibly for each in the threadgroup */
3431 cpuset_attach_task(tsk, &to, cs);
3432 @@ -2091,15 +2087,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
3433 static int cpuset_track_online_nodes(struct notifier_block *self,
3434 unsigned long action, void *arg)
3435 {
3436 + nodemask_t oldmems;
3437 +
3438 cgroup_lock();
3439 switch (action) {
3440 case MEM_ONLINE:
3441 - case MEM_OFFLINE:
3442 + oldmems = top_cpuset.mems_allowed;
3443 mutex_lock(&callback_mutex);
3444 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
3445 mutex_unlock(&callback_mutex);
3446 - if (action == MEM_OFFLINE)
3447 - scan_for_empty_cpusets(&top_cpuset);
3448 + update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
3449 + break;
3450 + case MEM_OFFLINE:
3451 + /*
3452 + * needn't update top_cpuset.mems_allowed explicitly because
3453 + * scan_for_empty_cpusets() will update it.
3454 + */
3455 + scan_for_empty_cpusets(&top_cpuset);
3456 break;
3457 default:
3458 break;
3459 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
3460 index 3e1c36e..931a4d9 100644
3461 --- a/kernel/hrtimer.c
3462 +++ b/kernel/hrtimer.c
3463 @@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
3464 static int hrtimer_reprogram(struct hrtimer *timer,
3465 struct hrtimer_clock_base *base)
3466 {
3467 - ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
3468 + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
3469 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
3470 int res;
3471
3472 @@ -582,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
3473 if (expires.tv64 < 0)
3474 return -ETIME;
3475
3476 - if (expires.tv64 >= expires_next->tv64)
3477 + if (expires.tv64 >= cpu_base->expires_next.tv64)
3478 + return 0;
3479 +
3480 + /*
3481 + * If a hang was detected in the last timer interrupt then we
3482 + * do not schedule a timer which is earlier than the expiry
3483 + * which we enforced in the hang detection. We want the system
3484 + * to make progress.
3485 + */
3486 + if (cpu_base->hang_detected)
3487 return 0;
3488
3489 /*
3490 @@ -590,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
3491 */
3492 res = tick_program_event(expires, 0);
3493 if (!IS_ERR_VALUE(res))
3494 - *expires_next = expires;
3495 + cpu_base->expires_next = expires;
3496 return res;
3497 }
3498
3499 @@ -1217,29 +1226,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
3500
3501 #ifdef CONFIG_HIGH_RES_TIMERS
3502
3503 -static int force_clock_reprogram;
3504 -
3505 -/*
3506 - * After 5 iteration's attempts, we consider that hrtimer_interrupt()
3507 - * is hanging, which could happen with something that slows the interrupt
3508 - * such as the tracing. Then we force the clock reprogramming for each future
3509 - * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
3510 - * threshold that we will overwrite.
3511 - * The next tick event will be scheduled to 3 times we currently spend on
3512 - * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
3513 - * 1/4 of their time to process the hrtimer interrupts. This is enough to
3514 - * let it running without serious starvation.
3515 - */
3516 -
3517 -static inline void
3518 -hrtimer_interrupt_hanging(struct clock_event_device *dev,
3519 - ktime_t try_time)
3520 -{
3521 - force_clock_reprogram = 1;
3522 - dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
3523 - printk(KERN_WARNING "hrtimer: interrupt too slow, "
3524 - "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
3525 -}
3526 /*
3527 * High resolution timer interrupt
3528 * Called with interrupts disabled
3529 @@ -1248,21 +1234,15 @@ void hrtimer_interrupt(struct clock_event_device *dev)
3530 {
3531 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
3532 struct hrtimer_clock_base *base;
3533 - ktime_t expires_next, now;
3534 - int nr_retries = 0;
3535 - int i;
3536 + ktime_t expires_next, now, entry_time, delta;
3537 + int i, retries = 0;
3538
3539 BUG_ON(!cpu_base->hres_active);
3540 cpu_base->nr_events++;
3541 dev->next_event.tv64 = KTIME_MAX;
3542
3543 - retry:
3544 - /* 5 retries is enough to notice a hang */
3545 - if (!(++nr_retries % 5))
3546 - hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
3547 -
3548 - now = ktime_get();
3549 -
3550 + entry_time = now = ktime_get();
3551 +retry:
3552 expires_next.tv64 = KTIME_MAX;
3553
3554 spin_lock(&cpu_base->lock);
3555 @@ -1324,10 +1304,48 @@ void hrtimer_interrupt(struct clock_event_device *dev)
3556 spin_unlock(&cpu_base->lock);
3557
3558 /* Reprogramming necessary ? */
3559 - if (expires_next.tv64 != KTIME_MAX) {
3560 - if (tick_program_event(expires_next, force_clock_reprogram))
3561 - goto retry;
3562 + if (expires_next.tv64 == KTIME_MAX ||
3563 + !tick_program_event(expires_next, 0)) {
3564 + cpu_base->hang_detected = 0;
3565 + return;
3566 }
3567 +
3568 + /*
3569 + * The next timer was already expired due to:
3570 + * - tracing
3571 + * - long lasting callbacks
3572 + * - being scheduled away when running in a VM
3573 + *
3574 + * We need to prevent that we loop forever in the hrtimer
3575 + * interrupt routine. We give it 3 attempts to avoid
3576 + * overreacting on some spurious event.
3577 + */
3578 + now = ktime_get();
3579 + cpu_base->nr_retries++;
3580 + if (++retries < 3)
3581 + goto retry;
3582 + /*
3583 + * Give the system a chance to do something else than looping
3584 + * here. We stored the entry time, so we know exactly how long
3585 + * we spent here. We schedule the next event this amount of
3586 + * time away.
3587 + */
3588 + cpu_base->nr_hangs++;
3589 + cpu_base->hang_detected = 1;
3590 + delta = ktime_sub(now, entry_time);
3591 + if (delta.tv64 > cpu_base->max_hang_time.tv64)
3592 + cpu_base->max_hang_time = delta;
3593 + /*
3594 + * Limit it to a sensible value as we enforce a longer
3595 + * delay. Give the CPU at least 100ms to catch up.
3596 + */
3597 + if (delta.tv64 > 100 * NSEC_PER_MSEC)
3598 + expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
3599 + else
3600 + expires_next = ktime_add(now, delta);
3601 + tick_program_event(expires_next, 1);
3602 + printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
3603 + ktime_to_ns(delta));
3604 }
3605
3606 /*
3607 diff --git a/kernel/kthread.c b/kernel/kthread.c
3608 index ab7ae57..84027cf 100644
3609 --- a/kernel/kthread.c
3610 +++ b/kernel/kthread.c
3611 @@ -196,7 +196,7 @@ int kthreadd(void *unused)
3612 set_task_comm(tsk, "kthreadd");
3613 ignore_signals(tsk);
3614 set_cpus_allowed_ptr(tsk, cpu_all_mask);
3615 - set_mems_allowed(node_possible_map);
3616 + set_mems_allowed(node_states[N_HIGH_MEMORY]);
3617
3618 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
3619
3620 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
3621 index 413d101..447e8db 100644
3622 --- a/kernel/perf_event.c
3623 +++ b/kernel/perf_event.c
3624 @@ -4981,12 +4981,22 @@ int perf_event_init_task(struct task_struct *child)
3625 return ret;
3626 }
3627
3628 +static void __init perf_event_init_all_cpus(void)
3629 +{
3630 + int cpu;
3631 + struct perf_cpu_context *cpuctx;
3632 +
3633 + for_each_possible_cpu(cpu) {
3634 + cpuctx = &per_cpu(perf_cpu_context, cpu);
3635 + __perf_event_init_context(&cpuctx->ctx, NULL);
3636 + }
3637 +}
3638 +
3639 static void __cpuinit perf_event_init_cpu(int cpu)
3640 {
3641 struct perf_cpu_context *cpuctx;
3642
3643 cpuctx = &per_cpu(perf_cpu_context, cpu);
3644 - __perf_event_init_context(&cpuctx->ctx, NULL);
3645
3646 spin_lock(&perf_resource_lock);
3647 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
3648 @@ -5057,6 +5067,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
3649
3650 void __init perf_event_init(void)
3651 {
3652 + perf_event_init_all_cpus();
3653 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3654 (void *)(long)smp_processor_id());
3655 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
3656 diff --git a/kernel/sched.c b/kernel/sched.c
3657 index 380e1fa..ed61192 100644
3658 --- a/kernel/sched.c
3659 +++ b/kernel/sched.c
3660 @@ -3402,6 +3402,7 @@ struct sd_lb_stats {
3661 unsigned long max_load;
3662 unsigned long busiest_load_per_task;
3663 unsigned long busiest_nr_running;
3664 + unsigned long busiest_group_capacity;
3665
3666 int group_imb; /* Is there imbalance in this sd */
3667 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3668 @@ -3721,8 +3722,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3669 unsigned long load, max_cpu_load, min_cpu_load;
3670 int i;
3671 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3672 - unsigned long sum_avg_load_per_task;
3673 - unsigned long avg_load_per_task;
3674 + unsigned long avg_load_per_task = 0;
3675
3676 if (local_group) {
3677 balance_cpu = group_first_cpu(group);
3678 @@ -3731,7 +3731,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3679 }
3680
3681 /* Tally up the load of all CPUs in the group */
3682 - sum_avg_load_per_task = avg_load_per_task = 0;
3683 max_cpu_load = 0;
3684 min_cpu_load = ~0UL;
3685
3686 @@ -3761,7 +3760,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3687 sgs->sum_nr_running += rq->nr_running;
3688 sgs->sum_weighted_load += weighted_cpuload(i);
3689
3690 - sum_avg_load_per_task += cpu_avg_load_per_task(i);
3691 }
3692
3693 /*
3694 @@ -3779,7 +3777,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3695 /* Adjust by relative CPU power of the group */
3696 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
3697
3698 -
3699 /*
3700 * Consider the group unbalanced when the imbalance is larger
3701 * than the average weight of two tasks.
3702 @@ -3789,8 +3786,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3703 * normalized nr_running number somewhere that negates
3704 * the hierarchy?
3705 */
3706 - avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
3707 - group->cpu_power;
3708 + if (sgs->sum_nr_running)
3709 + avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
3710
3711 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3712 sgs->group_imb = 1;
3713 @@ -3859,6 +3856,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3714 sds->max_load = sgs.avg_load;
3715 sds->busiest = group;
3716 sds->busiest_nr_running = sgs.sum_nr_running;
3717 + sds->busiest_group_capacity = sgs.group_capacity;
3718 sds->busiest_load_per_task = sgs.sum_weighted_load;
3719 sds->group_imb = sgs.group_imb;
3720 }
3721 @@ -3881,6 +3879,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3722 {
3723 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3724 unsigned int imbn = 2;
3725 + unsigned long scaled_busy_load_per_task;
3726
3727 if (sds->this_nr_running) {
3728 sds->this_load_per_task /= sds->this_nr_running;
3729 @@ -3891,8 +3890,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3730 sds->this_load_per_task =
3731 cpu_avg_load_per_task(this_cpu);
3732
3733 - if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3734 - sds->busiest_load_per_task * imbn) {
3735 + scaled_busy_load_per_task = sds->busiest_load_per_task
3736 + * SCHED_LOAD_SCALE;
3737 + scaled_busy_load_per_task /= sds->busiest->cpu_power;
3738 +
3739 + if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
3740 + (scaled_busy_load_per_task * imbn)) {
3741 *imbalance = sds->busiest_load_per_task;
3742 return;
3743 }
3744 @@ -3943,7 +3946,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3745 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3746 unsigned long *imbalance)
3747 {
3748 - unsigned long max_pull;
3749 + unsigned long max_pull, load_above_capacity = ~0UL;
3750 +
3751 + sds->busiest_load_per_task /= sds->busiest_nr_running;
3752 + if (sds->group_imb) {
3753 + sds->busiest_load_per_task =
3754 + min(sds->busiest_load_per_task, sds->avg_load);
3755 + }
3756 +
3757 /*
3758 * In the presence of smp nice balancing, certain scenarios can have
3759 * max load less than avg load(as we skip the groups at or below
3760 @@ -3954,9 +3964,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3761 return fix_small_imbalance(sds, this_cpu, imbalance);
3762 }
3763
3764 - /* Don't want to pull so many tasks that a group would go idle */
3765 - max_pull = min(sds->max_load - sds->avg_load,
3766 - sds->max_load - sds->busiest_load_per_task);
3767 + if (!sds->group_imb) {
3768 + /*
3769 + * Don't want to pull so many tasks that a group would go idle.
3770 + */
3771 + load_above_capacity = (sds->busiest_nr_running -
3772 + sds->busiest_group_capacity);
3773 +
3774 + load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
3775 +
3776 + load_above_capacity /= sds->busiest->cpu_power;
3777 + }
3778 +
3779 + /*
3780 + * We're trying to get all the cpus to the average_load, so we don't
3781 + * want to push ourselves above the average load, nor do we wish to
3782 + * reduce the max loaded cpu below the average load. At the same time,
3783 + * we also don't want to reduce the group load below the group capacity
3784 + * (so that we can implement power-savings policies etc). Thus we look
3785 + * for the minimum possible imbalance.
3786 + * Be careful of negative numbers as they'll appear as very large values
3787 + * with unsigned longs.
3788 + */
3789 + max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
3790
3791 /* How much load to actually move to equalise the imbalance */
3792 *imbalance = min(max_pull * sds->busiest->cpu_power,
3793 @@ -4024,7 +4054,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3794 * 4) This group is more busy than the avg busieness at this
3795 * sched_domain.
3796 * 5) The imbalance is within the specified limit.
3797 - * 6) Any rebalance would lead to ping-pong
3798 */
3799 if (balance && !(*balance))
3800 goto ret;
3801 @@ -4043,25 +4072,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3802 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3803 goto out_balanced;
3804
3805 - sds.busiest_load_per_task /= sds.busiest_nr_running;
3806 - if (sds.group_imb)
3807 - sds.busiest_load_per_task =
3808 - min(sds.busiest_load_per_task, sds.avg_load);
3809 -
3810 - /*
3811 - * We're trying to get all the cpus to the average_load, so we don't
3812 - * want to push ourselves above the average load, nor do we wish to
3813 - * reduce the max loaded cpu below the average load, as either of these
3814 - * actions would just result in more rebalancing later, and ping-pong
3815 - * tasks around. Thus we look for the minimum possible imbalance.
3816 - * Negative imbalances (*we* are more loaded than anyone else) will
3817 - * be counted as no imbalance for these purposes -- we can't fix that
3818 - * by pulling tasks to us. Be careful of negative numbers as they'll
3819 - * appear as very large values with unsigned longs.
3820 - */
3821 - if (sds.max_load <= sds.busiest_load_per_task)
3822 - goto out_balanced;
3823 -
3824 /* Looks like there is an imbalance. Compute it */
3825 calculate_imbalance(&sds, this_cpu, imbalance);
3826 return sds.busiest;
3827 diff --git a/kernel/softlockup.c b/kernel/softlockup.c
3828 index 81324d1..d2080ad 100644
3829 --- a/kernel/softlockup.c
3830 +++ b/kernel/softlockup.c
3831 @@ -140,11 +140,11 @@ void softlockup_tick(void)
3832 * Wake up the high-prio watchdog task twice per
3833 * threshold timespan.
3834 */
3835 - if (now > touch_timestamp + softlockup_thresh/2)
3836 + if (time_after(now - softlockup_thresh/2, touch_timestamp))
3837 wake_up_process(per_cpu(watchdog_task, this_cpu));
3838
3839 /* Warn about unreasonable delays: */
3840 - if (now <= (touch_timestamp + softlockup_thresh))
3841 + if (time_before_eq(now - softlockup_thresh, touch_timestamp))
3842 return;
3843
3844 per_cpu(print_timestamp, this_cpu) = touch_timestamp;
3845 diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
3846 index ecc7adb..f8b0f96 100644
3847 --- a/kernel/time/clocksource.c
3848 +++ b/kernel/time/clocksource.c
3849 @@ -515,6 +515,10 @@ static inline void clocksource_select(void) { }
3850 */
3851 static int __init clocksource_done_booting(void)
3852 {
3853 + mutex_lock(&clocksource_mutex);
3854 + curr_clocksource = clocksource_default_clock();
3855 + mutex_unlock(&clocksource_mutex);
3856 +
3857 finished_booting = 1;
3858
3859 /*
3860 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
3861 index 1b5b7aa..54c0dda 100644
3862 --- a/kernel/time/timer_list.c
3863 +++ b/kernel/time/timer_list.c
3864 @@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
3865 P_ns(expires_next);
3866 P(hres_active);
3867 P(nr_events);
3868 + P(nr_retries);
3869 + P(nr_hangs);
3870 + P_ns(max_hang_time);
3871 #endif
3872 #undef P
3873 #undef P_ns
3874 @@ -252,7 +255,7 @@ static int timer_list_show(struct seq_file *m, void *v)
3875 u64 now = ktime_to_ns(ktime_get());
3876 int cpu;
3877
3878 - SEQ_printf(m, "Timer List Version: v0.4\n");
3879 + SEQ_printf(m, "Timer List Version: v0.5\n");
3880 SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
3881 SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
3882
3883 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3884 index 6dc4e5e..0cccb6c 100644
3885 --- a/kernel/trace/ftrace.c
3886 +++ b/kernel/trace/ftrace.c
3887 @@ -3258,6 +3258,7 @@ void ftrace_graph_init_task(struct task_struct *t)
3888 {
3889 /* Make sure we do not use the parent ret_stack */
3890 t->ret_stack = NULL;
3891 + t->curr_ret_stack = -1;
3892
3893 if (ftrace_graph_active) {
3894 struct ftrace_ret_stack *ret_stack;
3895 @@ -3267,7 +3268,6 @@ void ftrace_graph_init_task(struct task_struct *t)
3896 GFP_KERNEL);
3897 if (!ret_stack)
3898 return;
3899 - t->curr_ret_stack = -1;
3900 atomic_set(&t->tracing_graph_pause, 0);
3901 atomic_set(&t->trace_overrun, 0);
3902 t->ftrace_timestamp = 0;
3903 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3904 index 5dd017f..c88b21c 100644
3905 --- a/kernel/trace/ring_buffer.c
3906 +++ b/kernel/trace/ring_buffer.c
3907 @@ -2237,12 +2237,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
3908 if (ring_buffer_flags != RB_BUFFERS_ON)
3909 return NULL;
3910
3911 - if (atomic_read(&buffer->record_disabled))
3912 - return NULL;
3913 -
3914 /* If we are tracing schedule, we don't want to recurse */
3915 resched = ftrace_preempt_disable();
3916
3917 + if (atomic_read(&buffer->record_disabled))
3918 + goto out_nocheck;
3919 +
3920 if (trace_recursive_lock())
3921 goto out_nocheck;
3922
3923 @@ -2474,11 +2474,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
3924 if (ring_buffer_flags != RB_BUFFERS_ON)
3925 return -EBUSY;
3926
3927 - if (atomic_read(&buffer->record_disabled))
3928 - return -EBUSY;
3929 -
3930 resched = ftrace_preempt_disable();
3931
3932 + if (atomic_read(&buffer->record_disabled))
3933 + goto out;
3934 +
3935 cpu = raw_smp_processor_id();
3936
3937 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3938 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3939 index b20d3ec..3cfb60b 100644
3940 --- a/kernel/trace/trace.c
3941 +++ b/kernel/trace/trace.c
3942 @@ -748,10 +748,10 @@ out:
3943 mutex_unlock(&trace_types_lock);
3944 }
3945
3946 -static void __tracing_reset(struct trace_array *tr, int cpu)
3947 +static void __tracing_reset(struct ring_buffer *buffer, int cpu)
3948 {
3949 ftrace_disable_cpu();
3950 - ring_buffer_reset_cpu(tr->buffer, cpu);
3951 + ring_buffer_reset_cpu(buffer, cpu);
3952 ftrace_enable_cpu();
3953 }
3954
3955 @@ -763,7 +763,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
3956
3957 /* Make sure all commits have finished */
3958 synchronize_sched();
3959 - __tracing_reset(tr, cpu);
3960 + __tracing_reset(buffer, cpu);
3961
3962 ring_buffer_record_enable(buffer);
3963 }
3964 @@ -781,7 +781,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
3965 tr->time_start = ftrace_now(tr->cpu);
3966
3967 for_each_online_cpu(cpu)
3968 - __tracing_reset(tr, cpu);
3969 + __tracing_reset(buffer, cpu);
3970
3971 ring_buffer_record_enable(buffer);
3972 }
3973 @@ -858,6 +858,8 @@ void tracing_start(void)
3974 goto out;
3975 }
3976
3977 + /* Prevent the buffers from switching */
3978 + __raw_spin_lock(&ftrace_max_lock);
3979
3980 buffer = global_trace.buffer;
3981 if (buffer)
3982 @@ -867,6 +869,8 @@ void tracing_start(void)
3983 if (buffer)
3984 ring_buffer_record_enable(buffer);
3985
3986 + __raw_spin_unlock(&ftrace_max_lock);
3987 +
3988 ftrace_start();
3989 out:
3990 spin_unlock_irqrestore(&tracing_start_lock, flags);
3991 @@ -888,6 +892,9 @@ void tracing_stop(void)
3992 if (trace_stop_count++)
3993 goto out;
3994
3995 + /* Prevent the buffers from switching */
3996 + __raw_spin_lock(&ftrace_max_lock);
3997 +
3998 buffer = global_trace.buffer;
3999 if (buffer)
4000 ring_buffer_record_disable(buffer);
4001 @@ -896,6 +903,8 @@ void tracing_stop(void)
4002 if (buffer)
4003 ring_buffer_record_disable(buffer);
4004
4005 + __raw_spin_unlock(&ftrace_max_lock);
4006 +
4007 out:
4008 spin_unlock_irqrestore(&tracing_start_lock, flags);
4009 }
4010 @@ -1162,6 +1171,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
4011 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
4012 return;
4013
4014 + /*
4015 + * NMIs can not handle page faults, even with fix ups.
4016 + * The save user stack can (and often does) fault.
4017 + */
4018 + if (unlikely(in_nmi()))
4019 + return;
4020 +
4021 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
4022 sizeof(*entry), flags, pc);
4023 if (!event)
4024 diff --git a/lib/Makefile b/lib/Makefile
4025 index 2e78277..452f188 100644
4026 --- a/lib/Makefile
4027 +++ b/lib/Makefile
4028 @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
4029
4030 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
4031 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
4032 - string_helpers.o gcd.o
4033 + string_helpers.o gcd.o lcm.o
4034
4035 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
4036 CFLAGS_kobject.o += -DDEBUG
4037 diff --git a/lib/lcm.c b/lib/lcm.c
4038 new file mode 100644
4039 index 0000000..157cd88
4040 --- /dev/null
4041 +++ b/lib/lcm.c
4042 @@ -0,0 +1,15 @@
4043 +#include <linux/kernel.h>
4044 +#include <linux/gcd.h>
4045 +#include <linux/module.h>
4046 +
4047 +/* Lowest common multiple */
4048 +unsigned long lcm(unsigned long a, unsigned long b)
4049 +{
4050 + if (a && b)
4051 + return (a * b) / gcd(a, b);
4052 + else if (b)
4053 + return b;
4054 +
4055 + return a;
4056 +}
4057 +EXPORT_SYMBOL_GPL(lcm);
4058 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4059 index 4545d59..f29d8d7 100644
4060 --- a/mm/mempolicy.c
4061 +++ b/mm/mempolicy.c
4062 @@ -2122,8 +2122,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4063 char *rest = nodelist;
4064 while (isdigit(*rest))
4065 rest++;
4066 - if (!*rest)
4067 - err = 0;
4068 + if (*rest)
4069 + goto out;
4070 }
4071 break;
4072 case MPOL_INTERLEAVE:
4073 @@ -2132,7 +2132,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4074 */
4075 if (!nodelist)
4076 nodes = node_states[N_HIGH_MEMORY];
4077 - err = 0;
4078 break;
4079 case MPOL_LOCAL:
4080 /*
4081 @@ -2142,11 +2141,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4082 goto out;
4083 mode = MPOL_PREFERRED;
4084 break;
4085 -
4086 - /*
4087 - * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
4088 - * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
4089 - */
4090 + case MPOL_DEFAULT:
4091 + /*
4092 + * Insist on a empty nodelist
4093 + */
4094 + if (!nodelist)
4095 + err = 0;
4096 + goto out;
4097 + case MPOL_BIND:
4098 + /*
4099 + * Insist on a nodelist
4100 + */
4101 + if (!nodelist)
4102 + goto out;
4103 }
4104
4105 mode_flags = 0;
4106 @@ -2160,13 +2167,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4107 else if (!strcmp(flags, "relative"))
4108 mode_flags |= MPOL_F_RELATIVE_NODES;
4109 else
4110 - err = 1;
4111 + goto out;
4112 }
4113
4114 new = mpol_new(mode, mode_flags, &nodes);
4115 if (IS_ERR(new))
4116 - err = 1;
4117 - else {
4118 + goto out;
4119 +
4120 + {
4121 int ret;
4122 NODEMASK_SCRATCH(scratch);
4123 if (scratch) {
4124 @@ -2177,13 +2185,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4125 ret = -ENOMEM;
4126 NODEMASK_SCRATCH_FREE(scratch);
4127 if (ret) {
4128 - err = 1;
4129 mpol_put(new);
4130 - } else if (no_context) {
4131 - /* save for contextualization */
4132 - new->w.user_nodemask = nodes;
4133 + goto out;
4134 }
4135 }
4136 + err = 0;
4137 + if (no_context) {
4138 + /* save for contextualization */
4139 + new->w.user_nodemask = nodes;
4140 + }
4141
4142 out:
4143 /* Restore string for error message */
4144 diff --git a/mm/readahead.c b/mm/readahead.c
4145 index 8f40b47..337b20e 100644
4146 --- a/mm/readahead.c
4147 +++ b/mm/readahead.c
4148 @@ -553,5 +553,17 @@ page_cache_async_readahead(struct address_space *mapping,
4149
4150 /* do read-ahead */
4151 ondemand_readahead(mapping, ra, filp, true, offset, req_size);
4152 +
4153 +#ifdef CONFIG_BLOCK
4154 + /*
4155 + * Normally the current page is !uptodate and lock_page() will be
4156 + * immediately called to implicitly unplug the device. However this
4157 + * is not always true for RAID conifgurations, where data arrives
4158 + * not strictly in their submission order. In this case we need to
4159 + * explicitly kick off the IO.
4160 + */
4161 + if (PageUptodate(page))
4162 + blk_run_backing_dev(mapping->backing_dev_info, NULL);
4163 +#endif
4164 }
4165 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
4166 diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
4167 index 947f8bb..8d1c4a9 100644
4168 --- a/net/bluetooth/l2cap.c
4169 +++ b/net/bluetooth/l2cap.c
4170 @@ -2813,6 +2813,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
4171 int len = cmd->len - sizeof(*rsp);
4172 char req[64];
4173
4174 + if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4175 + l2cap_send_disconn_req(conn, sk);
4176 + goto done;
4177 + }
4178 +
4179 /* throw out any old stored conf requests */
4180 result = L2CAP_CONF_SUCCESS;
4181 len = l2cap_parse_conf_rsp(sk, rsp->data,
4182 @@ -3885,16 +3890,24 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
4183 struct sock *sk;
4184 struct hlist_node *node;
4185 char *str = buf;
4186 + int size = PAGE_SIZE;
4187
4188 read_lock_bh(&l2cap_sk_list.lock);
4189
4190 sk_for_each(sk, node, &l2cap_sk_list.head) {
4191 struct l2cap_pinfo *pi = l2cap_pi(sk);
4192 + int len;
4193
4194 - str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4195 + len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4196 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4197 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
4198 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
4199 +
4200 + size -= len;
4201 + if (size <= 0)
4202 + break;
4203 +
4204 + str += len;
4205 }
4206
4207 read_unlock_bh(&l2cap_sk_list.lock);
4208 diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
4209 index 25692bc..ef3abf2 100644
4210 --- a/net/bluetooth/rfcomm/core.c
4211 +++ b/net/bluetooth/rfcomm/core.c
4212 @@ -251,7 +251,6 @@ static void rfcomm_session_timeout(unsigned long arg)
4213 BT_DBG("session %p state %ld", s, s->state);
4214
4215 set_bit(RFCOMM_TIMED_OUT, &s->flags);
4216 - rfcomm_session_put(s);
4217 rfcomm_schedule(RFCOMM_SCHED_TIMEO);
4218 }
4219
4220 @@ -1917,6 +1916,7 @@ static inline void rfcomm_process_sessions(void)
4221 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
4222 s->state = BT_DISCONN;
4223 rfcomm_send_disc(s, 0);
4224 + rfcomm_session_put(s);
4225 continue;
4226 }
4227
4228 @@ -2096,6 +2096,7 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4229 struct rfcomm_session *s;
4230 struct list_head *pp, *p;
4231 char *str = buf;
4232 + int size = PAGE_SIZE;
4233
4234 rfcomm_lock();
4235
4236 @@ -2104,11 +2105,21 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4237 list_for_each(pp, &s->dlcs) {
4238 struct sock *sk = s->sock->sk;
4239 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
4240 + int len;
4241
4242 - str += sprintf(str, "%s %s %ld %d %d %d %d\n",
4243 + len = snprintf(str, size, "%s %s %ld %d %d %d %d\n",
4244 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4245 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
4246 +
4247 + size -= len;
4248 + if (size <= 0)
4249 + break;
4250 +
4251 + str += len;
4252 }
4253 +
4254 + if (size <= 0)
4255 + break;
4256 }
4257
4258 rfcomm_unlock();
4259 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
4260 index 8a20aaf..30a3649 100644
4261 --- a/net/bluetooth/rfcomm/sock.c
4262 +++ b/net/bluetooth/rfcomm/sock.c
4263 @@ -1065,13 +1065,22 @@ static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf)
4264 struct sock *sk;
4265 struct hlist_node *node;
4266 char *str = buf;
4267 + int size = PAGE_SIZE;
4268
4269 read_lock_bh(&rfcomm_sk_list.lock);
4270
4271 sk_for_each(sk, node, &rfcomm_sk_list.head) {
4272 - str += sprintf(str, "%s %s %d %d\n",
4273 + int len;
4274 +
4275 + len = snprintf(str, size, "%s %s %d %d\n",
4276 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4277 sk->sk_state, rfcomm_pi(sk)->channel);
4278 +
4279 + size -= len;
4280 + if (size <= 0)
4281 + break;
4282 +
4283 + str += len;
4284 }
4285
4286 read_unlock_bh(&rfcomm_sk_list.lock);
4287 diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
4288 index 77f4153..5c0685e 100644
4289 --- a/net/bluetooth/sco.c
4290 +++ b/net/bluetooth/sco.c
4291 @@ -957,13 +957,22 @@ static ssize_t sco_sysfs_show(struct class *dev, char *buf)
4292 struct sock *sk;
4293 struct hlist_node *node;
4294 char *str = buf;
4295 + int size = PAGE_SIZE;
4296
4297 read_lock_bh(&sco_sk_list.lock);
4298
4299 sk_for_each(sk, node, &sco_sk_list.head) {
4300 - str += sprintf(str, "%s %s %d\n",
4301 + int len;
4302 +
4303 + len = snprintf(str, size, "%s %s %d\n",
4304 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4305 sk->sk_state);
4306 +
4307 + size -= len;
4308 + if (size <= 0)
4309 + break;
4310 +
4311 + str += len;
4312 }
4313
4314 read_unlock_bh(&sco_sk_list.lock);
4315 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
4316 index 5a46164..ca62bfe 100644
4317 --- a/net/mac80211/ieee80211_i.h
4318 +++ b/net/mac80211/ieee80211_i.h
4319 @@ -264,6 +264,7 @@ enum ieee80211_sta_flags {
4320 IEEE80211_STA_DISABLE_11N = BIT(4),
4321 IEEE80211_STA_CSA_RECEIVED = BIT(5),
4322 IEEE80211_STA_MFP_ENABLED = BIT(6),
4323 + IEEE80211_STA_NULLFUNC_ACKED = BIT(7),
4324 };
4325
4326 /* flags for MLME request */
4327 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
4328 index 797f539..19fbd25 100644
4329 --- a/net/mac80211/main.c
4330 +++ b/net/mac80211/main.c
4331 @@ -441,6 +441,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
4332 rcu_read_lock();
4333
4334 sband = local->hw.wiphy->bands[info->band];
4335 + fc = hdr->frame_control;
4336
4337 sta = sta_info_get(local, hdr->addr1);
4338
4339 @@ -522,6 +523,20 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
4340 local->dot11FailedCount++;
4341 }
4342
4343 + if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
4344 + (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
4345 + !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
4346 + local->ps_sdata && !(local->scanning)) {
4347 + if (info->flags & IEEE80211_TX_STAT_ACK) {
4348 + local->ps_sdata->u.mgd.flags |=
4349 + IEEE80211_STA_NULLFUNC_ACKED;
4350 + ieee80211_queue_work(&local->hw,
4351 + &local->dynamic_ps_enable_work);
4352 + } else
4353 + mod_timer(&local->dynamic_ps_timer, jiffies +
4354 + msecs_to_jiffies(10));
4355 + }
4356 +
4357 /* this was a transmitted frame, but now we want to reuse it */
4358 skb_orphan(skb);
4359
4360 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4361 index 6cae295..4a15df1 100644
4362 --- a/net/mac80211/mlme.c
4363 +++ b/net/mac80211/mlme.c
4364 @@ -650,8 +650,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
4365 } else {
4366 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
4367 ieee80211_send_nullfunc(local, sdata, 1);
4368 - conf->flags |= IEEE80211_CONF_PS;
4369 - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4370 +
4371 + if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
4372 + conf->flags |= IEEE80211_CONF_PS;
4373 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4374 + }
4375 }
4376 }
4377
4378 @@ -742,6 +745,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
4379 container_of(work, struct ieee80211_local,
4380 dynamic_ps_enable_work);
4381 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
4382 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
4383
4384 /* can only happen when PS was just disabled anyway */
4385 if (!sdata)
4386 @@ -750,11 +754,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
4387 if (local->hw.conf.flags & IEEE80211_CONF_PS)
4388 return;
4389
4390 - if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
4391 + if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
4392 + (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
4393 ieee80211_send_nullfunc(local, sdata, 1);
4394
4395 - local->hw.conf.flags |= IEEE80211_CONF_PS;
4396 - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4397 + if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
4398 + (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
4399 + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
4400 + local->hw.conf.flags |= IEEE80211_CONF_PS;
4401 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
4402 + }
4403 }
4404
4405 void ieee80211_dynamic_ps_timer(unsigned long data)
4406 @@ -2458,6 +2467,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
4407 list_add(&wk->list, &ifmgd->work_list);
4408
4409 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
4410 + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
4411
4412 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
4413 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
4414 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4415 index 16c6cdc..538a7d7 100644
4416 --- a/net/mac80211/rx.c
4417 +++ b/net/mac80211/rx.c
4418 @@ -1590,6 +1590,7 @@ static ieee80211_rx_result debug_noinline
4419 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
4420 {
4421 struct net_device *dev = rx->dev;
4422 + struct ieee80211_local *local = rx->local;
4423 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
4424 __le16 fc = hdr->frame_control;
4425 int err;
4426 @@ -1612,6 +1613,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
4427 dev->stats.rx_packets++;
4428 dev->stats.rx_bytes += rx->skb->len;
4429
4430 + if (ieee80211_is_data(hdr->frame_control) &&
4431 + !is_multicast_ether_addr(hdr->addr1) &&
4432 + local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
4433 + mod_timer(&local->dynamic_ps_timer, jiffies +
4434 + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
4435 + }
4436 +
4437 ieee80211_deliver_skb(rx);
4438
4439 return RX_QUEUED;
4440 diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
4441 index 1a3b650..2f181aa 100644
4442 --- a/net/netfilter/xt_recent.c
4443 +++ b/net/netfilter/xt_recent.c
4444 @@ -260,7 +260,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
4445 for (i = 0; i < e->nstamps; i++) {
4446 if (info->seconds && time_after(time, e->stamps[i]))
4447 continue;
4448 - if (info->hit_count && ++hits >= info->hit_count) {
4449 + if (!info->hit_count || ++hits >= info->hit_count) {
4450 ret = !ret;
4451 break;
4452 }
4453 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
4454 index 9c5a19d..2370ab4 100644
4455 --- a/net/sunrpc/auth_gss/auth_gss.c
4456 +++ b/net/sunrpc/auth_gss/auth_gss.c
4457 @@ -1273,9 +1273,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
4458 rqstp->rq_release_snd_buf = priv_release_snd_buf;
4459 return 0;
4460 out_free:
4461 - for (i--; i >= 0; i--) {
4462 - __free_page(rqstp->rq_enc_pages[i]);
4463 - }
4464 + rqstp->rq_enc_pages_num = i;
4465 + priv_release_snd_buf(rqstp);
4466 out:
4467 return -EAGAIN;
4468 }
4469 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
4470 index 49278f8..27a2378 100644
4471 --- a/net/sunrpc/rpc_pipe.c
4472 +++ b/net/sunrpc/rpc_pipe.c
4473 @@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
4474 struct dentry *dentry;
4475
4476 dentry = __rpc_lookup_create(parent, name);
4477 + if (IS_ERR(dentry))
4478 + return dentry;
4479 if (dentry->d_inode == NULL)
4480 return dentry;
4481 dput(dentry);
4482 diff --git a/security/min_addr.c b/security/min_addr.c
4483 index c844eed..fc43c9d 100644
4484 --- a/security/min_addr.c
4485 +++ b/security/min_addr.c
4486 @@ -33,6 +33,9 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
4487 {
4488 int ret;
4489
4490 + if (!capable(CAP_SYS_RAWIO))
4491 + return -EPERM;
4492 +
4493 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
4494
4495 update_mmap_min_addr();
4496 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
4497 index 67ca440..e7efcef 100644
4498 --- a/sound/pci/ac97/ac97_patch.c
4499 +++ b/sound/pci/ac97/ac97_patch.c
4500 @@ -1867,12 +1867,14 @@ static unsigned int ad1981_jacks_blacklist[] = {
4501 0x10140523, /* Thinkpad R40 */
4502 0x10140534, /* Thinkpad X31 */
4503 0x10140537, /* Thinkpad T41p */
4504 + 0x1014053e, /* Thinkpad R40e */
4505 0x10140554, /* Thinkpad T42p/R50p */
4506 0x10140567, /* Thinkpad T43p 2668-G7U */
4507 0x10140581, /* Thinkpad X41-2527 */
4508 0x10280160, /* Dell Dimension 2400 */
4509 0x104380b0, /* Asus A7V8X-MX */
4510 0x11790241, /* Toshiba Satellite A-15 S127 */
4511 + 0x1179ff10, /* Toshiba P500 */
4512 0x144dc01a, /* Samsung NP-X20C004/SEG */
4513 0 /* end */
4514 };
4515 diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
4516 index ddcd4a9..78c8736 100644
4517 --- a/sound/pci/cmipci.c
4518 +++ b/sound/pci/cmipci.c
4519 @@ -941,13 +941,21 @@ static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci
4520 struct snd_pcm_substream *substream)
4521 {
4522 size_t ptr;
4523 - unsigned int reg;
4524 + unsigned int reg, rem, tries;
4525 +
4526 if (!rec->running)
4527 return 0;
4528 #if 1 // this seems better..
4529 reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
4530 - ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1);
4531 - ptr >>= rec->shift;
4532 + for (tries = 0; tries < 3; tries++) {
4533 + rem = snd_cmipci_read_w(cm, reg);
4534 + if (rem < rec->dma_size)
4535 + goto ok;
4536 + }
4537 + printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
4538 + return SNDRV_PCM_POS_XRUN;
4539 +ok:
4540 + ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
4541 #else
4542 reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
4543 ptr = snd_cmipci_read(cm, reg) - rec->offset;
4544 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4545 index 673cec3..dd3a8e7 100644
4546 --- a/sound/pci/hda/hda_intel.c
4547 +++ b/sound/pci/hda/hda_intel.c
4548 @@ -2228,8 +2228,10 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
4549 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
4550 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
4551 SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
4552 + SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
4553 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
4554 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
4555 + SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
4556 {}
4557 };
4558
4559 @@ -2317,6 +2319,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
4560 static struct snd_pci_quirk msi_white_list[] __devinitdata = {
4561 SND_PCI_QUIRK(0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1),
4562 SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1),
4563 + SND_PCI_QUIRK(0x107b, 0x0380, "Gateway M-6866", 1),
4564 {}
4565 };
4566
4567 @@ -2333,6 +2336,13 @@ static void __devinit check_msi(struct azx *chip)
4568 "hda_intel: msi for device %04x:%04x set to %d\n",
4569 q->subvendor, q->subdevice, q->value);
4570 chip->msi = q->value;
4571 + return;
4572 + }
4573 +
4574 + /* NVidia chipsets seem to cause troubles with MSI */
4575 + if (chip->driver_type == AZX_DRIVER_NVIDIA) {
4576 + printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
4577 + chip->msi = 0;
4578 }
4579 }
4580
4581 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4582 index 905859d..79afb46 100644
4583 --- a/sound/pci/hda/patch_conexant.c
4584 +++ b/sound/pci/hda/patch_conexant.c
4585 @@ -1581,6 +1581,21 @@ static int patch_cxt5047(struct hda_codec *codec)
4586 #endif
4587 }
4588 spec->vmaster_nid = 0x13;
4589 +
4590 + switch (codec->subsystem_id >> 16) {
4591 + case 0x103c:
4592 + /* HP laptops have really bad sound over 0 dB on NID 0x10.
4593 + * Fix max PCM level to 0 dB (originally it has 0x1e steps
4594 + * with 0 dB offset 0x17)
4595 + */
4596 + snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
4597 + (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
4598 + (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
4599 + (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
4600 + (1 << AC_AMPCAP_MUTE_SHIFT));
4601 + break;
4602 + }
4603 +
4604 return 0;
4605 }
4606
4607 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4608 index 911dd1f..26c70d6 100644
4609 --- a/sound/pci/hda/patch_realtek.c
4610 +++ b/sound/pci/hda/patch_realtek.c
4611 @@ -400,6 +400,8 @@ static int alc_mux_enum_info(struct snd_kcontrol *kcontrol,
4612 unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
4613 if (mux_idx >= spec->num_mux_defs)
4614 mux_idx = 0;
4615 + if (!spec->input_mux[mux_idx].num_items && mux_idx > 0)
4616 + mux_idx = 0;
4617 return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
4618 }
4619
4620 @@ -428,6 +430,8 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
4621
4622 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
4623 imux = &spec->input_mux[mux_idx];
4624 + if (!imux->num_items && mux_idx > 0)
4625 + imux = &spec->input_mux[0];
4626
4627 type = get_wcaps_type(get_wcaps(codec, nid));
4628 if (type == AC_WID_AUD_MIX) {
4629 @@ -6248,6 +6252,7 @@ static const char *alc260_models[ALC260_MODEL_LAST] = {
4630
4631 static struct snd_pci_quirk alc260_cfg_tbl[] = {
4632 SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
4633 + SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
4634 SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
4635 SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
4636 SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
4637 @@ -6277,7 +6282,7 @@ static struct alc_config_preset alc260_presets[] = {
4638 .num_dacs = ARRAY_SIZE(alc260_dac_nids),
4639 .dac_nids = alc260_dac_nids,
4640 .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
4641 - .adc_nids = alc260_adc_nids,
4642 + .adc_nids = alc260_dual_adc_nids,
4643 .num_channel_mode = ARRAY_SIZE(alc260_modes),
4644 .channel_mode = alc260_modes,
4645 .input_mux = &alc260_capture_source,
4646 @@ -8917,7 +8922,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
4647 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
4648 SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
4649 SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
4650 - SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
4651 + SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
4652
4653 {}
4654 };
4655 @@ -9743,6 +9748,8 @@ static void alc882_auto_init_input_src(struct hda_codec *codec)
4656 continue;
4657 mux_idx = c >= spec->num_mux_defs ? 0 : c;
4658 imux = &spec->input_mux[mux_idx];
4659 + if (!imux->num_items && mux_idx > 0)
4660 + imux = &spec->input_mux[0];
4661 for (idx = 0; idx < conns; idx++) {
4662 /* if the current connection is the selected one,
4663 * unmute it as default - otherwise mute it
4664 diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
4665 index bdd3b7e..bd498d4 100644
4666 --- a/tools/perf/Documentation/Makefile
4667 +++ b/tools/perf/Documentation/Makefile
4668 @@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
4669 DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
4670 DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
4671
4672 +# Make the path relative to DESTDIR, not prefix
4673 +ifndef DESTDIR
4674 prefix?=$(HOME)
4675 +endif
4676 bindir?=$(prefix)/bin
4677 htmldir?=$(prefix)/share/doc/perf-doc
4678 pdfdir?=$(prefix)/share/doc/perf-doc
4679 @@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man
4680 man1dir=$(mandir)/man1
4681 man5dir=$(mandir)/man5
4682 man7dir=$(mandir)/man7
4683 -# DESTDIR=
4684
4685 ASCIIDOC=asciidoc
4686 ASCIIDOC_EXTRA = --unsafe
4687 diff --git a/tools/perf/Makefile b/tools/perf/Makefile
4688 index 7e190d5..719d028 100644
4689 --- a/tools/perf/Makefile
4690 +++ b/tools/perf/Makefile
4691 @@ -218,7 +218,10 @@ STRIP ?= strip
4692 # runtime figures out where they are based on the path to the executable.
4693 # This can help installing the suite in a relocatable way.
4694
4695 +# Make the path relative to DESTDIR, not to prefix
4696 +ifndef DESTDIR
4697 prefix = $(HOME)
4698 +endif
4699 bindir_relative = bin
4700 bindir = $(prefix)/$(bindir_relative)
4701 mandir = share/man
4702 @@ -235,7 +238,6 @@ sysconfdir = $(prefix)/etc
4703 ETC_PERFCONFIG = etc/perfconfig
4704 endif
4705 lib = lib
4706 -# DESTDIR=
4707
4708 export prefix bindir sharedir sysconfdir
4709

  ViewVC Help
Powered by ViewVC 1.1.20