Contents of /genpatches-2.6/tags/2.6.32-15/1002_linux-2.6.32.3.patch


Revision 1735
Wed Aug 4 11:25:09 2010 UTC by mpagano
File size: 155869 bytes
2.6.32-15 release
1 diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
2 index af6885c..e1def17 100644
3 --- a/Documentation/filesystems/ext4.txt
4 +++ b/Documentation/filesystems/ext4.txt
5 @@ -196,7 +196,7 @@ nobarrier This also requires an IO stack which can support
6 also be used to enable or disable barriers, for
7 consistency with other ext4 mount options.
8
9 -inode_readahead=n This tuning parameter controls the maximum
10 +inode_readahead_blks=n This tuning parameter controls the maximum
11 number of inode table blocks that ext4's inode
12 table readahead algorithm will pre-read into
13 the buffer cache. The default value is 32 blocks.
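
The hunk above corrects the documented name of ext4's inode readahead tunable to inode_readahead_blks. A minimal sketch of setting it at mount time through mount(2) follows; the device and mount point are made-up names, not part of the patch:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* "inode_readahead_blks=32" spells out the documented default;
         * /dev/sda2 and /mnt/data are hypothetical. */
        if (mount("/dev/sda2", "/mnt/data", "ext4", 0,
                  "inode_readahead_blks=32") != 0)
            perror("mount");
        return 0;
    }
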
14 diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
15 index a5b632e..f0c624f 100644
16 --- a/arch/powerpc/kernel/align.c
17 +++ b/arch/powerpc/kernel/align.c
18 @@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
19 */
20 static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
21 unsigned int areg, struct pt_regs *regs,
22 - unsigned int flags, unsigned int length)
23 + unsigned int flags, unsigned int length,
24 + unsigned int elsize)
25 {
26 char *ptr;
27 + unsigned long *lptr;
28 int ret = 0;
29 + int sw = 0;
30 + int i, j;
31
32 flush_vsx_to_thread(current);
33
34 @@ -654,19 +658,35 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
35 else
36 ptr = (char *) &current->thread.vr[reg - 32];
37
38 - if (flags & ST)
39 - ret = __copy_to_user(addr, ptr, length);
40 - else {
41 - if (flags & SPLT){
42 - ret = __copy_from_user(ptr, addr, length);
43 - ptr += length;
44 + lptr = (unsigned long *) ptr;
45 +
46 + if (flags & SW)
47 + sw = elsize-1;
48 +
49 + for (j = 0; j < length; j += elsize) {
50 + for (i = 0; i < elsize; ++i) {
51 + if (flags & ST)
52 + ret |= __put_user(ptr[i^sw], addr + i);
53 + else
54 + ret |= __get_user(ptr[i^sw], addr + i);
55 }
56 - ret |= __copy_from_user(ptr, addr, length);
57 + ptr += elsize;
58 + addr += elsize;
59 }
60 - if (flags & U)
61 - regs->gpr[areg] = regs->dar;
62 - if (ret)
63 +
64 + if (!ret) {
65 + if (flags & U)
66 + regs->gpr[areg] = regs->dar;
67 +
68 + /* Splat load copies the same data to top and bottom 8 bytes */
69 + if (flags & SPLT)
70 + lptr[1] = lptr[0];
71 + /* For 8 byte loads, zero the top 8 bytes */
72 + else if (!(flags & ST) && (8 == length))
73 + lptr[1] = 0;
74 + } else
75 return -EFAULT;
76 +
77 return 1;
78 }
79 #endif
80 @@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs)
81
82 #ifdef CONFIG_VSX
83 if ((instruction & 0xfc00003e) == 0x7c000018) {
84 - /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
85 + unsigned int elsize;
86 +
87 + /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
88 reg |= (instruction & 0x1) << 5;
89 /* Simple inline decoder instead of a table */
90 + /* VSX has only 8 and 16 byte memory accesses */
91 + nb = 8;
92 if (instruction & 0x200)
93 nb = 16;
94 - else if (instruction & 0x080)
95 - nb = 8;
96 - else
97 - nb = 4;
98 +
99 + /* Vector stores in little-endian mode swap individual
100 + elements, so process them separately */
101 + elsize = 4;
102 + if (instruction & 0x80)
103 + elsize = 8;
104 +
105 flags = 0;
106 + if (regs->msr & MSR_LE)
107 + flags |= SW;
108 if (instruction & 0x100)
109 flags |= ST;
110 if (instruction & 0x040)
111 @@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs)
112 nb = 8;
113 }
114 PPC_WARN_EMULATED(vsx);
115 - return emulate_vsx(addr, reg, areg, regs, flags, nb);
116 + return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
117 }
118 #endif
119 /* A size of 0 indicates an instruction we don't support, with
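
The rewritten emulate_vsx() above replaces the bulk __copy_to_user()/__copy_from_user() calls with a byte loop indexed as ptr[i ^ sw], where sw = elsize - 1 whenever MSR_LE is set. Because elsize is a power of two here (4 or 8), i ^ sw visits each element's bytes in reverse order, swapping endianness one element at a time. A small user-space sketch of that indexing trick, with made-up buffer contents:

    #include <stdio.h>

    /* Copy length bytes in elsize-sized elements, reversing the bytes
     * of each element when swap is set (elsize must be a power of two). */
    static void copy_swapped(unsigned char *dst, const unsigned char *src,
                             unsigned int length, unsigned int elsize, int swap)
    {
        unsigned int i, j, sw = swap ? elsize - 1 : 0;

        for (j = 0; j < length; j += elsize)
            for (i = 0; i < elsize; ++i)
                dst[j + i] = src[j + (i ^ sw)];
    }

    int main(void)
    {
        unsigned char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[8];
        unsigned int k;

        copy_swapped(dst, src, 8, 4, 1);
        for (k = 0; k < 8; k++)
            printf("%d ", dst[k]);   /* prints: 4 3 2 1 8 7 6 5 */
        printf("\n");
        return 0;
    }
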
120 diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
121 index 7e2b6ba..0e3e728 100644
122 --- a/arch/x86/include/asm/msr.h
123 +++ b/arch/x86/include/asm/msr.h
124 @@ -27,6 +27,18 @@ struct msr {
125 };
126 };
127
128 +struct msr_info {
129 + u32 msr_no;
130 + struct msr reg;
131 + struct msr *msrs;
132 + int err;
133 +};
134 +
135 +struct msr_regs_info {
136 + u32 *regs;
137 + int err;
138 +};
139 +
140 static inline unsigned long long native_read_tscp(unsigned int *aux)
141 {
142 unsigned long low, high;
143 @@ -244,11 +256,14 @@ do { \
144
145 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
146
147 +struct msr *msrs_alloc(void);
148 +void msrs_free(struct msr *msrs);
149 +
150 #ifdef CONFIG_SMP
151 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
152 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
153 -void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
154 -void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
155 +void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
156 +void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
157 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
158 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
159 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
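
Besides the const struct cpumask change, the header above gains msrs_alloc()/msrs_free(), and rdmsr_on_cpus()/wrmsr_on_cpus() now fill a per-cpu struct msr buffer. A hedged kernel-side sketch of the lifecycle, modeled on the amd64_edac conversion later in this patch (the function name is hypothetical):

    #include <linux/kernel.h>
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <asm/msr.h>

    static int dump_mcg_ctl(const struct cpumask *mask)
    {
        struct msr *msrs;
        int cpu;

        msrs = msrs_alloc();            /* wraps alloc_percpu(struct msr) */
        if (!msrs)
            return -ENOMEM;

        rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

        for_each_cpu(cpu, mask) {
            struct msr *reg = per_cpu_ptr(msrs, cpu);

            pr_info("cpu %d: MCG_CTL = 0x%llx\n", cpu, reg->q);
        }

        msrs_free(msrs);
        return 0;
    }
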
160 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
161 index c978648..13b1885 100644
162 --- a/arch/x86/include/asm/processor.h
163 +++ b/arch/x86/include/asm/processor.h
164 @@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
165 unsigned int *ecx, unsigned int *edx)
166 {
167 /* ecx is often an input as well as an output. */
168 - asm("cpuid"
169 + asm volatile("cpuid"
170 : "=a" (*eax),
171 "=b" (*ebx),
172 "=c" (*ecx),
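
The volatile qualifier matters because GCC may delete or merge a plain asm whose outputs appear unused or whose operands repeat, while CPUID must genuinely execute each time it is requested. A user-space sketch using the same operand constraints ("0" and "2" tie the eax/ecx inputs to their output registers):

    static inline void cpuid(unsigned int leaf, unsigned int subleaf,
                             unsigned int *eax, unsigned int *ebx,
                             unsigned int *ecx, unsigned int *edx)
    {
        *eax = leaf;
        *ecx = subleaf;
        /* volatile: the instruction has effects beyond its outputs,
         * so the compiler must not elide or reorder it away. */
        asm volatile("cpuid"
                     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                     : "0" (*eax), "2" (*ecx));
    }
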
173 diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
174 index d1414af..e90a8a9 100644
175 --- a/arch/x86/include/asm/uv/uv_hub.h
176 +++ b/arch/x86/include/asm/uv/uv_hub.h
177 @@ -31,20 +31,20 @@
178 * contiguous (although various IO spaces may punch holes in
179 * it)..
180 *
181 - * N - Number of bits in the node portion of a socket physical
182 - * address.
183 + * N - Number of bits in the node portion of a socket physical
184 + * address.
185 *
186 - * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
187 - * routers always have low bit of 1, C/MBricks have low bit
188 - * equal to 0. Most addressing macros that target UV hub chips
189 - * right shift the NASID by 1 to exclude the always-zero bit.
190 - * NASIDs contain up to 15 bits.
191 + * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
192 + * routers always have low bit of 1, C/MBricks have low bit
193 + * equal to 0. Most addressing macros that target UV hub chips
194 + * right shift the NASID by 1 to exclude the always-zero bit.
195 + * NASIDs contain up to 15 bits.
196 *
197 * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
198 * of nasids.
199 *
200 - * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
201 - * of the nasid for socket usage.
202 + * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
203 + * of the nasid for socket usage.
204 *
205 *
206 * NumaLink Global Physical Address Format:
207 @@ -71,12 +71,12 @@
208 *
209 *
210 * APICID format
211 - * NOTE!!!!!! This is the current format of the APICID. However, code
212 - * should assume that this will change in the future. Use functions
213 - * in this file for all APICID bit manipulations and conversion.
214 + * NOTE!!!!!! This is the current format of the APICID. However, code
215 + * should assume that this will change in the future. Use functions
216 + * in this file for all APICID bit manipulations and conversion.
217 *
218 - * 1111110000000000
219 - * 5432109876543210
220 + * 1111110000000000
221 + * 5432109876543210
222 * pppppppppplc0cch
223 * sssssssssss
224 *
225 @@ -89,9 +89,9 @@
226 * Note: Processor only supports 12 bits in the APICID register. The ACPI
227 * tables hold all 16 bits. Software needs to be aware of this.
228 *
229 - * Unless otherwise specified, all references to APICID refer to
230 - * the FULL value contained in ACPI tables, not the subset in the
231 - * processor APICID register.
232 + * Unless otherwise specified, all references to APICID refer to
233 + * the FULL value contained in ACPI tables, not the subset in the
234 + * processor APICID register.
235 */
236
237
238 @@ -151,16 +151,16 @@ struct uv_hub_info_s {
239 };
240
241 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
242 -#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
243 +#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
244 #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
245
246 /*
247 * Local & Global MMR space macros.
248 - * Note: macros are intended to be used ONLY by inline functions
249 - * in this file - not by other kernel code.
250 - * n - NASID (full 15-bit global nasid)
251 - * g - GNODE (full 15-bit global nasid, right shifted 1)
252 - * p - PNODE (local part of nsids, right shifted 1)
253 + * Note: macros are intended to be used ONLY by inline functions
254 + * in this file - not by other kernel code.
255 + * n - NASID (full 15-bit global nasid)
256 + * g - GNODE (full 15-bit global nasid, right shifted 1)
257 + * p - PNODE (local part of nsids, right shifted 1)
258 */
259 #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
260 #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
261 @@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
262 /*
263 * Macros for converting between kernel virtual addresses, socket local physical
264 * addresses, and UV global physical addresses.
265 - * Note: use the standard __pa() & __va() macros for converting
266 - * between socket virtual and socket physical addresses.
267 + * Note: use the standard __pa() & __va() macros for converting
268 + * between socket virtual and socket physical addresses.
269 */
270
271 /* socket phys RAM --> UV global physical address */
272 @@ -265,21 +265,18 @@ static inline int uv_apicid_to_pnode(int apicid)
273 * Access global MMRs using the low memory MMR32 space. This region supports
274 * faster MMR access but not all MMRs are accessible in this space.
275 */
276 -static inline unsigned long *uv_global_mmr32_address(int pnode,
277 - unsigned long offset)
278 +static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
279 {
280 return __va(UV_GLOBAL_MMR32_BASE |
281 UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
282 }
283
284 -static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
285 - unsigned long val)
286 +static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
287 {
288 writeq(val, uv_global_mmr32_address(pnode, offset));
289 }
290
291 -static inline unsigned long uv_read_global_mmr32(int pnode,
292 - unsigned long offset)
293 +static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
294 {
295 return readq(uv_global_mmr32_address(pnode, offset));
296 }
297 @@ -288,25 +285,32 @@ static inline unsigned long uv_read_global_mmr32(int pnode,
298 * Access Global MMR space using the MMR space located at the top of physical
299 * memory.
300 */
301 -static inline unsigned long *uv_global_mmr64_address(int pnode,
302 - unsigned long offset)
303 +static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset)
304 {
305 return __va(UV_GLOBAL_MMR64_BASE |
306 UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
307 }
308
309 -static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
310 - unsigned long val)
311 +static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
312 {
313 writeq(val, uv_global_mmr64_address(pnode, offset));
314 }
315
316 -static inline unsigned long uv_read_global_mmr64(int pnode,
317 - unsigned long offset)
318 +static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
319 {
320 return readq(uv_global_mmr64_address(pnode, offset));
321 }
322
323 +static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
324 +{
325 + writeb(val, uv_global_mmr64_address(pnode, offset));
326 +}
327 +
328 +static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
329 +{
330 + return readb(uv_global_mmr64_address(pnode, offset));
331 +}
332 +
333 /*
334 * Access hub local MMRs. Faster than using global space but only local MMRs
335 * are accessible.
336 @@ -426,11 +430,17 @@ static inline void uv_set_scir_bits(unsigned char value)
337 }
338 }
339
340 +static inline unsigned long uv_scir_offset(int apicid)
341 +{
342 + return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
343 +}
344 +
345 static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
346 {
347 if (uv_cpu_hub_info(cpu)->scir.state != value) {
348 + uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
349 + uv_cpu_hub_info(cpu)->scir.offset, value);
350 uv_cpu_hub_info(cpu)->scir.state = value;
351 - uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
352 }
353 }
354
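
With the change above, a CPU's SCIR heartbeat byte is addressed by the low six bits of its APIC ID instead of its logical slot on the blade, and uv_set_cpu_scir_bits() now writes it through the node's global MMR space so any CPU can update it. A worked example of the offset math (the APIC ID is made up):

    /* apicid 0x4a: 0x4a & 0x3f == 0x0a, so the heartbeat byte sits at
     * SCIR_LOCAL_MMR_BASE | 0x0a within that cpu's pnode. */
    unsigned long off = uv_scir_offset(0x4a);
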
355 diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
356 index e0b3130..c8243f0 100644
357 --- a/arch/x86/kernel/amd_iommu_init.c
358 +++ b/arch/x86/kernel/amd_iommu_init.c
359 @@ -136,6 +136,11 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
360 system */
361
362 /*
363 + * Set to true if ACPI table parsing and hardware initialization went properly

364 + */
365 +static bool amd_iommu_initialized;
366 +
367 +/*
368 * Pointer to the device table which is shared by all AMD IOMMUs
369 * it is indexed by the PCI device id or the HT unit id and contains
370 * information about the domain the device belongs to as well as the
371 @@ -913,6 +918,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
372 }
373 WARN_ON(p != end);
374
375 + amd_iommu_initialized = true;
376 +
377 return 0;
378 }
379
380 @@ -1263,6 +1270,9 @@ int __init amd_iommu_init(void)
381 if (acpi_table_parse("IVRS", init_iommu_all) != 0)
382 goto free;
383
384 + if (!amd_iommu_initialized)
385 + goto free;
386 +
387 if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
388 goto free;
389
390 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
391 index 326c254..2ab3535 100644
392 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
393 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
394 @@ -607,8 +607,10 @@ void __init uv_system_init(void)
395 uv_rtc_init();
396
397 for_each_present_cpu(cpu) {
398 + int apicid = per_cpu(x86_cpu_to_apicid, cpu);
399 +
400 nid = cpu_to_node(cpu);
401 - pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
402 + pnode = uv_apicid_to_pnode(apicid);
403 blade = boot_pnode_to_blade(pnode);
404 lcpu = uv_blade_info[blade].nr_possible_cpus;
405 uv_blade_info[blade].nr_possible_cpus++;
406 @@ -629,15 +631,13 @@ void __init uv_system_init(void)
407 uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
408 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
409 uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
410 - uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
411 + uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
412 uv_node_to_blade[nid] = blade;
413 uv_cpu_to_blade[cpu] = blade;
414 max_pnode = max(pnode, max_pnode);
415
416 - printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
417 - "lcpu %d, blade %d\n",
418 - cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
419 - lcpu, blade);
420 + printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
421 + cpu, apicid, pnode, nid, lcpu, blade);
422 }
423
424 /* Add blade/pnode info for nodes without cpus */
425 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
426 index 7b058a2..c06acdd 100644
427 --- a/arch/x86/kernel/ptrace.c
428 +++ b/arch/x86/kernel/ptrace.c
429 @@ -408,14 +408,14 @@ static int genregs_get(struct task_struct *target,
430 {
431 if (kbuf) {
432 unsigned long *k = kbuf;
433 - while (count > 0) {
434 + while (count >= sizeof(*k)) {
435 *k++ = getreg(target, pos);
436 count -= sizeof(*k);
437 pos += sizeof(*k);
438 }
439 } else {
440 unsigned long __user *u = ubuf;
441 - while (count > 0) {
442 + while (count >= sizeof(*u)) {
443 if (__put_user(getreg(target, pos), u++))
444 return -EFAULT;
445 count -= sizeof(*u);
446 @@ -434,14 +434,14 @@ static int genregs_set(struct task_struct *target,
447 int ret = 0;
448 if (kbuf) {
449 const unsigned long *k = kbuf;
450 - while (count > 0 && !ret) {
451 + while (count >= sizeof(*k) && !ret) {
452 ret = putreg(target, pos, *k++);
453 count -= sizeof(*k);
454 pos += sizeof(*k);
455 }
456 } else {
457 const unsigned long __user *u = ubuf;
458 - while (count > 0 && !ret) {
459 + while (count >= sizeof(*u) && !ret) {
460 unsigned long word;
461 ret = __get_user(word, u++);
462 if (ret)
463 @@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_struct *target,
464 {
465 if (kbuf) {
466 compat_ulong_t *k = kbuf;
467 - while (count > 0) {
468 + while (count >= sizeof(*k)) {
469 getreg32(target, pos, k++);
470 count -= sizeof(*k);
471 pos += sizeof(*k);
472 }
473 } else {
474 compat_ulong_t __user *u = ubuf;
475 - while (count > 0) {
476 + while (count >= sizeof(*u)) {
477 compat_ulong_t word;
478 getreg32(target, pos, &word);
479 if (__put_user(word, u++))
480 @@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_struct *target,
481 int ret = 0;
482 if (kbuf) {
483 const compat_ulong_t *k = kbuf;
484 - while (count > 0 && !ret) {
485 + while (count >= sizeof(*k) && !ret) {
486 ret = putreg32(target, pos, *k++);
487 count -= sizeof(*k);
488 pos += sizeof(*k);
489 }
490 } else {
491 const compat_ulong_t __user *u = ubuf;
492 - while (count > 0 && !ret) {
493 + while (count >= sizeof(*u) && !ret) {
494 compat_ulong_t word;
495 ret = __get_user(word, u++);
496 if (ret)
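
Each loop above formerly tested count > 0, so a request whose byte count was not a multiple of the register size still performed one more full-word copy, writing past the end of the supplied buffer; requiring count >= sizeof(*k) makes a trailing partial word end the loop instead. A minimal user-space model of the corrected pattern, with made-up sizes:

    #include <stddef.h>

    /* Copy whole words only; a trailing partial word is left alone.
     * With count = 10 and 8-byte words, the old "count > 0" test would
     * have copied 16 bytes, 6 of them past the end of the buffer. */
    static size_t copy_words(unsigned long *dst, const unsigned long *src,
                             size_t count /* in bytes */)
    {
        size_t copied = 0;

        while (count >= sizeof(*dst)) {      /* was: count > 0 */
            *dst++ = *src++;
            count -= sizeof(*dst);
            copied += sizeof(*dst);
        }
        return copied;
    }
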
497 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
498 index 23c2176..41659fb 100644
499 --- a/arch/x86/kvm/lapic.c
500 +++ b/arch/x86/kvm/lapic.c
501 @@ -1156,6 +1156,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
502 hrtimer_cancel(&apic->lapic_timer.timer);
503 update_divide_count(apic);
504 start_apic_timer(apic);
505 + apic->irr_pending = true;
506 }
507
508 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
509 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
510 index 72558f8..85e12cd 100644
511 --- a/arch/x86/kvm/paging_tmpl.h
512 +++ b/arch/x86/kvm/paging_tmpl.h
513 @@ -455,8 +455,6 @@ out_unlock:
514 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
515 {
516 struct kvm_shadow_walk_iterator iterator;
517 - pt_element_t gpte;
518 - gpa_t pte_gpa = -1;
519 int level;
520 u64 *sptep;
521 int need_flush = 0;
522 @@ -471,10 +469,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
523 if (level == PT_PAGE_TABLE_LEVEL ||
524 ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
525 ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
526 - struct kvm_mmu_page *sp = page_header(__pa(sptep));
527 -
528 - pte_gpa = (sp->gfn << PAGE_SHIFT);
529 - pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
530
531 if (is_shadow_present_pte(*sptep)) {
532 rmap_remove(vcpu->kvm, sptep);
533 @@ -493,18 +487,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
534 if (need_flush)
535 kvm_flush_remote_tlbs(vcpu->kvm);
536 spin_unlock(&vcpu->kvm->mmu_lock);
537 -
538 - if (pte_gpa == -1)
539 - return;
540 - if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
541 - sizeof(pt_element_t)))
542 - return;
543 - if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
544 - if (mmu_topup_memory_caches(vcpu))
545 - return;
546 - kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
547 - sizeof(pt_element_t), 0);
548 - }
549 }
550
551 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
552 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
553 index 85f5db9..c2b6f39 100644
554 --- a/arch/x86/lib/Makefile
555 +++ b/arch/x86/lib/Makefile
556 @@ -2,14 +2,14 @@
557 # Makefile for x86 specific library files.
558 #
559
560 -obj-$(CONFIG_SMP) := msr.o
561 +obj-$(CONFIG_SMP) += msr-smp.o
562
563 lib-y := delay.o
564 lib-y += thunk_$(BITS).o
565 lib-y += usercopy_$(BITS).o getuser.o putuser.o
566 lib-y += memcpy_$(BITS).o
567
568 -obj-y += msr-reg.o msr-reg-export.o
569 +obj-y += msr.o msr-reg.o msr-reg-export.o
570
571 ifeq ($(CONFIG_X86_32),y)
572 obj-y += atomic64_32.o
573 diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
574 new file mode 100644
575 index 0000000..a6b1b86
576 --- /dev/null
577 +++ b/arch/x86/lib/msr-smp.c
578 @@ -0,0 +1,204 @@
579 +#include <linux/module.h>
580 +#include <linux/preempt.h>
581 +#include <linux/smp.h>
582 +#include <asm/msr.h>
583 +
584 +static void __rdmsr_on_cpu(void *info)
585 +{
586 + struct msr_info *rv = info;
587 + struct msr *reg;
588 + int this_cpu = raw_smp_processor_id();
589 +
590 + if (rv->msrs)
591 + reg = per_cpu_ptr(rv->msrs, this_cpu);
592 + else
593 + reg = &rv->reg;
594 +
595 + rdmsr(rv->msr_no, reg->l, reg->h);
596 +}
597 +
598 +static void __wrmsr_on_cpu(void *info)
599 +{
600 + struct msr_info *rv = info;
601 + struct msr *reg;
602 + int this_cpu = raw_smp_processor_id();
603 +
604 + if (rv->msrs)
605 + reg = per_cpu_ptr(rv->msrs, this_cpu);
606 + else
607 + reg = &rv->reg;
608 +
609 + wrmsr(rv->msr_no, reg->l, reg->h);
610 +}
611 +
612 +int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
613 +{
614 + int err;
615 + struct msr_info rv;
616 +
617 + memset(&rv, 0, sizeof(rv));
618 +
619 + rv.msr_no = msr_no;
620 + err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
621 + *l = rv.reg.l;
622 + *h = rv.reg.h;
623 +
624 + return err;
625 +}
626 +EXPORT_SYMBOL(rdmsr_on_cpu);
627 +
628 +int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
629 +{
630 + int err;
631 + struct msr_info rv;
632 +
633 + memset(&rv, 0, sizeof(rv));
634 +
635 + rv.msr_no = msr_no;
636 + rv.reg.l = l;
637 + rv.reg.h = h;
638 + err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
639 +
640 + return err;
641 +}
642 +EXPORT_SYMBOL(wrmsr_on_cpu);
643 +
644 +static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
645 + struct msr *msrs,
646 + void (*msr_func) (void *info))
647 +{
648 + struct msr_info rv;
649 + int this_cpu;
650 +
651 + memset(&rv, 0, sizeof(rv));
652 +
653 + rv.msrs = msrs;
654 + rv.msr_no = msr_no;
655 +
656 + this_cpu = get_cpu();
657 +
658 + if (cpumask_test_cpu(this_cpu, mask))
659 + msr_func(&rv);
660 +
661 + smp_call_function_many(mask, msr_func, &rv, 1);
662 + put_cpu();
663 +}
664 +
665 +/* rdmsr on a bunch of CPUs
666 + *
667 + * @mask: which CPUs
668 + * @msr_no: which MSR
669 + * @msrs: array of MSR values
670 + *
671 + */
672 +void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
673 +{
674 + __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
675 +}
676 +EXPORT_SYMBOL(rdmsr_on_cpus);
677 +
678 +/*
679 + * wrmsr on a bunch of CPUs
680 + *
681 + * @mask: which CPUs
682 + * @msr_no: which MSR
683 + * @msrs: array of MSR values
684 + *
685 + */
686 +void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
687 +{
688 + __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
689 +}
690 +EXPORT_SYMBOL(wrmsr_on_cpus);
691 +
692 +/* These "safe" variants are slower and should be used when the target MSR
693 + may not actually exist. */
694 +static void __rdmsr_safe_on_cpu(void *info)
695 +{
696 + struct msr_info *rv = info;
697 +
698 + rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
699 +}
700 +
701 +static void __wrmsr_safe_on_cpu(void *info)
702 +{
703 + struct msr_info *rv = info;
704 +
705 + rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
706 +}
707 +
708 +int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
709 +{
710 + int err;
711 + struct msr_info rv;
712 +
713 + memset(&rv, 0, sizeof(rv));
714 +
715 + rv.msr_no = msr_no;
716 + err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
717 + *l = rv.reg.l;
718 + *h = rv.reg.h;
719 +
720 + return err ? err : rv.err;
721 +}
722 +EXPORT_SYMBOL(rdmsr_safe_on_cpu);
723 +
724 +int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
725 +{
726 + int err;
727 + struct msr_info rv;
728 +
729 + memset(&rv, 0, sizeof(rv));
730 +
731 + rv.msr_no = msr_no;
732 + rv.reg.l = l;
733 + rv.reg.h = h;
734 + err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
735 +
736 + return err ? err : rv.err;
737 +}
738 +EXPORT_SYMBOL(wrmsr_safe_on_cpu);
739 +
740 +/*
741 + * These variants are significantly slower, but allow control over
742 + * the entire 32-bit GPR set.
743 + */
744 +static void __rdmsr_safe_regs_on_cpu(void *info)
745 +{
746 + struct msr_regs_info *rv = info;
747 +
748 + rv->err = rdmsr_safe_regs(rv->regs);
749 +}
750 +
751 +static void __wrmsr_safe_regs_on_cpu(void *info)
752 +{
753 + struct msr_regs_info *rv = info;
754 +
755 + rv->err = wrmsr_safe_regs(rv->regs);
756 +}
757 +
758 +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
759 +{
760 + int err;
761 + struct msr_regs_info rv;
762 +
763 + rv.regs = regs;
764 + rv.err = -EIO;
765 + err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
766 +
767 + return err ? err : rv.err;
768 +}
769 +EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
770 +
771 +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
772 +{
773 + int err;
774 + struct msr_regs_info rv;
775 +
776 + rv.regs = regs;
777 + rv.err = -EIO;
778 + err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
779 +
780 + return err ? err : rv.err;
781 +}
782 +EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
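
The *_safe_* helpers above hand back the #GP from a missing MSR through ->err rather than letting it fault, so a caller can probe before relying on a register. A hedged sketch; the CPU number and MSR index are placeholders only:

    #include <linux/kernel.h>
    #include <asm/msr.h>

    static void probe_remote_msr(void)
    {
        u32 lo, hi;
        int err;

        err = rdmsr_safe_on_cpu(1, 0xc0010112, &lo, &hi);
        if (err)
            pr_debug("cpu1: MSR 0xc0010112 unreadable (%d)\n", err);
        else
            pr_debug("cpu1: MSR 0xc0010112 = 0x%08x%08x\n", hi, lo);
    }
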
783 diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
784 index 33a1e3c..8f8eebd 100644
785 --- a/arch/x86/lib/msr.c
786 +++ b/arch/x86/lib/msr.c
787 @@ -1,226 +1,23 @@
788 #include <linux/module.h>
789 #include <linux/preempt.h>
790 -#include <linux/smp.h>
791 #include <asm/msr.h>
792
793 -struct msr_info {
794 - u32 msr_no;
795 - struct msr reg;
796 - struct msr *msrs;
797 - int off;
798 - int err;
799 -};
800 -
801 -static void __rdmsr_on_cpu(void *info)
802 -{
803 - struct msr_info *rv = info;
804 - struct msr *reg;
805 - int this_cpu = raw_smp_processor_id();
806 -
807 - if (rv->msrs)
808 - reg = &rv->msrs[this_cpu - rv->off];
809 - else
810 - reg = &rv->reg;
811 -
812 - rdmsr(rv->msr_no, reg->l, reg->h);
813 -}
814 -
815 -static void __wrmsr_on_cpu(void *info)
816 -{
817 - struct msr_info *rv = info;
818 - struct msr *reg;
819 - int this_cpu = raw_smp_processor_id();
820 -
821 - if (rv->msrs)
822 - reg = &rv->msrs[this_cpu - rv->off];
823 - else
824 - reg = &rv->reg;
825 -
826 - wrmsr(rv->msr_no, reg->l, reg->h);
827 -}
828 -
829 -int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
830 -{
831 - int err;
832 - struct msr_info rv;
833 -
834 - memset(&rv, 0, sizeof(rv));
835 -
836 - rv.msr_no = msr_no;
837 - err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
838 - *l = rv.reg.l;
839 - *h = rv.reg.h;
840 -
841 - return err;
842 -}
843 -EXPORT_SYMBOL(rdmsr_on_cpu);
844 -
845 -int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
846 -{
847 - int err;
848 - struct msr_info rv;
849 -
850 - memset(&rv, 0, sizeof(rv));
851 -
852 - rv.msr_no = msr_no;
853 - rv.reg.l = l;
854 - rv.reg.h = h;
855 - err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
856 -
857 - return err;
858 -}
859 -EXPORT_SYMBOL(wrmsr_on_cpu);
860 -
861 -/* rdmsr on a bunch of CPUs
862 - *
863 - * @mask: which CPUs
864 - * @msr_no: which MSR
865 - * @msrs: array of MSR values
866 - *
867 - */
868 -void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
869 -{
870 - struct msr_info rv;
871 - int this_cpu;
872 -
873 - memset(&rv, 0, sizeof(rv));
874 -
875 - rv.off = cpumask_first(mask);
876 - rv.msrs = msrs;
877 - rv.msr_no = msr_no;
878 -
879 - this_cpu = get_cpu();
880 -
881 - if (cpumask_test_cpu(this_cpu, mask))
882 - __rdmsr_on_cpu(&rv);
883 -
884 - smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
885 - put_cpu();
886 -}
887 -EXPORT_SYMBOL(rdmsr_on_cpus);
888 -
889 -/*
890 - * wrmsr on a bunch of CPUs
891 - *
892 - * @mask: which CPUs
893 - * @msr_no: which MSR
894 - * @msrs: array of MSR values
895 - *
896 - */
897 -void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
898 -{
899 - struct msr_info rv;
900 - int this_cpu;
901 -
902 - memset(&rv, 0, sizeof(rv));
903 -
904 - rv.off = cpumask_first(mask);
905 - rv.msrs = msrs;
906 - rv.msr_no = msr_no;
907 -
908 - this_cpu = get_cpu();
909 -
910 - if (cpumask_test_cpu(this_cpu, mask))
911 - __wrmsr_on_cpu(&rv);
912 -
913 - smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
914 - put_cpu();
915 -}
916 -EXPORT_SYMBOL(wrmsr_on_cpus);
917 -
918 -/* These "safe" variants are slower and should be used when the target MSR
919 - may not actually exist. */
920 -static void __rdmsr_safe_on_cpu(void *info)
921 -{
922 - struct msr_info *rv = info;
923 -
924 - rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
925 -}
926 -
927 -static void __wrmsr_safe_on_cpu(void *info)
928 -{
929 - struct msr_info *rv = info;
930 -
931 - rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
932 -}
933 -
934 -int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
935 +struct msr *msrs_alloc(void)
936 {
937 - int err;
938 - struct msr_info rv;
939 + struct msr *msrs = NULL;
940
941 - memset(&rv, 0, sizeof(rv));
942 + msrs = alloc_percpu(struct msr);
943 + if (!msrs) {
944 + pr_warning("%s: error allocating msrs\n", __func__);
945 + return NULL;
946 + }
947
948 - rv.msr_no = msr_no;
949 - err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
950 - *l = rv.reg.l;
951 - *h = rv.reg.h;
952 -
953 - return err ? err : rv.err;
954 + return msrs;
955 }
956 -EXPORT_SYMBOL(rdmsr_safe_on_cpu);
957 +EXPORT_SYMBOL(msrs_alloc);
958
959 -int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
960 +void msrs_free(struct msr *msrs)
961 {
962 - int err;
963 - struct msr_info rv;
964 -
965 - memset(&rv, 0, sizeof(rv));
966 -
967 - rv.msr_no = msr_no;
968 - rv.reg.l = l;
969 - rv.reg.h = h;
970 - err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
971 -
972 - return err ? err : rv.err;
973 -}
974 -EXPORT_SYMBOL(wrmsr_safe_on_cpu);
975 -
976 -/*
977 - * These variants are significantly slower, but allows control over
978 - * the entire 32-bit GPR set.
979 - */
980 -struct msr_regs_info {
981 - u32 *regs;
982 - int err;
983 -};
984 -
985 -static void __rdmsr_safe_regs_on_cpu(void *info)
986 -{
987 - struct msr_regs_info *rv = info;
988 -
989 - rv->err = rdmsr_safe_regs(rv->regs);
990 -}
991 -
992 -static void __wrmsr_safe_regs_on_cpu(void *info)
993 -{
994 - struct msr_regs_info *rv = info;
995 -
996 - rv->err = wrmsr_safe_regs(rv->regs);
997 -}
998 -
999 -int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
1000 -{
1001 - int err;
1002 - struct msr_regs_info rv;
1003 -
1004 - rv.regs = regs;
1005 - rv.err = -EIO;
1006 - err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
1007 -
1008 - return err ? err : rv.err;
1009 -}
1010 -EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
1011 -
1012 -int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
1013 -{
1014 - int err;
1015 - struct msr_regs_info rv;
1016 -
1017 - rv.regs = regs;
1018 - rv.err = -EIO;
1019 - err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
1020 -
1021 - return err ? err : rv.err;
1022 + free_percpu(msrs);
1023 }
1024 -EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
1025 +EXPORT_SYMBOL(msrs_free);
1026 diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
1027 index 0c9c6a9..8a95e83 100644
1028 --- a/drivers/acpi/button.c
1029 +++ b/drivers/acpi/button.c
1030 @@ -282,6 +282,13 @@ static int acpi_lid_send_state(struct acpi_device *device)
1031 if (ret == NOTIFY_DONE)
1032 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
1033 device);
1034 + if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
1035 + /*
1036 + * It is also regarded as success if the notifier_chain
1037 + * returns NOTIFY_OK or NOTIFY_DONE.
1038 + */
1039 + ret = 0;
1040 + }
1041 return ret;
1042 }
1043
1044 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1045 index baef28c..7511029 100644
1046 --- a/drivers/acpi/ec.c
1047 +++ b/drivers/acpi/ec.c
1048 @@ -916,6 +916,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
1049 /* MSI EC needs special treatment, enable it */
1050 static int ec_flag_msi(const struct dmi_system_id *id)
1051 {
1052 + printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
1053 EC_FLAGS_MSI = 1;
1054 EC_FLAGS_VALIDATE_ECDT = 1;
1055 return 0;
1056 @@ -928,8 +929,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
1057 DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
1058 {
1059 ec_flag_msi, "MSI hardware", {
1060 - DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
1061 - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
1062 + DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
1063 + {
1064 + ec_flag_msi, "MSI hardware", {
1065 + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
1066 + {
1067 + ec_flag_msi, "MSI hardware", {
1068 + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
1069 {
1070 ec_validate_ecdt, "ASUS hardware", {
1071 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
1072 diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
1073 index f98dffe..f0bad9b 100644
1074 --- a/drivers/ata/pata_cmd64x.c
1075 +++ b/drivers/ata/pata_cmd64x.c
1076 @@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
1077 regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
1078 /* Merge the control bits */
1079 regU |= 1 << adev->devno; /* UDMA on */
1080 - if (adev->dma_mode > 2) /* 15nS timing */
1081 + if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
1082 regU |= 4 << adev->devno;
1083 } else {
1084 regU &= ~ (1 << adev->devno); /* UDMA off */
1085 diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
1086 index 21c5bd6..d16e87e 100644
1087 --- a/drivers/ata/pata_hpt3x2n.c
1088 +++ b/drivers/ata/pata_hpt3x2n.c
1089 @@ -8,7 +8,7 @@
1090 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
1091 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
1092 * Portions Copyright (C) 2003 Red Hat Inc
1093 - * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
1094 + * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
1095 *
1096 *
1097 * TODO
1098 @@ -25,7 +25,7 @@
1099 #include <linux/libata.h>
1100
1101 #define DRV_NAME "pata_hpt3x2n"
1102 -#define DRV_VERSION "0.3.7"
1103 +#define DRV_VERSION "0.3.8"
1104
1105 enum {
1106 HPT_PCI_FAST = (1 << 31),
1107 @@ -262,7 +262,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
1108
1109 static void hpt3x2n_set_clock(struct ata_port *ap, int source)
1110 {
1111 - void __iomem *bmdma = ap->ioaddr.bmdma_addr;
1112 + void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
1113
1114 /* Tristate the bus */
1115 iowrite8(0x80, bmdma+0x73);
1116 @@ -272,9 +272,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
1117 iowrite8(source, bmdma+0x7B);
1118 iowrite8(0xC0, bmdma+0x79);
1119
1120 - /* Reset state machines */
1121 - iowrite8(0x37, bmdma+0x70);
1122 - iowrite8(0x37, bmdma+0x74);
1123 + /* Reset state machines, avoid enabling the disabled channels */
1124 + iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
1125 + iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
1126
1127 /* Complete reset */
1128 iowrite8(0x00, bmdma+0x79);
1129 @@ -284,21 +284,10 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
1130 iowrite8(0x00, bmdma+0x77);
1131 }
1132
1133 -/* Check if our partner interface is busy */
1134 -
1135 -static int hpt3x2n_pair_idle(struct ata_port *ap)
1136 -{
1137 - struct ata_host *host = ap->host;
1138 - struct ata_port *pair = host->ports[ap->port_no ^ 1];
1139 -
1140 - if (pair->hsm_task_state == HSM_ST_IDLE)
1141 - return 1;
1142 - return 0;
1143 -}
1144 -
1145 static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
1146 {
1147 long flags = (long)ap->host->private_data;
1148 +
1149 /* See if we should use the DPLL */
1150 if (writing)
1151 return USE_DPLL; /* Needed for write */
1152 @@ -307,20 +296,35 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
1153 return 0;
1154 }
1155
1156 +static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
1157 +{
1158 + struct ata_port *ap = qc->ap;
1159 + struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
1160 + int rc, flags = (long)ap->host->private_data;
1161 + int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
1162 +
1163 + /* First apply the usual rules */
1164 + rc = ata_std_qc_defer(qc);
1165 + if (rc != 0)
1166 + return rc;
1167 +
1168 + if ((flags & USE_DPLL) != dpll && alt->qc_active)
1169 + return ATA_DEFER_PORT;
1170 + return 0;
1171 +}
1172 +
1173 static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
1174 {
1175 - struct ata_taskfile *tf = &qc->tf;
1176 struct ata_port *ap = qc->ap;
1177 int flags = (long)ap->host->private_data;
1178 + int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
1179
1180 - if (hpt3x2n_pair_idle(ap)) {
1181 - int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
1182 - if ((flags & USE_DPLL) != dpll) {
1183 - if (dpll == 1)
1184 - hpt3x2n_set_clock(ap, 0x21);
1185 - else
1186 - hpt3x2n_set_clock(ap, 0x23);
1187 - }
1188 + if ((flags & USE_DPLL) != dpll) {
1189 + flags &= ~USE_DPLL;
1190 + flags |= dpll;
1191 + ap->host->private_data = (void *)(long)flags;
1192 +
1193 + hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
1194 }
1195 return ata_sff_qc_issue(qc);
1196 }
1197 @@ -337,6 +341,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
1198 .inherits = &ata_bmdma_port_ops,
1199
1200 .bmdma_stop = hpt3x2n_bmdma_stop,
1201 +
1202 + .qc_defer = hpt3x2n_qc_defer,
1203 .qc_issue = hpt3x2n_qc_issue,
1204
1205 .cable_detect = hpt3x2n_cable_detect,
1206 @@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1207 unsigned int f_low, f_high;
1208 int adjust;
1209 unsigned long iobase = pci_resource_start(dev, 4);
1210 - void *hpriv = NULL;
1211 + void *hpriv = (void *)USE_DPLL;
1212 int rc;
1213
1214 rc = pcim_enable_device(dev);
1215 @@ -542,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1216 /* Set our private data up. We only need a few flags so we use
1217 it directly */
1218 if (pci_mhz > 60) {
1219 - hpriv = (void *)PCI66;
1220 + hpriv = (void *)(PCI66 | USE_DPLL);
1221 /*
1222 * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
1223 * the MISC. register to stretch the UltraDMA Tss timing.
1224 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1225 index 44bc8bb..1be7631 100644
1226 --- a/drivers/bluetooth/btusb.c
1227 +++ b/drivers/bluetooth/btusb.c
1228 @@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb)
1229 return;
1230
1231 usb_anchor_urb(urb, &data->bulk_anchor);
1232 + usb_mark_last_busy(data->udev);
1233
1234 err = usb_submit_urb(urb, GFP_ATOMIC);
1235 if (err < 0) {
1236 diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
1237 index 7585c41..c558fa1 100644
1238 --- a/drivers/dma/at_hdmac.c
1239 +++ b/drivers/dma/at_hdmac.c
1240 @@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
1241 dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
1242 cookie, done ? *done : 0, used ? *used : 0);
1243
1244 - spin_lock_bh(atchan->lock);
1245 + spin_lock_bh(&atchan->lock);
1246
1247 last_complete = atchan->completed_cookie;
1248 last_used = chan->cookie;
1249 @@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
1250 ret = dma_async_is_complete(cookie, last_complete, last_used);
1251 }
1252
1253 - spin_unlock_bh(atchan->lock);
1254 + spin_unlock_bh(&atchan->lock);
1255
1256 if (done)
1257 *done = last_complete;
1258 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
1259 index c524d36..dcc4ab7 100644
1260 --- a/drivers/dma/ioat/dma.c
1261 +++ b/drivers/dma/ioat/dma.c
1262 @@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
1263 dma->dev = &pdev->dev;
1264
1265 if (!dma->chancnt) {
1266 - dev_err(dev, "zero channels detected\n");
1267 + dev_err(dev, "channel enumeration error\n");
1268 goto err_setup_interrupts;
1269 }
1270
1271 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
1272 index 45edde9..bbc3e78 100644
1273 --- a/drivers/dma/ioat/dma.h
1274 +++ b/drivers/dma/ioat/dma.h
1275 @@ -60,6 +60,7 @@
1276 * @dca: direct cache access context
1277 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
1278 * @enumerate_channels: hw version specific channel enumeration
1279 + * @reset_hw: hw version specific channel (re)initialization
1280 * @cleanup_tasklet: select between the v2 and v3 cleanup routines
1281 * @timer_fn: select between the v2 and v3 timer watchdog routines
1282 * @self_test: hardware version specific self test for each supported op type
1283 @@ -78,6 +79,7 @@ struct ioatdma_device {
1284 struct dca_provider *dca;
1285 void (*intr_quirk)(struct ioatdma_device *device);
1286 int (*enumerate_channels)(struct ioatdma_device *device);
1287 + int (*reset_hw)(struct ioat_chan_common *chan);
1288 void (*cleanup_tasklet)(unsigned long data);
1289 void (*timer_fn)(unsigned long data);
1290 int (*self_test)(struct ioatdma_device *device);
1291 @@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
1292 writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
1293 }
1294
1295 +static inline void ioat_reset(struct ioat_chan_common *chan)
1296 +{
1297 + u8 ver = chan->device->version;
1298 +
1299 + writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
1300 +}
1301 +
1302 +static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
1303 +{
1304 + u8 ver = chan->device->version;
1305 + u8 cmd;
1306 +
1307 + cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
1308 + return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
1309 +}
1310 +
1311 static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
1312 {
1313 struct ioat_chan_common *chan = &ioat->base;
1314 diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
1315 index 8f1f7f0..5f7a500 100644
1316 --- a/drivers/dma/ioat/dma_v2.c
1317 +++ b/drivers/dma/ioat/dma_v2.c
1318 @@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
1319 __ioat2_start_null_desc(ioat);
1320 }
1321
1322 -static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
1323 +int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
1324 {
1325 - struct ioat_chan_common *chan = &ioat->base;
1326 - unsigned long phys_complete;
1327 + unsigned long end = jiffies + tmo;
1328 + int err = 0;
1329 u32 status;
1330
1331 status = ioat_chansts(chan);
1332 if (is_ioat_active(status) || is_ioat_idle(status))
1333 ioat_suspend(chan);
1334 while (is_ioat_active(status) || is_ioat_idle(status)) {
1335 + if (end && time_after(jiffies, end)) {
1336 + err = -ETIMEDOUT;
1337 + break;
1338 + }
1339 status = ioat_chansts(chan);
1340 cpu_relax();
1341 }
1342
1343 + return err;
1344 +}
1345 +
1346 +int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
1347 +{
1348 + unsigned long end = jiffies + tmo;
1349 + int err = 0;
1350 +
1351 + ioat_reset(chan);
1352 + while (ioat_reset_pending(chan)) {
1353 + if (end && time_after(jiffies, end)) {
1354 + err = -ETIMEDOUT;
1355 + break;
1356 + }
1357 + cpu_relax();
1358 + }
1359 +
1360 + return err;
1361 +}
1362 +
1363 +static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
1364 +{
1365 + struct ioat_chan_common *chan = &ioat->base;
1366 + unsigned long phys_complete;
1367 +
1368 + ioat2_quiesce(chan, 0);
1369 if (ioat_cleanup_preamble(chan, &phys_complete))
1370 __cleanup(ioat, phys_complete);
1371
1372 @@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
1373 spin_unlock_bh(&chan->cleanup_lock);
1374 }
1375
1376 +static int ioat2_reset_hw(struct ioat_chan_common *chan)
1377 +{
1378 + /* throw away whatever the channel was doing and get it initialized */
1379 + u32 chanerr;
1380 +
1381 + ioat2_quiesce(chan, msecs_to_jiffies(100));
1382 +
1383 + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1384 + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1385 +
1386 + return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1387 +}
1388 +
1389 /**
1390 * ioat2_enumerate_channels - find and initialize the device's channels
1391 * @device: the device to be enumerated
1392 @@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
1393 (unsigned long) ioat);
1394 ioat->xfercap_log = xfercap_log;
1395 spin_lock_init(&ioat->ring_lock);
1396 + if (device->reset_hw(&ioat->base)) {
1397 + i = 0;
1398 + break;
1399 + }
1400 }
1401 dma->chancnt = i;
1402 return i;
1403 @@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1404 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1405 struct ioat_chan_common *chan = &ioat->base;
1406 struct ioat_ring_ent **ring;
1407 - u32 chanerr;
1408 int order;
1409
1410 /* have we already been set up? */
1411 @@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1412 /* Setup register to interrupt and write completion status on error */
1413 writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
1414
1415 - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1416 - if (chanerr) {
1417 - dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
1418 - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1419 - }
1420 -
1421 /* allocate a completion writeback area */
1422 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
1423 chan->completion = pci_pool_alloc(chan->device->completion_pool,
1424 @@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
1425 tasklet_disable(&chan->cleanup_task);
1426 del_timer_sync(&chan->timer);
1427 device->cleanup_tasklet((unsigned long) ioat);
1428 -
1429 - /* Delay 100ms after reset to allow internal DMA logic to quiesce
1430 - * before removing DMA descriptor resources.
1431 - */
1432 - writeb(IOAT_CHANCMD_RESET,
1433 - chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
1434 - mdelay(100);
1435 + device->reset_hw(chan);
1436
1437 spin_lock_bh(&ioat->ring_lock);
1438 descs = ioat2_ring_space(ioat);
1439 @@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
1440 int err;
1441
1442 device->enumerate_channels = ioat2_enumerate_channels;
1443 + device->reset_hw = ioat2_reset_hw;
1444 device->cleanup_tasklet = ioat2_cleanup_tasklet;
1445 device->timer_fn = ioat2_timer_event;
1446 device->self_test = ioat_dma_self_test;
1447 diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
1448 index 1d849ef..3afad8d 100644
1449 --- a/drivers/dma/ioat/dma_v2.h
1450 +++ b/drivers/dma/ioat/dma_v2.h
1451 @@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
1452 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
1453 void ioat2_cleanup_tasklet(unsigned long data);
1454 void ioat2_timer_event(unsigned long data);
1455 +int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
1456 +int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
1457 extern struct kobj_type ioat2_ktype;
1458 extern struct kmem_cache *ioat2_cache;
1459 #endif /* IOATDMA_V2_H */
1460 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
1461 index 42f6f10..9908c9e 100644
1462 --- a/drivers/dma/ioat/dma_v3.c
1463 +++ b/drivers/dma/ioat/dma_v3.c
1464 @@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
1465
1466 num_descs = ioat2_xferlen_to_descs(ioat, len);
1467 /* we need 2x the number of descriptors to cover greater than 3
1468 - * sources
1469 + * sources (we need 1 extra source in the q-only continuation
1470 + * case and 3 extra sources in the p+q continuation case.
1471 */
1472 - if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
1473 + if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
1474 + (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
1475 with_ext = 1;
1476 num_descs *= 2;
1477 } else
1478 @@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
1479 return 0;
1480 }
1481
1482 +static int ioat3_reset_hw(struct ioat_chan_common *chan)
1483 +{
1484 + /* throw away whatever the channel was doing and get it
1485 + * initialized, with ioat3 specific workarounds
1486 + */
1487 + struct ioatdma_device *device = chan->device;
1488 + struct pci_dev *pdev = device->pdev;
1489 + u32 chanerr;
1490 + u16 dev_id;
1491 + int err;
1492 +
1493 + ioat2_quiesce(chan, msecs_to_jiffies(100));
1494 +
1495 + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1496 + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1497 +
1498 + /* -= IOAT ver.3 workarounds =- */
1499 + /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1500 + * that can cause stability issues for IOAT ver.3, and clear any
1501 + * pending errors
1502 + */
1503 + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1504 + err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1505 + if (err) {
1506 + dev_err(&pdev->dev, "channel error register unreachable\n");
1507 + return err;
1508 + }
1509 + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1510 +
1511 + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1512 + * (workaround for spurious config parity error after restart)
1513 + */
1514 + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1515 + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1516 + pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1517 +
1518 + return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1519 +}
1520 +
1521 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1522 {
1523 struct pci_dev *pdev = device->pdev;
1524 @@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1525 struct ioat_chan_common *chan;
1526 bool is_raid_device = false;
1527 int err;
1528 - u16 dev_id;
1529 u32 cap;
1530
1531 device->enumerate_channels = ioat2_enumerate_channels;
1532 + device->reset_hw = ioat3_reset_hw;
1533 device->self_test = ioat3_dma_self_test;
1534 dma = &device->common;
1535 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
1536 @@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1537 dma->device_prep_dma_xor_val = NULL;
1538 #endif
1539
1540 - /* -= IOAT ver.3 workarounds =- */
1541 - /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1542 - * that can cause stability issues for IOAT ver.3
1543 - */
1544 - pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1545 -
1546 - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1547 - * (workaround for spurious config parity error after restart)
1548 - */
1549 - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1550 - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1551 - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1552 -
1553 err = ioat_probe(device);
1554 if (err)
1555 return err;
1556 diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
1557 index f015ec1..e8ae63b 100644
1558 --- a/drivers/dma/ioat/registers.h
1559 +++ b/drivers/dma/ioat/registers.h
1560 @@ -27,6 +27,7 @@
1561
1562 #define IOAT_PCI_DEVICE_ID_OFFSET 0x02
1563 #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
1564 +#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
1565 #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
1566
1567 /* MMIO Device Registers */
1568 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1569 index a38831c..a0bcfba 100644
1570 --- a/drivers/edac/amd64_edac.c
1571 +++ b/drivers/edac/amd64_edac.c
1572 @@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
1573 static int ecc_enable_override;
1574 module_param(ecc_enable_override, int, 0644);
1575
1576 +static struct msr *msrs;
1577 +
1578 /* Lookup table for all possible MC control instances */
1579 struct amd64_pvt;
1580 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
1581 @@ -2618,6 +2620,90 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
1582 return empty;
1583 }
1584
1585 +/* get all cores on this DCT */
1586 +static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
1587 +{
1588 + int cpu;
1589 +
1590 + for_each_online_cpu(cpu)
1591 + if (amd_get_nb_id(cpu) == nid)
1592 + cpumask_set_cpu(cpu, mask);
1593 +}
1594 +
1595 +/* check MCG_CTL on all the cpus on this node */
1596 +static bool amd64_nb_mce_bank_enabled_on_node(int nid)
1597 +{
1598 + cpumask_var_t mask;
1599 + int cpu, nbe;
1600 + bool ret = false;
1601 +
1602 + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
1603 + amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
1604 + __func__);
1605 + return false;
1606 + }
1607 +
1608 + get_cpus_on_this_dct_cpumask(mask, nid);
1609 +
1610 + rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
1611 +
1612 + for_each_cpu(cpu, mask) {
1613 + struct msr *reg = per_cpu_ptr(msrs, cpu);
1614 + nbe = reg->l & K8_MSR_MCGCTL_NBE;
1615 +
1616 + debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
1617 + cpu, reg->q,
1618 + (nbe ? "enabled" : "disabled"));
1619 +
1620 + if (!nbe)
1621 + goto out;
1622 + }
1623 + ret = true;
1624 +
1625 +out:
1626 + free_cpumask_var(mask);
1627 + return ret;
1628 +}
1629 +
1630 +static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
1631 +{
1632 + cpumask_var_t cmask;
1633 + int cpu;
1634 +
1635 + if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
1636 + amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
1637 + __func__);
1638 + return false;
1639 + }
1640 +
1641 + get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
1642 +
1643 + rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
1644 +
1645 + for_each_cpu(cpu, cmask) {
1646 +
1647 + struct msr *reg = per_cpu_ptr(msrs, cpu);
1648 +
1649 + if (on) {
1650 + if (reg->l & K8_MSR_MCGCTL_NBE)
1651 + pvt->flags.ecc_report = 1;
1652 +
1653 + reg->l |= K8_MSR_MCGCTL_NBE;
1654 + } else {
1655 + /*
1656 + * Turn off ECC reporting only when it was off before
1657 + */
1658 + if (!pvt->flags.ecc_report)
1659 + reg->l &= ~K8_MSR_MCGCTL_NBE;
1660 + }
1661 + }
1662 + wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
1663 +
1664 + free_cpumask_var(cmask);
1665 +
1666 + return 0;
1667 +}
1668 +
1669 /*
1670 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
1671 * enable it.
1672 @@ -2625,17 +2711,12 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
1673 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
1674 {
1675 struct amd64_pvt *pvt = mci->pvt_info;
1676 - const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
1677 - int cpu, idx = 0, err = 0;
1678 - struct msr msrs[cpumask_weight(cpumask)];
1679 - u32 value;
1680 - u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1681 + int err = 0;
1682 + u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1683
1684 if (!ecc_enable_override)
1685 return;
1686
1687 - memset(msrs, 0, sizeof(msrs));
1688 -
1689 amd64_printk(KERN_WARNING,
1690 "'ecc_enable_override' parameter is active, "
1691 "Enabling AMD ECC hardware now: CAUTION\n");
1692 @@ -2651,16 +2732,9 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
1693 value |= mask;
1694 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
1695
1696 - rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1697 -
1698 - for_each_cpu(cpu, cpumask) {
1699 - if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
1700 - set_bit(idx, &pvt->old_mcgctl);
1701 -
1702 - msrs[idx].l |= K8_MSR_MCGCTL_NBE;
1703 - idx++;
1704 - }
1705 - wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1706 + if (amd64_toggle_ecc_err_reporting(pvt, ON))
1707 + amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
1708 + "MCGCTL!\n");
1709
1710 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
1711 if (err)
1712 @@ -2701,17 +2775,12 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
1713
1714 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
1715 {
1716 - const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
1717 - int cpu, idx = 0, err = 0;
1718 - struct msr msrs[cpumask_weight(cpumask)];
1719 - u32 value;
1720 - u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1721 + int err = 0;
1722 + u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
1723
1724 if (!pvt->nbctl_mcgctl_saved)
1725 return;
1726
1727 - memset(msrs, 0, sizeof(msrs));
1728 -
1729 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
1730 if (err)
1731 debugf0("Reading K8_NBCTL failed\n");
1732 @@ -2721,66 +2790,9 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
1733 /* restore the NB Enable MCGCTL bit */
1734 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
1735
1736 - rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1737 -
1738 - for_each_cpu(cpu, cpumask) {
1739 - msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
1740 - msrs[idx].l |=
1741 - test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
1742 - idx++;
1743 - }
1744 -
1745 - wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
1746 -}
1747 -
1748 -/* get all cores on this DCT */
1749 -static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
1750 -{
1751 - int cpu;
1752 -
1753 - for_each_online_cpu(cpu)
1754 - if (amd_get_nb_id(cpu) == nid)
1755 - cpumask_set_cpu(cpu, mask);
1756 -}
1757 -
1758 -/* check MCG_CTL on all the cpus on this node */
1759 -static bool amd64_nb_mce_bank_enabled_on_node(int nid)
1760 -{
1761 - cpumask_t mask;
1762 - struct msr *msrs;
1763 - int cpu, nbe, idx = 0;
1764 - bool ret = false;
1765 -
1766 - cpumask_clear(&mask);
1767 -
1768 - get_cpus_on_this_dct_cpumask(&mask, nid);
1769 -
1770 - msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
1771 - if (!msrs) {
1772 - amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
1773 - __func__);
1774 - return false;
1775 - }
1776 -
1777 - rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
1778 -
1779 - for_each_cpu(cpu, &mask) {
1780 - nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
1781 -
1782 - debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
1783 - cpu, msrs[idx].q,
1784 - (nbe ? "enabled" : "disabled"));
1785 -
1786 - if (!nbe)
1787 - goto out;
1788 -
1789 - idx++;
1790 - }
1791 - ret = true;
1792 -
1793 -out:
1794 - kfree(msrs);
1795 - return ret;
1796 + if (amd64_toggle_ecc_err_reporting(pvt, OFF))
1797 + amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
1798 + "MCGCTL!\n");
1799 }
1800
1801 /*
1802 @@ -2824,9 +2836,8 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
1803 amd64_printk(KERN_WARNING, "%s", ecc_warning);
1804 return -ENODEV;
1805 }
1806 - } else
1807 - /* CLEAR the override, since BIOS controlled it */
1808 ecc_enable_override = 0;
1809 + }
1810
1811 return 0;
1812 }
1813 @@ -2909,7 +2920,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
1814 pvt->ext_model = boot_cpu_data.x86_model >> 4;
1815 pvt->mc_type_index = mc_type_index;
1816 pvt->ops = family_ops(mc_type_index);
1817 - pvt->old_mcgctl = 0;
1818
1819 /*
1820 * We have the dram_f2_ctl device as an argument, now go reserve its
1821 @@ -3071,16 +3081,15 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
1822
1823 amd64_free_mc_sibling_devices(pvt);
1824
1825 - kfree(pvt);
1826 - mci->pvt_info = NULL;
1827 -
1828 - mci_lookup[pvt->mc_node_id] = NULL;
1829 -
1830 /* unregister from EDAC MCE */
1831 amd_report_gart_errors(false);
1832 amd_unregister_ecc_decoder(amd64_decode_bus_error);
1833
1834 /* Free the EDAC CORE resources */
1835 + mci->pvt_info = NULL;
1836 + mci_lookup[pvt->mc_node_id] = NULL;
1837 +
1838 + kfree(pvt);
1839 edac_mc_free(mci);
1840 }
1841
1842 @@ -3157,23 +3166,29 @@ static void amd64_setup_pci_device(void)
1843 static int __init amd64_edac_init(void)
1844 {
1845 int nb, err = -ENODEV;
1846 + bool load_ok = false;
1847
1848 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
1849
1850 opstate_init();
1851
1852 if (cache_k8_northbridges() < 0)
1853 - return err;
1854 + goto err_ret;
1855 +
1856 + msrs = msrs_alloc();
1857 + if (!msrs)
1858 + goto err_ret;
1859
1860 err = pci_register_driver(&amd64_pci_driver);
1861 if (err)
1862 - return err;
1863 + goto err_pci;
1864
1865 /*
1866 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
1867 * amd64_pvt structs. These will be used in the 2nd stage init function
1868 * to finish initialization of the MC instances.
1869 */
1870 + err = -ENODEV;
1871 for (nb = 0; nb < num_k8_northbridges; nb++) {
1872 if (!pvt_lookup[nb])
1873 continue;
1874 @@ -3181,16 +3196,21 @@ static int __init amd64_edac_init(void)
1875 err = amd64_init_2nd_stage(pvt_lookup[nb]);
1876 if (err)
1877 goto err_2nd_stage;
1878 - }
1879
1880 - amd64_setup_pci_device();
1881 + load_ok = true;
1882 + }
1883
1884 - return 0;
1885 + if (load_ok) {
1886 + amd64_setup_pci_device();
1887 + return 0;
1888 + }
1889
1890 err_2nd_stage:
1891 - debugf0("2nd stage failed\n");
1892 pci_unregister_driver(&amd64_pci_driver);
1893 -
1894 +err_pci:
1895 + msrs_free(msrs);
1896 + msrs = NULL;
1897 +err_ret:
1898 return err;
1899 }
1900
1901 @@ -3200,6 +3220,9 @@ static void __exit amd64_edac_exit(void)
1902 edac_pci_release_generic_ctl(amd64_ctl_pci);
1903
1904 pci_unregister_driver(&amd64_pci_driver);
1905 +
1906 + msrs_free(msrs);
1907 + msrs = NULL;
1908 }
1909
1910 module_init(amd64_edac_init);
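
The toggle helper introduced above has one subtle property worth spelling out: on enable it records whether the BIOS had already set the NBE bit (pvt->flags.ecc_report), and on restore it clears the bit only if the driver itself turned it on. A minimal self-contained sketch of that save/restore pattern, with a plain integer standing in for one core's MCG_CTL MSR (all names below are illustrative stand-ins, not driver symbols):

#include <stdio.h>
#include <stdbool.h>

#define NBE_BIT (1u << 4)               /* stand-in for K8_MSR_MCGCTL_NBE */

static unsigned int fake_mcgctl;        /* stand-in for one core's MSR   */
static bool was_already_on;             /* stand-in for flags.ecc_report */

static void toggle_ecc_reporting(bool on)
{
        if (on) {
                if (fake_mcgctl & NBE_BIT)
                        was_already_on = true;  /* BIOS enabled it: remember */
                fake_mcgctl |= NBE_BIT;
        } else {
                /* turn reporting off only if it was off before we came */
                if (!was_already_on)
                        fake_mcgctl &= ~NBE_BIT;
        }
}

int main(void)
{
        fake_mcgctl = NBE_BIT;          /* pretend the BIOS set it */
        toggle_ecc_reporting(true);
        toggle_ecc_reporting(false);
        printf("NBE still set after restore: %s\n",
               (fake_mcgctl & NBE_BIT) ? "yes" : "no");
        return 0;
}

The removed old_mcgctl bitmask used to track the same thing per core; the rewrite keeps one ecc_report flag per node instead.
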
1911 diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
1912 index c6f359a..bba6c94 100644
1913 --- a/drivers/edac/amd64_edac.h
1914 +++ b/drivers/edac/amd64_edac.h
1915 @@ -147,6 +147,8 @@
1916 #define MAX_CS_COUNT 8
1917 #define DRAM_REG_COUNT 8
1918
1919 +#define ON true
1920 +#define OFF false
1921
1922 /*
1923 * PCI-defined configuration space registers
1924 @@ -386,10 +388,7 @@ enum {
1925 #define K8_NBCAP_DUAL_NODE BIT(1)
1926 #define K8_NBCAP_DCT_DUAL BIT(0)
1927
1928 -/*
1929 - * MSR Regs
1930 - */
1931 -#define K8_MSR_MCGCTL 0x017b
1932 +/* MSRs */
1933 #define K8_MSR_MCGCTL_NBE BIT(4)
1934
1935 #define K8_MSR_MC4CTL 0x0410
1936 @@ -487,7 +486,6 @@ struct amd64_pvt {
1937 /* Save old hw registers' values before we modified them */
1938 u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
1939 u32 old_nbctl;
1940 - unsigned long old_mcgctl; /* per core on this node */
1941
1942 /* MC Type Index value: socket F vs Family 10h */
1943 u32 mc_type_index;
1944 @@ -495,6 +493,7 @@ struct amd64_pvt {
1945 /* misc settings */
1946 struct flags {
1947 unsigned long cf8_extcfg:1;
1948 + unsigned long ecc_report:1;
1949 } flags;
1950 };
1951
1952 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
1953 index bbfd110..afed886 100644
1954 --- a/drivers/gpu/drm/drm_crtc_helper.c
1955 +++ b/drivers/gpu/drm/drm_crtc_helper.c
1956 @@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
1957 {
1958 int count = 0;
1959
1960 + /* disable all the possible outputs/crtcs before entering KMS mode */
1961 + drm_helper_disable_unused_functions(dev);
1962 +
1963 drm_fb_helper_parse_command_line(dev);
1964
1965 count = drm_helper_probe_connector_modes(dev,
1966 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1967 index c6777cb..19f93f2 100644
1968 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1969 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1970 @@ -249,13 +249,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
1971 if (ASIC_IS_DCE3(rdev))
1972 atombios_enable_crtc_memreq(crtc, 1);
1973 atombios_blank_crtc(crtc, 0);
1974 - drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
1975 + if (rdev->family < CHIP_R600)
1976 + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
1977 radeon_crtc_load_lut(crtc);
1978 break;
1979 case DRM_MODE_DPMS_STANDBY:
1980 case DRM_MODE_DPMS_SUSPEND:
1981 case DRM_MODE_DPMS_OFF:
1982 - drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
1983 + if (rdev->family < CHIP_R600)
1984 + drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
1985 atombios_blank_crtc(crtc, 1);
1986 if (ASIC_IS_DCE3(rdev))
1987 atombios_enable_crtc_memreq(crtc, 0);
1988 diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
1989 index f8a465d..c8942ca 100644
1990 --- a/drivers/gpu/drm/radeon/radeon_test.c
1991 +++ b/drivers/gpu/drm/radeon/radeon_test.c
1992 @@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
1993 /* Number of tests =
1994 * (Total GTT - IB pool - writeback page - ring buffer) / test size
1995 */
1996 - n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
1997 - rdev->cp.ring_size) / size;
1998 + n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
1999 + rdev->cp.ring_size)) / size;
2000
2001 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
2002 if (!gtt_obj) {
2003 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
2004 index ebe38b6..864a371 100644
2005 --- a/drivers/hwmon/sht15.c
2006 +++ b/drivers/hwmon/sht15.c
2007 @@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
2008 int d1 = 0;
2009 int i;
2010
2011 - for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
2012 + for (i = 1; i < ARRAY_SIZE(temppoints); i++)
2013 /* Find pointer to interpolate */
2014 if (data->supply_uV > temppoints[i - 1].vdd) {
2015 d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
2016 @@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
2017
2018 const int c1 = -4;
2019 const int c2 = 40500; /* x 10 ^ -6 */
2020 - const int c3 = 2800; /* x10 ^ -9 */
2021 + const int c3 = -2800; /* x10 ^ -9 */
2022
2023 RHlinear = c1*1000
2024 + c2 * data->val_humid/1000
2025 + (data->val_humid * data->val_humid * c3)/1000000;
2026 - return (temp - 25000) * (10000 + 800 * data->val_humid)
2027 + return (temp - 25000) * (10000 + 80 * data->val_humid)
2028 / 1000000 + RHlinear;
2029 }
2030
2031 diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
2032 index 951c57b..ede4658 100644
2033 --- a/drivers/lguest/segments.c
2034 +++ b/drivers/lguest/segments.c
2035 @@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
2036 * We assume the Guest has the same number of GDT entries as the
2037 * Host, otherwise we'd have to dynamically allocate the Guest GDT.
2038 */
2039 - if (num >= ARRAY_SIZE(cpu->arch.gdt))
2040 + if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
2041 kill_guest(cpu, "too many gdt entries %i", num);
2042 + return;
2043 + }
2044
2045 /* Set it up, then fix it. */
2046 cpu->arch.gdt[num].a = lo;
2047 diff --git a/drivers/md/md.c b/drivers/md/md.c
2048 index 02e4551..c6a6685 100644
2049 --- a/drivers/md/md.c
2050 +++ b/drivers/md/md.c
2051 @@ -282,7 +282,9 @@ static void mddev_put(mddev_t *mddev)
2052 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
2053 return;
2054 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
2055 - !mddev->hold_active) {
2056 + mddev->ctime == 0 && !mddev->hold_active) {
2057 + /* Array is not configured at all, and not held active,
2058 + * so destroy it */
2059 list_del(&mddev->all_mddevs);
2060 if (mddev->gendisk) {
2061 /* we did a probe so need to clean up.
2062 @@ -5071,6 +5073,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2063 mddev->minor_version = info->minor_version;
2064 mddev->patch_version = info->patch_version;
2065 mddev->persistent = !info->not_persistent;
2066 + /* ensure mddev_put doesn't delete this now that there
2067 + * is some minimal configuration.
2068 + */
2069 + mddev->ctime = get_seconds();
2070 return 0;
2071 }
2072 mddev->major_version = MD_MAJOR_VERSION;
2073 diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
2074 index 0bc2cf5..2bed9e2 100644
2075 --- a/drivers/media/video/ov511.c
2076 +++ b/drivers/media/video/ov511.c
2077 @@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
2078 goto error;
2079 }
2080
2081 - mutex_lock(&ov->lock);
2082 + mutex_unlock(&ov->lock);
2083
2084 return 0;
2085
2086 diff --git a/drivers/net/e100.c b/drivers/net/e100.c
2087 index d269a68..0c53c92 100644
2088 --- a/drivers/net/e100.c
2089 +++ b/drivers/net/e100.c
2090 @@ -1817,6 +1817,7 @@ static int e100_alloc_cbs(struct nic *nic)
2091 &nic->cbs_dma_addr);
2092 if (!nic->cbs)
2093 return -ENOMEM;
2094 + memset(nic->cbs, 0, count * sizeof(struct cb));
2095
2096 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
2097 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
2098 @@ -1825,7 +1826,6 @@ static int e100_alloc_cbs(struct nic *nic)
2099 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
2100 cb->link = cpu_to_le32(nic->cbs_dma_addr +
2101 ((i+1) % count) * sizeof(struct cb));
2102 - cb->skb = NULL;
2103 }
2104
2105 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
2106 diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
2107 index b091e20..f14d225 100644
2108 --- a/drivers/net/usb/rtl8150.c
2109 +++ b/drivers/net/usb/rtl8150.c
2110 @@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
2111 dbg("%02X:", netdev->dev_addr[i]);
2112 dbg("%02X\n", netdev->dev_addr[i]);
2113 /* Set the IDR registers. */
2114 - set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
2115 + set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
2116 #ifdef EEPROM_WRITE
2117 {
2118 u8 cr;
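
The rtl8150 one-liner fixes a classic C pitfall: in this kernel dev_addr is a pointer member of struct net_device, so sizeof(netdev->dev_addr) yields the pointer size (4 or 8 bytes), not the 6-byte MAC address length, which is what addr_len holds. A standalone illustration (the struct and values are made up for the demo):

#include <stdio.h>

/* Hypothetical reduction of the bug: dev_addr is a pointer member,
 * so sizeof() measures the pointer, and the stored addr_len is what
 * must be passed to hardware register writes. */
struct fake_netdev {
        unsigned char *dev_addr;
        unsigned int addr_len;
};

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        struct fake_netdev nd = { .dev_addr = mac, .addr_len = 6 };

        printf("sizeof(nd.dev_addr) = %zu (pointer size)\n",
               sizeof(nd.dev_addr));
        printf("nd.addr_len         = %u  (actual MAC length)\n",
               nd.addr_len);
        return 0;
}
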
2119 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
2120 index 95a8e23..8a82c75 100644
2121 --- a/drivers/net/wireless/ath/ath5k/base.c
2122 +++ b/drivers/net/wireless/ath/ath5k/base.c
2123 @@ -2349,6 +2349,9 @@ ath5k_init(struct ath5k_softc *sc)
2124 */
2125 ath5k_stop_locked(sc);
2126
2127 + /* Set PHY calibration interval */
2128 + ah->ah_cal_intval = ath5k_calinterval;
2129 +
2130 /*
2131 * The basic interface to setting the hardware in a good
2132 * state is ``reset''. On return the hardware is known to
2133 @@ -2376,10 +2379,6 @@ ath5k_init(struct ath5k_softc *sc)
2134
2135 /* Set ack to be sent at low bit-rates */
2136 ath5k_hw_set_ack_bitrate_high(ah, false);
2137 -
2138 - /* Set PHY calibration inteval */
2139 - ah->ah_cal_intval = ath5k_calinterval;
2140 -
2141 ret = 0;
2142 done:
2143 mmiowb();
2144 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
2145 index 57f1463..ff4383b 100644
2146 --- a/drivers/net/wireless/ath/ath9k/hw.h
2147 +++ b/drivers/net/wireless/ath/ath9k/hw.h
2148 @@ -408,7 +408,7 @@ struct ath9k_hw_version {
2149 * Using de Bruijn sequence to look up 1's index in a 32 bit number
2150 * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
2151 */
2152 -#define debruijn32 0x077CB531UL
2153 +#define debruijn32 0x077CB531U
2154
2155 struct ath_gen_timer_configuration {
2156 u32 next_addr;
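
The suffix change matters on 64-bit builds: with UL the constant is 64 bits wide, the multiply no longer wraps modulo 2^32, and the >> 27 hash can yield indices above 31; with U the arithmetic stays 32-bit, which the de Bruijn trick requires. A self-contained sketch of the lookup this constant supports (the position table below is the standard published table for 0x077CB531, not copied from this driver):

#include <stdio.h>
#include <stdint.h>

#define DEBRUIJN32 0x077CB531U

static const int debruijn_pos[32] = {
         0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
        31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9
};

static int lowest_set_bit(uint32_t v)
{
        /* v & -v isolates the lowest set bit; the 32-bit product's
         * top five bits are then a perfect hash of that bit's index */
        return debruijn_pos[((v & -v) * DEBRUIJN32) >> 27];
}

int main(void)
{
        printf("%d %d %d\n", lowest_set_bit(0x1),
               lowest_set_bit(0x80), lowest_set_bit(0x80000000u));
        return 0;
}

The demo prints 0 7 31; with a 64-bit constant the last case would index past the end of the table.
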
2157 diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
2158 index d4d9d82..110c16d 100644
2159 --- a/drivers/net/wireless/ath/ath9k/mac.c
2160 +++ b/drivers/net/wireless/ath/ath9k/mac.c
2161 @@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
2162 wait = wait_time;
2163 while (ath9k_hw_numtxpending(ah, q)) {
2164 if ((--wait) == 0) {
2165 - DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
2166 + DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
2167 "Failed to stop TX DMA in 100 "
2168 "msec after killing last frame\n");
2169 break;
2170 diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
2171 index ff65f85..9720c4d 100644
2172 --- a/drivers/net/wireless/ath/ath9k/mac.h
2173 +++ b/drivers/net/wireless/ath/ath9k/mac.h
2174 @@ -77,6 +77,9 @@
2175 #define ATH9K_TXERR_XTXOP 0x08
2176 #define ATH9K_TXERR_TIMER_EXPIRED 0x10
2177 #define ATH9K_TX_ACKED 0x20
2178 +#define ATH9K_TXERR_MASK \
2179 + (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
2180 + ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
2181
2182 #define ATH9K_TX_BA 0x01
2183 #define ATH9K_TX_PWRMGMT 0x02
2184 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2185 index 59359e3..80df8f3 100644
2186 --- a/drivers/net/wireless/ath/ath9k/main.c
2187 +++ b/drivers/net/wireless/ath/ath9k/main.c
2188 @@ -2147,6 +2147,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2189 return; /* another wiphy still in use */
2190 }
2191
2192 + /* Ensure HW is awake when we try to shut it down. */
2193 + ath9k_ps_wakeup(sc);
2194 +
2195 if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
2196 ath9k_hw_btcoex_disable(sc->sc_ah);
2197 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
2198 @@ -2167,6 +2170,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2199 /* disable HAL and put h/w to sleep */
2200 ath9k_hw_disable(sc->sc_ah);
2201 ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
2202 + ath9k_ps_restore(sc);
2203 +
2204 + /* Finally, put the chip in FULL SLEEP mode */
2205 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
2206
2207 sc->sc_flags |= SC_OP_INVALID;
2208 @@ -2277,8 +2283,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2209 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2210 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2211 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2212 + ath9k_ps_wakeup(sc);
2213 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2214 ath_beacon_return(sc, avp);
2215 + ath9k_ps_restore(sc);
2216 }
2217
2218 sc->sc_flags &= ~SC_OP_BEACONS;
2219 @@ -2724,15 +2732,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2220 case IEEE80211_AMPDU_RX_STOP:
2221 break;
2222 case IEEE80211_AMPDU_TX_START:
2223 + ath9k_ps_wakeup(sc);
2224 ath_tx_aggr_start(sc, sta, tid, ssn);
2225 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2226 + ath9k_ps_restore(sc);
2227 break;
2228 case IEEE80211_AMPDU_TX_STOP:
2229 + ath9k_ps_wakeup(sc);
2230 ath_tx_aggr_stop(sc, sta, tid);
2231 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2232 + ath9k_ps_restore(sc);
2233 break;
2234 case IEEE80211_AMPDU_TX_OPERATIONAL:
2235 + ath9k_ps_wakeup(sc);
2236 ath_tx_aggr_resume(sc, sta, tid);
2237 + ath9k_ps_restore(sc);
2238 break;
2239 default:
2240 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
2241 diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
2242 index d83b77f..c0d7e65 100644
2243 --- a/drivers/net/wireless/ath/ath9k/reg.h
2244 +++ b/drivers/net/wireless/ath/ath9k/reg.h
2245 @@ -969,10 +969,10 @@ enum {
2246 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
2247 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
2248 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
2249 +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400
2250 +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10
2251 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
2252 #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
2253 -#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000
2254 -#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1
2255 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
2256 #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
2257 #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
2258 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2259 index 4753909..2c6b063 100644
2260 --- a/drivers/net/wireless/ath/ath9k/xmit.c
2261 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2262 @@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
2263 if (npend) {
2264 int r;
2265
2266 - DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
2267 + DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n");
2268
2269 spin_lock_bh(&sc->sc_resetlock);
2270 - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
2271 + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
2272 if (r)
2273 DPRINTF(sc, ATH_DBG_FATAL,
2274 "Unable to reset hardware; reset status %d\n",
2275 @@ -2020,7 +2020,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2276 if (bf_isaggr(bf))
2277 txq->axq_aggr_depth--;
2278
2279 - txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
2280 + txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
2281 txq->axq_tx_inprogress = false;
2282 spin_unlock_bh(&txq->axq_lock);
2283
2284 diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
2285 index ffdce6f..78016ae 100644
2286 --- a/drivers/net/wireless/b43/rfkill.c
2287 +++ b/drivers/net/wireless/b43/rfkill.c
2288 @@ -33,8 +33,14 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
2289 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
2290 return 1;
2291 } else {
2292 - if (b43_status(dev) >= B43_STAT_STARTED &&
2293 - b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
2294 + /* To prevent CPU fault on PPC, do not read a register
2295 + * unless the interface is started; however, on resume
2296 + * from hibernation, this routine is entered early. When
2297 + * that happens, unconditionally return TRUE.
2298 + */
2299 + if (b43_status(dev) < B43_STAT_STARTED)
2300 + return 1;
2301 + if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
2302 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
2303 return 1;
2304 }
2305 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
2306 index f059b49..9d60f6c 100644
2307 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
2308 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
2309 @@ -2895,6 +2895,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2310 .mod_params = &iwl3945_mod_params,
2311 .use_isr_legacy = true,
2312 .ht_greenfield_support = false,
2313 + .broken_powersave = true,
2314 };
2315
2316 static struct iwl_cfg iwl3945_abg_cfg = {
2317 @@ -2909,6 +2910,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2318 .mod_params = &iwl3945_mod_params,
2319 .use_isr_legacy = true,
2320 .ht_greenfield_support = false,
2321 + .broken_powersave = true,
2322 };
2323
2324 struct pci_device_id iwl3945_hw_card_ids[] = {
2325 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
2326 index 6f703a0..f4e2e84 100644
2327 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
2328 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
2329 @@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2330 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2331
2332 /* calculate tx gain adjustment based on power supply voltage */
2333 - voltage = priv->calib_info->voltage;
2334 + voltage = le16_to_cpu(priv->calib_info->voltage);
2335 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2336 voltage_compensation =
2337 iwl4965_get_voltage_compensation(voltage, init_voltage);
2338 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
2339 index 4ef6804..bc056e9 100644
2340 --- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
2341 +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
2342 @@ -92,11 +92,15 @@
2343
2344 static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
2345 {
2346 - u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
2347 - EEPROM_5000_TEMPERATURE);
2348 - /* offset = temperature - voltage / coef */
2349 - s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
2350 - return offset;
2351 + u16 temperature, voltage;
2352 + __le16 *temp_calib =
2353 + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
2354 +
2355 + temperature = le16_to_cpu(temp_calib[0]);
2356 + voltage = le16_to_cpu(temp_calib[1]);
2357 +
2358 + /* offset = temp - volt / coeff */
2359 + return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
2360 }
2361
2362 /* Fixed (non-configurable) rx data from phy */
2363 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
2364 index 6e6f516..94a1225 100644
2365 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
2366 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
2367 @@ -460,14 +460,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
2368 static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
2369 {
2370 struct iwl_calib_xtal_freq_cmd cmd;
2371 - u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
2372 + __le16 *xtal_calib =
2373 + (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
2374
2375 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
2376 cmd.hdr.first_group = 0;
2377 cmd.hdr.groups_num = 1;
2378 cmd.hdr.data_valid = 1;
2379 - cmd.cap_pin1 = (u8)xtal_calib[0];
2380 - cmd.cap_pin2 = (u8)xtal_calib[1];
2381 + cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
2382 + cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
2383 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
2384 (u8 *)&cmd, sizeof(cmd));
2385 }
2386 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
2387 index 028d505..c2d9b7a 100644
2388 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
2389 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
2390 @@ -1149,7 +1149,7 @@ struct iwl_priv {
2391 u32 last_beacon_time;
2392 u64 last_tsf;
2393
2394 - /* eeprom */
2395 + /* eeprom -- this is in the card's little endian byte order */
2396 u8 *eeprom;
2397 int nvm_device_type;
2398 struct iwl_eeprom_calib_info *calib_info;
2399 diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
2400 index e14c995..18dc3a4 100644
2401 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
2402 +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
2403 @@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
2404 return ret;
2405 }
2406
2407 -static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
2408 +static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
2409 {
2410 int ret = 0;
2411 u32 r;
2412 @@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
2413 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
2414 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
2415 }
2416 - *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
2417 + *eeprom_data = cpu_to_le16(r >> 16);
2418 return 0;
2419 }
2420
2421 @@ -379,7 +379,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
2422 */
2423 static bool iwl_is_otp_empty(struct iwl_priv *priv)
2424 {
2425 - u16 next_link_addr = 0, link_value;
2426 + u16 next_link_addr = 0;
2427 + __le16 link_value;
2428 bool is_empty = false;
2429
2430 /* locate the beginning of OTP link list */
2431 @@ -409,7 +410,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
2432 static int iwl_find_otp_image(struct iwl_priv *priv,
2433 u16 *validblockaddr)
2434 {
2435 - u16 next_link_addr = 0, link_value = 0, valid_addr;
2436 + u16 next_link_addr = 0, valid_addr;
2437 + __le16 link_value = 0;
2438 int usedblocks = 0;
2439
2440 /* set addressing mode to absolute to traverse the link list */
2441 @@ -429,7 +431,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
2442 * check for more block on the link list
2443 */
2444 valid_addr = next_link_addr;
2445 - next_link_addr = link_value * sizeof(u16);
2446 + next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
2447 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
2448 usedblocks, next_link_addr);
2449 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
2450 @@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
2451 */
2452 int iwl_eeprom_init(struct iwl_priv *priv)
2453 {
2454 - u16 *e;
2455 + __le16 *e;
2456 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
2457 int sz;
2458 int ret;
2459 @@ -482,7 +484,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
2460 ret = -ENOMEM;
2461 goto alloc_err;
2462 }
2463 - e = (u16 *)priv->eeprom;
2464 + e = (__le16 *)priv->eeprom;
2465
2466 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
2467 if (ret < 0) {
2468 @@ -521,7 +523,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
2469 }
2470 for (addr = validblockaddr; addr < validblockaddr + sz;
2471 addr += sizeof(u16)) {
2472 - u16 eeprom_data;
2473 + __le16 eeprom_data;
2474
2475 ret = iwl_read_otp_word(priv, addr, &eeprom_data);
2476 if (ret)
2477 @@ -545,7 +547,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
2478 goto done;
2479 }
2480 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
2481 - e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
2482 + e[addr / 2] = cpu_to_le16(r >> 16);
2483 }
2484 }
2485 ret = 0;
2486 @@ -709,7 +711,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
2487 ch_info->ht40_min_power = 0;
2488 ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
2489 ch_info->ht40_flags = eeprom_ch->flags;
2490 - ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
2491 + if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
2492 + ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
2493
2494 return 0;
2495 }
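
The running theme of these iwlwifi hunks is to keep the EEPROM/OTP image in the card's little-endian byte order and convert with le16_to_cpu() at each point of use, so the driver reads the same values on big-endian hosts. A small portable sketch of what such a conversion does (the byte values are made up):

#include <stdio.h>
#include <stdint.h>

/* Portable equivalent of le16_to_cpu(): assemble the value from bytes
 * instead of type-punning, so it is correct on any host endianness. */
static uint16_t le16_to_host(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* a made-up pair of EEPROM words stored little-endian */
        uint8_t image[4] = { 0x34, 0x12, 0x78, 0x56 };

        uint16_t first  = le16_to_host(&image[0]);   /* 0x1234 */
        uint16_t second = le16_to_host(&image[2]);   /* 0x5678 */

        printf("0x%04x 0x%04x\n", first, second);
        return 0;
}

In the kernel, le16_to_cpu() is a byte swap on big-endian builds and a no-op on little-endian ones, which is why the __le16 annotations matter to sparse but cost nothing at runtime on x86.
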
2496 diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
2497 index 80b9e45..fc93f12 100644
2498 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
2499 +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
2500 @@ -133,7 +133,7 @@ struct iwl_eeprom_channel {
2501 *
2502 */
2503 struct iwl_eeprom_enhanced_txpwr {
2504 - u16 reserved;
2505 + __le16 common;
2506 s8 chain_a_max;
2507 s8 chain_b_max;
2508 s8 chain_c_max;
2509 @@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info {
2510 struct iwl_eeprom_calib_info {
2511 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
2512 u8 saturation_power52; /* half-dBm */
2513 - s16 voltage; /* signed */
2514 + __le16 voltage; /* signed */
2515 struct iwl_eeprom_calib_subband_info
2516 band_info[EEPROM_TX_POWER_BANDS];
2517 } __attribute__ ((packed));
2518 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2519 index d00a803..5f26c93 100644
2520 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2521 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2522 @@ -562,6 +562,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
2523 txq = &priv->txq[txq_id];
2524 q = &txq->q;
2525
2526 + if ((iwl_queue_space(q) < q->high_mark))
2527 + goto drop;
2528 +
2529 spin_lock_irqsave(&priv->lock, flags);
2530
2531 idx = get_cmd_index(q, q->write_ptr, 0);
2532 @@ -3854,9 +3857,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
2533 /* Tell mac80211 our characteristics */
2534 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2535 IEEE80211_HW_NOISE_DBM |
2536 - IEEE80211_HW_SPECTRUM_MGMT |
2537 - IEEE80211_HW_SUPPORTS_PS |
2538 - IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2539 + IEEE80211_HW_SPECTRUM_MGMT;
2540 +
2541 + if (!priv->cfg->broken_powersave)
2542 + hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2543 + IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2544
2545 hw->wiphy->interface_modes =
2546 BIT(NL80211_IFTYPE_STATION) |
2547 diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
2548 index 1b02a4e..93c8989 100644
2549 --- a/drivers/net/wireless/iwmc3200wifi/iwm.h
2550 +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
2551 @@ -258,7 +258,7 @@ struct iwm_priv {
2552
2553 struct sk_buff_head rx_list;
2554 struct list_head rx_tickets;
2555 - struct list_head rx_packets[IWM_RX_ID_HASH];
2556 + struct list_head rx_packets[IWM_RX_ID_HASH + 1];
2557 struct workqueue_struct *rx_wq;
2558 struct work_struct rx_worker;
2559
2560 diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
2561 index be837a0..01c738b 100644
2562 --- a/drivers/net/wireless/libertas/wext.c
2563 +++ b/drivers/net/wireless/libertas/wext.c
2564 @@ -1953,10 +1953,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
2565 if (priv->connect_status == LBS_CONNECTED) {
2566 memcpy(extra, priv->curbssparams.ssid,
2567 priv->curbssparams.ssid_len);
2568 - extra[priv->curbssparams.ssid_len] = '\0';
2569 } else {
2570 memset(extra, 0, 32);
2571 - extra[priv->curbssparams.ssid_len] = '\0';
2572 }
2573 /*
2574 * If none, we may want to get the one that was set
2575 diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
2576 index 7698fdd..31ca241 100644
2577 --- a/drivers/net/wireless/orinoco/wext.c
2578 +++ b/drivers/net/wireless/orinoco/wext.c
2579 @@ -23,7 +23,7 @@
2580 #define MAX_RID_LEN 1024
2581
2582 /* Helper routine to record keys
2583 - * Do not call from interrupt context */
2584 + * It is called under orinoco_lock so it may not sleep */
2585 static int orinoco_set_key(struct orinoco_private *priv, int index,
2586 enum orinoco_alg alg, const u8 *key, int key_len,
2587 const u8 *seq, int seq_len)
2588 @@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
2589 kzfree(priv->keys[index].seq);
2590
2591 if (key_len) {
2592 - priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
2593 + priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
2594 if (!priv->keys[index].key)
2595 goto nomem;
2596 } else
2597 priv->keys[index].key = NULL;
2598
2599 if (seq_len) {
2600 - priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
2601 + priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
2602 if (!priv->keys[index].seq)
2603 goto free_key;
2604 } else
2605 diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
2606 index b20e3ea..9a6ceb4 100644
2607 --- a/drivers/net/wireless/rt2x00/rt61pci.c
2608 +++ b/drivers/net/wireless/rt2x00/rt61pci.c
2609 @@ -2538,6 +2538,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2610 unsigned int i;
2611
2612 /*
2613 + * Disable powersaving by default.
2614 + */
2615 + rt2x00dev->hw->wiphy->ps_default = false;
2616 +
2617 + /*
2618 * Initialize all hw fields.
2619 */
2620 rt2x00dev->hw->flags =
2621 diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
2622 index d8b4229..4d922e4 100644
2623 --- a/drivers/platform/x86/acerhdf.c
2624 +++ b/drivers/platform/x86/acerhdf.c
2625 @@ -640,9 +640,10 @@ static void __exit acerhdf_exit(void)
2626 MODULE_LICENSE("GPL");
2627 MODULE_AUTHOR("Peter Feuerer");
2628 MODULE_DESCRIPTION("Aspire One temperature and fan driver");
2629 -MODULE_ALIAS("dmi:*:*Acer*:*:");
2630 -MODULE_ALIAS("dmi:*:*Gateway*:*:");
2631 -MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
2632 +MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
2633 +MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
2634 +MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
2635 +MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
2636
2637 module_init(acerhdf_init);
2638 module_exit(acerhdf_exit);
2639 diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
2640 index 4e49b4a..8174ec9 100644
2641 --- a/drivers/s390/block/dasd_diag.c
2642 +++ b/drivers/s390/block/dasd_diag.c
2643 @@ -145,6 +145,15 @@ dasd_diag_erp(struct dasd_device *device)
2644
2645 mdsk_term_io(device);
2646 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
2647 + if (rc == 4) {
2648 + if (!(device->features & DASD_FEATURE_READONLY)) {
2649 + dev_warn(&device->cdev->dev,
2650 + "The access mode of a DIAG device changed"
2651 + " to read-only");
2652 + device->features |= DASD_FEATURE_READONLY;
2653 + }
2654 + rc = 0;
2655 + }
2656 if (rc)
2657 dev_warn(&device->cdev->dev, "DIAG ERP failed with "
2658 "rc=%d\n", rc);
2659 @@ -433,16 +442,20 @@ dasd_diag_check_device(struct dasd_device *device)
2660 for (sb = 512; sb < bsize; sb = sb << 1)
2661 block->s2b_shift++;
2662 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
2663 - if (rc) {
2664 + if (rc && (rc != 4)) {
2665 dev_warn(&device->cdev->dev, "DIAG initialization "
2666 "failed with rc=%d\n", rc);
2667 rc = -EIO;
2668 } else {
2669 + if (rc == 4)
2670 + device->features |= DASD_FEATURE_READONLY;
2671 dev_info(&device->cdev->dev,
2672 - "New DASD with %ld byte/block, total size %ld KB\n",
2673 + "New DASD with %ld byte/block, total size %ld KB%s\n",
2674 (unsigned long) block->bp_block,
2675 (unsigned long) (block->blocks <<
2676 - block->s2b_shift) >> 1);
2677 + block->s2b_shift) >> 1,
2678 + (rc == 4) ? ", read-only device" : "");
2679 + rc = 0;
2680 }
2681 out_label:
2682 free_page((long) label);
2683 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
2684 index 76d294f..c3ff9a6 100644
2685 --- a/drivers/scsi/ipr.c
2686 +++ b/drivers/scsi/ipr.c
2687 @@ -6516,6 +6516,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
2688 int rc;
2689
2690 ENTER;
2691 + ioa_cfg->pdev->state_saved = true;
2692 rc = pci_restore_state(ioa_cfg->pdev);
2693
2694 if (rc != PCIBIOS_SUCCESSFUL) {
2695 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2696 index b79fca7..06bbe0d 100644
2697 --- a/drivers/scsi/qla2xxx/qla_os.c
2698 +++ b/drivers/scsi/qla2xxx/qla_os.c
2699 @@ -2016,13 +2016,13 @@ skip_dpc:
2700 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2701 base_vha->host_no, ha));
2702
2703 - base_vha->flags.init_done = 1;
2704 - base_vha->flags.online = 1;
2705 -
2706 ret = scsi_add_host(host, &pdev->dev);
2707 if (ret)
2708 goto probe_failed;
2709
2710 + base_vha->flags.init_done = 1;
2711 + base_vha->flags.online = 1;
2712 +
2713 ha->isp_ops->enable_intrs(ha);
2714
2715 scsi_scan_host(host);
2716 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2717 index c6f70da..45be82f 100644
2718 --- a/drivers/scsi/scsi_transport_fc.c
2719 +++ b/drivers/scsi/scsi_transport_fc.c
2720 @@ -648,11 +648,22 @@ static __init int fc_transport_init(void)
2721 return error;
2722 error = transport_class_register(&fc_vport_class);
2723 if (error)
2724 - return error;
2725 + goto unreg_host_class;
2726 error = transport_class_register(&fc_rport_class);
2727 if (error)
2728 - return error;
2729 - return transport_class_register(&fc_transport_class);
2730 + goto unreg_vport_class;
2731 + error = transport_class_register(&fc_transport_class);
2732 + if (error)
2733 + goto unreg_rport_class;
2734 + return 0;
2735 +
2736 +unreg_rport_class:
2737 + transport_class_unregister(&fc_rport_class);
2738 +unreg_vport_class:
2739 + transport_class_unregister(&fc_vport_class);
2740 +unreg_host_class:
2741 + transport_class_unregister(&fc_host_class);
2742 + return error;
2743 }
2744
2745 static void __exit fc_transport_exit(void)
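
The fc_transport_init() fix converts early returns into the kernel's usual goto-based unwind, so every class registered before a failure is unregistered in reverse order. A runnable model of the pattern with stub register/unregister functions (fail_at is a knob for the demo, not part of the driver):

#include <stdio.h>

static int fail_at = 3;         /* which registration should fail */

static int reg(int n)    { printf("register   %d\n", n); return n == fail_at ? -1 : 0; }
static void unreg(int n) { printf("unregister %d\n", n); }

static int init(void)
{
        int err;

        err = reg(1);
        if (err)
                return err;
        err = reg(2);
        if (err)
                goto unreg1;
        err = reg(3);
        if (err)
                goto unreg2;
        return 0;

unreg2:
        unreg(2);
unreg1:
        unreg(1);
        return err;
}

int main(void)
{
        printf("init -> %d\n", init());
        return 0;
}
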
2746 diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
2747 index 12d58a7..5081f97 100644
2748 --- a/drivers/scsi/st.c
2749 +++ b/drivers/scsi/st.c
2750 @@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
2751 SRpnt->waiting = waiting;
2752
2753 if (STp->buffer->do_dio) {
2754 + mdata->page_order = 0;
2755 mdata->nr_entries = STp->buffer->sg_segs;
2756 mdata->pages = STp->buffer->mapped_pages;
2757 } else {
2758 + mdata->page_order = STp->buffer->reserved_page_order;
2759 mdata->nr_entries =
2760 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
2761 - STp->buffer->map_data.pages = STp->buffer->reserved_pages;
2762 - STp->buffer->map_data.offset = 0;
2763 + mdata->pages = STp->buffer->reserved_pages;
2764 + mdata->offset = 0;
2765 }
2766
2767 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
2768 @@ -3718,7 +3720,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
2769 priority |= __GFP_ZERO;
2770
2771 if (STbuffer->frp_segs) {
2772 - order = STbuffer->map_data.page_order;
2773 + order = STbuffer->reserved_page_order;
2774 b_size = PAGE_SIZE << order;
2775 } else {
2776 for (b_size = PAGE_SIZE, order = 0;
2777 @@ -3751,7 +3753,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
2778 segs++;
2779 }
2780 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
2781 - STbuffer->map_data.page_order = order;
2782 + STbuffer->reserved_page_order = order;
2783
2784 return 1;
2785 }
2786 @@ -3764,7 +3766,7 @@ static void clear_buffer(struct st_buffer * st_bp)
2787
2788 for (i=0; i < st_bp->frp_segs; i++)
2789 memset(page_address(st_bp->reserved_pages[i]), 0,
2790 - PAGE_SIZE << st_bp->map_data.page_order);
2791 + PAGE_SIZE << st_bp->reserved_page_order);
2792 st_bp->cleared = 1;
2793 }
2794
2795 @@ -3772,7 +3774,7 @@ static void clear_buffer(struct st_buffer * st_bp)
2796 /* Release the extra buffer */
2797 static void normalize_buffer(struct st_buffer * STbuffer)
2798 {
2799 - int i, order = STbuffer->map_data.page_order;
2800 + int i, order = STbuffer->reserved_page_order;
2801
2802 for (i = 0; i < STbuffer->frp_segs; i++) {
2803 __free_pages(STbuffer->reserved_pages[i], order);
2804 @@ -3780,7 +3782,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
2805 }
2806 STbuffer->frp_segs = 0;
2807 STbuffer->sg_segs = 0;
2808 - STbuffer->map_data.page_order = 0;
2809 + STbuffer->reserved_page_order = 0;
2810 STbuffer->map_data.offset = 0;
2811 }
2812
2813 @@ -3790,7 +3792,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
2814 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
2815 {
2816 int i, cnt, res, offset;
2817 - int length = PAGE_SIZE << st_bp->map_data.page_order;
2818 + int length = PAGE_SIZE << st_bp->reserved_page_order;
2819
2820 for (i = 0, offset = st_bp->buffer_bytes;
2821 i < st_bp->frp_segs && offset >= length; i++)
2822 @@ -3822,7 +3824,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
2823 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
2824 {
2825 int i, cnt, res, offset;
2826 - int length = PAGE_SIZE << st_bp->map_data.page_order;
2827 + int length = PAGE_SIZE << st_bp->reserved_page_order;
2828
2829 for (i = 0, offset = st_bp->read_pointer;
2830 i < st_bp->frp_segs && offset >= length; i++)
2831 @@ -3855,7 +3857,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
2832 {
2833 int src_seg, dst_seg, src_offset = 0, dst_offset;
2834 int count, total;
2835 - int length = PAGE_SIZE << st_bp->map_data.page_order;
2836 + int length = PAGE_SIZE << st_bp->reserved_page_order;
2837
2838 if (offset == 0)
2839 return;
2840 @@ -4577,7 +4579,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
2841 }
2842
2843 mdata->offset = uaddr & ~PAGE_MASK;
2844 - mdata->page_order = 0;
2845 STbp->mapped_pages = pages;
2846
2847 return nr_pages;
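
With the reserved-buffer page order split into its own field, each reserved segment is PAGE_SIZE << reserved_page_order bytes, and the copy/move loops above first skip whole segments of that size. A standalone sketch of the offset arithmetic (the order and offset values are arbitrary):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Map a linear buffer offset to (segment index, offset within segment)
 * for segments of PAGE_SIZE << order bytes, as the st copy loops do. */
static void locate(int offset, int order, int *seg, int *rem)
{
        int length = PAGE_SIZE << order;

        *seg = 0;
        while (offset >= length) {
                offset -= length;
                (*seg)++;
        }
        *rem = offset;
}

int main(void)
{
        int seg, rem;

        locate(70000, 2, &seg, &rem);   /* 16 KB segments */
        printf("segment %d, offset %d\n", seg, rem);
        return 0;
}
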
2848 diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
2849 index 544dc6b..f91a67c 100644
2850 --- a/drivers/scsi/st.h
2851 +++ b/drivers/scsi/st.h
2852 @@ -46,6 +46,7 @@ struct st_buffer {
2853 struct st_request *last_SRpnt;
2854 struct st_cmdstatus cmdstat;
2855 struct page **reserved_pages;
2856 + int reserved_page_order;
2857 struct page **mapped_pages;
2858 struct rq_map_data map_data;
2859 unsigned char *b_data;
2860 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2861 index 0f857e6..8b0c235 100644
2862 --- a/drivers/usb/core/hub.c
2863 +++ b/drivers/usb/core/hub.c
2864 @@ -1612,12 +1612,12 @@ static inline void announce_device(struct usb_device *udev) { }
2865 #endif
2866
2867 /**
2868 - * usb_configure_device_otg - FIXME (usbcore-internal)
2869 + * usb_enumerate_device_otg - FIXME (usbcore-internal)
2870 * @udev: newly addressed device (in ADDRESS state)
2871 *
2872 - * Do configuration for On-The-Go devices
2873 + * Finish enumeration for On-The-Go devices
2874 */
2875 -static int usb_configure_device_otg(struct usb_device *udev)
2876 +static int usb_enumerate_device_otg(struct usb_device *udev)
2877 {
2878 int err = 0;
2879
2880 @@ -1688,7 +1688,7 @@ fail:
2881
2882
2883 /**
2884 - * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
2885 + * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
2886 * @udev: newly addressed device (in ADDRESS state)
2887 *
2888 * This is only called by usb_new_device() and usb_authorize_device()
2889 @@ -1699,7 +1699,7 @@ fail:
2890 * the string descriptors, as they will be errored out by the device
2891 * until it has been authorized.
2892 */
2893 -static int usb_configure_device(struct usb_device *udev)
2894 +static int usb_enumerate_device(struct usb_device *udev)
2895 {
2896 int err;
2897
2898 @@ -1723,7 +1723,7 @@ static int usb_configure_device(struct usb_device *udev)
2899 udev->descriptor.iManufacturer);
2900 udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
2901 }
2902 - err = usb_configure_device_otg(udev);
2903 + err = usb_enumerate_device_otg(udev);
2904 fail:
2905 return err;
2906 }
2907 @@ -1733,8 +1733,8 @@ fail:
2908 * usb_new_device - perform initial device setup (usbcore-internal)
2909 * @udev: newly addressed device (in ADDRESS state)
2910 *
2911 - * This is called with devices which have been enumerated, but not yet
2912 - * configured. The device descriptor is available, but not descriptors
2913 + * This is called with devices which have been detected but not fully
2914 + * enumerated. The device descriptor is available, but not descriptors
2915 * for any device configuration. The caller must have locked either
2916 * the parent hub (if udev is a normal device) or else the
2917 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
2918 @@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *udev)
2919 if (udev->parent)
2920 usb_autoresume_device(udev->parent);
2921
2922 - usb_detect_quirks(udev); /* Determine quirks */
2923 - err = usb_configure_device(udev); /* detect & probe dev/intfs */
2924 + usb_detect_quirks(udev);
2925 + err = usb_enumerate_device(udev); /* Read descriptors */
2926 if (err < 0)
2927 goto fail;
2928 dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
2929 @@ -1803,21 +1803,23 @@ fail:
2930 */
2931 int usb_deauthorize_device(struct usb_device *usb_dev)
2932 {
2933 - unsigned cnt;
2934 usb_lock_device(usb_dev);
2935 if (usb_dev->authorized == 0)
2936 goto out_unauthorized;
2937 +
2938 usb_dev->authorized = 0;
2939 usb_set_configuration(usb_dev, -1);
2940 +
2941 + kfree(usb_dev->product);
2942 usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
2943 + kfree(usb_dev->manufacturer);
2944 usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
2945 + kfree(usb_dev->serial);
2946 usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
2947 - kfree(usb_dev->config);
2948 - usb_dev->config = NULL;
2949 - for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
2950 - kfree(usb_dev->rawdescriptors[cnt]);
2951 +
2952 + usb_destroy_configuration(usb_dev);
2953 usb_dev->descriptor.bNumConfigurations = 0;
2954 - kfree(usb_dev->rawdescriptors);
2955 +
2956 out_unauthorized:
2957 usb_unlock_device(usb_dev);
2958 return 0;
2959 @@ -1827,15 +1829,11 @@ out_unauthorized:
2960 int usb_authorize_device(struct usb_device *usb_dev)
2961 {
2962 int result = 0, c;
2963 +
2964 usb_lock_device(usb_dev);
2965 if (usb_dev->authorized == 1)
2966 goto out_authorized;
2967 - kfree(usb_dev->product);
2968 - usb_dev->product = NULL;
2969 - kfree(usb_dev->manufacturer);
2970 - usb_dev->manufacturer = NULL;
2971 - kfree(usb_dev->serial);
2972 - usb_dev->serial = NULL;
2973 +
2974 result = usb_autoresume_device(usb_dev);
2975 if (result < 0) {
2976 dev_err(&usb_dev->dev,
2977 @@ -1848,10 +1846,18 @@ int usb_authorize_device(struct usb_device *usb_dev)
2978 "authorization: %d\n", result);
2979 goto error_device_descriptor;
2980 }
2981 +
2982 + kfree(usb_dev->product);
2983 + usb_dev->product = NULL;
2984 + kfree(usb_dev->manufacturer);
2985 + usb_dev->manufacturer = NULL;
2986 + kfree(usb_dev->serial);
2987 + usb_dev->serial = NULL;
2988 +
2989 usb_dev->authorized = 1;
2990 - result = usb_configure_device(usb_dev);
2991 + result = usb_enumerate_device(usb_dev);
2992 if (result < 0)
2993 - goto error_configure;
2994 + goto error_enumerate;
2995 /* Choose and set the configuration. This registers the interfaces
2996 * with the driver core and lets interface drivers bind to them.
2997 */
2998 @@ -1866,8 +1872,10 @@ int usb_authorize_device(struct usb_device *usb_dev)
2999 }
3000 }
3001 dev_info(&usb_dev->dev, "authorized to connect\n");
3002 -error_configure:
3003 +
3004 +error_enumerate:
3005 error_device_descriptor:
3006 + usb_autosuspend_device(usb_dev);
3007 error_autoresume:
3008 out_authorized:
3009 usb_unlock_device(usb_dev); // complements locktree
3010 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
3011 index 7ec3041..8752e55 100644
3012 --- a/drivers/usb/core/sysfs.c
3013 +++ b/drivers/usb/core/sysfs.c
3014 @@ -82,9 +82,13 @@ static ssize_t show_##name(struct device *dev, \
3015 struct device_attribute *attr, char *buf) \
3016 { \
3017 struct usb_device *udev; \
3018 + int retval; \
3019 \
3020 udev = to_usb_device(dev); \
3021 - return sprintf(buf, "%s\n", udev->name); \
3022 + usb_lock_device(udev); \
3023 + retval = sprintf(buf, "%s\n", udev->name); \
3024 + usb_unlock_device(udev); \
3025 + return retval; \
3026 } \
3027 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
3028
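
The sysfs change edits a string-attribute generator macro: one expansion per field yields one show_<field>() function, now bracketed by the device lock. A reduced, runnable model of the token-pasting pattern, with the locking omitted and a stub struct standing in for struct usb_device:

#include <stdio.h>

struct usb_dev_stub {
        const char *product;
        const char *serial;
};

/* One macro expansion per attribute produces one accessor function,
 * in the spirit of the show_##name generator above. */
#define DEFINE_SHOW(field)                                      \
static int show_##field(const struct usb_dev_stub *d,          \
                        char *buf, size_t len)                  \
{                                                               \
        return snprintf(buf, len, "%s\n", d->field);            \
}

DEFINE_SHOW(product)
DEFINE_SHOW(serial)

int main(void)
{
        struct usb_dev_stub d = { .product = "demo", .serial = "0001" };
        char buf[32];

        show_product(&d, buf, sizeof(buf));
        printf("%s", buf);
        show_serial(&d, buf, sizeof(buf));
        printf("%s", buf);
        return 0;
}
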
3029 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
3030 index 1d8e39a..62ff5e7 100644
3031 --- a/drivers/usb/misc/appledisplay.c
3032 +++ b/drivers/usb/misc/appledisplay.c
3033 @@ -72,8 +72,8 @@ struct appledisplay {
3034 struct usb_device *udev; /* usb device */
3035 struct urb *urb; /* usb request block */
3036 struct backlight_device *bd; /* backlight device */
3037 - char *urbdata; /* interrupt URB data buffer */
3038 - char *msgdata; /* control message data buffer */
3039 + u8 *urbdata; /* interrupt URB data buffer */
3040 + u8 *msgdata; /* control message data buffer */
3041
3042 struct delayed_work work;
3043 int button_pressed;
3044 diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
3045 index 602ee05..59860b3 100644
3046 --- a/drivers/usb/misc/emi62.c
3047 +++ b/drivers/usb/misc/emi62.c
3048 @@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev)
3049 err("%s - error loading firmware: error = %d", __func__, err);
3050 goto wraperr;
3051 }
3052 - } while (i > 0);
3053 + } while (rec);
3054
3055 /* Assert reset (stop the CPU in the EMI) */
3056 err = emi62_set_reset(dev,1);
3057 diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
3058 index 1c44b97..067e5a9 100644
3059 --- a/drivers/usb/musb/musb_gadget_ep0.c
3060 +++ b/drivers/usb/musb/musb_gadget_ep0.c
3061 @@ -647,7 +647,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
3062 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
3063 break;
3064 default:
3065 - ERR("SetupEnd came in a wrong ep0stage %s",
3066 + ERR("SetupEnd came in a wrong ep0stage %s\n",
3067 decode_ep0stage(musb->ep0_state));
3068 }
3069 csr = musb_readw(regs, MUSB_CSR0);
3070 @@ -770,12 +770,18 @@ setup:
3071 handled = service_zero_data_request(
3072 musb, &setup);
3073
3074 + /*
3075 + * We're expecting no data in any case, so
3076 + * always set the DATAEND bit -- doing this
3077 + * here helps avoid SetupEnd interrupt coming
3078 + * in the idle stage when we're stalling...
3079 + */
3080 + musb->ackpend |= MUSB_CSR0_P_DATAEND;
3081 +
3082 /* status stage might be immediate */
3083 - if (handled > 0) {
3084 - musb->ackpend |= MUSB_CSR0_P_DATAEND;
3085 + if (handled > 0)
3086 musb->ep0_state =
3087 MUSB_EP0_STAGE_STATUSIN;
3088 - }
3089 break;
3090
3091 /* sequence #1 (IN to host), includes GET_STATUS
3092 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3093 index dffc8a1..be3dff1 100644
3094 --- a/drivers/usb/serial/option.c
3095 +++ b/drivers/usb/serial/option.c
3096 @@ -340,6 +340,10 @@ static int option_resume(struct usb_serial *serial);
3097 #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
3098 #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
3099
3100 +/* Haier products */
3101 +#define HAIER_VENDOR_ID 0x201e
3102 +#define HAIER_PRODUCT_CE100 0x2009
3103 +
3104 static struct usb_device_id option_ids[] = {
3105 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
3106 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
3107 @@ -641,6 +645,7 @@ static struct usb_device_id option_ids[] = {
3108 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
3109 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
3110 { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
3111 + { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
3112 { } /* Terminating entry */
3113 };
3114 MODULE_DEVICE_TABLE(usb, option_ids);
3115 diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
3116 index 3800da7..649fcdf 100644
3117 --- a/drivers/xen/xenbus/xenbus_probe.c
3118 +++ b/drivers/xen/xenbus/xenbus_probe.c
3119 @@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init);
3120
3121 MODULE_LICENSE("GPL");
3122
3123 -static int is_disconnected_device(struct device *dev, void *data)
3124 +static int is_device_connecting(struct device *dev, void *data)
3125 {
3126 struct xenbus_device *xendev = to_xenbus_device(dev);
3127 struct device_driver *drv = data;
3128 @@ -861,14 +861,15 @@ static int is_disconnected_device(struct device *dev, void *data)
3129 return 0;
3130
3131 xendrv = to_xenbus_driver(dev->driver);
3132 - return (xendev->state != XenbusStateConnected ||
3133 - (xendrv->is_ready && !xendrv->is_ready(xendev)));
3134 + return (xendev->state < XenbusStateConnected ||
3135 + (xendev->state == XenbusStateConnected &&
3136 + xendrv->is_ready && !xendrv->is_ready(xendev)));
3137 }
3138
3139 -static int exists_disconnected_device(struct device_driver *drv)
3140 +static int exists_connecting_device(struct device_driver *drv)
3141 {
3142 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
3143 - is_disconnected_device);
3144 + is_device_connecting);
3145 }
3146
3147 static int print_device_status(struct device *dev, void *data)
3148 @@ -884,10 +885,13 @@ static int print_device_status(struct device *dev, void *data)
3149 /* Information only: is this too noisy? */
3150 printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
3151 xendev->nodename);
3152 - } else if (xendev->state != XenbusStateConnected) {
3153 + } else if (xendev->state < XenbusStateConnected) {
3154 + enum xenbus_state rstate = XenbusStateUnknown;
3155 + if (xendev->otherend)
3156 + rstate = xenbus_read_driver_state(xendev->otherend);
3157 printk(KERN_WARNING "XENBUS: Timeout connecting "
3158 - "to device: %s (state %d)\n",
3159 - xendev->nodename, xendev->state);
3160 + "to device: %s (local state %d, remote state %d)\n",
3161 + xendev->nodename, xendev->state, rstate);
3162 }
3163
3164 return 0;
3165 @@ -897,7 +901,7 @@ static int print_device_status(struct device *dev, void *data)
3166 static int ready_to_wait_for_devices;
3167
3168 /*
3169 - * On a 10 second timeout, wait for all devices currently configured. We need
3170 + * On a 5-minute timeout, wait for all devices currently configured. We need
3171 * to do this to guarantee that the filesystems and / or network devices
3172 * needed for boot are available, before we can allow the boot to proceed.
3173 *
3174 @@ -912,18 +916,30 @@ static int ready_to_wait_for_devices;
3175 */
3176 static void wait_for_devices(struct xenbus_driver *xendrv)
3177 {
3178 - unsigned long timeout = jiffies + 10*HZ;
3179 + unsigned long start = jiffies;
3180 struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
3181 + unsigned int seconds_waited = 0;
3182
3183 if (!ready_to_wait_for_devices || !xen_domain())
3184 return;
3185
3186 - while (exists_disconnected_device(drv)) {
3187 - if (time_after(jiffies, timeout))
3188 - break;
3189 + while (exists_connecting_device(drv)) {
3190 + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
3191 + if (!seconds_waited)
3192 + printk(KERN_WARNING "XENBUS: Waiting for "
3193 + "devices to initialise: ");
3194 + seconds_waited += 5;
3195 + printk("%us...", 300 - seconds_waited);
3196 + if (seconds_waited == 300)
3197 + break;
3198 + }
3199 +
3200 schedule_timeout_interruptible(HZ/10);
3201 }
3202
3203 + if (seconds_waited)
3204 + printk("\n");
3205 +
3206 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
3207 print_device_status);
3208 }
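
The new loop stretches the boot-time wait from 10 seconds to 5 minutes, printing a countdown every 5 seconds while any frontend device is still connecting. A scaled-down standalone model of the seconds_waited bookkeeping (1 tick = 1 second, a 20-second budget instead of 300, and a stub readiness check):

#include <stdio.h>

#define BUDGET 20

static int device_ready(int now) { return now >= 12; }

int main(void)
{
        int start = 0, now = 0, seconds_waited = 0;

        while (!device_ready(now)) {
                if (now > start + seconds_waited + 5) {
                        if (!seconds_waited)
                                printf("waiting for devices: ");
                        seconds_waited += 5;
                        printf("%ds...", BUDGET - seconds_waited);
                        if (seconds_waited == BUDGET)
                                break;          /* give up, report status */
                }
                now++;                          /* schedule_timeout stand-in */
        }
        if (seconds_waited)
                printf("\n");
        return 0;
}
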
3209 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3210 index 63ea83f..3bbcaa7 100644
3211 --- a/fs/cifs/connect.c
3212 +++ b/fs/cifs/connect.c
3213 @@ -2287,12 +2287,12 @@ int
3214 cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
3215 char *mount_data_global, const char *devname)
3216 {
3217 - int rc = 0;
3218 + int rc;
3219 int xid;
3220 struct smb_vol *volume_info;
3221 - struct cifsSesInfo *pSesInfo = NULL;
3222 - struct cifsTconInfo *tcon = NULL;
3223 - struct TCP_Server_Info *srvTcp = NULL;
3224 + struct cifsSesInfo *pSesInfo;
3225 + struct cifsTconInfo *tcon;
3226 + struct TCP_Server_Info *srvTcp;
3227 char *full_path;
3228 char *mount_data = mount_data_global;
3229 #ifdef CONFIG_CIFS_DFS_UPCALL
3230 @@ -2301,6 +2301,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
3231 int referral_walks_count = 0;
3232 try_mount_again:
3233 #endif
3234 + rc = 0;
3235 + tcon = NULL;
3236 + pSesInfo = NULL;
3237 + srvTcp = NULL;
3238 full_path = NULL;
3239
3240 xid = GetXid();
3241 @@ -2597,6 +2601,7 @@ remote_path_check:
3242
3243 cleanup_volume_info(&volume_info);
3244 referral_walks_count++;
3245 + FreeXid(xid);
3246 goto try_mount_again;
3247 }
3248 #else /* No DFS support, return error on mount */
3249 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3250 index bd2a9dd..d0a2afb 100644
3251 --- a/fs/ext4/ext4.h
3252 +++ b/fs/ext4/ext4.h
3253 @@ -698,6 +698,10 @@ struct ext4_inode_info {
3254 __u16 i_extra_isize;
3255
3256 spinlock_t i_block_reservation_lock;
3257 +#ifdef CONFIG_QUOTA
3258 + /* quota space reservation, managed internally by quota code */
3259 + qsize_t i_reserved_quota;
3260 +#endif
3261
3262 /* completed async DIOs that might need unwritten extents handling */
3263 struct list_head i_aio_dio_complete_list;
3264 @@ -1432,7 +1436,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
3265 extern int ext4_block_truncate_page(handle_t *handle,
3266 struct address_space *mapping, loff_t from);
3267 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
3268 -extern qsize_t ext4_get_reserved_space(struct inode *inode);
3269 +extern qsize_t *ext4_get_reserved_space(struct inode *inode);
3270 extern int flush_aio_dio_completed_IO(struct inode *inode);
3271 /* ioctl.c */
3272 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
3273 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3274 index 1dae9a4..e233879 100644
3275 --- a/fs/ext4/inode.c
3276 +++ b/fs/ext4/inode.c
3277 @@ -1045,17 +1045,12 @@ out:
3278 return err;
3279 }
3280
3281 -qsize_t ext4_get_reserved_space(struct inode *inode)
3282 +#ifdef CONFIG_QUOTA
3283 +qsize_t *ext4_get_reserved_space(struct inode *inode)
3284 {
3285 - unsigned long long total;
3286 -
3287 - spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
3288 - total = EXT4_I(inode)->i_reserved_data_blocks +
3289 - EXT4_I(inode)->i_reserved_meta_blocks;
3290 - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3291 -
3292 - return (total << inode->i_blkbits);
3293 + return &EXT4_I(inode)->i_reserved_quota;
3294 }
3295 +#endif
3296 /*
3297 * Calculate the number of metadata blocks need to reserve
3298 * to allocate @blocks for non extent file based file
3299 @@ -1858,19 +1853,17 @@ repeat:
3300
3301 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
3302 total = md_needed + nrblocks;
3303 + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3304
3305 /*
3306 * Make quota reservation here to prevent quota overflow
3307 * later. Real quota accounting is done at pages writeout
3308 * time.
3309 */
3310 - if (vfs_dq_reserve_block(inode, total)) {
3311 - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3312 + if (vfs_dq_reserve_block(inode, total))
3313 return -EDQUOT;
3314 - }
3315
3316 if (ext4_claim_free_blocks(sbi, total)) {
3317 - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3318 vfs_dq_release_reservation_block(inode, total);
3319 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
3320 yield();
3321 @@ -1878,10 +1871,11 @@ repeat:
3322 }
3323 return -ENOSPC;
3324 }
3325 + spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
3326 EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
3327 - EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
3328 -
3329 + EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
3330 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
3331 +
3332 return 0; /* success */
3333 }
3334
3335 @@ -4850,6 +4844,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3336 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3337 inode->i_size = ext4_isize(raw_inode);
3338 ei->i_disksize = inode->i_size;
3339 +#ifdef CONFIG_QUOTA
3340 + ei->i_reserved_quota = 0;
3341 +#endif
3342 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3343 ei->i_block_group = iloc.block_group;
3344 ei->i_last_alloc_group = ~0;
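
The reservation hunk above drops i_block_reservation_lock before calling vfs_dq_reserve_block() and ext4_claim_free_blocks() — both of which may sleep — and retakes it only to commit the counters. A toy sketch of that lock/call/relock shape, with a pthread mutex in place of the spinlock; reserve_quota() is an illustrative stand-in, not a real API:

    #include <pthread.h>

    static pthread_mutex_t rsv_lock = PTHREAD_MUTEX_INITIALIZER;
    static long reserved_data, reserved_meta;

    /* Illustrative stand-in for vfs_dq_reserve_block(); it may block,
     * so it must never be called with rsv_lock held. */
    static int reserve_quota(long blocks)
    {
        (void)blocks;
        return 0;                       /* stub: quota always available */
    }

    static int reserve_blocks(long data, long meta)
    {
        long total;

        pthread_mutex_lock(&rsv_lock);
        total = data + meta;            /* size the request under the lock */
        pthread_mutex_unlock(&rsv_lock);

        if (reserve_quota(total))       /* sleepable call, lock dropped */
            return -1;

        pthread_mutex_lock(&rsv_lock);  /* retake only to commit counters */
        reserved_data += data;
        reserved_meta += meta;
        pthread_mutex_unlock(&rsv_lock);
        return 0;
    }

Sleeping while holding a spinlock is illegal in the kernel, which is why the hunk also switches i_reserved_meta_blocks to an increment (+= md_needed): another task may have changed the counter while the lock was dropped.
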
3345 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3346 index 9ae5217..92943f2 100644
3347 --- a/fs/ext4/super.c
3348 +++ b/fs/ext4/super.c
3349 @@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
3350 ei->i_allocated_meta_blocks = 0;
3351 ei->i_delalloc_reserved_flag = 0;
3352 spin_lock_init(&(ei->i_block_reservation_lock));
3353 +#ifdef CONFIG_QUOTA
3354 + ei->i_reserved_quota = 0;
3355 +#endif
3356 INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
3357 ei->cur_aio_dio = NULL;
3358 ei->i_sync_tid = 0;
3359 @@ -1001,7 +1004,9 @@ static const struct dquot_operations ext4_quota_operations = {
3360 .reserve_space = dquot_reserve_space,
3361 .claim_space = dquot_claim_space,
3362 .release_rsv = dquot_release_reserved_space,
3363 +#ifdef CONFIG_QUOTA
3364 .get_reserved_space = ext4_get_reserved_space,
3365 +#endif
3366 .alloc_inode = dquot_alloc_inode,
3367 .free_space = dquot_free_space,
3368 .free_inode = dquot_free_inode,
3369 diff --git a/fs/namei.c b/fs/namei.c
3370 index d11f404..a2b3c28 100644
3371 --- a/fs/namei.c
3372 +++ b/fs/namei.c
3373 @@ -234,6 +234,7 @@ int generic_permission(struct inode *inode, int mask,
3374 /*
3375 * Searching includes executable on directories, else just read.
3376 */
3377 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
3378 if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
3379 if (capable(CAP_DAC_READ_SEARCH))
3380 return 0;
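
The single added line above matters because callers may pass flags beyond the basic three (MAY_APPEND, MAY_ACCESS, ...), while the very next test compares mask for equality with MAY_READ; any extra bit would defeat the CAP_DAC_READ_SEARCH shortcut. A compact illustration, using the kernel's MAY_* values:

    #include <assert.h>

    #define MAY_EXEC   0x01
    #define MAY_WRITE  0x02
    #define MAY_READ   0x04
    #define MAY_APPEND 0x08             /* extra flag a caller may OR in */

    static int is_pure_read(int mask)
    {
        mask &= MAY_READ | MAY_WRITE | MAY_EXEC;   /* the added line */
        return mask == MAY_READ;
    }

    int main(void)
    {
        assert(is_pure_read(MAY_READ));
        /* Without the masking line this assertion fails: the stray
         * MAY_APPEND bit makes the equality test miss. */
        assert(is_pure_read(MAY_READ | MAY_APPEND));
        return 0;
    }
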
3381 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
3382 index 39b49c4..c4d07a8 100644
3383 --- a/fs/quota/dquot.c
3384 +++ b/fs/quota/dquot.c
3385 @@ -1388,6 +1388,67 @@ void vfs_dq_drop(struct inode *inode)
3386 EXPORT_SYMBOL(vfs_dq_drop);
3387
3388 /*
3389 + * inode_reserved_space is managed internally by quota, and protected by
3390 + * i_lock, similarly to i_blocks+i_bytes.
3391 + */
3392 +static qsize_t *inode_reserved_space(struct inode * inode)
3393 +{
3394 + /* Filesystem must explicitly define its own method in order to use
3395 + * quota reservation interface */
3396 + BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
3397 + return inode->i_sb->dq_op->get_reserved_space(inode);
3398 +}
3399 +
3400 +static void inode_add_rsv_space(struct inode *inode, qsize_t number)
3401 +{
3402 + spin_lock(&inode->i_lock);
3403 + *inode_reserved_space(inode) += number;
3404 + spin_unlock(&inode->i_lock);
3405 +}
3406 +
3407 +
3408 +static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3409 +{
3410 + spin_lock(&inode->i_lock);
3411 + *inode_reserved_space(inode) -= number;
3412 + __inode_add_bytes(inode, number);
3413 + spin_unlock(&inode->i_lock);
3414 +}
3415 +
3416 +static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3417 +{
3418 + spin_lock(&inode->i_lock);
3419 + *inode_reserved_space(inode) -= number;
3420 + spin_unlock(&inode->i_lock);
3421 +}
3422 +
3423 +static qsize_t inode_get_rsv_space(struct inode *inode)
3424 +{
3425 + qsize_t ret;
3426 + spin_lock(&inode->i_lock);
3427 + ret = *inode_reserved_space(inode);
3428 + spin_unlock(&inode->i_lock);
3429 + return ret;
3430 +}
3431 +
3432 +static void inode_incr_space(struct inode *inode, qsize_t number,
3433 + int reserve)
3434 +{
3435 + if (reserve)
3436 + inode_add_rsv_space(inode, number);
3437 + else
3438 + inode_add_bytes(inode, number);
3439 +}
3440 +
3441 +static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
3442 +{
3443 + if (reserve)
3444 + inode_sub_rsv_space(inode, number);
3445 + else
3446 + inode_sub_bytes(inode, number);
3447 +}
3448 +
3449 +/*
3450 * Following four functions update i_blocks+i_bytes fields and
3451 * quota information (together with appropriate checks)
3452 * NOTE: We absolutely rely on the fact that caller dirties
3453 @@ -1405,6 +1466,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
3454 int cnt, ret = QUOTA_OK;
3455 char warntype[MAXQUOTAS];
3456
3457 + /*
3458 + * First test before acquiring mutex - solves deadlocks when we
3459 + * re-enter the quota code and are already holding the mutex
3460 + */
3461 + if (IS_NOQUOTA(inode)) {
3462 + inode_incr_space(inode, number, reserve);
3463 + goto out;
3464 + }
3465 +
3466 + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3467 + if (IS_NOQUOTA(inode)) {
3468 + inode_incr_space(inode, number, reserve);
3469 + goto out_unlock;
3470 + }
3471 +
3472 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3473 warntype[cnt] = QUOTA_NL_NOWARN;
3474
3475 @@ -1415,7 +1491,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
3476 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
3477 == NO_QUOTA) {
3478 ret = NO_QUOTA;
3479 - goto out_unlock;
3480 + spin_unlock(&dq_data_lock);
3481 + goto out_flush_warn;
3482 }
3483 }
3484 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
3485 @@ -1426,64 +1503,32 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
3486 else
3487 dquot_incr_space(inode->i_dquot[cnt], number);
3488 }
3489 - if (!reserve)
3490 - inode_add_bytes(inode, number);
3491 -out_unlock:
3492 + inode_incr_space(inode, number, reserve);
3493 spin_unlock(&dq_data_lock);
3494 - flush_warnings(inode->i_dquot, warntype);
3495 - return ret;
3496 -}
3497 -
3498 -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
3499 -{
3500 - int cnt, ret = QUOTA_OK;
3501 -
3502 - /*
3503 - * First test before acquiring mutex - solves deadlocks when we
3504 - * re-enter the quota code and are already holding the mutex
3505 - */
3506 - if (IS_NOQUOTA(inode)) {
3507 - inode_add_bytes(inode, number);
3508 - goto out;
3509 - }
3510 -
3511 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3512 - if (IS_NOQUOTA(inode)) {
3513 - inode_add_bytes(inode, number);
3514 - goto out_unlock;
3515 - }
3516 -
3517 - ret = __dquot_alloc_space(inode, number, warn, 0);
3518 - if (ret == NO_QUOTA)
3519 - goto out_unlock;
3520
3521 + if (reserve)
3522 + goto out_flush_warn;
3523 /* Dirtify all the dquots - this can block when journalling */
3524 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3525 if (inode->i_dquot[cnt])
3526 mark_dquot_dirty(inode->i_dquot[cnt]);
3527 +out_flush_warn:
3528 + flush_warnings(inode->i_dquot, warntype);
3529 out_unlock:
3530 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3531 out:
3532 return ret;
3533 }
3534 +
3535 +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
3536 +{
3537 + return __dquot_alloc_space(inode, number, warn, 0);
3538 +}
3539 EXPORT_SYMBOL(dquot_alloc_space);
3540
3541 int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
3542 {
3543 - int ret = QUOTA_OK;
3544 -
3545 - if (IS_NOQUOTA(inode))
3546 - goto out;
3547 -
3548 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3549 - if (IS_NOQUOTA(inode))
3550 - goto out_unlock;
3551 -
3552 - ret = __dquot_alloc_space(inode, number, warn, 1);
3553 -out_unlock:
3554 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3555 -out:
3556 - return ret;
3557 + return __dquot_alloc_space(inode, number, warn, 1);
3558 }
3559 EXPORT_SYMBOL(dquot_reserve_space);
3560
3561 @@ -1540,14 +1585,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
3562 int ret = QUOTA_OK;
3563
3564 if (IS_NOQUOTA(inode)) {
3565 - inode_add_bytes(inode, number);
3566 + inode_claim_rsv_space(inode, number);
3567 goto out;
3568 }
3569
3570 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3571 if (IS_NOQUOTA(inode)) {
3572 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3573 - inode_add_bytes(inode, number);
3574 + inode_claim_rsv_space(inode, number);
3575 goto out;
3576 }
3577
3578 @@ -1559,7 +1604,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
3579 number);
3580 }
3581 /* Update inode bytes */
3582 - inode_add_bytes(inode, number);
3583 + inode_claim_rsv_space(inode, number);
3584 spin_unlock(&dq_data_lock);
3585 /* Dirtify all the dquots - this can block when journalling */
3586 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3587 @@ -1572,38 +1617,9 @@ out:
3588 EXPORT_SYMBOL(dquot_claim_space);
3589
3590 /*
3591 - * Release reserved quota space
3592 - */
3593 -void dquot_release_reserved_space(struct inode *inode, qsize_t number)
3594 -{
3595 - int cnt;
3596 -
3597 - if (IS_NOQUOTA(inode))
3598 - goto out;
3599 -
3600 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3601 - if (IS_NOQUOTA(inode))
3602 - goto out_unlock;
3603 -
3604 - spin_lock(&dq_data_lock);
3605 - /* Release reserved dquots */
3606 - for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
3607 - if (inode->i_dquot[cnt])
3608 - dquot_free_reserved_space(inode->i_dquot[cnt], number);
3609 - }
3610 - spin_unlock(&dq_data_lock);
3611 -
3612 -out_unlock:
3613 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3614 -out:
3615 - return;
3616 -}
3617 -EXPORT_SYMBOL(dquot_release_reserved_space);
3618 -
3619 -/*
3620 * This operation can block, but only after everything is updated
3621 */
3622 -int dquot_free_space(struct inode *inode, qsize_t number)
3623 +int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
3624 {
3625 unsigned int cnt;
3626 char warntype[MAXQUOTAS];
3627 @@ -1612,7 +1628,7 @@ int dquot_free_space(struct inode *inode, qsize_t number)
3628 * re-enter the quota code and are already holding the mutex */
3629 if (IS_NOQUOTA(inode)) {
3630 out_sub:
3631 - inode_sub_bytes(inode, number);
3632 + inode_decr_space(inode, number, reserve);
3633 return QUOTA_OK;
3634 }
3635
3636 @@ -1627,21 +1643,43 @@ out_sub:
3637 if (!inode->i_dquot[cnt])
3638 continue;
3639 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
3640 - dquot_decr_space(inode->i_dquot[cnt], number);
3641 + if (reserve)
3642 + dquot_free_reserved_space(inode->i_dquot[cnt], number);
3643 + else
3644 + dquot_decr_space(inode->i_dquot[cnt], number);
3645 }
3646 - inode_sub_bytes(inode, number);
3647 + inode_decr_space(inode, number, reserve);
3648 spin_unlock(&dq_data_lock);
3649 +
3650 + if (reserve)
3651 + goto out_unlock;
3652 /* Dirtify all the dquots - this can block when journalling */
3653 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
3654 if (inode->i_dquot[cnt])
3655 mark_dquot_dirty(inode->i_dquot[cnt]);
3656 +out_unlock:
3657 flush_warnings(inode->i_dquot, warntype);
3658 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
3659 return QUOTA_OK;
3660 }
3661 +
3662 +int dquot_free_space(struct inode *inode, qsize_t number)
3663 +{
3664 + return __dquot_free_space(inode, number, 0);
3665 +}
3666 EXPORT_SYMBOL(dquot_free_space);
3667
3668 /*
3669 + * Release reserved quota space
3670 + */
3671 +void dquot_release_reserved_space(struct inode *inode, qsize_t number)
3672 +{
3673 + __dquot_free_space(inode, number, 1);
3674 +
3675 +}
3676 +EXPORT_SYMBOL(dquot_release_reserved_space);
3677 +
3678 +/*
3679 * This operation can block, but only after everything is updated
3680 */
3681 int dquot_free_inode(const struct inode *inode, qsize_t number)
3682 @@ -1679,19 +1717,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
3683 EXPORT_SYMBOL(dquot_free_inode);
3684
3685 /*
3686 - * call back function, get reserved quota space from underlying fs
3687 - */
3688 -qsize_t dquot_get_reserved_space(struct inode *inode)
3689 -{
3690 - qsize_t reserved_space = 0;
3691 -
3692 - if (sb_any_quota_active(inode->i_sb) &&
3693 - inode->i_sb->dq_op->get_reserved_space)
3694 - reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
3695 - return reserved_space;
3696 -}
3697 -
3698 -/*
3699 * Transfer the number of inode and blocks from one diskquota to an other.
3700 *
3701 * This operation can block, but only after everything is updated
3702 @@ -1734,7 +1759,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
3703 }
3704 spin_lock(&dq_data_lock);
3705 cur_space = inode_get_bytes(inode);
3706 - rsv_space = dquot_get_reserved_space(inode);
3707 + rsv_space = inode_get_rsv_space(inode);
3708 space = cur_space + rsv_space;
3709 /* Build the transfer_from list and check the limits */
3710 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
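
The reworked helpers above keep delayed-allocation reservations in a counter owned by the filesystem (returned via ->get_reserved_space()) and convert them to real i_blocks/i_bytes only at claim time, all under inode->i_lock. A toy model of the reserve/claim/release lifecycle — not the kernel structures, just the shape — with a pthread mutex standing in for i_lock:

    #include <pthread.h>
    #include <stdio.h>

    struct toy_inode {
        pthread_mutex_t lock;       /* stands in for inode->i_lock */
        long long bytes;            /* allocated, like i_blocks/i_bytes */
        long long reserved;         /* like the fs-provided i_reserved_quota */
    };

    static void reserve(struct toy_inode *i, long long n)
    {
        pthread_mutex_lock(&i->lock);
        i->reserved += n;                /* inode_add_rsv_space() */
        pthread_mutex_unlock(&i->lock);
    }

    static void claim(struct toy_inode *i, long long n)
    {
        pthread_mutex_lock(&i->lock);
        i->reserved -= n;                /* inode_claim_rsv_space(): move */
        i->bytes += n;                   /* rsv -> real under one lock */
        pthread_mutex_unlock(&i->lock);
    }

    static void release(struct toy_inode *i, long long n)
    {
        pthread_mutex_lock(&i->lock);
        i->reserved -= n;                /* inode_sub_rsv_space() */
        pthread_mutex_unlock(&i->lock);
    }

    int main(void)
    {
        struct toy_inode ino = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        reserve(&ino, 8192);             /* delalloc: promise the space */
        claim(&ino, 4096);               /* writeout: promise becomes real */
        release(&ino, 4096);             /* the rest was never written */
        printf("bytes=%lld reserved=%lld\n", ino.bytes, ino.reserved);
        return 0;
    }

claim() is the interesting case: it must update both counters under a single lock acquisition, which is exactly why the patch exposes __inode_add_bytes() for callers that already hold i_lock.
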
3711 diff --git a/fs/stat.c b/fs/stat.c
3712 index 075694e..c4ecd52 100644
3713 --- a/fs/stat.c
3714 +++ b/fs/stat.c
3715 @@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
3716 }
3717 #endif /* __ARCH_WANT_STAT64 */
3718
3719 -void inode_add_bytes(struct inode *inode, loff_t bytes)
3720 +/* The caller is responsible for sufficient locking here (i.e. inode->i_lock) */
3721 +void __inode_add_bytes(struct inode *inode, loff_t bytes)
3722 {
3723 - spin_lock(&inode->i_lock);
3724 inode->i_blocks += bytes >> 9;
3725 bytes &= 511;
3726 inode->i_bytes += bytes;
3727 @@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
3728 inode->i_blocks++;
3729 inode->i_bytes -= 512;
3730 }
3731 +}
3732 +
3733 +void inode_add_bytes(struct inode *inode, loff_t bytes)
3734 +{
3735 + spin_lock(&inode->i_lock);
3736 + __inode_add_bytes(inode, bytes);
3737 spin_unlock(&inode->i_lock);
3738 }
3739
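
The split above is the usual lock-wrapper refactor: the body becomes __inode_add_bytes(), documented as requiring i_lock, while inode_add_bytes() keeps the old locked behaviour. A self-contained sketch of the same 512-byte-sector carry arithmetic and wrapper pattern (pthread mutex in place of the spinlock):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long long blocks, bytes;

    /* Caller must hold `lock` -- the __-prefixed variant. */
    static void __add_bytes(long long n)
    {
        blocks += n >> 9;       /* whole 512-byte sectors */
        bytes  += n & 511;      /* sub-sector remainder */
        if (bytes >= 512) {     /* carry a full sector */
            blocks++;
            bytes -= 512;
        }
    }

    /* Locked wrapper preserving the original interface. */
    static void add_bytes(long long n)
    {
        pthread_mutex_lock(&lock);
        __add_bytes(n);
        pthread_mutex_unlock(&lock);
    }
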
3740 diff --git a/fs/udf/super.c b/fs/udf/super.c
3741 index 9d1b8c2..1e4543c 100644
3742 --- a/fs/udf/super.c
3743 +++ b/fs/udf/super.c
3744 @@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct super_block *sb,
3745 return 0;
3746 }
3747
3748 -static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
3749 +static void udf_find_vat_block(struct super_block *sb, int p_index,
3750 + int type1_index, sector_t start_block)
3751 {
3752 struct udf_sb_info *sbi = UDF_SB(sb);
3753 struct udf_part_map *map = &sbi->s_partmaps[p_index];
3754 + sector_t vat_block;
3755 struct kernel_lb_addr ino;
3756 +
3757 + /*
3758 + * VAT file entry is in the last recorded block. Some broken disks have
3759 + * it a few blocks before so try a bit harder...
3760 + */
3761 + ino.partitionReferenceNum = type1_index;
3762 + for (vat_block = start_block;
3763 + vat_block >= map->s_partition_root &&
3764 + vat_block >= start_block - 3 &&
3765 + !sbi->s_vat_inode; vat_block--) {
3766 + ino.logicalBlockNum = vat_block - map->s_partition_root;
3767 + sbi->s_vat_inode = udf_iget(sb, &ino);
3768 + }
3769 +}
3770 +
3771 +static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
3772 +{
3773 + struct udf_sb_info *sbi = UDF_SB(sb);
3774 + struct udf_part_map *map = &sbi->s_partmaps[p_index];
3775 struct buffer_head *bh = NULL;
3776 struct udf_inode_info *vati;
3777 uint32_t pos;
3778 struct virtualAllocationTable20 *vat20;
3779 sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
3780
3781 - /* VAT file entry is in the last recorded block */
3782 - ino.partitionReferenceNum = type1_index;
3783 - ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
3784 - sbi->s_vat_inode = udf_iget(sb, &ino);
3785 + udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
3786 if (!sbi->s_vat_inode &&
3787 sbi->s_last_block != blocks - 1) {
3788 printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
3789 @@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
3790 "block of the device (%lu).\n",
3791 (unsigned long)sbi->s_last_block,
3792 (unsigned long)blocks - 1);
3793 - ino.partitionReferenceNum = type1_index;
3794 - ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
3795 - sbi->s_vat_inode = udf_iget(sb, &ino);
3796 + udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
3797 }
3798 if (!sbi->s_vat_inode)
3799 return 1;
3800 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
3801 index 789cf5f..d77b547 100644
3802 --- a/include/linux/cpumask.h
3803 +++ b/include/linux/cpumask.h
3804 @@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
3805 #define num_online_cpus() cpumask_weight(cpu_online_mask)
3806 #define num_possible_cpus() cpumask_weight(cpu_possible_mask)
3807 #define num_present_cpus() cpumask_weight(cpu_present_mask)
3808 +#define num_active_cpus() cpumask_weight(cpu_active_mask)
3809 #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
3810 #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
3811 #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
3812 @@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
3813 #define num_online_cpus() 1
3814 #define num_possible_cpus() 1
3815 #define num_present_cpus() 1
3816 +#define num_active_cpus() 1
3817 #define cpu_online(cpu) ((cpu) == 0)
3818 #define cpu_possible(cpu) ((cpu) == 0)
3819 #define cpu_present(cpu) ((cpu) == 0)
3820 diff --git a/include/linux/fs.h b/include/linux/fs.h
3821 index 2620a8c..98ea200 100644
3822 --- a/include/linux/fs.h
3823 +++ b/include/linux/fs.h
3824 @@ -2314,6 +2314,7 @@ extern const struct inode_operations page_symlink_inode_operations;
3825 extern int generic_readlink(struct dentry *, char __user *, int);
3826 extern void generic_fillattr(struct inode *, struct kstat *);
3827 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
3828 +void __inode_add_bytes(struct inode *inode, loff_t bytes);
3829 void inode_add_bytes(struct inode *inode, loff_t bytes);
3830 void inode_sub_bytes(struct inode *inode, loff_t bytes);
3831 loff_t inode_get_bytes(struct inode *inode);
3832 diff --git a/include/linux/quota.h b/include/linux/quota.h
3833 index 78c4889..8fd8efc 100644
3834 --- a/include/linux/quota.h
3835 +++ b/include/linux/quota.h
3836 @@ -313,8 +313,9 @@ struct dquot_operations {
3837 int (*claim_space) (struct inode *, qsize_t);
3838 /* release rsved quota for delayed alloc */
3839 void (*release_rsv) (struct inode *, qsize_t);
3840 - /* get reserved quota for delayed alloc */
3841 - qsize_t (*get_reserved_space) (struct inode *);
3842 + /* get reserved quota for delayed alloc, value returned is managed by
3843 + * quota code only */
3844 + qsize_t *(*get_reserved_space) (struct inode *);
3845 };
3846
3847 /* Operations handling requests from userspace */
3848 diff --git a/include/linux/security.h b/include/linux/security.h
3849 index 239e40d..d40d23f 100644
3850 --- a/include/linux/security.h
3851 +++ b/include/linux/security.h
3852 @@ -95,8 +95,13 @@ struct seq_file;
3853 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
3854 extern int cap_netlink_recv(struct sk_buff *skb, int cap);
3855
3856 +#ifdef CONFIG_MMU
3857 extern unsigned long mmap_min_addr;
3858 extern unsigned long dac_mmap_min_addr;
3859 +#else
3860 +#define dac_mmap_min_addr 0UL
3861 +#endif
3862 +
3863 /*
3864 * Values used in the task_security_ops calls
3865 */
3866 @@ -121,6 +126,7 @@ struct request_sock;
3867 #define LSM_UNSAFE_PTRACE 2
3868 #define LSM_UNSAFE_PTRACE_CAP 4
3869
3870 +#ifdef CONFIG_MMU
3871 /*
3872 * If a hint addr is less than mmap_min_addr change hint to be as
3873 * low as possible but still greater than mmap_min_addr
3874 @@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
3875 }
3876 extern int mmap_min_addr_handler(struct ctl_table *table, int write,
3877 void __user *buffer, size_t *lenp, loff_t *ppos);
3878 +#endif
3879
3880 #ifdef CONFIG_SECURITY
3881
3882 diff --git a/include/net/ip.h b/include/net/ip.h
3883 index 2f47e54..69db943 100644
3884 --- a/include/net/ip.h
3885 +++ b/include/net/ip.h
3886 @@ -342,6 +342,7 @@ enum ip_defrag_users
3887 IP_DEFRAG_CALL_RA_CHAIN,
3888 IP_DEFRAG_CONNTRACK_IN,
3889 IP_DEFRAG_CONNTRACK_OUT,
3890 + IP_DEFRAG_CONNTRACK_BRIDGE_IN,
3891 IP_DEFRAG_VS_IN,
3892 IP_DEFRAG_VS_OUT,
3893 IP_DEFRAG_VS_FWD
3894 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
3895 index 8c31d8a..639bbf0 100644
3896 --- a/include/net/ipv6.h
3897 +++ b/include/net/ipv6.h
3898 @@ -354,8 +354,16 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1,
3899
3900 struct inet_frag_queue;
3901
3902 +enum ip6_defrag_users {
3903 + IP6_DEFRAG_LOCAL_DELIVER,
3904 + IP6_DEFRAG_CONNTRACK_IN,
3905 + IP6_DEFRAG_CONNTRACK_OUT,
3906 + IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
3907 +};
3908 +
3909 struct ip6_create_arg {
3910 __be32 id;
3911 + u32 user;
3912 struct in6_addr *src;
3913 struct in6_addr *dst;
3914 };
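
The new ip6_defrag_users enum and the user field in ip6_create_arg exist so that fragments reassembled on behalf of conntrack never share a queue with fragments reassembled for local delivery: the queue key effectively becomes (id, user, src, dst). A sketch of the resulting match logic with plain structs instead of kernel types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct frag_key {
        uint32_t id;
        uint32_t user;              /* e.g. LOCAL_DELIVER vs CONNTRACK_IN */
        uint8_t  src[16], dst[16];  /* IPv6 addresses */
    };

    static bool frag_match(const struct frag_key *q,
                           const struct frag_key *arg)
    {
        return q->id == arg->id &&
               q->user == arg->user &&          /* the new discriminator */
               memcmp(q->src, arg->src, 16) == 0 &&
               memcmp(q->dst, arg->dst, 16) == 0;
    }

This mirrors the ip6_frag_match() change further down, where fq->user == arg->user is added to the existing id/address comparison.
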
3915 diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
3916 index abc55ad..1ee717e 100644
3917 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
3918 +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
3919 @@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
3920
3921 extern int nf_ct_frag6_init(void);
3922 extern void nf_ct_frag6_cleanup(void);
3923 -extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
3924 +extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
3925 extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
3926 struct net_device *in,
3927 struct net_device *out,
3928 diff --git a/kernel/cpu.c b/kernel/cpu.c
3929 index 6ba0f1e..b216886 100644
3930 --- a/kernel/cpu.c
3931 +++ b/kernel/cpu.c
3932 @@ -212,6 +212,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
3933 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
3934 hcpu, -1, &nr_calls);
3935 if (err == NOTIFY_BAD) {
3936 + set_cpu_active(cpu, true);
3937 +
3938 nr_calls--;
3939 __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
3940 hcpu, nr_calls, NULL);
3941 @@ -223,11 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
3942
3943 /* Ensure that we are not runnable on dying cpu */
3944 cpumask_copy(old_allowed, &current->cpus_allowed);
3945 - set_cpus_allowed_ptr(current,
3946 - cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
3947 + set_cpus_allowed_ptr(current, cpu_active_mask);
3948
3949 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
3950 if (err) {
3951 + set_cpu_active(cpu, true);
3952 /* CPU didn't die: tell everyone. Can't complain. */
3953 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
3954 hcpu) == NOTIFY_BAD)
3955 @@ -292,9 +294,6 @@ int __ref cpu_down(unsigned int cpu)
3956
3957 err = _cpu_down(cpu, 0);
3958
3959 - if (cpu_online(cpu))
3960 - set_cpu_active(cpu, true);
3961 -
3962 out:
3963 cpu_maps_update_done();
3964 stop_machine_destroy();
3965 @@ -387,6 +386,15 @@ int disable_nonboot_cpus(void)
3966 * with the userspace trying to use the CPU hotplug at the same time
3967 */
3968 cpumask_clear(frozen_cpus);
3969 +
3970 + for_each_online_cpu(cpu) {
3971 + if (cpu == first_cpu)
3972 + continue;
3973 + set_cpu_active(cpu, false);
3974 + }
3975 +
3976 + synchronize_sched();
3977 +
3978 printk("Disabling non-boot CPUs ...\n");
3979 for_each_online_cpu(cpu) {
3980 if (cpu == first_cpu)
3981 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
3982 index b5cb469..39e5121 100644
3983 --- a/kernel/cpuset.c
3984 +++ b/kernel/cpuset.c
3985 @@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
3986 if (retval < 0)
3987 return retval;
3988
3989 - if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
3990 + if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
3991 return -EINVAL;
3992 }
3993 retval = validate_change(cs, trialcs);
3994 @@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
3995 }
3996
3997 /* Continue past cpusets with all cpus, mems online */
3998 - if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
3999 + if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
4000 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
4001 continue;
4002
4003 @@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
4004 /* Remove offline cpus and mems from this cpuset. */
4005 mutex_lock(&callback_mutex);
4006 cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
4007 - cpu_online_mask);
4008 + cpu_active_mask);
4009 nodes_and(cp->mems_allowed, cp->mems_allowed,
4010 node_states[N_HIGH_MEMORY]);
4011 mutex_unlock(&callback_mutex);
4012 @@ -2058,8 +2058,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
4013 switch (phase) {
4014 case CPU_ONLINE:
4015 case CPU_ONLINE_FROZEN:
4016 - case CPU_DEAD:
4017 - case CPU_DEAD_FROZEN:
4018 + case CPU_DOWN_PREPARE:
4019 + case CPU_DOWN_PREPARE_FROZEN:
4020 + case CPU_DOWN_FAILED:
4021 + case CPU_DOWN_FAILED_FROZEN:
4022 break;
4023
4024 default:
4025 @@ -2068,7 +2070,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
4026
4027 cgroup_lock();
4028 mutex_lock(&callback_mutex);
4029 - cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
4030 + cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
4031 mutex_unlock(&callback_mutex);
4032 scan_for_empty_cpusets(&top_cpuset);
4033 ndoms = generate_sched_domains(&doms, &attr);
4034 @@ -2115,7 +2117,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
4035
4036 void __init cpuset_init_smp(void)
4037 {
4038 - cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
4039 + cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
4040 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
4041
4042 hotcpu_notifier(cpuset_track_online_cpus, 0);
4043 diff --git a/kernel/sched.c b/kernel/sched.c
4044 index d079a9f..dd0dccd 100644
4045 --- a/kernel/sched.c
4046 +++ b/kernel/sched.c
4047 @@ -2036,6 +2036,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4048 {
4049 s64 delta;
4050
4051 + if (p->sched_class != &fair_sched_class)
4052 + return 0;
4053 +
4054 /*
4055 * Buddy candidates are cache hot:
4056 */
4057 @@ -2044,9 +2047,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4058 &p->se == cfs_rq_of(&p->se)->last))
4059 return 1;
4060
4061 - if (p->sched_class != &fair_sched_class)
4062 - return 0;
4063 -
4064 if (sysctl_sched_migration_cost == -1)
4065 return 1;
4066 if (sysctl_sched_migration_cost == 0)
4067 @@ -4139,7 +4139,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4068 unsigned long flags;
4069 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4070
4071 - cpumask_copy(cpus, cpu_online_mask);
4072 + cpumask_copy(cpus, cpu_active_mask);
4073
4074 /*
4075 * When power savings policy is enabled for the parent domain, idle
4076 @@ -4302,7 +4302,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
4077 int all_pinned = 0;
4078 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4079
4080 - cpumask_copy(cpus, cpu_online_mask);
4081 + cpumask_copy(cpus, cpu_active_mask);
4082
4083 /*
4084 * When power savings policy is enabled for the parent domain, idle
4085 @@ -4699,7 +4699,7 @@ int select_nohz_load_balancer(int stop_tick)
4086 cpumask_set_cpu(cpu, nohz.cpu_mask);
4087
4088 /* time for ilb owner also to sleep */
4089 - if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4090 + if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
4091 if (atomic_read(&nohz.load_balancer) == cpu)
4092 atomic_set(&nohz.load_balancer, -1);
4093 return 0;
4094 @@ -7075,7 +7075,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4095 int ret = 0;
4096
4097 rq = task_rq_lock(p, &flags);
4098 - if (!cpumask_intersects(new_mask, cpu_online_mask)) {
4099 + if (!cpumask_intersects(new_mask, cpu_active_mask)) {
4100 ret = -EINVAL;
4101 goto out;
4102 }
4103 @@ -7097,7 +7097,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4104 if (cpumask_test_cpu(task_cpu(p), new_mask))
4105 goto out;
4106
4107 - if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
4108 + if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
4109 /* Need help from migration thread: drop lock and wait. */
4110 struct task_struct *mt = rq->migration_thread;
4111
4112 @@ -7251,19 +7251,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
4113
4114 again:
4115 /* Look for allowed, online CPU in same node. */
4116 - for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
4117 + for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
4118 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
4119 goto move;
4120
4121 /* Any allowed, online CPU? */
4122 - dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
4123 + dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
4124 if (dest_cpu < nr_cpu_ids)
4125 goto move;
4126
4127 /* No more Mr. Nice Guy. */
4128 if (dest_cpu >= nr_cpu_ids) {
4129 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
4130 - dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
4131 + dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
4132
4133 /*
4134 * Don't tell them about moving exiting tasks or
4135 @@ -7292,7 +7292,7 @@ move:
4136 */
4137 static void migrate_nr_uninterruptible(struct rq *rq_src)
4138 {
4139 - struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
4140 + struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
4141 unsigned long flags;
4142
4143 local_irq_save(flags);
4144 @@ -7546,7 +7546,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4145 static struct ctl_table_header *sd_sysctl_header;
4146 static void register_sched_domain_sysctl(void)
4147 {
4148 - int i, cpu_num = num_online_cpus();
4149 + int i, cpu_num = num_possible_cpus();
4150 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4151 char buf[32];
4152
4153 @@ -7556,7 +7556,7 @@ static void register_sched_domain_sysctl(void)
4154 if (entry == NULL)
4155 return;
4156
4157 - for_each_online_cpu(i) {
4158 + for_each_possible_cpu(i) {
4159 snprintf(buf, 32, "cpu%d", i);
4160 entry->procname = kstrdup(buf, GFP_KERNEL);
4161 entry->mode = 0555;
4162 @@ -7925,6 +7925,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
4163
4164 static void free_rootdomain(struct root_domain *rd)
4165 {
4166 + synchronize_sched();
4167 +
4168 cpupri_cleanup(&rd->cpupri);
4169
4170 free_cpumask_var(rd->rto_mask);
4171 @@ -9042,7 +9044,7 @@ match1:
4172 if (doms_new == NULL) {
4173 ndoms_cur = 0;
4174 doms_new = fallback_doms;
4175 - cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
4176 + cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map);
4177 WARN_ON_ONCE(dattr_new);
4178 }
4179
4180 @@ -9173,8 +9175,10 @@ static int update_sched_domains(struct notifier_block *nfb,
4181 switch (action) {
4182 case CPU_ONLINE:
4183 case CPU_ONLINE_FROZEN:
4184 - case CPU_DEAD:
4185 - case CPU_DEAD_FROZEN:
4186 + case CPU_DOWN_PREPARE:
4187 + case CPU_DOWN_PREPARE_FROZEN:
4188 + case CPU_DOWN_FAILED:
4189 + case CPU_DOWN_FAILED_FROZEN:
4190 partition_sched_domains(1, NULL, NULL);
4191 return NOTIFY_OK;
4192
4193 @@ -9221,7 +9225,7 @@ void __init sched_init_smp(void)
4194 #endif
4195 get_online_cpus();
4196 mutex_lock(&sched_domains_mutex);
4197 - arch_init_sched_domains(cpu_online_mask);
4198 + arch_init_sched_domains(cpu_active_mask);
4199 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
4200 if (cpumask_empty(non_isolated_cpus))
4201 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
4202 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
4203 index 5488a5d..199228b 100644
4204 --- a/kernel/sched_fair.c
4205 +++ b/kernel/sched_fair.c
4206 @@ -1374,6 +1374,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
4207
4208 rcu_read_lock();
4209 for_each_domain(cpu, tmp) {
4210 + if (!(tmp->flags & SD_LOAD_BALANCE))
4211 + continue;
4212 +
4213 /*
4214 * If power savings logic is enabled for a domain, see if we
4215 * are not overloaded, if so, don't balance wider.
4216 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
4217 index 0d949c5..dd84be9 100644
4218 --- a/kernel/sysctl.c
4219 +++ b/kernel/sysctl.c
4220 @@ -1200,6 +1200,7 @@ static struct ctl_table vm_table[] = {
4221 .extra2 = (void *)&hugetlb_infinity,
4222 },
4223 #endif
4224 +#ifdef CONFIG_MMU
4225 {
4226 .ctl_name = VM_LOWMEM_RESERVE_RATIO,
4227 .procname = "lowmem_reserve_ratio",
4228 @@ -1353,6 +1354,7 @@ static struct ctl_table vm_table[] = {
4229 .mode = 0644,
4230 .proc_handler = &mmap_min_addr_handler,
4231 },
4232 +#endif
4233 #ifdef CONFIG_NUMA
4234 {
4235 .ctl_name = CTL_UNNUMBERED,
4236 @@ -1605,7 +1607,8 @@ static struct ctl_table debug_table[] = {
4237 .data = &show_unhandled_signals,
4238 .maxlen = sizeof(int),
4239 .mode = 0644,
4240 - .proc_handler = proc_dointvec
4241 + .proc_handler = proc_dointvec_minmax,
4242 + .extra1 = &zero,
4243 },
4244 #endif
4245 { .ctl_name = 0 }
4246 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
4247 index 620b58a..9484be4 100644
4248 --- a/kernel/time/clockevents.c
4249 +++ b/kernel/time/clockevents.c
4250 @@ -237,8 +237,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
4251 */
4252 void clockevents_notify(unsigned long reason, void *arg)
4253 {
4254 - struct list_head *node, *tmp;
4255 + struct clock_event_device *dev, *tmp;
4256 unsigned long flags;
4257 + int cpu;
4258
4259 spin_lock_irqsave(&clockevents_lock, flags);
4260 clockevents_do_notify(reason, arg);
4261 @@ -249,8 +250,19 @@ void clockevents_notify(unsigned long reason, void *arg)
4262 * Unregister the clock event devices which were
4263 * released from the users in the notify chain.
4264 */
4265 - list_for_each_safe(node, tmp, &clockevents_released)
4266 - list_del(node);
4267 + list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
4268 + list_del(&dev->list);
4269 + /*
4270 + * Now check whether the CPU has left unused per cpu devices
4271 + */
4272 + cpu = *((int *)arg);
4273 + list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
4274 + if (cpumask_test_cpu(cpu, dev->cpumask) &&
4275 + cpumask_weight(dev->cpumask) == 1) {
4276 + BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
4277 + list_del(&dev->list);
4278 + }
4279 + }
4280 break;
4281 default:
4282 break;
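
The switch from list_for_each_safe to list_for_each_entry_safe above still relies on the same rule: deleting entries during traversal requires the iterator to cache the successor before the current node is unlinked. The same idea with an ordinary singly linked list:

    #include <stdlib.h>

    struct node { int cpu; struct node *next; };

    /* Remove every node belonging to `cpu` while walking the list.  As
     * with list_for_each_entry_safe(), the successor is read from the
     * node *before* it may be freed, so deletion cannot derail the walk. */
    static void remove_cpu_nodes(struct node **head, int cpu)
    {
        struct node **pp = head;

        while (*pp) {
            struct node *cur  = *pp;
            struct node *next = cur->next;   /* cache the successor first */

            if (cur->cpu == cpu) {
                *pp = next;                  /* unlink... */
                free(cur);                   /* ...then free safely */
            } else {
                pp = &cur->next;
            }
        }
    }
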
4283 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
4284 index ce6b7ea..5a77c7c 100644
4285 --- a/lib/dma-debug.c
4286 +++ b/lib/dma-debug.c
4287 @@ -670,12 +670,13 @@ static int device_dma_allocations(struct device *dev)
4288 return count;
4289 }
4290
4291 -static int dma_debug_device_change(struct notifier_block *nb,
4292 - unsigned long action, void *data)
4293 +static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
4294 {
4295 struct device *dev = data;
4296 int count;
4297
4298 + if (global_disable)
4299 + return 0;
4300
4301 switch (action) {
4302 case BUS_NOTIFY_UNBOUND_DRIVER:
4303 @@ -697,6 +698,9 @@ void dma_debug_add_bus(struct bus_type *bus)
4304 {
4305 struct notifier_block *nb;
4306
4307 + if (global_disable)
4308 + return;
4309 +
4310 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
4311 if (nb == NULL) {
4312 pr_err("dma_debug_add_bus: out of memory\n");
4313 diff --git a/mm/Kconfig b/mm/Kconfig
4314 index 44cf6f0..2c19c0b 100644
4315 --- a/mm/Kconfig
4316 +++ b/mm/Kconfig
4317 @@ -227,6 +227,7 @@ config KSM
4318
4319 config DEFAULT_MMAP_MIN_ADDR
4320 int "Low address space to protect from user allocation"
4321 + depends on MMU
4322 default 4096
4323 help
4324 This is the portion of low virtual memory which should be protected
4325 diff --git a/mm/internal.h b/mm/internal.h
4326 index 22ec8d2..17bc0df 100644
4327 --- a/mm/internal.h
4328 +++ b/mm/internal.h
4329 @@ -107,9 +107,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
4330 }
4331
4332 /*
4333 - * must be called with vma's mmap_sem held for read, and page locked.
4334 + * must be called with vma's mmap_sem held for read or write, and page locked.
4335 */
4336 extern void mlock_vma_page(struct page *page);
4337 +extern void munlock_vma_page(struct page *page);
4338
4339 /*
4340 * Clear the page's PageMlocked(). This can be useful in a situation where
4341 diff --git a/mm/ksm.c b/mm/ksm.c
4342 index 5575f86..e9501f8 100644
4343 --- a/mm/ksm.c
4344 +++ b/mm/ksm.c
4345 @@ -34,6 +34,7 @@
4346 #include <linux/ksm.h>
4347
4348 #include <asm/tlbflush.h>
4349 +#include "internal.h"
4350
4351 /*
4352 * A few notes about the KSM scanning process,
4353 @@ -767,15 +768,14 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
4354 * ptes are necessarily already write-protected. But in either
4355 * case, we need to lock and check page_count is not raised.
4356 */
4357 - if (write_protect_page(vma, oldpage, &orig_pte)) {
4358 - unlock_page(oldpage);
4359 - goto out_putpage;
4360 - }
4361 - unlock_page(oldpage);
4362 -
4363 - if (pages_identical(oldpage, newpage))
4364 + if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
4365 + pages_identical(oldpage, newpage))
4366 err = replace_page(vma, oldpage, newpage, orig_pte);
4367
4368 + if ((vma->vm_flags & VM_LOCKED) && !err)
4369 + munlock_vma_page(oldpage);
4370 +
4371 + unlock_page(oldpage);
4372 out_putpage:
4373 put_page(oldpage);
4374 put_page(newpage);
4375 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4376 index 6314015..5dc1037 100644
4377 --- a/mm/memcontrol.c
4378 +++ b/mm/memcontrol.c
4379 @@ -758,7 +758,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
4380 task_unlock(task);
4381 if (!curr)
4382 return 0;
4383 - if (curr->use_hierarchy)
4384 + /*
4385 + * We should check use_hierarchy of "mem", not "curr": checking
4386 + * use_hierarchy of "curr" here would make this function return true
4387 + * whenever hierarchy is enabled in "curr" and "curr" is a child of
4388 + * "mem" in the *cgroup* hierarchy, even with use_hierarchy off in "mem".
4389 + */
4390 + if (mem->use_hierarchy)
4391 ret = css_is_ancestor(&curr->css, &mem->css);
4392 else
4393 ret = (curr == mem);
4394 diff --git a/mm/mlock.c b/mm/mlock.c
4395 index bd6f0e4..2e05c97 100644
4396 --- a/mm/mlock.c
4397 +++ b/mm/mlock.c
4398 @@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
4399 * not get another chance to clear PageMlocked. If we successfully
4400 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
4401 * mapping the page, it will restore the PageMlocked state, unless the page
4402 - * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
4403 + * is mapped in a non-linear vma. So, we go ahead and ClearPageMlocked(),
4404 * perhaps redundantly.
4405 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
4406 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
4407 * either of which will restore the PageMlocked state by calling
4408 * mlock_vma_page() above, if it can grab the vma's mmap sem.
4409 */
4410 -static void munlock_vma_page(struct page *page)
4411 +void munlock_vma_page(struct page *page)
4412 {
4413 BUG_ON(!PageLocked(page));
4414
4415 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
4416 index ea2147d..9092b43 100644
4417 --- a/mm/oom_kill.c
4418 +++ b/mm/oom_kill.c
4419 @@ -404,7 +404,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
4420 cpuset_print_task_mems_allowed(current);
4421 task_unlock(current);
4422 dump_stack();
4423 - mem_cgroup_print_oom_info(mem, current);
4424 + mem_cgroup_print_oom_info(mem, p);
4425 show_mem();
4426 if (sysctl_oom_dump_tasks)
4427 dump_tasks(mem);
4428 diff --git a/mm/vmscan.c b/mm/vmscan.c
4429 index 777af57..692807f 100644
4430 --- a/mm/vmscan.c
4431 +++ b/mm/vmscan.c
4432 @@ -1464,20 +1464,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
4433 return low;
4434 }
4435
4436 +static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
4437 + int file)
4438 +{
4439 + if (file)
4440 + return inactive_file_is_low(zone, sc);
4441 + else
4442 + return inactive_anon_is_low(zone, sc);
4443 +}
4444 +
4445 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
4446 struct zone *zone, struct scan_control *sc, int priority)
4447 {
4448 int file = is_file_lru(lru);
4449
4450 - if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
4451 - shrink_active_list(nr_to_scan, zone, sc, priority, file);
4452 + if (is_active_lru(lru)) {
4453 + if (inactive_list_is_low(zone, sc, file))
4454 + shrink_active_list(nr_to_scan, zone, sc, priority, file);
4455 return 0;
4456 }
4457
4458 - if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
4459 - shrink_active_list(nr_to_scan, zone, sc, priority, file);
4460 - return 0;
4461 - }
4462 return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
4463 }
4464
4465 diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
4466 index fa2d6b6..331ead3 100644
4467 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c
4468 +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
4469 @@ -14,6 +14,7 @@
4470 #include <net/route.h>
4471 #include <net/ip.h>
4472
4473 +#include <linux/netfilter_bridge.h>
4474 #include <linux/netfilter_ipv4.h>
4475 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
4476
4477 @@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
4478 return err;
4479 }
4480
4481 +static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
4482 + struct sk_buff *skb)
4483 +{
4484 +#ifdef CONFIG_BRIDGE_NETFILTER
4485 + if (skb->nf_bridge &&
4486 + skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
4487 + return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
4488 +#endif
4489 + if (hooknum == NF_INET_PRE_ROUTING)
4490 + return IP_DEFRAG_CONNTRACK_IN;
4491 + else
4492 + return IP_DEFRAG_CONNTRACK_OUT;
4493 +}
4494 +
4495 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
4496 struct sk_buff *skb,
4497 const struct net_device *in,
4498 @@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
4499 #endif
4500 /* Gather fragments. */
4501 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
4502 - if (nf_ct_ipv4_gather_frags(skb,
4503 - hooknum == NF_INET_PRE_ROUTING ?
4504 - IP_DEFRAG_CONNTRACK_IN :
4505 - IP_DEFRAG_CONNTRACK_OUT))
4506 + enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
4507 + if (nf_ct_ipv4_gather_frags(skb, user))
4508 return NF_STOLEN;
4509 }
4510 return NF_ACCEPT;
4511 diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
4512 index 5f2ec20..0956eba 100644
4513 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
4514 +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
4515 @@ -20,6 +20,7 @@
4516 #include <net/ipv6.h>
4517 #include <net/inet_frag.h>
4518
4519 +#include <linux/netfilter_bridge.h>
4520 #include <linux/netfilter_ipv6.h>
4521 #include <net/netfilter/nf_conntrack.h>
4522 #include <net/netfilter/nf_conntrack_helper.h>
4523 @@ -187,6 +188,21 @@ out:
4524 return nf_conntrack_confirm(skb);
4525 }
4526
4527 +static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
4528 + struct sk_buff *skb)
4529 +{
4530 +#ifdef CONFIG_BRIDGE_NETFILTER
4531 + if (skb->nf_bridge &&
4532 + skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
4533 + return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
4534 +#endif
4535 + if (hooknum == NF_INET_PRE_ROUTING)
4536 + return IP6_DEFRAG_CONNTRACK_IN;
4537 + else
4538 + return IP6_DEFRAG_CONNTRACK_OUT;
4539 +
4540 +}
4541 +
4542 static unsigned int ipv6_defrag(unsigned int hooknum,
4543 struct sk_buff *skb,
4544 const struct net_device *in,
4545 @@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
4546 if (skb->nfct)
4547 return NF_ACCEPT;
4548
4549 - reasm = nf_ct_frag6_gather(skb);
4550 -
4551 + reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
4552 /* queued */
4553 if (reasm == NULL)
4554 return NF_STOLEN;
4555 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
4556 index f3aba25..4b6a539 100644
4557 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
4558 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
4559 @@ -170,13 +170,14 @@ out:
4560 /* Creation primitives. */
4561
4562 static __inline__ struct nf_ct_frag6_queue *
4563 -fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
4564 +fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
4565 {
4566 struct inet_frag_queue *q;
4567 struct ip6_create_arg arg;
4568 unsigned int hash;
4569
4570 arg.id = id;
4571 + arg.user = user;
4572 arg.src = src;
4573 arg.dst = dst;
4574
4575 @@ -561,7 +562,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
4576 return 0;
4577 }
4578
4579 -struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
4580 +struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
4581 {
4582 struct sk_buff *clone;
4583 struct net_device *dev = skb->dev;
4584 @@ -607,7 +608,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
4585 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
4586 nf_ct_frag6_evictor();
4587
4588 - fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
4589 + fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
4590 if (fq == NULL) {
4591 pr_debug("Can't find and can't create new queue\n");
4592 goto ret_orig;
4593 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
4594 index da5bd0e..4d18699 100644
4595 --- a/net/ipv6/reassembly.c
4596 +++ b/net/ipv6/reassembly.c
4597 @@ -72,6 +72,7 @@ struct frag_queue
4598 struct inet_frag_queue q;
4599
4600 __be32 id; /* fragment id */
4601 + u32 user;
4602 struct in6_addr saddr;
4603 struct in6_addr daddr;
4604
4605 @@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
4606 struct ip6_create_arg *arg = a;
4607
4608 fq = container_of(q, struct frag_queue, q);
4609 - return (fq->id == arg->id &&
4610 + return (fq->id == arg->id && fq->user == arg->user &&
4611 ipv6_addr_equal(&fq->saddr, arg->src) &&
4612 ipv6_addr_equal(&fq->daddr, arg->dst));
4613 }
4614 @@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
4615 struct ip6_create_arg *arg = a;
4616
4617 fq->id = arg->id;
4618 + fq->user = arg->user;
4619 ipv6_addr_copy(&fq->saddr, arg->src);
4620 ipv6_addr_copy(&fq->daddr, arg->dst);
4621 }
4622 @@ -244,6 +246,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
4623 unsigned int hash;
4624
4625 arg.id = id;
4626 + arg.user = IP6_DEFRAG_LOCAL_DELIVER;
4627 arg.src = src;
4628 arg.dst = dst;
4629
4630 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
4631 index f1362f3..fbffce9 100644
4632 --- a/net/mac80211/ibss.c
4633 +++ b/net/mac80211/ibss.c
4634 @@ -455,6 +455,10 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
4635
4636 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
4637
4638 + if (time_before(jiffies, ifibss->last_scan_completed +
4639 + IEEE80211_IBSS_MERGE_INTERVAL))
4640 + return;
4641 +
4642 if (ieee80211_sta_active_ibss(sdata))
4643 return;
4644
4645 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4646 index dc5049d..f13d181 100644
4647 --- a/net/mac80211/mlme.c
4648 +++ b/net/mac80211/mlme.c
4649 @@ -904,6 +904,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
4650 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
4651 IEEE80211_STA_BEACON_POLL);
4652
4653 + /*
4654 + * Always handle WMM once after association regardless
4655 + * of the first value the AP uses. Setting -1 here has
4656 + * that effect because the AP's value is an unsigned
4657 + * 4-bit value.
4658 + */
4659 + sdata->u.mgd.wmm_last_param_set = -1;
4660 +
4661 ieee80211_led_assoc(local, 1);
4662
4663 sdata->vif.bss_conf.assoc = 1;
4664 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4665 index eaa4118..d398197 100644
4666 --- a/net/mac80211/tx.c
4667 +++ b/net/mac80211/tx.c
4668 @@ -1401,6 +1401,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
4669
4670 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
4671 local->hw.conf.dynamic_ps_timeout > 0 &&
4672 + !local->quiescing &&
4673 !(local->scanning) && local->ps_sdata) {
4674 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
4675 ieee80211_stop_queues_by_reason(&local->hw,
4676 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
4677 index cbc5d20..51e0bd2 100644
4678 --- a/net/mac80211/util.c
4679 +++ b/net/mac80211/util.c
4680 @@ -1031,7 +1031,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
4681
4682 /* restart hardware */
4683 if (local->open_count) {
4684 + /*
4685 + * Upon resume hardware can sometimes be goofy due to
4686 + * various platform / driver / bus issues, so restarting
4687 + * the device may at times not work immediately. Propagate
4688 + * the error.
4689 + */
4690 res = drv_start(local);
4691 + if (res) {
4692 + WARN(local->suspended, "Hardware became unavailable "
4693 + "upon resume. This could be a software issue "
4694 + "prior to suspend or a hardware issue\n");
4695 + return res;
4696 + }
4697
4698 ieee80211_led_radio(local, true);
4699 }
4700 diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
4701 index 0a6b7a0..0d86248 100644
4702 --- a/net/wireless/mlme.c
4703 +++ b/net/wireless/mlme.c
4704 @@ -94,7 +94,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
4705 }
4706 }
4707
4708 - WARN_ON(!bss);
4709 + /*
4710 + * We might be coming here because the driver reported
4711 + * a successful association at the same time as the
4712 + * user requested a deauth. In that case, we will have
4713 + * removed the BSS from the auth_bsses list due to the
4714 + * deauth request when the assoc response makes it. If
4715 + * the two code paths acquire the lock the other way
4716 + * around, that's just the standard situation of a
4717 + * deauth being requested while connected.
4718 + */
4719 + if (!bss)
4720 + goto out;
4721 } else if (wdev->conn) {
4722 cfg80211_sme_failed_assoc(wdev);
4723 need_connect_result = false;
4724 diff --git a/security/Makefile b/security/Makefile
4725 index 95ecc06..510bbc8 100644
4726 --- a/security/Makefile
4727 +++ b/security/Makefile
4728 @@ -8,7 +8,8 @@ subdir-$(CONFIG_SECURITY_SMACK) += smack
4729 subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
4730
4731 # always enable default capabilities
4732 -obj-y += commoncap.o min_addr.o
4733 +obj-y += commoncap.o
4734 +obj-$(CONFIG_MMU) += min_addr.o
4735
4736 # Object file lists
4737 obj-$(CONFIG_SECURITY) += security.o capability.o
4738 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
4739 index 06ec722..1cad4c7 100644
4740 --- a/security/keys/keyctl.c
4741 +++ b/security/keys/keyctl.c
4742 @@ -1236,6 +1236,7 @@ long keyctl_get_security(key_serial_t keyid,
4743 */
4744 long keyctl_session_to_parent(void)
4745 {
4746 +#ifdef TIF_NOTIFY_RESUME
4747 struct task_struct *me, *parent;
4748 const struct cred *mycred, *pcred;
4749 struct cred *cred, *oldcred;
4750 @@ -1326,6 +1327,15 @@ not_permitted:
4751 error_keyring:
4752 key_ref_put(keyring_r);
4753 return ret;
4754 +
4755 +#else /* !TIF_NOTIFY_RESUME */
4756 + /*
4757 + * To be removed when TIF_NOTIFY_RESUME has been implemented on
4758 + * m68k/xtensa
4759 + */
4760 +#warning TIF_NOTIFY_RESUME not implemented
4761 + return -EOPNOTSUPP;
4762 +#endif /* !TIF_NOTIFY_RESUME */
4763 }
4764
4765 /*****************************************************************************/
4766 diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
4767 index 8691f4c..f1d9d16 100644
4768 --- a/sound/mips/sgio2audio.c
4769 +++ b/sound/mips/sgio2audio.c
4770 @@ -609,7 +609,7 @@ static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
4771 /* alloc virtual 'dma' area */
4772 if (runtime->dma_area)
4773 vfree(runtime->dma_area);
4774 - runtime->dma_area = vmalloc(size);
4775 + runtime->dma_area = vmalloc_user(size);
4776 if (runtime->dma_area == NULL)
4777 return -ENOMEM;
4778 runtime->dma_bytes = size;
4779 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4780 index 7058371..e40d31f 100644
4781 --- a/sound/pci/hda/patch_realtek.c
4782 +++ b/sound/pci/hda/patch_realtek.c
4783 @@ -9141,6 +9141,7 @@ static struct alc_config_preset alc882_presets[] = {
4784 .dac_nids = alc883_dac_nids,
4785 .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
4786 .adc_nids = alc889_adc_nids,
4787 + .capsrc_nids = alc889_capsrc_nids,
4789 .dig_out_nid = ALC883_DIGOUT_NID,
4790 .dig_in_nid = ALC883_DIGIN_NID,
4791 .slave_dig_outs = alc883_slave_dig_outs,
4792 @@ -9187,6 +9189,7 @@ static struct alc_config_preset alc882_presets[] = {
4793 .dac_nids = alc883_dac_nids,
4794 .adc_nids = alc883_adc_nids_alt,
4795 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
4796 + .capsrc_nids = alc883_capsrc_nids,
4797 .dig_out_nid = ALC883_DIGOUT_NID,
4798 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
4799 .channel_mode = alc883_3ST_2ch_modes,
4800 @@ -9333,6 +9336,7 @@ static struct alc_config_preset alc882_presets[] = {
4801 .dac_nids = alc883_dac_nids,
4802 .adc_nids = alc883_adc_nids_alt,
4803 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
4804 + .capsrc_nids = alc883_capsrc_nids,
4805 .num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
4806 .channel_mode = alc883_sixstack_modes,
4807 .input_mux = &alc883_capture_source,
4808 @@ -9394,6 +9398,7 @@ static struct alc_config_preset alc882_presets[] = {
4809 .dac_nids = alc883_dac_nids,
4810 .adc_nids = alc883_adc_nids_alt,
4811 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
4812 + .capsrc_nids = alc883_capsrc_nids,
4813 .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
4814 .channel_mode = alc883_3ST_2ch_modes,
4815 .input_mux = &alc883_lenovo_101e_capture_source,
4816 @@ -9573,6 +9578,7 @@ static struct alc_config_preset alc882_presets[] = {
4817 alc880_gpio1_init_verbs },
4818 .adc_nids = alc883_adc_nids,
4819 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
4820 + .capsrc_nids = alc883_capsrc_nids,
4821 .dac_nids = alc883_dac_nids,
4822 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
4823 .channel_mode = alc889A_mb31_6ch_modes,
4824 diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
4825 index d057e64..5cfa608 100644
4826 --- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
4827 +++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
4828 @@ -51,7 +51,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
4829 return 0; /* already enough large */
4830 vfree(runtime->dma_area);
4831 }
4832 - runtime->dma_area = vmalloc_32(size);
4833 + runtime->dma_area = vmalloc_32_user(size);
4834 if (! runtime->dma_area)
4835 return -ENOMEM;
4836 runtime->dma_bytes = size;
4837 diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
4838 index 98d663a..b0bd1c0 100644
4839 --- a/sound/soc/codecs/wm8974.c
4840 +++ b/sound/soc/codecs/wm8974.c
4841 @@ -47,7 +47,7 @@ static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
4842 };
4843
4844 #define WM8974_POWER1_BIASEN 0x08
4845 -#define WM8974_POWER1_BUFIOEN 0x10
4846 +#define WM8974_POWER1_BUFIOEN 0x04
4847
4848 struct wm8974_priv {
4849 struct snd_soc_codec codec;
4850 diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
4851 index 1fd4e88..e9123f5 100644
4852 --- a/sound/soc/codecs/wm9712.c
4853 +++ b/sound/soc/codecs/wm9712.c
4854 @@ -464,7 +464,8 @@ static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
4855 {
4856 u16 *cache = codec->reg_cache;
4857
4858 - soc_ac97_ops.write(codec->ac97, reg, val);
4859 + if (reg < 0x7c)
4860 + soc_ac97_ops.write(codec->ac97, reg, val);
4861 reg = reg >> 1;
4862 if (reg < (ARRAY_SIZE(wm9712_reg)))
4863 cache[reg] = val;
4864 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
4865 index 8db0374..8803d9d 100644
4866 --- a/sound/usb/usbaudio.c
4867 +++ b/sound/usb/usbaudio.c
4868 @@ -752,7 +752,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
4869 return 0; /* already large enough */
4870 vfree(runtime->dma_area);
4871 }
4872 - runtime->dma_area = vmalloc(size);
4873 + runtime->dma_area = vmalloc_user(size);
4874 if (!runtime->dma_area)
4875 return -ENOMEM;
4876 runtime->dma_bytes = size;
