/[linux-patches]/genpatches-2.6/tags/2.6.30-10/1005_linux-2.6.30.6.patch

Contents of /genpatches-2.6/tags/2.6.30-10/1005_linux-2.6.30.6.patch

Parent Directory | Revision Log


Revision 1628
Fri Nov 6 12:34:12 2009 UTC by mpagano
File size: 126163 bytes
2.6.30-10 release
1 diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
2 index eb98738..391f637 100644
3 --- a/arch/ia64/kernel/pci-dma.c
4 +++ b/arch/ia64/kernel/pci-dma.c
5 @@ -67,11 +67,6 @@ iommu_dma_init(void)
6
7 int iommu_dma_supported(struct device *dev, u64 mask)
8 {
9 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
10 -
11 - if (ops->dma_supported)
12 - return ops->dma_supported(dev, mask);
13 -
14 /* Copied from i386. Doesn't make much sense, because it will
15 only work for pci_alloc_coherent.
16 The caller just has to use GFP_DMA in this case. */
17 diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
18 index b178a1e..40b5cb4 100644
19 --- a/arch/powerpc/platforms/ps3/time.c
20 +++ b/arch/powerpc/platforms/ps3/time.c
21 @@ -21,6 +21,7 @@
22 #include <linux/kernel.h>
23 #include <linux/platform_device.h>
24
25 +#include <asm/firmware.h>
26 #include <asm/rtc.h>
27 #include <asm/lv1call.h>
28 #include <asm/ps3.h>
29 @@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void)
30 {
31 struct platform_device *pdev;
32
33 + if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
34 + return -ENODEV;
35 +
36 pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
37 if (IS_ERR(pdev))
38 return PTR_ERR(pdev);
39 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
40 index a93d1cc..9a9efb0 100644
41 --- a/arch/x86/include/asm/kvm_host.h
42 +++ b/arch/x86/include/asm/kvm_host.h
43 @@ -185,6 +185,7 @@ union kvm_mmu_page_role {
44 unsigned access:3;
45 unsigned invalid:1;
46 unsigned cr4_pge:1;
47 + unsigned nxe:1;
48 };
49 };
50
51 @@ -513,6 +514,8 @@ struct kvm_x86_ops {
52 void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
53 int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
54 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
55 + void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
56 + u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
57 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
58 unsigned char *hypercall_addr);
59 int (*get_irq)(struct kvm_vcpu *vcpu);
60 diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h
61 index 6a15973..b7ed2c4 100644
62 --- a/arch/x86/include/asm/kvm_x86_emulate.h
63 +++ b/arch/x86/include/asm/kvm_x86_emulate.h
64 @@ -143,6 +143,9 @@ struct decode_cache {
65 struct fetch_cache fetch;
66 };
67
68 +#define X86_SHADOW_INT_MOV_SS 1
69 +#define X86_SHADOW_INT_STI 2
70 +
71 struct x86_emulate_ctxt {
72 /* Register state before/after emulation. */
73 struct kvm_vcpu *vcpu;
74 @@ -152,6 +155,9 @@ struct x86_emulate_ctxt {
75 int mode;
76 u32 cs_base;
77
78 + /* interruptibility state, as a result of execution of STI or MOV SS */
79 + int interruptibility;
80 +
81 /* decode cache */
82 struct decode_cache decode;
83 };
84 diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
85 index dbf5445..6ef00ba 100644
86 --- a/arch/x86/kernel/apic/ipi.c
87 +++ b/arch/x86/kernel/apic/ipi.c
88 @@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
89 unsigned long mask = cpumask_bits(cpumask)[0];
90 unsigned long flags;
91
92 + if (WARN_ONCE(!mask, "empty IPI mask"))
93 + return;
94 +
95 local_irq_save(flags);
96 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
97 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
98 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
99 index d869b3b..61a592e 100644
100 --- a/arch/x86/kernel/cpu/amd.c
101 +++ b/arch/x86/kernel/cpu/amd.c
102 @@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
103 #endif
104 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
105 /* check CPU config space for extended APIC ID */
106 - if (c->x86 >= 0xf) {
107 + if (cpu_has_apic && c->x86 >= 0xf) {
108 unsigned int val;
109 val = read_pci_config(0, 24, 0, 0x68);
110 if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
111 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
112 index 32cf11e..d7ce26b 100644
113 --- a/arch/x86/kvm/mmu.c
114 +++ b/arch/x86/kvm/mmu.c
115 @@ -490,16 +490,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
116 *
117 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
118 * containing more mappings.
119 + *
120 + * Returns the number of rmap entries before the spte was added or zero if
121 + * the spte was not added.
122 + *
123 */
124 -static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
125 +static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
126 {
127 struct kvm_mmu_page *sp;
128 struct kvm_rmap_desc *desc;
129 unsigned long *rmapp;
130 - int i;
131 + int i, count = 0;
132
133 if (!is_rmap_pte(*spte))
134 - return;
135 + return count;
136 gfn = unalias_gfn(vcpu->kvm, gfn);
137 sp = page_header(__pa(spte));
138 sp->gfns[spte - sp->spt] = gfn;
139 @@ -516,8 +520,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
140 } else {
141 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
142 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
143 - while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
144 + while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
145 desc = desc->more;
146 + count += RMAP_EXT;
147 + }
148 if (desc->shadow_ptes[RMAP_EXT-1]) {
149 desc->more = mmu_alloc_rmap_desc(vcpu);
150 desc = desc->more;
151 @@ -526,6 +532,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
152 ;
153 desc->shadow_ptes[i] = spte;
154 }
155 + return count;
156 }
157
158 static void rmap_desc_remove_entry(unsigned long *rmapp,
159 @@ -755,6 +762,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
160 return young;
161 }
162
163 +#define RMAP_RECYCLE_THRESHOLD 1000
164 +
165 +static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
166 +{
167 + unsigned long *rmapp;
168 +
169 + gfn = unalias_gfn(vcpu->kvm, gfn);
170 + rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
171 +
172 + kvm_unmap_rmapp(vcpu->kvm, rmapp);
173 + kvm_flush_remote_tlbs(vcpu->kvm);
174 +}
175 +
176 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
177 {
178 return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
179 @@ -1417,24 +1437,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
180 */
181 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
182 {
183 + int used_pages;
184 +
185 + used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
186 + used_pages = max(0, used_pages);
187 +
188 /*
189 * If we set the number of mmu pages to be smaller be than the
190 * number of actived pages , we must to free some mmu pages before we
191 * change the value
192 */
193
194 - if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
195 - kvm_nr_mmu_pages) {
196 - int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
197 - - kvm->arch.n_free_mmu_pages;
198 -
199 - while (n_used_mmu_pages > kvm_nr_mmu_pages) {
200 + if (used_pages > kvm_nr_mmu_pages) {
201 + while (used_pages > kvm_nr_mmu_pages) {
202 struct kvm_mmu_page *page;
203
204 page = container_of(kvm->arch.active_mmu_pages.prev,
205 struct kvm_mmu_page, link);
206 kvm_mmu_zap_page(kvm, page);
207 - n_used_mmu_pages--;
208 + used_pages--;
209 }
210 kvm->arch.n_free_mmu_pages = 0;
211 }
212 @@ -1770,6 +1791,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
213 {
214 int was_rmapped = 0;
215 int was_writeble = is_writeble_pte(*shadow_pte);
216 + int rmap_count;
217
218 pgprintk("%s: spte %llx access %x write_fault %d"
219 " user_fault %d gfn %lx\n",
220 @@ -1811,9 +1833,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
221
222 page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
223 if (!was_rmapped) {
224 - rmap_add(vcpu, shadow_pte, gfn, largepage);
225 + rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
226 if (!is_rmap_pte(*shadow_pte))
227 kvm_release_pfn_clean(pfn);
228 + if (rmap_count > RMAP_RECYCLE_THRESHOLD)
229 + rmap_recycle(vcpu, gfn, largepage);
230 } else {
231 if (was_writeble)
232 kvm_release_pfn_dirty(pfn);
233 @@ -1942,7 +1966,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
234 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
235 }
236
237 -static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
238 +static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
239 +{
240 + int ret = 0;
241 +
242 + if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
243 + set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
244 + ret = 1;
245 + }
246 +
247 + return ret;
248 +}
249 +
250 +static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
251 {
252 int i;
253 gfn_t root_gfn;
254 @@ -1957,13 +1993,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
255 ASSERT(!VALID_PAGE(root));
256 if (tdp_enabled)
257 direct = 1;
258 + if (mmu_check_root(vcpu, root_gfn))
259 + return 1;
260 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
261 PT64_ROOT_LEVEL, direct,
262 ACC_ALL, NULL);
263 root = __pa(sp->spt);
264 ++sp->root_count;
265 vcpu->arch.mmu.root_hpa = root;
266 - return;
267 + return 0;
268 }
269 direct = !is_paging(vcpu);
270 if (tdp_enabled)
271 @@ -1980,6 +2018,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
272 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
273 } else if (vcpu->arch.mmu.root_level == 0)
274 root_gfn = 0;
275 + if (mmu_check_root(vcpu, root_gfn))
276 + return 1;
277 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
278 PT32_ROOT_LEVEL, direct,
279 ACC_ALL, NULL);
280 @@ -1988,6 +2028,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
281 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
282 }
283 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
284 + return 0;
285 }
286
287 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
288 @@ -2006,7 +2047,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
289 for (i = 0; i < 4; ++i) {
290 hpa_t root = vcpu->arch.mmu.pae_root[i];
291
292 - if (root) {
293 + if (root && VALID_PAGE(root)) {
294 root &= PT64_BASE_ADDR_MASK;
295 sp = page_header(root);
296 mmu_sync_children(vcpu, sp);
297 @@ -2290,9 +2331,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
298 goto out;
299 spin_lock(&vcpu->kvm->mmu_lock);
300 kvm_mmu_free_some_pages(vcpu);
301 - mmu_alloc_roots(vcpu);
302 + r = mmu_alloc_roots(vcpu);
303 mmu_sync_roots(vcpu);
304 spin_unlock(&vcpu->kvm->mmu_lock);
305 + if (r)
306 + goto out;
307 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
308 kvm_mmu_flush_tlb(vcpu);
309 out:
310 @@ -2638,14 +2681,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
311
312 static void free_mmu_pages(struct kvm_vcpu *vcpu)
313 {
314 - struct kvm_mmu_page *sp;
315 -
316 - while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
317 - sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
318 - struct kvm_mmu_page, link);
319 - kvm_mmu_zap_page(vcpu->kvm, sp);
320 - cond_resched();
321 - }
322 free_page((unsigned long)vcpu->arch.mmu.pae_root);
323 }
324
325 @@ -2710,7 +2745,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
326 {
327 struct kvm_mmu_page *sp;
328
329 - spin_lock(&kvm->mmu_lock);
330 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
331 int i;
332 u64 *pt;
333 @@ -2725,7 +2759,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
334 pt[i] &= ~PT_WRITABLE_MASK;
335 }
336 kvm_flush_remote_tlbs(kvm);
337 - spin_unlock(&kvm->mmu_lock);
338 }
339
340 void kvm_mmu_zap_all(struct kvm *kvm)
341 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
342 index 1f8510c..5700009 100644
343 --- a/arch/x86/kvm/svm.c
344 +++ b/arch/x86/kvm/svm.c
345 @@ -227,6 +227,27 @@ static int is_external_interrupt(u32 info)
346 return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
347 }
348
349 +static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
350 +{
351 + struct vcpu_svm *svm = to_svm(vcpu);
352 + u32 ret = 0;
353 +
354 + if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
355 + ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
356 + return ret & mask;
357 +}
358 +
359 +static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
360 +{
361 + struct vcpu_svm *svm = to_svm(vcpu);
362 +
363 + if (mask == 0)
364 + svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
365 + else
366 + svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
367 +
368 +}
369 +
370 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
371 {
372 struct vcpu_svm *svm = to_svm(vcpu);
373 @@ -240,7 +261,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
374 __func__, kvm_rip_read(vcpu), svm->next_rip);
375
376 kvm_rip_write(vcpu, svm->next_rip);
377 - svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
378 + svm_set_interrupt_shadow(vcpu, 0);
379
380 vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
381 }
382 @@ -715,6 +736,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
383 svm->vmcb->control.tsc_offset += delta;
384 vcpu->cpu = cpu;
385 kvm_migrate_timers(vcpu);
386 + svm->asid_generation = 0;
387 }
388
389 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
390 @@ -1025,7 +1047,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
391 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
392 }
393
394 - svm->vcpu.cpu = svm_data->cpu;
395 svm->asid_generation = svm_data->asid_generation;
396 svm->vmcb->control.asid = svm_data->next_asid++;
397 }
398 @@ -2237,8 +2258,8 @@ static void pre_svm_run(struct vcpu_svm *svm)
399 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
400
401 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
402 - if (svm->vcpu.cpu != cpu ||
403 - svm->asid_generation != svm_data->asid_generation)
404 + /* FIXME: handle wraparound of asid_generation */
405 + if (svm->asid_generation != svm_data->asid_generation)
406 new_asid(svm, svm_data);
407 }
408
409 @@ -2667,6 +2688,8 @@ static struct kvm_x86_ops svm_x86_ops = {
410 .run = svm_vcpu_run,
411 .handle_exit = handle_exit,
412 .skip_emulated_instruction = skip_emulated_instruction,
413 + .set_interrupt_shadow = svm_set_interrupt_shadow,
414 + .get_interrupt_shadow = svm_get_interrupt_shadow,
415 .patch_hypercall = svm_patch_hypercall,
416 .get_irq = svm_get_irq,
417 .set_irq = svm_set_irq,
418 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
419 index fa0adcd..1a0d5cd 100644
420 --- a/arch/x86/kvm/vmx.c
421 +++ b/arch/x86/kvm/vmx.c
422 @@ -732,23 +732,45 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
423 vmcs_writel(GUEST_RFLAGS, rflags);
424 }
425
426 +static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
427 +{
428 + u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
429 + int ret = 0;
430 +
431 + if (interruptibility & GUEST_INTR_STATE_STI)
432 + ret |= X86_SHADOW_INT_STI;
433 + if (interruptibility & GUEST_INTR_STATE_MOV_SS)
434 + ret |= X86_SHADOW_INT_MOV_SS;
435 +
436 + return ret & mask;
437 +}
438 +
439 +static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
440 +{
441 + u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
442 + u32 interruptibility = interruptibility_old;
443 +
444 + interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
445 +
446 + if (mask & X86_SHADOW_INT_MOV_SS)
447 + interruptibility |= GUEST_INTR_STATE_MOV_SS;
448 + if (mask & X86_SHADOW_INT_STI)
449 + interruptibility |= GUEST_INTR_STATE_STI;
450 +
451 + if ((interruptibility != interruptibility_old))
452 + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
453 +}
454 +
455 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
456 {
457 unsigned long rip;
458 - u32 interruptibility;
459
460 rip = kvm_rip_read(vcpu);
461 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
462 kvm_rip_write(vcpu, rip);
463
464 - /*
465 - * We emulated an instruction, so temporary interrupt blocking
466 - * should be removed, if set.
467 - */
468 - interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
469 - if (interruptibility & 3)
470 - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
471 - interruptibility & ~3);
472 + /* skipping an emulated instruction also counts */
473 + vmx_set_interrupt_shadow(vcpu, 0);
474 vcpu->arch.interrupt_window_open = 1;
475 }
476
477 @@ -3738,6 +3760,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
478 .run = vmx_vcpu_run,
479 .handle_exit = kvm_handle_exit,
480 .skip_emulated_instruction = skip_emulated_instruction,
481 + .set_interrupt_shadow = vmx_set_interrupt_shadow,
482 + .get_interrupt_shadow = vmx_get_interrupt_shadow,
483 .patch_hypercall = vmx_patch_hypercall,
484 .get_irq = vmx_get_irq,
485 .set_irq = vmx_inject_irq,
486 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
487 index ee4714b..0b1bfc6 100644
488 --- a/arch/x86/kvm/x86.c
489 +++ b/arch/x86/kvm/x86.c
490 @@ -523,6 +523,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
491 efer |= vcpu->arch.shadow_efer & EFER_LMA;
492
493 vcpu->arch.shadow_efer = efer;
494 +
495 + vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
496 + kvm_mmu_reset_context(vcpu);
497 }
498
499 void kvm_enable_efer_bits(u64 mask)
500 @@ -703,11 +706,48 @@ static bool msr_mtrr_valid(unsigned msr)
501 return false;
502 }
503
504 +static bool valid_pat_type(unsigned t)
505 +{
506 + return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
507 +}
508 +
509 +static bool valid_mtrr_type(unsigned t)
510 +{
511 + return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
512 +}
513 +
514 +static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
515 +{
516 + int i;
517 +
518 + if (!msr_mtrr_valid(msr))
519 + return false;
520 +
521 + if (msr == MSR_IA32_CR_PAT) {
522 + for (i = 0; i < 8; i++)
523 + if (!valid_pat_type((data >> (i * 8)) & 0xff))
524 + return false;
525 + return true;
526 + } else if (msr == MSR_MTRRdefType) {
527 + if (data & ~0xcff)
528 + return false;
529 + return valid_mtrr_type(data & 0xff);
530 + } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
531 + for (i = 0; i < 8 ; i++)
532 + if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
533 + return false;
534 + return true;
535 + }
536 +
537 + /* variable MTRRs */
538 + return valid_mtrr_type(data & 0xff);
539 +}
540 +
541 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
542 {
543 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
544
545 - if (!msr_mtrr_valid(msr))
546 + if (!mtrr_valid(vcpu, msr, data))
547 return 1;
548
549 if (msr == MSR_MTRRdefType) {
550 @@ -895,6 +935,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
551 case MSR_IA32_LASTINTFROMIP:
552 case MSR_IA32_LASTINTTOIP:
553 case MSR_VM_HSAVE_PA:
554 + case MSR_P6_EVNTSEL0:
555 + case MSR_P6_EVNTSEL1:
556 + case MSR_K7_EVNTSEL0:
557 data = 0;
558 break;
559 case MSR_MTRRcap:
560 @@ -1074,14 +1117,13 @@ long kvm_arch_dev_ioctl(struct file *filp,
561 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
562 goto out;
563 r = -E2BIG;
564 - if (n < num_msrs_to_save)
565 + if (n < msr_list.nmsrs)
566 goto out;
567 r = -EFAULT;
568 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
569 num_msrs_to_save * sizeof(u32)))
570 goto out;
571 - if (copy_to_user(user_msr_list->indices
572 - + num_msrs_to_save * sizeof(u32),
573 + if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
574 &emulated_msrs,
575 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
576 goto out;
577 @@ -1250,9 +1292,12 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
578 bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
579 bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
580 bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
581 + bit(X86_FEATURE_MCE) |
582 bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
583 - bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
584 - bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
585 + bit(X86_FEATURE_SEP) | bit(X86_FEATURE_MTRR) |
586 + bit(X86_FEATURE_PGE) | bit(X86_FEATURE_MCA) |
587 + bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PAT) |
588 + bit(X86_FEATURE_PSE36) |
589 bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
590 bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
591 bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
592 @@ -1608,10 +1653,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
593 return -EINVAL;
594
595 down_write(&kvm->slots_lock);
596 + spin_lock(&kvm->mmu_lock);
597
598 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
599 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
600
601 + spin_unlock(&kvm->mmu_lock);
602 up_write(&kvm->slots_lock);
603 return 0;
604 }
605 @@ -1787,7 +1834,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
606
607 /* If nothing is dirty, don't bother messing with page tables. */
608 if (is_dirty) {
609 + spin_lock(&kvm->mmu_lock);
610 kvm_mmu_slot_remove_write_access(kvm, log->slot);
611 + spin_unlock(&kvm->mmu_lock);
612 kvm_flush_remote_tlbs(kvm);
613 memslot = &kvm->memslots[log->slot];
614 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
615 @@ -2362,7 +2411,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
616 u16 error_code,
617 int emulation_type)
618 {
619 - int r;
620 + int r, shadow_mask;
621 struct decode_cache *c;
622
623 kvm_clear_exception_queue(vcpu);
624 @@ -2411,6 +2460,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
625 }
626
627 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
628 + shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;
629 +
630 + if (r == 0)
631 + kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);
632
633 if (vcpu->arch.pio.string)
634 return EMULATE_DO_MMIO;
635 @@ -4419,12 +4472,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
636 }
637 }
638
639 + spin_lock(&kvm->mmu_lock);
640 if (!kvm->arch.n_requested_mmu_pages) {
641 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
642 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
643 }
644
645 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
646 + spin_unlock(&kvm->mmu_lock);
647 kvm_flush_remote_tlbs(kvm);
648
649 return 0;
650 @@ -4433,6 +4488,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
651 void kvm_arch_flush_shadow(struct kvm *kvm)
652 {
653 kvm_mmu_zap_all(kvm);
654 + kvm_reload_remote_mmus(kvm);
655 }
656
657 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
658 diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
659 index ca91749..d80126f 100644
660 --- a/arch/x86/kvm/x86_emulate.c
661 +++ b/arch/x86/kvm/x86_emulate.c
662 @@ -1349,6 +1349,20 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
663 return 0;
664 }
665
666 +void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
667 +{
668 + u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
669 + /*
670 + * an sti; sti; sequence only disable interrupts for the first
671 + * instruction. So, if the last instruction, be it emulated or
672 + * not, left the system with the INT_STI flag enabled, it
673 + * means that the last instruction is an sti. We should not
674 + * leave the flag on in this case. The same goes for mov ss
675 + */
676 + if (!(int_shadow & mask))
677 + ctxt->interruptibility = mask;
678 +}
679 +
680 int
681 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
682 {
683 @@ -1360,6 +1374,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
684 int io_dir_in;
685 int rc = 0;
686
687 + ctxt->interruptibility = 0;
688 +
689 /* Shadow copy of register state. Committed on successful emulation.
690 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
691 * modify them.
692 @@ -1609,6 +1625,9 @@ special_insn:
693 int err;
694
695 sel = c->src.val;
696 + if (c->modrm_reg == VCPU_SREG_SS)
697 + toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
698 +
699 if (c->modrm_reg <= 5) {
700 type_bits = (c->modrm_reg == 1) ? 9 : 1;
701 err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
702 @@ -1865,6 +1884,7 @@ special_insn:
703 c->dst.type = OP_NONE; /* Disable writeback. */
704 break;
705 case 0xfb: /* sti */
706 + toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
707 ctxt->eflags |= X86_EFLAGS_IF;
708 c->dst.type = OP_NONE; /* Disable writeback. */
709 break;
710 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
711 index 821e970..c814e14 100644
712 --- a/arch/x86/mm/tlb.c
713 +++ b/arch/x86/mm/tlb.c
714 @@ -183,18 +183,17 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
715
716 f->flush_mm = mm;
717 f->flush_va = va;
718 - cpumask_andnot(to_cpumask(f->flush_cpumask),
719 - cpumask, cpumask_of(smp_processor_id()));
720 -
721 - /*
722 - * We have to send the IPI only to
723 - * CPUs affected.
724 - */
725 - apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
726 - INVALIDATE_TLB_VECTOR_START + sender);
727 + if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
728 + /*
729 + * We have to send the IPI only to
730 + * CPUs affected.
731 + */
732 + apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
733 + INVALIDATE_TLB_VECTOR_START + sender);
734
735 - while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
736 - cpu_relax();
737 + while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
738 + cpu_relax();
739 + }
740
741 f->flush_mm = NULL;
742 f->flush_va = 0;
743 diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
744 index 39838c6..31adda1 100644
745 --- a/drivers/acpi/processor_thermal.c
746 +++ b/drivers/acpi/processor_thermal.c
747 @@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
748 if (pr->limit.thermal.tx > tx)
749 tx = pr->limit.thermal.tx;
750
751 - result = acpi_processor_set_throttling(pr, tx);
752 + result = acpi_processor_set_throttling(pr, tx, false);
753 if (result)
754 goto end;
755 }
756 @@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
757
758 if (state <= max_pstate) {
759 if (pr->flags.throttling && pr->throttling.state)
760 - result = acpi_processor_set_throttling(pr, 0);
761 + result = acpi_processor_set_throttling(pr, 0, false);
762 cpufreq_set_cur_state(pr->id, state);
763 } else {
764 cpufreq_set_cur_state(pr->id, max_pstate);
765 result = acpi_processor_set_throttling(pr,
766 - state - max_pstate);
767 + state - max_pstate, false);
768 }
769 return result;
770 }
771 diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
772 index 2275437..841be4e 100644
773 --- a/drivers/acpi/processor_throttling.c
774 +++ b/drivers/acpi/processor_throttling.c
775 @@ -62,7 +62,8 @@ struct throttling_tstate {
776 #define THROTTLING_POSTCHANGE (2)
777
778 static int acpi_processor_get_throttling(struct acpi_processor *pr);
779 -int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
780 +int acpi_processor_set_throttling(struct acpi_processor *pr,
781 + int state, bool force);
782
783 static int acpi_processor_update_tsd_coord(void)
784 {
785 @@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
786 */
787 target_state = throttling_limit;
788 }
789 - return acpi_processor_set_throttling(pr, target_state);
790 + return acpi_processor_set_throttling(pr, target_state, false);
791 }
792
793 /*
794 @@ -842,7 +843,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
795 ACPI_WARNING((AE_INFO,
796 "Invalid throttling state, reset"));
797 state = 0;
798 - ret = acpi_processor_set_throttling(pr, state);
799 + ret = acpi_processor_set_throttling(pr, state, true);
800 if (ret)
801 return ret;
802 }
803 @@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
804 }
805
806 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
807 - int state)
808 + int state, bool force)
809 {
810 u32 value = 0;
811 u32 duty_mask = 0;
812 @@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
813 if (!pr->flags.throttling)
814 return -ENODEV;
815
816 - if (state == pr->throttling.state)
817 + if (!force && (state == pr->throttling.state))
818 return 0;
819
820 if (state < pr->throttling_platform_limit)
821 @@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
822 }
823
824 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
825 - int state)
826 + int state, bool force)
827 {
828 int ret;
829 acpi_integer value;
830 @@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
831 if (!pr->flags.throttling)
832 return -ENODEV;
833
834 - if (state == pr->throttling.state)
835 + if (!force && (state == pr->throttling.state))
836 return 0;
837
838 if (state < pr->throttling_platform_limit)
839 @@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
840 return 0;
841 }
842
843 -int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
844 +int acpi_processor_set_throttling(struct acpi_processor *pr,
845 + int state, bool force)
846 {
847 cpumask_var_t saved_mask;
848 int ret = 0;
849 @@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
850 /* FIXME: use work_on_cpu() */
851 set_cpus_allowed_ptr(current, cpumask_of(pr->id));
852 ret = p_throttling->acpi_processor_set_throttling(pr,
853 - t_state.target_state);
854 + t_state.target_state, force);
855 } else {
856 /*
857 * When the T-state coordination is SW_ALL or HW_ALL,
858 @@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
859 set_cpus_allowed_ptr(current, cpumask_of(i));
860 ret = match_pr->throttling.
861 acpi_processor_set_throttling(
862 - match_pr, t_state.target_state);
863 + match_pr, t_state.target_state, force);
864 }
865 }
866 /*
867 @@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
868 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
869 "Disabling throttling (was T%d)\n",
870 pr->throttling.state));
871 - result = acpi_processor_set_throttling(pr, 0);
872 + result = acpi_processor_set_throttling(pr, 0, false);
873 if (result)
874 goto end;
875 }
876 @@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
877 if (strcmp(tmpbuf, charp) != 0)
878 return -EINVAL;
879
880 - result = acpi_processor_set_throttling(pr, state_val);
881 + result = acpi_processor_set_throttling(pr, state_val, false);
882 if (result)
883 return result;
884
885 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
886 index c924230..7b2f499 100644
887 --- a/drivers/ata/libata-core.c
888 +++ b/drivers/ata/libata-core.c
889 @@ -4271,6 +4271,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
890 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
891 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
892
893 + /* this one allows HPA unlocking but fails IOs on the area */
894 + { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
895 +
896 /* Devices which report 1 sector over size HPA */
897 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
898 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
899 diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
900 index 19e0bc6..504f849 100644
901 --- a/drivers/media/video/gspca/ov534.c
902 +++ b/drivers/media/video/gspca/ov534.c
903 @@ -832,9 +832,11 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
904 __u32 this_pts;
905 u16 this_fid;
906 int remaining_len = len;
907 + int payload_len;
908
909 + payload_len = (sd->sensor == SENSOR_OV772X) ? 2048 : 2040;
910 do {
911 - len = min(remaining_len, 2040); /*fixme: was 2048*/
912 + len = min(remaining_len, payload_len);
913
914 /* Payloads are prefixed with a UVC-style header. We
915 consider a frame to start when the FID toggles, or the PTS
916 diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
917 index 16a4138..4567e90 100644
918 --- a/drivers/net/ehea/ehea.h
919 +++ b/drivers/net/ehea/ehea.h
920 @@ -40,7 +40,7 @@
921 #include <asm/io.h>
922
923 #define DRV_NAME "ehea"
924 -#define DRV_VERSION "EHEA_0101"
925 +#define DRV_VERSION "EHEA_0102"
926
927 /* eHEA capability flags */
928 #define DLPAR_PORT_ADD_REM 1
929 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
930 index 0a7a288..9bc4775 100644
931 --- a/drivers/net/ehea/ehea_main.c
932 +++ b/drivers/net/ehea/ehea_main.c
933 @@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
934 {
935 int ret, i;
936
937 + if (pr->qp)
938 + netif_napi_del(&pr->napi);
939 +
940 ret = ehea_destroy_qp(pr->qp);
941
942 if (!ret) {
943 diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c
944 index 5996ff9..f839c83 100644
945 --- a/drivers/net/wireless/ar9170/main.c
946 +++ b/drivers/net/wireless/ar9170/main.c
947 @@ -1486,13 +1486,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
948 int ret;
949
950 mutex_lock(&ar->mutex);
951 - if ((param) && !(queue > ar->hw->queues)) {
952 + if (queue < __AR9170_NUM_TXQ) {
953 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
954 param, sizeof(*param));
955
956 ret = ar9170_set_qos(ar);
957 - } else
958 + } else {
959 ret = -EINVAL;
960 + }
961
962 mutex_unlock(&ar->mutex);
963 return ret;
964 diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
965 index 2ad9faf..fc3a95f 100644
966 --- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
967 +++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
968 @@ -53,22 +53,31 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
969 switch (state) {
970 case RFKILL_STATE_UNBLOCKED:
971 if (iwl_is_rfkill_hw(priv)) {
972 + /* pass error to rfkill core, make it state HARD
973 + * BLOCKED (rfkill->mutex taken) and disable
974 + * software kill switch */
975 err = -EBUSY;
976 - goto out_unlock;
977 + priv->rfkill->state = RFKILL_STATE_HARD_BLOCKED;
978 }
979 iwl_radio_kill_sw_enable_radio(priv);
980 break;
981 case RFKILL_STATE_SOFT_BLOCKED:
982 iwl_radio_kill_sw_disable_radio(priv);
983 + /* rfkill->mutex is taken */
984 + if (priv->rfkill->state == RFKILL_STATE_HARD_BLOCKED) {
985 + /* force rfkill core state to be SOFT BLOCKED,
986 + * otherwise core will be unable to disable software
987 + * kill switch */
988 + priv->rfkill->state = RFKILL_STATE_SOFT_BLOCKED;
989 + }
990 break;
991 default:
992 IWL_WARN(priv, "we received unexpected RFKILL state %d\n",
993 state);
994 break;
995 }
996 -out_unlock:
997 - mutex_unlock(&priv->mutex);
998
999 + mutex_unlock(&priv->mutex);
1000 return err;
1001 }
1002
1003 @@ -132,14 +141,11 @@ void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
1004 if (!priv->rfkill)
1005 return;
1006
1007 - if (iwl_is_rfkill_hw(priv)) {
1008 + if (iwl_is_rfkill_sw(priv))
1009 + rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
1010 + else if (iwl_is_rfkill_hw(priv))
1011 rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
1012 - return;
1013 - }
1014 -
1015 - if (!iwl_is_rfkill_sw(priv))
1016 - rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
1017 else
1018 - rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
1019 + rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
1020 }
1021 EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
1022 diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
1023 index 44ab03a..da2e2d4 100644
1024 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c
1025 +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
1026 @@ -560,6 +560,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
1027 unsigned long flags;
1028
1029 spin_lock_irqsave(&priv->sta_lock, flags);
1030 + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
1031 + keyconf->keyidx);
1032
1033 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
1034 IWL_ERR(priv, "index %d not used in uCode key table.\n",
1035 @@ -567,6 +569,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
1036
1037 priv->default_wep_key--;
1038 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
1039 + if (iwl_is_rfkill(priv)) {
1040 + IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
1041 + spin_unlock_irqrestore(&priv->sta_lock, flags);
1042 + return 0;
1043 + }
1044 ret = iwl_send_static_wepkey_cmd(priv, 1);
1045 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
1046 keyconf->keyidx, ret);
1047 @@ -847,6 +854,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1048 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1049 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1050
1051 + if (iwl_is_rfkill(priv)) {
1052 + IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
1053 + spin_unlock_irqrestore(&priv->sta_lock, flags);
1054 + return 0;
1055 + }
1056 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1057 spin_unlock_irqrestore(&priv->sta_lock, flags);
1058 return ret;
1059 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
1060 index 84bd6f1..c242b54 100644
1061 --- a/drivers/net/wireless/rt2x00/rt2x00.h
1062 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
1063 @@ -814,13 +814,15 @@ struct rt2x00_dev {
1064 static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
1065 const unsigned int word, u32 *data)
1066 {
1067 - *data = rt2x00dev->rf[word];
1068 + BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
1069 + *data = rt2x00dev->rf[word - 1];
1070 }
1071
1072 static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
1073 const unsigned int word, u32 data)
1074 {
1075 - rt2x00dev->rf[word] = data;
1076 + BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
1077 + rt2x00dev->rf[word - 1] = data;
1078 }
1079
1080 /*
1081 diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
1082 index 043b208..f215a59 100644
1083 --- a/drivers/platform/x86/wmi.c
1084 +++ b/drivers/platform/x86/wmi.c
1085 @@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
1086 acpi_status status;
1087 struct acpi_object_list input;
1088 union acpi_object params[3];
1089 - char method[4] = "WM";
1090 + char method[5] = "WM";
1091
1092 if (!find_guid(guid_string, &wblock))
1093 return AE_ERROR;
1094 @@ -328,8 +328,8 @@ struct acpi_buffer *out)
1095 acpi_status status, wc_status = AE_ERROR;
1096 struct acpi_object_list input, wc_input;
1097 union acpi_object wc_params[1], wq_params[1];
1098 - char method[4];
1099 - char wc_method[4] = "WC";
1100 + char method[5];
1101 + char wc_method[5] = "WC";
1102
1103 if (!guid_string || !out)
1104 return AE_BAD_PARAMETER;
1105 @@ -410,7 +410,7 @@ const struct acpi_buffer *in)
1106 acpi_handle handle;
1107 struct acpi_object_list input;
1108 union acpi_object params[2];
1109 - char method[4] = "WS";
1110 + char method[5] = "WS";
1111
1112 if (!guid_string || !in)
1113 return AE_BAD_DATA;
1114 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
1115 index f3da592..35a1386 100644
1116 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
1117 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
1118 @@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work)
1119 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1120 }
1121
1122 +/**
1123 + * mpt2sas_base_start_watchdog - start the fault_reset_work_q
1124 + * @ioc: pointer to scsi command object
1125 + * Context: sleep.
1126 + *
1127 + * Return nothing.
1128 + */
1129 +void
1130 +mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
1131 +{
1132 + unsigned long flags;
1133 +
1134 + if (ioc->fault_reset_work_q)
1135 + return;
1136 +
1137 + /* initialize fault polling */
1138 + INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
1139 + snprintf(ioc->fault_reset_work_q_name,
1140 + sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
1141 + ioc->fault_reset_work_q =
1142 + create_singlethread_workqueue(ioc->fault_reset_work_q_name);
1143 + if (!ioc->fault_reset_work_q) {
1144 + printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
1145 + ioc->name, __func__, __LINE__);
1146 + return;
1147 + }
1148 + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1149 + if (ioc->fault_reset_work_q)
1150 + queue_delayed_work(ioc->fault_reset_work_q,
1151 + &ioc->fault_reset_work,
1152 + msecs_to_jiffies(FAULT_POLLING_INTERVAL));
1153 + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1154 +}
1155 +
1156 +/**
1157 + * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
1158 + * @ioc: pointer to scsi command object
1159 + * Context: sleep.
1160 + *
1161 + * Return nothing.
1162 + */
1163 +void
1164 +mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
1165 +{
1166 + unsigned long flags;
1167 + struct workqueue_struct *wq;
1168 +
1169 + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1170 + wq = ioc->fault_reset_work_q;
1171 + ioc->fault_reset_work_q = NULL;
1172 + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1173 + if (wq) {
1174 + if (!cancel_delayed_work(&ioc->fault_reset_work))
1175 + flush_workqueue(wq);
1176 + destroy_workqueue(wq);
1177 + }
1178 +}
1179 +
1180 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
1181 /**
1182 * _base_sas_ioc_info - verbose translation of the ioc status
1183 @@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
1184 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1185 return;
1186
1187 + /* each nexus loss loginfo */
1188 + if (log_info == 0x31170000)
1189 + return;
1190 +
1191 /* eat the loginfos associated with task aborts */
1192 if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
1193 0x31140000 || log_info == 0x31130000))
1194 @@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1195 }
1196 }
1197
1198 - pci_set_drvdata(pdev, ioc->shost);
1199 _base_mask_interrupts(ioc);
1200 r = _base_enable_msix(ioc);
1201 if (r)
1202 @@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1203 ioc->pci_irq = -1;
1204 pci_release_selected_regions(ioc->pdev, ioc->bars);
1205 pci_disable_device(pdev);
1206 - pci_set_drvdata(pdev, NULL);
1207 return r;
1208 }
1209
1210 @@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
1211 ioc->chip_phys = 0;
1212 pci_release_selected_regions(ioc->pdev, ioc->bars);
1213 pci_disable_device(pdev);
1214 - pci_set_drvdata(pdev, NULL);
1215 return;
1216 }
1217
1218 @@ -3205,7 +3264,6 @@ int
1219 mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
1220 {
1221 int r, i;
1222 - unsigned long flags;
1223
1224 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1225 __func__));
1226 @@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
1227 if (r)
1228 return r;
1229
1230 + pci_set_drvdata(ioc->pdev, ioc->shost);
1231 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
1232 if (r)
1233 goto out_free_resources;
1234 @@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
1235 if (r)
1236 goto out_free_resources;
1237
1238 - /* initialize fault polling */
1239 - INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
1240 - snprintf(ioc->fault_reset_work_q_name,
1241 - sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
1242 - ioc->fault_reset_work_q =
1243 - create_singlethread_workqueue(ioc->fault_reset_work_q_name);
1244 - if (!ioc->fault_reset_work_q) {
1245 - printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
1246 - ioc->name, __func__, __LINE__);
1247 - goto out_free_resources;
1248 - }
1249 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1250 - if (ioc->fault_reset_work_q)
1251 - queue_delayed_work(ioc->fault_reset_work_q,
1252 - &ioc->fault_reset_work,
1253 - msecs_to_jiffies(FAULT_POLLING_INTERVAL));
1254 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1255 + mpt2sas_base_start_watchdog(ioc);
1256 return 0;
1257
1258 out_free_resources:
1259 @@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
1260 ioc->remove_host = 1;
1261 mpt2sas_base_free_resources(ioc);
1262 _base_release_memory_pools(ioc);
1263 + pci_set_drvdata(ioc->pdev, NULL);
1264 kfree(ioc->tm_cmds.reply);
1265 kfree(ioc->transport_cmds.reply);
1266 kfree(ioc->config_cmds.reply);
1267 @@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
1268 void
1269 mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
1270 {
1271 - unsigned long flags;
1272 - struct workqueue_struct *wq;
1273
1274 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1275 __func__));
1276
1277 - spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
1278 - wq = ioc->fault_reset_work_q;
1279 - ioc->fault_reset_work_q = NULL;
1280 - spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
1281 - if (!cancel_delayed_work(&ioc->fault_reset_work))
1282 - flush_workqueue(wq);
1283 - destroy_workqueue(wq);
1284 -
1285 + mpt2sas_base_stop_watchdog(ioc);
1286 mpt2sas_base_free_resources(ioc);
1287 _base_release_memory_pools(ioc);
1288 + pci_set_drvdata(ioc->pdev, NULL);
1289 kfree(ioc->pfacts);
1290 kfree(ioc->ctl_cmds.reply);
1291 kfree(ioc->base_cmds.reply);
1292 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
1293 index 36b1d10..1dd7c9a 100644
1294 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h
1295 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
1296 @@ -672,6 +672,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
1297
1298 /* base shared API */
1299 extern struct list_head mpt2sas_ioc_list;
1300 +void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
1301 +void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
1302
1303 int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
1304 void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
1305 diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
1306 index 58cfb97..6ddee16 100644
1307 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c
1308 +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
1309 @@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
1310 Mpi2ConfigRequest_t *config_request;
1311 int r;
1312 u8 retry_count;
1313 - u8 issue_reset;
1314 + u8 issue_host_reset = 0;
1315 u16 wait_state_count;
1316
1317 + mutex_lock(&ioc->config_cmds.mutex);
1318 if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
1319 printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
1320 ioc->name, __func__);
1321 + mutex_unlock(&ioc->config_cmds.mutex);
1322 return -EAGAIN;
1323 }
1324 retry_count = 0;
1325
1326 retry_config:
1327 + if (retry_count) {
1328 + if (retry_count > 2) /* attempt only 2 retries */
1329 + return -EFAULT;
1330 + printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
1331 + ioc->name, __func__, retry_count);
1332 + }
1333 wait_state_count = 0;
1334 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1335 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1336 @@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
1337 printk(MPT2SAS_ERR_FMT
1338 "%s: failed due to ioc not operational\n",
1339 ioc->name, __func__);
1340 - ioc->config_cmds.status = MPT2_CMD_NOT_USED;
1341 - return -EFAULT;
1342 + r = -EFAULT;
1343 + goto out;
1344 }
1345 ssleep(1);
1346 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
1347 @@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
1348 if (!smid) {
1349 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
1350 ioc->name, __func__);
1351 - ioc->config_cmds.status = MPT2_CMD_NOT_USED;
1352 - return -EAGAIN;
1353 + r = -EAGAIN;
1354 + goto out;
1355 }
1356
1357 r = 0;
1358 @@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
1359 ioc->name, __func__);
1360 _debug_dump_mf(mpi_request,
1361 sizeof(Mpi2ConfigRequest_t)/4);
1362 - if (!(ioc->config_cmds.status & MPT2_CMD_RESET))
1363 - issue_reset = 1;
1364 - goto issue_host_reset;
1365 + retry_count++;
1366 + if (ioc->config_cmds.smid == smid)
1367 + mpt2sas_base_free_smid(ioc, smid);
1368 + if ((ioc->shost_recovery) ||
1369 + (ioc->config_cmds.status & MPT2_CMD_RESET))
1370 + goto retry_config;
1371 + issue_host_reset = 1;
1372 + r = -EFAULT;
1373 + goto out;
1374 }
1375 if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
1376 memcpy(mpi_reply, ioc->config_cmds.reply,
1377 @@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
1378 if (retry_count)
1379 printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
1380 ioc->name, __func__);
1381 +out:
1382 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
1383 - return r;
1384 -
1385 - issue_host_reset:
1386 - if (issue_reset)
1387 + mutex_unlock(&ioc->config_cmds.mutex);
1388 + if (issue_host_reset)
1389 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
1390 FORCE_BIG_HAMMER);
1391 - ioc->config_cmds.status = MPT2_CMD_NOT_USED;
1392 - if (!retry_count) {
1393 - printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
1394 - ioc->name, __func__);
1395 - retry_count++;
1396 - goto retry_config;
1397 - }
1398 - return -EFAULT;
1399 + return r;
1400 }
1401
1402 /**
1403 @@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
1404 int r;
1405 struct config_request mem;
1406
1407 - mutex_lock(&ioc->config_cmds.mutex);
1408 memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
1409 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1410 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1411 @@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
1412 _config_free_config_dma_memory(ioc, &mem);
1413
1414 out:
1415 - mutex_unlock(&ioc->config_cmds.mutex);
1416 return r;
1417 }
1418
1419 @@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
1420 int r;
1421 struct config_request mem;
1422
1423 - mutex_lock(&ioc->config_cmds.mutex);
1424 memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
1425 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1426 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1427 @@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
1428 _config_free_config_dma_memory(ioc, &mem);
1429
1430 out:
1431 - mutex_unlock(&ioc->config_cmds.mutex);
1432 return r;
1433 }
1434
1435 @@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1436 int r;
1437 struct config_request mem;
1438
1439 - mutex_lock(&ioc->config_cmds.mutex);
1440 memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
1441 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1442 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1443 @@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1444 _config_free_config_dma_memory(ioc, &mem);
1445
1446 out:
1447 - mutex_unlock(&ioc->config_cmds.mutex);
1448 return r;
1449 }
1450
1451 @@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
1452 int r;
1453 struct config_request mem;
1454
1455 - mutex_lock(&ioc->config_cmds.mutex);
1456 memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
1457 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1458 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1459 @@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
1460 _config_free_config_dma_memory(ioc, &mem);
1461
1462 out:
1463 - mutex_unlock(&ioc->config_cmds.mutex);
1464 return r;
1465 }
1466
1467 @@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
1468 int r;
1469 struct config_request mem;
1470
1471 - mutex_lock(&ioc->config_cmds.mutex);
1472 memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
1473 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1474 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1475 @@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
1476 _config_free_config_dma_memory(ioc, &mem);
1477
1478 out:
1479 - mutex_unlock(&ioc->config_cmds.mutex);
1480 return r;
1481 }
1482
1483 @@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
1484 int r;
1485 struct config_request mem;
1486
1487 - mutex_lock(&ioc->config_cmds.mutex);
1488 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1489 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1490 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1491 @@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
1492 _config_free_config_dma_memory(ioc, &mem);
1493
1494 out:
1495 - mutex_unlock(&ioc->config_cmds.mutex);
1496 return r;
1497 }
1498
1499 @@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
1500 int r;
1501 struct config_request mem;
1502
1503 - mutex_lock(&ioc->config_cmds.mutex);
1504 memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
1505 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1506 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1507 @@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
1508 _config_free_config_dma_memory(ioc, &mem);
1509
1510 out:
1511 - mutex_unlock(&ioc->config_cmds.mutex);
1512 return r;
1513 }
1514
1515 @@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1516 int r;
1517 struct config_request mem;
1518
1519 - mutex_lock(&ioc->config_cmds.mutex);
1520 memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
1521 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1522 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1523 @@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1524 _config_free_config_dma_memory(ioc, &mem);
1525
1526 out:
1527 - mutex_unlock(&ioc->config_cmds.mutex);
1528 return r;
1529 }
1530
1531 @@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1532 int r;
1533 struct config_request mem;
1534
1535 - mutex_lock(&ioc->config_cmds.mutex);
1536 memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
1537 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1538 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1539 @@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1540 _config_free_config_dma_memory(ioc, &mem);
1541
1542 out:
1543 - mutex_unlock(&ioc->config_cmds.mutex);
1544 return r;
1545 }
1546
1547 @@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
1548 Mpi2ConfigReply_t mpi_reply;
1549 Mpi2SasIOUnitPage0_t config_page;
1550
1551 - mutex_lock(&ioc->config_cmds.mutex);
1552 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1553 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1554 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
1555 @@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
1556 _config_free_config_dma_memory(ioc, &mem);
1557
1558 out:
1559 - mutex_unlock(&ioc->config_cmds.mutex);
1560 return r;
1561 }
1562
1563 @@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1564 Mpi2ConfigRequest_t mpi_request;
1565 int r;
1566 struct config_request mem;
1567 -
1568 - mutex_lock(&ioc->config_cmds.mutex);
1569 memset(config_page, 0, sz);
1570 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1571 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1572 @@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1573 _config_free_config_dma_memory(ioc, &mem);
1574
1575 out:
1576 - mutex_unlock(&ioc->config_cmds.mutex);
1577 return r;
1578 }
1579
1580 @@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1581 int r;
1582 struct config_request mem;
1583
1584 - mutex_lock(&ioc->config_cmds.mutex);
1585 memset(config_page, 0, sz);
1586 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1587 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1588 @@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1589 _config_free_config_dma_memory(ioc, &mem);
1590
1591 out:
1592 - mutex_unlock(&ioc->config_cmds.mutex);
1593 return r;
1594 }
1595
1596 @@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1597 int r;
1598 struct config_request mem;
1599
1600 - mutex_lock(&ioc->config_cmds.mutex);
1601 memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
1602 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1603 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1604 @@ -1206,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1605 _config_free_config_dma_memory(ioc, &mem);
1606
1607 out:
1608 - mutex_unlock(&ioc->config_cmds.mutex);
1609 return r;
1610 }
1611
1612 @@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1613 int r;
1614 struct config_request mem;
1615
1616 - mutex_lock(&ioc->config_cmds.mutex);
1617 memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
1618 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1619 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1620 @@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1621 _config_free_config_dma_memory(ioc, &mem);
1622
1623 out:
1624 - mutex_unlock(&ioc->config_cmds.mutex);
1625 return r;
1626 }
1627
1628 @@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1629 int r;
1630 struct config_request mem;
1631
1632 - mutex_lock(&ioc->config_cmds.mutex);
1633 memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
1634 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1635 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1636 @@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1637 _config_free_config_dma_memory(ioc, &mem);
1638
1639 out:
1640 - mutex_unlock(&ioc->config_cmds.mutex);
1641 return r;
1642 }
1643
1644 @@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1645 int r;
1646 struct config_request mem;
1647
1648 - mutex_lock(&ioc->config_cmds.mutex);
1649 memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
1650 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1651 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1652 @@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1653 _config_free_config_dma_memory(ioc, &mem);
1654
1655 out:
1656 - mutex_unlock(&ioc->config_cmds.mutex);
1657 return r;
1658 }
1659
1660 @@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1661 int r;
1662 struct config_request mem;
1663
1664 - mutex_lock(&ioc->config_cmds.mutex);
1665 memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
1666 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1667 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1668 @@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1669 _config_free_config_dma_memory(ioc, &mem);
1670
1671 out:
1672 - mutex_unlock(&ioc->config_cmds.mutex);
1673 return r;
1674 }
1675
1676 @@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1677 int r;
1678 struct config_request mem;
1679
1680 - mutex_lock(&ioc->config_cmds.mutex);
1681 memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
1682 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1683 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1684 @@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1685 _config_free_config_dma_memory(ioc, &mem);
1686
1687 out:
1688 - mutex_unlock(&ioc->config_cmds.mutex);
1689 return r;
1690 }
1691
1692 @@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1693 struct config_request mem;
1694 u16 ioc_status;
1695
1696 - mutex_lock(&ioc->config_cmds.mutex);
1697 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1698 *num_pds = 0;
1699 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1700 @@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1701 _config_free_config_dma_memory(ioc, &mem);
1702
1703 out:
1704 - mutex_unlock(&ioc->config_cmds.mutex);
1705 return r;
1706 }
1707
1708 @@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1709 int r;
1710 struct config_request mem;
1711
1712 - mutex_lock(&ioc->config_cmds.mutex);
1713 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1714 memset(config_page, 0, sz);
1715 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1716 @@ -1687,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1717 _config_free_config_dma_memory(ioc, &mem);
1718
1719 out:
1720 - mutex_unlock(&ioc->config_cmds.mutex);
1721 return r;
1722 }
1723
1724 @@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1725 int r;
1726 struct config_request mem;
1727
1728 - mutex_lock(&ioc->config_cmds.mutex);
1729 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1730 memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
1731 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1732 @@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1733 _config_free_config_dma_memory(ioc, &mem);
1734
1735 out:
1736 - mutex_unlock(&ioc->config_cmds.mutex);
1737 return r;
1738 }
1739
1740 @@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1741 struct config_request mem;
1742 u16 ioc_status;
1743
1744 - mutex_lock(&ioc->config_cmds.mutex);
1745 *volume_handle = 0;
1746 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1747 mpi_request.Function = MPI2_FUNCTION_CONFIG;
1748 @@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1749 _config_free_config_dma_memory(ioc, &mem);
1750
1751 out:
1752 - mutex_unlock(&ioc->config_cmds.mutex);
1753 return r;
1754 }
1755
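
The hunks above all drop the same per-accessor mutex_lock()/mutex_unlock() pair on ioc->config_cmds.mutex. The serialization does not vanish; it is presumably centralized in the shared request path (the _config_request() changes fall earlier in this patch, outside this excerpt). A minimal sketch of the resulting shape, with _config_issue_and_wait() as a hypothetical stand-in for the firmware round-trip:

        /* Sketch only: one choke point instead of a lock in every
         * mpt2sas_config_get_*() wrapper.  The helper name below is
         * hypothetical; only the locking shape is the point. */
        static int _config_request(struct MPT2SAS_ADAPTER *ioc,
                        Mpi2ConfigRequest_t *mpi_request,
                        Mpi2ConfigReply_t *mpi_reply)
        {
                int r;

                mutex_lock(&ioc->config_cmds.mutex);
                r = _config_issue_and_wait(ioc, mpi_request, mpi_reply);
                mutex_unlock(&ioc->config_cmds.mutex);
                return r;
        }
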
1756 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1757 index e3a7967..7dacc68 100644
1758 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1759 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1760 @@ -2560,6 +2560,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
1761 char *desc_ioc_state = NULL;
1762 char *desc_scsi_status = NULL;
1763 char *desc_scsi_state = ioc->tmp_string;
1764 + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
1765 +
1766 + if (log_info == 0x31170000)
1767 + return;
1768
1769 switch (ioc_status) {
1770 case MPI2_IOCSTATUS_SUCCESS:
1771 @@ -3205,7 +3209,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1772 __le64 sas_address;
1773 int i;
1774 unsigned long flags;
1775 - struct _sas_port *mpt2sas_port;
1776 + struct _sas_port *mpt2sas_port = NULL;
1777 int rc = 0;
1778
1779 if (!handle)
1780 @@ -3297,12 +3301,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1781 &expander_pg1, i, handle))) {
1782 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1783 ioc->name, __FILE__, __LINE__, __func__);
1784 - continue;
1785 + rc = -1;
1786 + goto out_fail;
1787 }
1788 sas_expander->phy[i].handle = handle;
1789 sas_expander->phy[i].phy_id = i;
1790 - mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i],
1791 - expander_pg1, sas_expander->parent_dev);
1792 +
1793 + if ((mpt2sas_transport_add_expander_phy(ioc,
1794 + &sas_expander->phy[i], expander_pg1,
1795 + sas_expander->parent_dev))) {
1796 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1797 + ioc->name, __FILE__, __LINE__, __func__);
1798 + rc = -1;
1799 + goto out_fail;
1800 + }
1801 }
1802
1803 if (sas_expander->enclosure_handle) {
1804 @@ -3319,8 +3331,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
1805
1806 out_fail:
1807
1808 - if (sas_expander)
1809 - kfree(sas_expander->phy);
1810 + if (mpt2sas_port)
1811 + mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
1812 + sas_expander->parent_handle);
1813 kfree(sas_expander);
1814 return rc;
1815 }
1816 @@ -3442,12 +3455,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
1817 sas_device->hidden_raid_component = is_pd;
1818
1819 /* get enclosure_logical_id */
1820 - if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0,
1821 - MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
1822 - sas_device->enclosure_handle))) {
1823 + if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
1824 + ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
1825 + sas_device->enclosure_handle)))
1826 sas_device->enclosure_logical_id =
1827 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
1828 - }
1829
1830 /* get device name */
1831 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
1832 @@ -4029,12 +4041,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
1833 u16 handle = le16_to_cpu(element->VolDevHandle);
1834 int rc;
1835
1836 -#if 0 /* RAID_HACKS */
1837 - if (le32_to_cpu(event_data->Flags) &
1838 - MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
1839 - return;
1840 -#endif
1841 -
1842 mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
1843 if (!wwid) {
1844 printk(MPT2SAS_ERR_FMT
1845 @@ -4089,12 +4095,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
1846 unsigned long flags;
1847 struct MPT2SAS_TARGET *sas_target_priv_data;
1848
1849 -#if 0 /* RAID_HACKS */
1850 - if (le32_to_cpu(event_data->Flags) &
1851 - MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
1852 - return;
1853 -#endif
1854 -
1855 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1856 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
1857 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1858 @@ -4207,14 +4207,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
1859 struct _sas_device *sas_device;
1860 unsigned long flags;
1861 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
1862 + Mpi2ConfigReply_t mpi_reply;
1863 + Mpi2SasDevicePage0_t sas_device_pg0;
1864 + u32 ioc_status;
1865
1866 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1867 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
1868 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1869 - if (sas_device)
1870 + if (sas_device) {
1871 sas_device->hidden_raid_component = 1;
1872 - else
1873 - _scsih_add_device(ioc, handle, 0, 1);
1874 + return;
1875 + }
1876 +
1877 + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1878 + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1879 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1880 + ioc->name, __FILE__, __LINE__, __func__);
1881 + return;
1882 + }
1883 +
1884 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1885 + MPI2_IOCSTATUS_MASK;
1886 + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1887 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1888 + ioc->name, __FILE__, __LINE__, __func__);
1889 + return;
1890 + }
1891 +
1892 + _scsih_link_change(ioc,
1893 + le16_to_cpu(sas_device_pg0.ParentDevHandle),
1894 + handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
1895 +
1896 + _scsih_add_device(ioc, handle, 0, 1);
1897 }
1898
1899 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
1900 @@ -4314,12 +4338,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
1901 {
1902 Mpi2EventIrConfigElement_t *element;
1903 int i;
1904 + u8 foreign_config;
1905
1906 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
1907 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
1908 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
1909
1910 #endif
1911 + foreign_config = (le32_to_cpu(event_data->Flags) &
1912 + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
1913
1914 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
1915 for (i = 0; i < event_data->NumElements; i++, element++) {
1916 @@ -4327,11 +4354,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
1917 switch (element->ReasonCode) {
1918 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
1919 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
1920 - _scsih_sas_volume_add(ioc, element);
1921 + if (!foreign_config)
1922 + _scsih_sas_volume_add(ioc, element);
1923 break;
1924 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
1925 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
1926 - _scsih_sas_volume_delete(ioc, element);
1927 + if (!foreign_config)
1928 + _scsih_sas_volume_delete(ioc, element);
1929 break;
1930 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
1931 _scsih_sas_pd_hide(ioc, element);
1932 @@ -4450,6 +4479,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
1933 u32 state;
1934 struct _sas_device *sas_device;
1935 unsigned long flags;
1936 + Mpi2ConfigReply_t mpi_reply;
1937 + Mpi2SasDevicePage0_t sas_device_pg0;
1938 + u32 ioc_status;
1939
1940 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
1941 return;
1942 @@ -4466,22 +4498,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
1943 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1944
1945 switch (state) {
1946 -#if 0
1947 - case MPI2_RAID_PD_STATE_OFFLINE:
1948 - if (sas_device)
1949 - _scsih_remove_device(ioc, handle);
1950 - break;
1951 -#endif
1952 case MPI2_RAID_PD_STATE_ONLINE:
1953 case MPI2_RAID_PD_STATE_DEGRADED:
1954 case MPI2_RAID_PD_STATE_REBUILDING:
1955 case MPI2_RAID_PD_STATE_OPTIMAL:
1956 - if (sas_device)
1957 + if (sas_device) {
1958 sas_device->hidden_raid_component = 1;
1959 - else
1960 - _scsih_add_device(ioc, handle, 0, 1);
1961 + return;
1962 + }
1963 +
1964 + if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
1965 + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
1966 + handle))) {
1967 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1968 + ioc->name, __FILE__, __LINE__, __func__);
1969 + return;
1970 + }
1971 +
1972 + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1973 + MPI2_IOCSTATUS_MASK;
1974 + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1975 + printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
1976 + ioc->name, __FILE__, __LINE__, __func__);
1977 + return;
1978 + }
1979 +
1980 + _scsih_link_change(ioc,
1981 + le16_to_cpu(sas_device_pg0.ParentDevHandle),
1982 + handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
1983 +
1984 + _scsih_add_device(ioc, handle, 0, 1);
1985 +
1986 break;
1987
1988 + case MPI2_RAID_PD_STATE_OFFLINE:
1989 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
1990 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
1991 case MPI2_RAID_PD_STATE_HOT_SPARE:
1992 @@ -5549,6 +5599,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
1993 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1994 u32 device_state;
1995
1996 + mpt2sas_base_stop_watchdog(ioc);
1997 flush_scheduled_work();
1998 scsi_block_requests(shost);
1999 device_state = pci_choose_state(pdev, state);
2000 @@ -5591,6 +5642,7 @@ scsih_resume(struct pci_dev *pdev)
2001
2002 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
2003 scsi_unblock_requests(shost);
2004 + mpt2sas_base_start_watchdog(ioc);
2005 return 0;
2006 }
2007 #endif /* CONFIG_PM */
2008 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
2009 index e7d4479..798f362 100644
2010 --- a/drivers/usb/core/hcd.h
2011 +++ b/drivers/usb/core/hcd.h
2012 @@ -224,6 +224,10 @@ struct hc_driver {
2013 void (*relinquish_port)(struct usb_hcd *, int);
2014 /* has a port been handed over to a companion? */
2015 int (*port_handed_over)(struct usb_hcd *, int);
2016 +
2017 + /* CLEAR_TT_BUFFER completion callback */
2018 + void (*clear_tt_buffer_complete)(struct usb_hcd *,
2019 + struct usb_host_endpoint *);
2020 };
2021
2022 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
2023 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2024 index be86ae3..2fc5b57 100644
2025 --- a/drivers/usb/core/hub.c
2026 +++ b/drivers/usb/core/hub.c
2027 @@ -448,10 +448,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
2028 * talking to TTs must queue control transfers (not just bulk and iso), so
2029 * both can talk to the same hub concurrently.
2030 */
2031 -static void hub_tt_kevent (struct work_struct *work)
2032 +static void hub_tt_work(struct work_struct *work)
2033 {
2034 struct usb_hub *hub =
2035 - container_of(work, struct usb_hub, tt.kevent);
2036 + container_of(work, struct usb_hub, tt.clear_work);
2037 unsigned long flags;
2038 int limit = 100;
2039
2040 @@ -460,6 +460,7 @@ static void hub_tt_kevent (struct work_struct *work)
2041 struct list_head *temp;
2042 struct usb_tt_clear *clear;
2043 struct usb_device *hdev = hub->hdev;
2044 + const struct hc_driver *drv;
2045 int status;
2046
2047 temp = hub->tt.clear_list.next;
2048 @@ -469,21 +470,25 @@ static void hub_tt_kevent (struct work_struct *work)
2049 /* drop lock so HCD can concurrently report other TT errors */
2050 spin_unlock_irqrestore (&hub->tt.lock, flags);
2051 status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
2052 - spin_lock_irqsave (&hub->tt.lock, flags);
2053 -
2054 if (status)
2055 dev_err (&hdev->dev,
2056 "clear tt %d (%04x) error %d\n",
2057 clear->tt, clear->devinfo, status);
2058 +
2059 + /* Tell the HCD, even if the operation failed */
2060 + drv = clear->hcd->driver;
2061 + if (drv->clear_tt_buffer_complete)
2062 + (drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
2063 +
2064 kfree(clear);
2065 + spin_lock_irqsave(&hub->tt.lock, flags);
2066 }
2067 spin_unlock_irqrestore (&hub->tt.lock, flags);
2068 }
2069
2070 /**
2071 - * usb_hub_tt_clear_buffer - clear control/bulk TT state in high speed hub
2072 - * @udev: the device whose split transaction failed
2073 - * @pipe: identifies the endpoint of the failed transaction
2074 + * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
2075 + * @urb: an URB associated with the failed or incomplete split transaction
2076 *
2077 * High speed HCDs use this to tell the hub driver that some split control or
2078 * bulk transaction failed in a way that requires clearing internal state of
2079 @@ -493,8 +498,10 @@ static void hub_tt_kevent (struct work_struct *work)
2080 * It may not be possible for that hub to handle additional full (or low)
2081 * speed transactions until that state is fully cleared out.
2082 */
2083 -void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
2084 +int usb_hub_clear_tt_buffer(struct urb *urb)
2085 {
2086 + struct usb_device *udev = urb->dev;
2087 + int pipe = urb->pipe;
2088 struct usb_tt *tt = udev->tt;
2089 unsigned long flags;
2090 struct usb_tt_clear *clear;
2091 @@ -506,7 +513,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
2092 if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
2093 dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
2094 /* FIXME recover somehow ... RESET_TT? */
2095 - return;
2096 + return -ENOMEM;
2097 }
2098
2099 /* info that CLEAR_TT_BUFFER needs */
2100 @@ -518,14 +525,19 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
2101 : (USB_ENDPOINT_XFER_BULK << 11);
2102 if (usb_pipein (pipe))
2103 clear->devinfo |= 1 << 15;
2104 -
2105 +
2106 + /* info for completion callback */
2107 + clear->hcd = bus_to_hcd(udev->bus);
2108 + clear->ep = urb->ep;
2109 +
2110 /* tell keventd to clear state for this TT */
2111 spin_lock_irqsave (&tt->lock, flags);
2112 list_add_tail (&clear->clear_list, &tt->clear_list);
2113 - schedule_work (&tt->kevent);
2114 + schedule_work(&tt->clear_work);
2115 spin_unlock_irqrestore (&tt->lock, flags);
2116 + return 0;
2117 }
2118 -EXPORT_SYMBOL_GPL(usb_hub_tt_clear_buffer);
2119 +EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
2120
2121 /* If do_delay is false, return the number of milliseconds the caller
2122 * needs to delay.
2123 @@ -816,7 +828,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
2124 if (hub->has_indicators)
2125 cancel_delayed_work_sync(&hub->leds);
2126 if (hub->tt.hub)
2127 - cancel_work_sync(&hub->tt.kevent);
2128 + cancel_work_sync(&hub->tt.clear_work);
2129 }
2130
2131 /* caller has locked the hub device */
2132 @@ -933,7 +945,7 @@ static int hub_configure(struct usb_hub *hub,
2133
2134 spin_lock_init (&hub->tt.lock);
2135 INIT_LIST_HEAD (&hub->tt.clear_list);
2136 - INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
2137 + INIT_WORK(&hub->tt.clear_work, hub_tt_work);
2138 switch (hdev->descriptor.bDeviceProtocol) {
2139 case 0:
2140 break;
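
Taken together, the hub.c hunks replace the fire-and-forget usb_hub_tt_clear_buffer() with usb_hub_clear_tt_buffer(), which reports whether the CLEAR_TT_BUFFER request was queued and later notifies the HCD through the new clear_tt_buffer_complete hook (invoked even when the control transfer failed). A sketch of the consumer side, using hypothetical my_* names; the EHCI hunks below are the real in-tree user:

        /* Hypothetical HCD wiring for the reworked TT-clear API. */
        static void my_clear_tt_buffer_complete(struct usb_hcd *hcd,
                        struct usb_host_endpoint *ep)
        {
                struct my_ep_priv *priv = ep->hcpriv; /* driver-private state */

                priv->clearing_tt = 0;          /* hub finished CLEAR_TT_BUFFER */
                my_restart_endpoint(hcd, ep);   /* splits may be queued again */
        }

        static void my_handle_failed_csplit(struct urb *urb,
                        struct my_ep_priv *priv)
        {
                /* 0 means the request is queued and the callback above will
                 * fire; -ENOMEM means no usb_tt_clear could be allocated. */
                if (usb_hub_clear_tt_buffer(urb) == 0)
                        priv->clearing_tt = 1;
        }
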
2141 diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
2142 index 2a116ce..528c411 100644
2143 --- a/drivers/usb/core/hub.h
2144 +++ b/drivers/usb/core/hub.h
2145 @@ -185,16 +185,18 @@ struct usb_tt {
2146 /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
2147 spinlock_t lock;
2148 struct list_head clear_list; /* of usb_tt_clear */
2149 - struct work_struct kevent;
2150 + struct work_struct clear_work;
2151 };
2152
2153 struct usb_tt_clear {
2154 struct list_head clear_list;
2155 unsigned tt;
2156 u16 devinfo;
2157 + struct usb_hcd *hcd;
2158 + struct usb_host_endpoint *ep;
2159 };
2160
2161 -extern void usb_hub_tt_clear_buffer(struct usb_device *dev, int pipe);
2162 +extern int usb_hub_clear_tt_buffer(struct urb *urb);
2163 extern void usb_ep0_reinit(struct usb_device *);
2164
2165 #endif /* __LINUX_HUB_H */
2166 diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
2167 index bf69f47..5c25b1a 100644
2168 --- a/drivers/usb/host/ehci-au1xxx.c
2169 +++ b/drivers/usb/host/ehci-au1xxx.c
2170 @@ -112,6 +112,8 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
2171 .bus_resume = ehci_bus_resume,
2172 .relinquish_port = ehci_relinquish_port,
2173 .port_handed_over = ehci_port_handed_over,
2174 +
2175 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2176 };
2177
2178 static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
2179 diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
2180 index 01c3da3..7fb1ef0 100644
2181 --- a/drivers/usb/host/ehci-fsl.c
2182 +++ b/drivers/usb/host/ehci-fsl.c
2183 @@ -324,6 +324,8 @@ static const struct hc_driver ehci_fsl_hc_driver = {
2184 .bus_resume = ehci_bus_resume,
2185 .relinquish_port = ehci_relinquish_port,
2186 .port_handed_over = ehci_port_handed_over,
2187 +
2188 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2189 };
2190
2191 static int ehci_fsl_drv_probe(struct platform_device *pdev)
2192 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2193 index c637207..d75b8cf 100644
2194 --- a/drivers/usb/host/ehci-hcd.c
2195 +++ b/drivers/usb/host/ehci-hcd.c
2196 @@ -903,7 +903,8 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2197 /* already started */
2198 break;
2199 case QH_STATE_IDLE:
2200 - WARN_ON(1);
2201 + /* QH might be waiting for a Clear-TT-Buffer */
2202 + qh_completions(ehci, qh);
2203 break;
2204 }
2205 break;
2206 @@ -1003,6 +1004,8 @@ idle_timeout:
2207 schedule_timeout_uninterruptible(1);
2208 goto rescan;
2209 case QH_STATE_IDLE: /* fully unlinked */
2210 + if (qh->clearing_tt)
2211 + goto idle_timeout;
2212 if (list_empty (&qh->qtd_list)) {
2213 qh_put (qh);
2214 break;
2215 diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
2216 index 9c32063..8573b03 100644
2217 --- a/drivers/usb/host/ehci-ixp4xx.c
2218 +++ b/drivers/usb/host/ehci-ixp4xx.c
2219 @@ -60,6 +60,8 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
2220 #endif
2221 .relinquish_port = ehci_relinquish_port,
2222 .port_handed_over = ehci_port_handed_over,
2223 +
2224 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2225 };
2226
2227 static int ixp4xx_ehci_probe(struct platform_device *pdev)
2228 diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
2229 index 9d48790..64ab30a 100644
2230 --- a/drivers/usb/host/ehci-orion.c
2231 +++ b/drivers/usb/host/ehci-orion.c
2232 @@ -164,6 +164,8 @@ static const struct hc_driver ehci_orion_hc_driver = {
2233 .bus_resume = ehci_bus_resume,
2234 .relinquish_port = ehci_relinquish_port,
2235 .port_handed_over = ehci_port_handed_over,
2236 +
2237 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2238 };
2239
2240 static void __init
2241 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
2242 index 5aa8bce..a26b7f7 100644
2243 --- a/drivers/usb/host/ehci-pci.c
2244 +++ b/drivers/usb/host/ehci-pci.c
2245 @@ -408,6 +408,8 @@ static const struct hc_driver ehci_pci_hc_driver = {
2246 .bus_resume = ehci_bus_resume,
2247 .relinquish_port = ehci_relinquish_port,
2248 .port_handed_over = ehci_port_handed_over,
2249 +
2250 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2251 };
2252
2253 /*-------------------------------------------------------------------------*/
2254 diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
2255 index ef732b7..8b6556e 100644
2256 --- a/drivers/usb/host/ehci-ppc-of.c
2257 +++ b/drivers/usb/host/ehci-ppc-of.c
2258 @@ -78,6 +78,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
2259 #endif
2260 .relinquish_port = ehci_relinquish_port,
2261 .port_handed_over = ehci_port_handed_over,
2262 +
2263 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2264 };
2265
2266
2267 diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
2268 index 1ba9f9a..efefc91 100644
2269 --- a/drivers/usb/host/ehci-ps3.c
2270 +++ b/drivers/usb/host/ehci-ps3.c
2271 @@ -74,6 +74,8 @@ static const struct hc_driver ps3_ehci_hc_driver = {
2272 #endif
2273 .relinquish_port = ehci_relinquish_port,
2274 .port_handed_over = ehci_port_handed_over,
2275 +
2276 + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
2277 };
2278
2279 static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
2280 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
2281 index 1976b1b..a39f2c6 100644
2282 --- a/drivers/usb/host/ehci-q.c
2283 +++ b/drivers/usb/host/ehci-q.c
2284 @@ -139,6 +139,55 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
2285
2286 /*-------------------------------------------------------------------------*/
2287
2288 +static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
2289 +
2290 +static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
2291 + struct usb_host_endpoint *ep)
2292 +{
2293 + struct ehci_hcd *ehci = hcd_to_ehci(hcd);
2294 + struct ehci_qh *qh = ep->hcpriv;
2295 + unsigned long flags;
2296 +
2297 + spin_lock_irqsave(&ehci->lock, flags);
2298 + qh->clearing_tt = 0;
2299 + if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
2300 + && HC_IS_RUNNING(hcd->state))
2301 + qh_link_async(ehci, qh);
2302 + spin_unlock_irqrestore(&ehci->lock, flags);
2303 +}
2304 +
2305 +static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
2306 + struct urb *urb, u32 token)
2307 +{
2308 +
2309 + /* If an async split transaction gets an error or is unlinked,
2310 + * the TT buffer may be left in an indeterminate state. We
2311 + * have to clear the TT buffer.
2312 + *
2313 + * Note: this routine is never called for Isochronous transfers.
2314 + */
2315 + if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
2316 +#ifdef DEBUG
2317 + struct usb_device *tt = urb->dev->tt->hub;
2318 + dev_dbg(&tt->dev,
2319 + "clear tt buffer port %d, a%d ep%d t%08x\n",
2320 + urb->dev->ttport, urb->dev->devnum,
2321 + usb_pipeendpoint(urb->pipe), token);
2322 +#endif /* DEBUG */
2323 + if (!ehci_is_TDI(ehci)
2324 + || urb->dev->tt->hub !=
2325 + ehci_to_hcd(ehci)->self.root_hub) {
2326 + if (usb_hub_clear_tt_buffer(urb) == 0)
2327 + qh->clearing_tt = 1;
2328 + } else {
2329 +
2330 + /* REVISIT ARC-derived cores don't clear the root
2331 + * hub TT buffer in this way...
2332 + */
2333 + }
2334 + }
2335 +}
2336 +
2337 static int qtd_copy_status (
2338 struct ehci_hcd *ehci,
2339 struct urb *urb,
2340 @@ -195,28 +244,6 @@ static int qtd_copy_status (
2341 usb_pipeendpoint (urb->pipe),
2342 usb_pipein (urb->pipe) ? "in" : "out",
2343 token, status);
2344 -
2345 - /* if async CSPLIT failed, try cleaning out the TT buffer */
2346 - if (status != -EPIPE
2347 - && urb->dev->tt
2348 - && !usb_pipeint(urb->pipe)
2349 - && ((token & QTD_STS_MMF) != 0
2350 - || QTD_CERR(token) == 0)
2351 - && (!ehci_is_TDI(ehci)
2352 - || urb->dev->tt->hub !=
2353 - ehci_to_hcd(ehci)->self.root_hub)) {
2354 -#ifdef DEBUG
2355 - struct usb_device *tt = urb->dev->tt->hub;
2356 - dev_dbg (&tt->dev,
2357 - "clear tt buffer port %d, a%d ep%d t%08x\n",
2358 - urb->dev->ttport, urb->dev->devnum,
2359 - usb_pipeendpoint (urb->pipe), token);
2360 -#endif /* DEBUG */
2361 - /* REVISIT ARC-derived cores don't clear the root
2362 - * hub TT buffer in this way...
2363 - */
2364 - usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
2365 - }
2366 }
2367
2368 return status;
2369 @@ -407,9 +434,16 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
2370 /* qh unlinked; token in overlay may be most current */
2371 if (state == QH_STATE_IDLE
2372 && cpu_to_hc32(ehci, qtd->qtd_dma)
2373 - == qh->hw_current)
2374 + == qh->hw_current) {
2375 token = hc32_to_cpu(ehci, qh->hw_token);
2376
2377 + /* An unlink may leave an incomplete
2378 + * async transaction in the TT buffer.
2379 + * We have to clear it.
2380 + */
2381 + ehci_clear_tt_buffer(ehci, qh, urb, token);
2382 + }
2383 +
2384 /* force halt for unlinked or blocked qh, so we'll
2385 * patch the qh later and so that completions can't
2386 * activate it while we "know" it's stopped.
2387 @@ -435,6 +469,13 @@ halt:
2388 && (qtd->hw_alt_next
2389 & EHCI_LIST_END(ehci)))
2390 last_status = -EINPROGRESS;
2391 +
2392 + /* As part of low/full-speed endpoint-halt processing
2393 + * we must clear the TT buffer (11.17.5).
2394 + */
2395 + if (unlikely(last_status != -EINPROGRESS &&
2396 + last_status != -EREMOTEIO))
2397 + ehci_clear_tt_buffer(ehci, qh, urb, token);
2398 }
2399
2400 /* if we're removing something not at the queue head,
2401 @@ -864,6 +905,10 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
2402 __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
2403 struct ehci_qh *head;
2404
2405 + /* Don't link a QH if there's a Clear-TT-Buffer pending */
2406 + if (unlikely(qh->clearing_tt))
2407 + return;
2408 +
2409 /* (re)start the async schedule? */
2410 head = ehci->async;
2411 timer_action_done (ehci, TIMER_ASYNC_OFF);
2412 @@ -893,6 +938,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
2413 head->qh_next.qh = qh;
2414 head->hw_next = dma;
2415
2416 + qh_get(qh);
2417 qh->xacterrs = QH_XACTERR_MAX;
2418 qh->qh_state = QH_STATE_LINKED;
2419 /* qtd completions reported later by interrupt */
2420 @@ -1033,7 +1079,7 @@ submit_async (
2421 * the HC and TT handle it when the TT has a buffer ready.
2422 */
2423 if (likely (qh->qh_state == QH_STATE_IDLE))
2424 - qh_link_async (ehci, qh_get (qh));
2425 + qh_link_async(ehci, qh);
2426 done:
2427 spin_unlock_irqrestore (&ehci->lock, flags);
2428 if (unlikely (qh == NULL))
2429 @@ -1068,8 +1114,6 @@ static void end_unlink_async (struct ehci_hcd *ehci)
2430 && HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
2431 qh_link_async (ehci, qh);
2432 else {
2433 - qh_put (qh); // refcount from async list
2434 -
2435 /* it's not free to turn the async schedule on/off; leave it
2436 * active but idle for a while once it empties.
2437 */
2438 @@ -1077,6 +1121,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
2439 && ehci->async->qh_next.qh == NULL)
2440 timer_action (ehci, TIMER_ASYNC_OFF);
2441 }
2442 + qh_put(qh); /* refcount from async list */
2443
2444 if (next) {
2445 ehci->reclaim = NULL;
2446 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2447 index 6cff195..ec5af22 100644
2448 --- a/drivers/usb/host/ehci.h
2449 +++ b/drivers/usb/host/ehci.h
2450 @@ -353,7 +353,9 @@ struct ehci_qh {
2451 unsigned short period; /* polling interval */
2452 unsigned short start; /* where polling starts */
2453 #define NO_FRAME ((unsigned short)~0) /* pick new start */
2454 +
2455 struct usb_device *dev; /* access to TT */
2456 + unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
2457 } __attribute__ ((aligned (32)));
2458
2459 /*-------------------------------------------------------------------------*/
2460 diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
2461 index 2493f05..d886bf9 100644
2462 --- a/drivers/video/xen-fbfront.c
2463 +++ b/drivers/video/xen-fbfront.c
2464 @@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
2465
2466 xenfb_init_shared_page(info, fb_info);
2467
2468 + ret = xenfb_connect_backend(dev, info);
2469 + if (ret < 0)
2470 + goto error;
2471 +
2472 ret = register_framebuffer(fb_info);
2473 if (ret) {
2474 fb_deferred_io_cleanup(fb_info);
2475 @@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
2476 }
2477 info->fb_info = fb_info;
2478
2479 - ret = xenfb_connect_backend(dev, info);
2480 - if (ret < 0)
2481 - goto error;
2482 -
2483 xenfb_make_preferred_console();
2484 return 0;
2485
2486 diff --git a/fs/buffer.c b/fs/buffer.c
2487 index 4910612..941c78b 100644
2488 --- a/fs/buffer.c
2489 +++ b/fs/buffer.c
2490 @@ -1165,8 +1165,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
2491
2492 if (!test_set_buffer_dirty(bh)) {
2493 struct page *page = bh->b_page;
2494 - if (!TestSetPageDirty(page))
2495 - __set_page_dirty(page, page_mapping(page), 0);
2496 + if (!TestSetPageDirty(page)) {
2497 + struct address_space *mapping = page_mapping(page);
2498 + if (mapping)
2499 + __set_page_dirty(page, mapping, 0);
2500 + }
2501 }
2502 }
2503
2504 diff --git a/fs/exec.c b/fs/exec.c
2505 index 895823d..42414e5 100644
2506 --- a/fs/exec.c
2507 +++ b/fs/exec.c
2508 @@ -677,8 +677,8 @@ exit:
2509 }
2510 EXPORT_SYMBOL(open_exec);
2511
2512 -int kernel_read(struct file *file, unsigned long offset,
2513 - char *addr, unsigned long count)
2514 +int kernel_read(struct file *file, loff_t offset,
2515 + char *addr, unsigned long count)
2516 {
2517 mm_segment_t old_fs;
2518 loff_t pos = offset;
2519 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
2520 index c1462d4..7ae4e4b 100644
2521 --- a/fs/hugetlbfs/inode.c
2522 +++ b/fs/hugetlbfs/inode.c
2523 @@ -934,26 +934,28 @@ static int can_do_hugetlb_shm(void)
2524 return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
2525 }
2526
2527 -struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
2528 +struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
2529 + struct user_struct **user)
2530 {
2531 int error = -ENOMEM;
2532 - int unlock_shm = 0;
2533 struct file *file;
2534 struct inode *inode;
2535 struct dentry *dentry, *root;
2536 struct qstr quick_string;
2537 - struct user_struct *user = current_user();
2538
2539 + *user = NULL;
2540 if (!hugetlbfs_vfsmount)
2541 return ERR_PTR(-ENOENT);
2542
2543 if (!can_do_hugetlb_shm()) {
2544 - if (user_shm_lock(size, user)) {
2545 - unlock_shm = 1;
2546 + *user = current_user();
2547 + if (user_shm_lock(size, *user)) {
2548 WARN_ONCE(1,
2549 "Using mlock ulimits for SHM_HUGETLB deprecated\n");
2550 - } else
2551 + } else {
2552 + *user = NULL;
2553 return ERR_PTR(-EPERM);
2554 + }
2555 }
2556
2557 root = hugetlbfs_vfsmount->mnt_root;
2558 @@ -994,8 +996,10 @@ out_inode:
2559 out_dentry:
2560 dput(dentry);
2561 out_shm_unlock:
2562 - if (unlock_shm)
2563 - user_shm_unlock(size, user);
2564 + if (*user) {
2565 + user_shm_unlock(size, *user);
2566 + *user = NULL;
2567 + }
2568 return ERR_PTR(error);
2569 }
2570
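
The new out-parameter moves user_shm_unlock() responsibility to the caller: hugetlb_file_setup() records the mlock-accounting user, leaves *user NULL on every failure path and whenever no accounting was taken, and whoever tears the segment down must drop the lock. A sketch of that contract with a hypothetical container struct (the caller side lives in ipc/shm.c upstream and is not part of this excerpt):

        struct my_seg {                         /* hypothetical container */
                struct file *file;
                struct user_struct *mlock_user;
        };

        static int my_seg_create(struct my_seg *seg, const char *name,
                                 size_t size, int acctflag)
        {
                seg->file = hugetlb_file_setup(name, size, acctflag,
                                               &seg->mlock_user);
                if (IS_ERR(seg->file))
                        return PTR_ERR(seg->file); /* mlock_user is NULL here */
                return 0;
        }

        static void my_seg_destroy(struct my_seg *seg, size_t size)
        {
                if (seg->mlock_user)            /* lock taken at setup time */
                        user_shm_unlock(size, seg->mlock_user);
                fput(seg->file);
        }
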
2571 diff --git a/fs/inode.c b/fs/inode.c
2572 index bca0c61..a9e8ef0 100644
2573 --- a/fs/inode.c
2574 +++ b/fs/inode.c
2575 @@ -118,12 +118,11 @@ static void wake_up_inode(struct inode *inode)
2576 * These are initializations that need to be done on every inode
2577 * allocation as the fields are not initialised by slab allocation.
2578 */
2579 -struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
2580 +int inode_init_always(struct super_block *sb, struct inode *inode)
2581 {
2582 static const struct address_space_operations empty_aops;
2583 static struct inode_operations empty_iops;
2584 static const struct file_operations empty_fops;
2585 -
2586 struct address_space *const mapping = &inode->i_data;
2587
2588 inode->i_sb = sb;
2589 @@ -150,7 +149,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
2590 inode->dirtied_when = 0;
2591
2592 if (security_inode_alloc(inode))
2593 - goto out_free_inode;
2594 + goto out;
2595
2596 /* allocate and initialize an i_integrity */
2597 if (ima_inode_alloc(inode))
2598 @@ -189,16 +188,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
2599 inode->i_private = NULL;
2600 inode->i_mapping = mapping;
2601
2602 - return inode;
2603 + return 0;
2604
2605 out_free_security:
2606 security_inode_free(inode);
2607 -out_free_inode:
2608 - if (inode->i_sb->s_op->destroy_inode)
2609 - inode->i_sb->s_op->destroy_inode(inode);
2610 - else
2611 - kmem_cache_free(inode_cachep, (inode));
2612 - return NULL;
2613 +out:
2614 + return -ENOMEM;
2615 }
2616 EXPORT_SYMBOL(inode_init_always);
2617
2618 @@ -211,23 +206,36 @@ static struct inode *alloc_inode(struct super_block *sb)
2619 else
2620 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
2621
2622 - if (inode)
2623 - return inode_init_always(sb, inode);
2624 - return NULL;
2625 + if (!inode)
2626 + return NULL;
2627 +
2628 + if (unlikely(inode_init_always(sb, inode))) {
2629 + if (inode->i_sb->s_op->destroy_inode)
2630 + inode->i_sb->s_op->destroy_inode(inode);
2631 + else
2632 + kmem_cache_free(inode_cachep, inode);
2633 + return NULL;
2634 + }
2635 +
2636 + return inode;
2637 }
2638
2639 -void destroy_inode(struct inode *inode)
2640 +void __destroy_inode(struct inode *inode)
2641 {
2642 BUG_ON(inode_has_buffers(inode));
2643 ima_inode_free(inode);
2644 security_inode_free(inode);
2645 +}
2646 +EXPORT_SYMBOL(__destroy_inode);
2647 +
2648 +void destroy_inode(struct inode *inode)
2649 +{
2650 + __destroy_inode(inode);
2651 if (inode->i_sb->s_op->destroy_inode)
2652 inode->i_sb->s_op->destroy_inode(inode);
2653 else
2654 kmem_cache_free(inode_cachep, (inode));
2655 }
2656 -EXPORT_SYMBOL(destroy_inode);
2657 -
2658
2659 /*
2660 * These are initializations that only need to be done
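
inode_init_always() now reports failure as 0/-ENOMEM instead of returning the inode, and teardown is split so __destroy_inode() can run without going through ->destroy_inode. That lets a filesystem embedding the VFS inode unwind a half-built inode by hand. A minimal sketch with hypothetical myfs_* names; the xfs_iget.c hunks later in this patch are the real consumer:

        static struct inode *myfs_new_inode(struct super_block *sb)
        {
                struct myfs_inode *mi = kmem_cache_alloc(myfs_zone, GFP_KERNEL);
                int error;

                if (!mi)
                        return ERR_PTR(-ENOMEM);
                if (inode_init_always(sb, &mi->vfs_inode)) {
                        /* nothing from inode_init_always() to undo on failure */
                        kmem_cache_free(myfs_zone, mi);
                        return ERR_PTR(-ENOMEM);
                }

                error = myfs_read_disk_inode(mi);  /* fs-specific, may fail */
                if (error) {
                        /* never hashed or exposed to the VFS: unwind by hand */
                        __destroy_inode(&mi->vfs_inode);
                        kmem_cache_free(myfs_zone, mi);
                        return ERR_PTR(error);
                }
                return &mi->vfs_inode;
        }
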
2661 diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
2662 index b2c52b3..044990a 100644
2663 --- a/fs/ocfs2/aops.c
2664 +++ b/fs/ocfs2/aops.c
2665 @@ -894,18 +894,17 @@ struct ocfs2_write_cluster_desc {
2666 */
2667 unsigned c_new;
2668 unsigned c_unwritten;
2669 + unsigned c_needs_zero;
2670 };
2671
2672 -static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
2673 -{
2674 - return d->c_new || d->c_unwritten;
2675 -}
2676 -
2677 struct ocfs2_write_ctxt {
2678 /* Logical cluster position / len of write */
2679 u32 w_cpos;
2680 u32 w_clen;
2681
2682 + /* First cluster allocated in a nonsparse extend */
2683 + u32 w_first_new_cpos;
2684 +
2685 struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
2686
2687 /*
2688 @@ -983,6 +982,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
2689 return -ENOMEM;
2690
2691 wc->w_cpos = pos >> osb->s_clustersize_bits;
2692 + wc->w_first_new_cpos = UINT_MAX;
2693 cend = (pos + len - 1) >> osb->s_clustersize_bits;
2694 wc->w_clen = cend - wc->w_cpos + 1;
2695 get_bh(di_bh);
2696 @@ -1217,20 +1217,18 @@ out:
2697 */
2698 static int ocfs2_write_cluster(struct address_space *mapping,
2699 u32 phys, unsigned int unwritten,
2700 + unsigned int should_zero,
2701 struct ocfs2_alloc_context *data_ac,
2702 struct ocfs2_alloc_context *meta_ac,
2703 struct ocfs2_write_ctxt *wc, u32 cpos,
2704 loff_t user_pos, unsigned user_len)
2705 {
2706 - int ret, i, new, should_zero = 0;
2707 + int ret, i, new;
2708 u64 v_blkno, p_blkno;
2709 struct inode *inode = mapping->host;
2710 struct ocfs2_extent_tree et;
2711
2712 new = phys == 0 ? 1 : 0;
2713 - if (new || unwritten)
2714 - should_zero = 1;
2715 -
2716 if (new) {
2717 u32 tmp_pos;
2718
2719 @@ -1341,7 +1339,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
2720 local_len = osb->s_clustersize - cluster_off;
2721
2722 ret = ocfs2_write_cluster(mapping, desc->c_phys,
2723 - desc->c_unwritten, data_ac, meta_ac,
2724 + desc->c_unwritten,
2725 + desc->c_needs_zero,
2726 + data_ac, meta_ac,
2727 wc, desc->c_cpos, pos, local_len);
2728 if (ret) {
2729 mlog_errno(ret);
2730 @@ -1391,14 +1391,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
2731 * newly allocated cluster.
2732 */
2733 desc = &wc->w_desc[0];
2734 - if (ocfs2_should_zero_cluster(desc))
2735 + if (desc->c_needs_zero)
2736 ocfs2_figure_cluster_boundaries(osb,
2737 desc->c_cpos,
2738 &wc->w_target_from,
2739 NULL);
2740
2741 desc = &wc->w_desc[wc->w_clen - 1];
2742 - if (ocfs2_should_zero_cluster(desc))
2743 + if (desc->c_needs_zero)
2744 ocfs2_figure_cluster_boundaries(osb,
2745 desc->c_cpos,
2746 NULL,
2747 @@ -1466,13 +1466,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
2748 phys++;
2749 }
2750
2751 + /*
2752 + * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
2753 + * file that got extended. w_first_new_cpos tells us
2754 + * where the newly allocated clusters are so we can
2755 + * zero them.
2756 + */
2757 + if (desc->c_cpos >= wc->w_first_new_cpos) {
2758 + BUG_ON(phys == 0);
2759 + desc->c_needs_zero = 1;
2760 + }
2761 +
2762 desc->c_phys = phys;
2763 if (phys == 0) {
2764 desc->c_new = 1;
2765 + desc->c_needs_zero = 1;
2766 *clusters_to_alloc = *clusters_to_alloc + 1;
2767 }
2768 - if (ext_flags & OCFS2_EXT_UNWRITTEN)
2769 +
2770 + if (ext_flags & OCFS2_EXT_UNWRITTEN) {
2771 desc->c_unwritten = 1;
2772 + desc->c_needs_zero = 1;
2773 + }
2774
2775 num_clusters--;
2776 }
2777 @@ -1632,10 +1647,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
2778 if (newsize <= i_size_read(inode))
2779 return 0;
2780
2781 - ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
2782 + ret = ocfs2_extend_no_holes(inode, newsize, pos);
2783 if (ret)
2784 mlog_errno(ret);
2785
2786 + wc->w_first_new_cpos =
2787 + ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
2788 +
2789 return ret;
2790 }
2791
2792 @@ -1644,7 +1662,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
2793 struct page **pagep, void **fsdata,
2794 struct buffer_head *di_bh, struct page *mmap_page)
2795 {
2796 - int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
2797 + int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
2798 unsigned int clusters_to_alloc, extents_to_split;
2799 struct ocfs2_write_ctxt *wc;
2800 struct inode *inode = mapping->host;
2801 @@ -1722,8 +1740,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
2802
2803 }
2804
2805 - ocfs2_set_target_boundaries(osb, wc, pos, len,
2806 - clusters_to_alloc + extents_to_split);
2807 + /*
2808 + * We have to zero sparse allocated clusters, unwritten extent clusters,
2809 + * and non-sparse clusters we just extended. For non-sparse writes,
2810 + * we know zeros will only be needed in the first and/or last cluster.
2811 + */
2812 + if (clusters_to_alloc || extents_to_split ||
2813 + (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
2814 + wc->w_desc[wc->w_clen - 1].c_needs_zero)))
2815 + cluster_of_pages = 1;
2816 + else
2817 + cluster_of_pages = 0;
2818 +
2819 + ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
2820
2821 handle = ocfs2_start_trans(osb, credits);
2822 if (IS_ERR(handle)) {
2823 @@ -1756,8 +1785,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
2824 * extent.
2825 */
2826 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
2827 - clusters_to_alloc + extents_to_split,
2828 - mmap_page);
2829 + cluster_of_pages, mmap_page);
2830 if (ret) {
2831 mlog_errno(ret);
2832 goto out_quota;
2833 diff --git a/fs/select.c b/fs/select.c
2834 index 0fe0e14..6d76b82 100644
2835 --- a/fs/select.c
2836 +++ b/fs/select.c
2837 @@ -110,6 +110,7 @@ void poll_initwait(struct poll_wqueues *pwq)
2838 {
2839 init_poll_funcptr(&pwq->pt, __pollwait);
2840 pwq->polling_task = current;
2841 + pwq->triggered = 0;
2842 pwq->error = 0;
2843 pwq->table = NULL;
2844 pwq->inline_index = 0;
2845 diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
2846 index 89b81ee..1863b0d 100644
2847 --- a/fs/xfs/xfs_iget.c
2848 +++ b/fs/xfs/xfs_iget.c
2849 @@ -63,6 +63,10 @@ xfs_inode_alloc(
2850 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
2851 if (!ip)
2852 return NULL;
2853 + if (inode_init_always(mp->m_super, VFS_I(ip))) {
2854 + kmem_zone_free(xfs_inode_zone, ip);
2855 + return NULL;
2856 + }
2857
2858 ASSERT(atomic_read(&ip->i_iocount) == 0);
2859 ASSERT(atomic_read(&ip->i_pincount) == 0);
2860 @@ -104,17 +108,6 @@ xfs_inode_alloc(
2861 #ifdef XFS_DIR2_TRACE
2862 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
2863 #endif
2864 - /*
2865 - * Now initialise the VFS inode. We do this after the xfs_inode
2866 - * initialisation as internal failures will result in ->destroy_inode
2867 - * being called and that will pass down through the reclaim path and
2868 - * free the XFS inode. This path requires the XFS inode to already be
2869 - * initialised. Hence if this call fails, the xfs_inode has already
2870 - * been freed and we should not reference it at all in the error
2871 - * handling.
2872 - */
2873 - if (!inode_init_always(mp->m_super, VFS_I(ip)))
2874 - return NULL;
2875
2876 /* prevent anyone from using this yet */
2877 VFS_I(ip)->i_state = I_NEW|I_LOCK;
2878 @@ -122,6 +115,71 @@ xfs_inode_alloc(
2879 return ip;
2880 }
2881
2882 +STATIC void
2883 +xfs_inode_free(
2884 + struct xfs_inode *ip)
2885 +{
2886 + switch (ip->i_d.di_mode & S_IFMT) {
2887 + case S_IFREG:
2888 + case S_IFDIR:
2889 + case S_IFLNK:
2890 + xfs_idestroy_fork(ip, XFS_DATA_FORK);
2891 + break;
2892 + }
2893 +
2894 + if (ip->i_afp)
2895 + xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2896 +
2897 +#ifdef XFS_INODE_TRACE
2898 + ktrace_free(ip->i_trace);
2899 +#endif
2900 +#ifdef XFS_BMAP_TRACE
2901 + ktrace_free(ip->i_xtrace);
2902 +#endif
2903 +#ifdef XFS_BTREE_TRACE
2904 + ktrace_free(ip->i_btrace);
2905 +#endif
2906 +#ifdef XFS_RW_TRACE
2907 + ktrace_free(ip->i_rwtrace);
2908 +#endif
2909 +#ifdef XFS_ILOCK_TRACE
2910 + ktrace_free(ip->i_lock_trace);
2911 +#endif
2912 +#ifdef XFS_DIR2_TRACE
2913 + ktrace_free(ip->i_dir_trace);
2914 +#endif
2915 +
2916 + if (ip->i_itemp) {
2917 + /*
2918 + * Only if we are shutting down the fs will we see an
2919 + * inode still in the AIL. If it is there, we should remove
2920 + * it to prevent a use-after-free from occurring.
2921 + */
2922 + xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2923 + struct xfs_ail *ailp = lip->li_ailp;
2924 +
2925 + ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2926 + XFS_FORCED_SHUTDOWN(ip->i_mount));
2927 + if (lip->li_flags & XFS_LI_IN_AIL) {
2928 + spin_lock(&ailp->xa_lock);
2929 + if (lip->li_flags & XFS_LI_IN_AIL)
2930 + xfs_trans_ail_delete(ailp, lip);
2931 + else
2932 + spin_unlock(&ailp->xa_lock);
2933 + }
2934 + xfs_inode_item_destroy(ip);
2935 + ip->i_itemp = NULL;
2936 + }
2937 +
2938 + /* asserts to verify all state is correct here */
2939 + ASSERT(atomic_read(&ip->i_iocount) == 0);
2940 + ASSERT(atomic_read(&ip->i_pincount) == 0);
2941 + ASSERT(!spin_is_locked(&ip->i_flags_lock));
2942 + ASSERT(completion_done(&ip->i_flush));
2943 +
2944 + kmem_zone_free(xfs_inode_zone, ip);
2945 +}
2946 +
2947 /*
2948 * Check the validity of the inode we just found it the cache
2949 */
2950 @@ -166,7 +224,7 @@ xfs_iget_cache_hit(
2951 * errors cleanly, then tag it so it can be set up correctly
2952 * later.
2953 */
2954 - if (!inode_init_always(mp->m_super, VFS_I(ip))) {
2955 + if (inode_init_always(mp->m_super, VFS_I(ip))) {
2956 error = ENOMEM;
2957 goto out_error;
2958 }
2959 @@ -298,7 +356,8 @@ out_preload_end:
2960 if (lock_flags)
2961 xfs_iunlock(ip, lock_flags);
2962 out_destroy:
2963 - xfs_destroy_inode(ip);
2964 + __destroy_inode(VFS_I(ip));
2965 + xfs_inode_free(ip);
2966 return error;
2967 }
2968
2969 @@ -506,62 +565,7 @@ xfs_ireclaim(
2970 XFS_QM_DQDETACH(ip->i_mount, ip);
2971 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
2972
2973 - switch (ip->i_d.di_mode & S_IFMT) {
2974 - case S_IFREG:
2975 - case S_IFDIR:
2976 - case S_IFLNK:
2977 - xfs_idestroy_fork(ip, XFS_DATA_FORK);
2978 - break;
2979 - }
2980 -
2981 - if (ip->i_afp)
2982 - xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2983 -
2984 -#ifdef XFS_INODE_TRACE
2985 - ktrace_free(ip->i_trace);
2986 -#endif
2987 -#ifdef XFS_BMAP_TRACE
2988 - ktrace_free(ip->i_xtrace);
2989 -#endif
2990 -#ifdef XFS_BTREE_TRACE
2991 - ktrace_free(ip->i_btrace);
2992 -#endif
2993 -#ifdef XFS_RW_TRACE
2994 - ktrace_free(ip->i_rwtrace);
2995 -#endif
2996 -#ifdef XFS_ILOCK_TRACE
2997 - ktrace_free(ip->i_lock_trace);
2998 -#endif
2999 -#ifdef XFS_DIR2_TRACE
3000 - ktrace_free(ip->i_dir_trace);
3001 -#endif
3002 - if (ip->i_itemp) {
3003 - /*
3004 - * Only if we are shutting down the fs will we see an
3005 - * inode still in the AIL. If it is there, we should remove
3006 - * it to prevent a use-after-free from occurring.
3007 - */
3008 - xfs_log_item_t *lip = &ip->i_itemp->ili_item;
3009 - struct xfs_ail *ailp = lip->li_ailp;
3010 -
3011 - ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
3012 - XFS_FORCED_SHUTDOWN(ip->i_mount));
3013 - if (lip->li_flags & XFS_LI_IN_AIL) {
3014 - spin_lock(&ailp->xa_lock);
3015 - if (lip->li_flags & XFS_LI_IN_AIL)
3016 - xfs_trans_ail_delete(ailp, lip);
3017 - else
3018 - spin_unlock(&ailp->xa_lock);
3019 - }
3020 - xfs_inode_item_destroy(ip);
3021 - ip->i_itemp = NULL;
3022 - }
3023 - /* asserts to verify all state is correct here */
3024 - ASSERT(atomic_read(&ip->i_iocount) == 0);
3025 - ASSERT(atomic_read(&ip->i_pincount) == 0);
3026 - ASSERT(!spin_is_locked(&ip->i_flags_lock));
3027 - ASSERT(completion_done(&ip->i_flush));
3028 - kmem_zone_free(xfs_inode_zone, ip);
3029 + xfs_inode_free(ip);
3030 }
3031
3032 /*
3033 diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
3034 index f879c1b..71c20ec 100644
3035 --- a/fs/xfs/xfs_inode.h
3036 +++ b/fs/xfs/xfs_inode.h
3037 @@ -309,23 +309,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
3038 }
3039
3040 /*
3041 - * Get rid of a partially initialized inode.
3042 - *
3043 - * We have to go through destroy_inode to make sure allocations
3044 - * from init_inode_always like the security data are undone.
3045 - *
3046 - * We mark the inode bad so that it takes the short cut in
3047 - * the reclaim path instead of going through the flush path
3048 - * which doesn't make sense for an inode that has never seen the
3049 - * light of day.
3050 - */
3051 -static inline void xfs_destroy_inode(struct xfs_inode *ip)
3052 -{
3053 - make_bad_inode(VFS_I(ip));
3054 - return destroy_inode(VFS_I(ip));
3055 -}
3056 -
3057 -/*
3058 * i_flags helper functions
3059 */
3060 static inline void
3061 diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
3062 index 3750f04..9dbdff3 100644
3063 --- a/fs/xfs/xfs_log.c
3064 +++ b/fs/xfs/xfs_log.c
3065 @@ -3180,7 +3180,7 @@ try_again:
3066 STATIC void
3067 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3068 {
3069 - ASSERT(spin_is_locked(&log->l_icloglock));
3070 + assert_spin_locked(&log->l_icloglock);
3071
3072 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3073 xlog_state_switch_iclogs(log, iclog, 0);
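
The one-line xfs_log.c change matters more than it looks: on !CONFIG_SMP kernels spinlocks compile away and spin_is_locked() evaluates to 0, so the old ASSERT fired spuriously on uniprocessor debug builds. assert_spin_locked() degrades correctly in every configuration, for example:

        static void example_requires_icloglock(xlog_t *log)
        {
                /* no-op without lock debugging, a real check with it,
                 * and never a false positive on UP builds */
                assert_spin_locked(&log->l_icloglock);
        }
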
3074 diff --git a/include/acpi/processor.h b/include/acpi/processor.h
3075 index 4927c06..e498c79 100644
3076 --- a/include/acpi/processor.h
3077 +++ b/include/acpi/processor.h
3078 @@ -174,7 +174,7 @@ struct acpi_processor_throttling {
3079 cpumask_var_t shared_cpu_map;
3080 int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
3081 int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
3082 - int state);
3083 + int state, bool force);
3084
3085 u32 address;
3086 u8 duty_offset;
3087 @@ -320,7 +320,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
3088 /* in processor_throttling.c */
3089 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
3090 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
3091 -extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
3092 +extern int acpi_processor_set_throttling(struct acpi_processor *pr,
3093 + int state, bool force);
3094 extern const struct file_operations acpi_processor_throttling_fops;
3095 extern void acpi_processor_throttling_init(void);
3096 /* in processor_idle.c */
3097 diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
3098 index 2878811..756d78b 100644
3099 --- a/include/linux/bitmap.h
3100 +++ b/include/linux/bitmap.h
3101 @@ -94,13 +94,13 @@ extern void __bitmap_shift_right(unsigned long *dst,
3102 const unsigned long *src, int shift, int bits);
3103 extern void __bitmap_shift_left(unsigned long *dst,
3104 const unsigned long *src, int shift, int bits);
3105 -extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
3106 +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
3107 const unsigned long *bitmap2, int bits);
3108 extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
3109 const unsigned long *bitmap2, int bits);
3110 extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
3111 const unsigned long *bitmap2, int bits);
3112 -extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
3113 +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
3114 const unsigned long *bitmap2, int bits);
3115 extern int __bitmap_intersects(const unsigned long *bitmap1,
3116 const unsigned long *bitmap2, int bits);
3117 @@ -171,13 +171,12 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
3118 }
3119 }
3120
3121 -static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
3122 +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
3123 const unsigned long *src2, int nbits)
3124 {
3125 if (small_const_nbits(nbits))
3126 - *dst = *src1 & *src2;
3127 - else
3128 - __bitmap_and(dst, src1, src2, nbits);
3129 + return (*dst = *src1 & *src2) != 0;
3130 + return __bitmap_and(dst, src1, src2, nbits);
3131 }
3132
3133 static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
3134 @@ -198,13 +197,12 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
3135 __bitmap_xor(dst, src1, src2, nbits);
3136 }
3137
3138 -static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
3139 +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
3140 const unsigned long *src2, int nbits)
3141 {
3142 if (small_const_nbits(nbits))
3143 - *dst = *src1 & ~(*src2);
3144 - else
3145 - __bitmap_andnot(dst, src1, src2, nbits);
3146 + return (*dst = *src1 & ~(*src2)) != 0;
3147 + return __bitmap_andnot(dst, src1, src2, nbits);
3148 }
3149
3150 static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
3151 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
3152 index c5ac87c..796df12 100644
3153 --- a/include/linux/cpumask.h
3154 +++ b/include/linux/cpumask.h
3155 @@ -43,10 +43,10 @@
3156 * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
3157 * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
3158 *
3159 - * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
3160 + * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
3161 * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
3162 * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
3163 - * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
3164 + * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2
3165 * void cpus_complement(dst, src) dst = ~src
3166 *
3167 * int cpus_equal(mask1, mask2) Does mask1 == mask2?
3168 @@ -179,10 +179,10 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
3169 }
3170
3171 #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
3172 -static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
3173 +static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
3174 const cpumask_t *src2p, int nbits)
3175 {
3176 - bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
3177 + return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
3178 }
3179
3180 #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
3181 @@ -201,10 +201,10 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
3182
3183 #define cpus_andnot(dst, src1, src2) \
3184 __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
3185 -static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
3186 +static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
3187 const cpumask_t *src2p, int nbits)
3188 {
3189 - bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
3190 + return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
3191 }
3192
3193 #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
3194 @@ -738,11 +738,11 @@ static inline void cpumask_clear(struct cpumask *dstp)
3195 * @src1p: the first input
3196 * @src2p: the second input
3197 */
3198 -static inline void cpumask_and(struct cpumask *dstp,
3199 +static inline int cpumask_and(struct cpumask *dstp,
3200 const struct cpumask *src1p,
3201 const struct cpumask *src2p)
3202 {
3203 - bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
3204 + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
3205 cpumask_bits(src2p), nr_cpumask_bits);
3206 }
3207
3208 @@ -779,11 +779,11 @@ static inline void cpumask_xor(struct cpumask *dstp,
3209 * @src1p: the first input
3210 * @src2p: the second input
3211 */
3212 -static inline void cpumask_andnot(struct cpumask *dstp,
3213 +static inline int cpumask_andnot(struct cpumask *dstp,
3214 const struct cpumask *src1p,
3215 const struct cpumask *src2p)
3216 {
3217 - bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
3218 + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
3219 cpumask_bits(src2p), nr_cpumask_bits);
3220 }
3221
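
The cpumask wrappers pick up the same convention: cpus_and(), cpus_andnot(), cpumask_and() and cpumask_andnot() now return non-zero when the destination mask is non-empty. A hedged sketch of a caller, with made-up function names:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Hypothetical: restrict a request to CPUs that are both allowed
     * and online, failing outright when nothing usable remains. */
    static int pick_allowed_online(struct cpumask *result,
                                   const struct cpumask *allowed)
    {
            if (!cpumask_and(result, allowed, cpu_online_mask))
                    return -EINVAL;         /* no usable CPU left */
            return 0;
    }
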
3222 diff --git a/include/linux/fs.h b/include/linux/fs.h
3223 index 3b534e5..53618df 100644
3224 --- a/include/linux/fs.h
3225 +++ b/include/linux/fs.h
3226 @@ -2121,7 +2121,7 @@ extern struct file *do_filp_open(int dfd, const char *pathname,
3227 int open_flag, int mode, int acc_mode);
3228 extern int may_open(struct path *, int, int);
3229
3230 -extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
3231 +extern int kernel_read(struct file *, loff_t, char *, unsigned long);
3232 extern struct file * open_exec(const char *);
3233
3234 /* fs/dcache.c -- generic fs support functions */
3235 @@ -2135,7 +2135,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
3236
3237 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
3238
3239 -extern struct inode * inode_init_always(struct super_block *, struct inode *);
3240 +extern int inode_init_always(struct super_block *, struct inode *);
3241 extern void inode_init_once(struct inode *);
3242 extern void inode_add_to_lists(struct super_block *, struct inode *);
3243 extern void iput(struct inode *);
3244 @@ -2162,6 +2162,7 @@ extern void __iget(struct inode * inode);
3245 extern void iget_failed(struct inode *);
3246 extern void clear_inode(struct inode *);
3247 extern void destroy_inode(struct inode *);
3248 +extern void __destroy_inode(struct inode *);
3249 extern struct inode *new_inode(struct super_block *);
3250 extern int should_remove_suid(struct dentry *);
3251 extern int file_remove_suid(struct file *);
3252 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3253 index 03be7f2..7e2f1ef 100644
3254 --- a/include/linux/hugetlb.h
3255 +++ b/include/linux/hugetlb.h
3256 @@ -10,6 +10,7 @@
3257 #include <asm/tlbflush.h>
3258
3259 struct ctl_table;
3260 +struct user_struct;
3261
3262 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
3263 {
3264 @@ -139,7 +140,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
3265
3266 extern const struct file_operations hugetlbfs_file_operations;
3267 extern struct vm_operations_struct hugetlb_vm_ops;
3268 -struct file *hugetlb_file_setup(const char *name, size_t, int);
3269 +struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
3270 + struct user_struct **user);
3271 int hugetlb_get_quota(struct address_space *mapping, long delta);
3272 void hugetlb_put_quota(struct address_space *mapping, long delta);
3273
3274 @@ -161,7 +163,7 @@ static inline void set_file_hugepages(struct file *file)
3275
3276 #define is_file_hugepages(file) 0
3277 #define set_file_hugepages(file) BUG()
3278 -#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS)
3279 +#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS)
3280
3281 #endif /* !CONFIG_HUGETLBFS */
3282
3283 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
3284 index 5eed8fa..340e909 100644
3285 --- a/include/linux/kvm_host.h
3286 +++ b/include/linux/kvm_host.h
3287 @@ -110,6 +110,7 @@ struct kvm_memory_slot {
3288
3289 struct kvm_kernel_irq_routing_entry {
3290 u32 gsi;
3291 + u32 type;
3292 int (*set)(struct kvm_kernel_irq_routing_entry *e,
3293 struct kvm *kvm, int level);
3294 union {
3295 diff --git a/init/main.c b/init/main.c
3296 index d721dad..303903c 100644
3297 --- a/init/main.c
3298 +++ b/init/main.c
3299 @@ -702,13 +702,14 @@ asmlinkage void __init start_kernel(void)
3300 int initcall_debug;
3301 core_param(initcall_debug, initcall_debug, bool, 0644);
3302
3303 +static char msgbuf[64];
3304 +static struct boot_trace_call call;
3305 +static struct boot_trace_ret ret;
3306 +
3307 int do_one_initcall(initcall_t fn)
3308 {
3309 int count = preempt_count();
3310 ktime_t calltime, delta, rettime;
3311 - char msgbuf[64];
3312 - struct boot_trace_call call;
3313 - struct boot_trace_ret ret;
3314
3315 if (initcall_debug) {
3316 call.caller = task_pid_nr(current);
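
The init/main.c hunk moves roughly a hundred bytes of locals (msgbuf plus the two boot-trace records) off the early-boot stack into file-scope statics, which is only correct because do_one_initcall() runs serialized at boot. A sketch of the general pattern, with made-up names:

    #include <linux/kernel.h>

    /* Hypothetical: a scratch buffer hoisted out of a function that is
     * never re-entered, trading BSS for stack depth. */
    static char scratch[64];                /* was: on-stack local */

    static void report_stage(const char *stage)
    {
            /* single-threaded caller, so the static is race-free */
            snprintf(scratch, sizeof(scratch), "boot stage: %s", stage);
            printk(KERN_DEBUG "%s\n", scratch);
    }
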
3317 diff --git a/ipc/shm.c b/ipc/shm.c
3318 index 4259716..30b1265 100644
3319 --- a/ipc/shm.c
3320 +++ b/ipc/shm.c
3321 @@ -174,7 +174,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3322 shm_unlock(shp);
3323 if (!is_file_hugepages(shp->shm_file))
3324 shmem_lock(shp->shm_file, 0, shp->mlock_user);
3325 - else
3326 + else if (shp->mlock_user)
3327 user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
3328 shp->mlock_user);
3329 fput (shp->shm_file);
3330 @@ -369,8 +369,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
3331 /* hugetlb_file_setup applies strict accounting */
3332 if (shmflg & SHM_NORESERVE)
3333 acctflag = VM_NORESERVE;
3334 - file = hugetlb_file_setup(name, size, acctflag);
3335 - shp->mlock_user = current_user();
3336 + file = hugetlb_file_setup(name, size, acctflag,
3337 + &shp->mlock_user);
3338 } else {
3339 /*
3340 * Do not allow no accounting for OVERCOMMIT_NEVER, even
3341 @@ -411,6 +411,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
3342 return error;
3343
3344 no_id:
3345 + if (shp->mlock_user) /* shmflg & SHM_HUGETLB case */
3346 + user_shm_unlock(size, shp->mlock_user);
3347 fput(file);
3348 no_file:
3349 security_shm_free(shp);
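
Read together, the hugetlb.h and ipc/shm.c hunks move the user_shm_lock() bookkeeping inside hugetlb_file_setup(), which now returns the locked user through a struct user_struct ** so that every error path can undo the lock exactly once. A rough sketch of the contract a caller relies on; attach_segment() is a made-up failure point:

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/hugetlb.h>
    #include <linux/mm.h>

    extern int attach_segment(struct file *file);   /* hypothetical */

    static int setup_huge_seg(const char *name, size_t size, int acctflag)
    {
            struct user_struct *user = NULL;
            struct file *file = hugetlb_file_setup(name, size, acctflag,
                                                   &user);

            if (IS_ERR(file))
                    return PTR_ERR(file);   /* nothing was locked */
            if (attach_segment(file) < 0) {
                    if (user)               /* SHM_HUGETLB took a lock */
                            user_shm_unlock(size, user);
                    fput(file);
                    return -ENOMEM;
            }
            return 0;
    }
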
3350 diff --git a/kernel/fork.c b/kernel/fork.c
3351 index 9c1f52d..f4be1ee 100644
3352 --- a/kernel/fork.c
3353 +++ b/kernel/fork.c
3354 @@ -816,11 +816,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
3355 {
3356 struct signal_struct *sig;
3357
3358 - if (clone_flags & CLONE_THREAD) {
3359 - atomic_inc(&current->signal->count);
3360 - atomic_inc(&current->signal->live);
3361 + if (clone_flags & CLONE_THREAD)
3362 return 0;
3363 - }
3364
3365 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
3366 tsk->signal = sig;
3367 @@ -878,16 +875,6 @@ void __cleanup_signal(struct signal_struct *sig)
3368 kmem_cache_free(signal_cachep, sig);
3369 }
3370
3371 -static void cleanup_signal(struct task_struct *tsk)
3372 -{
3373 - struct signal_struct *sig = tsk->signal;
3374 -
3375 - atomic_dec(&sig->live);
3376 -
3377 - if (atomic_dec_and_test(&sig->count))
3378 - __cleanup_signal(sig);
3379 -}
3380 -
3381 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
3382 {
3383 unsigned long new_flags = p->flags;
3384 @@ -1237,6 +1224,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
3385 }
3386
3387 if (clone_flags & CLONE_THREAD) {
3388 + atomic_inc(&current->signal->count);
3389 + atomic_inc(&current->signal->live);
3390 p->group_leader = current->group_leader;
3391 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
3392 }
3393 @@ -1281,7 +1270,8 @@ bad_fork_cleanup_mm:
3394 if (p->mm)
3395 mmput(p->mm);
3396 bad_fork_cleanup_signal:
3397 - cleanup_signal(p);
3398 + if (!(clone_flags & CLONE_THREAD))
3399 + __cleanup_signal(p->signal);
3400 bad_fork_cleanup_sighand:
3401 __cleanup_sighand(p->sighand);
3402 bad_fork_cleanup_fs:
3403 diff --git a/kernel/kthread.c b/kernel/kthread.c
3404 index 4ebaf85..7fbaa09 100644
3405 --- a/kernel/kthread.c
3406 +++ b/kernel/kthread.c
3407 @@ -216,12 +216,12 @@ int kthread_stop(struct task_struct *k)
3408 /* Now set kthread_should_stop() to true, and wake it up. */
3409 kthread_stop_info.k = k;
3410 wake_up_process(k);
3411 - put_task_struct(k);
3412
3413 /* Once it dies, reset stop ptr, gather result and we're done. */
3414 wait_for_completion(&kthread_stop_info.done);
3415 kthread_stop_info.k = NULL;
3416 ret = kthread_stop_info.err;
3417 + put_task_struct(k);
3418 mutex_unlock(&kthread_stop_lock);
3419
3420 trace_sched_kthread_stop_ret(ret);
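
The kthread.c hunk is a use-after-free fix: dropping the task reference before wait_for_completion() lets the woken thread exit and free its task_struct while kthread_stop() is still mid-sequence. A minimal sketch of the ordering rule, under the assumption that the completion is signalled by the exiting task:

    #include <linux/completion.h>
    #include <linux/sched.h>

    /* Hypothetical: release the reference only after the last point at
     * which the task might still be needed. */
    static void wait_then_release(struct task_struct *k,
                                  struct completion *done)
    {
            get_task_struct(k);             /* pin across the wakeup */
            wake_up_process(k);             /* k may exit from here on */
            wait_for_completion(done);
            put_task_struct(k);             /* safe: k unused below */
    }
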
3421 diff --git a/kernel/signal.c b/kernel/signal.c
3422 index d803473..2dfc931 100644
3423 --- a/kernel/signal.c
3424 +++ b/kernel/signal.c
3425 @@ -2414,11 +2414,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
3426 stack_t oss;
3427 int error;
3428
3429 - if (uoss) {
3430 - oss.ss_sp = (void __user *) current->sas_ss_sp;
3431 - oss.ss_size = current->sas_ss_size;
3432 - oss.ss_flags = sas_ss_flags(sp);
3433 - }
3434 + oss.ss_sp = (void __user *) current->sas_ss_sp;
3435 + oss.ss_size = current->sas_ss_size;
3436 + oss.ss_flags = sas_ss_flags(sp);
3437
3438 if (uss) {
3439 void __user *ss_sp;
3440 @@ -2461,13 +2459,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
3441 current->sas_ss_size = ss_size;
3442 }
3443
3444 + error = 0;
3445 if (uoss) {
3446 error = -EFAULT;
3447 - if (copy_to_user(uoss, &oss, sizeof(oss)))
3448 + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3449 goto out;
3450 + error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3451 + __put_user(oss.ss_size, &uoss->ss_size) |
3452 + __put_user(oss.ss_flags, &uoss->ss_flags);
3453 }
3454
3455 - error = 0;
3456 out:
3457 return error;
3458 }
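
The do_sigaltstack() rework swaps one copy_to_user() of the whole stack_t for access_ok() plus per-field __put_user(), so only named fields, never compiler padding from the kernel stack, reach userspace. A sketch of the copy-out idiom with a made-up structure:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    struct stats { long a; long b; };       /* illustrative only */

    static int copy_stats_to_user(struct stats __user *u,
                                  const struct stats *s)
    {
            if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
                    return -EFAULT;
            /* one range check, then unchecked per-field stores */
            if (__put_user(s->a, &u->a) | __put_user(s->b, &u->b))
                    return -EFAULT;
            return 0;
    }
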
3459 diff --git a/lib/bitmap.c b/lib/bitmap.c
3460 index 35a1f7f..7025658 100644
3461 --- a/lib/bitmap.c
3462 +++ b/lib/bitmap.c
3463 @@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
3464 }
3465 EXPORT_SYMBOL(__bitmap_shift_left);
3466
3467 -void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
3468 +int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
3469 const unsigned long *bitmap2, int bits)
3470 {
3471 int k;
3472 int nr = BITS_TO_LONGS(bits);
3473 + unsigned long result = 0;
3474
3475 for (k = 0; k < nr; k++)
3476 - dst[k] = bitmap1[k] & bitmap2[k];
3477 + result |= (dst[k] = bitmap1[k] & bitmap2[k]);
3478 + return result != 0;
3479 }
3480 EXPORT_SYMBOL(__bitmap_and);
3481
3482 @@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
3483 }
3484 EXPORT_SYMBOL(__bitmap_xor);
3485
3486 -void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
3487 +int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
3488 const unsigned long *bitmap2, int bits)
3489 {
3490 int k;
3491 int nr = BITS_TO_LONGS(bits);
3492 + unsigned long result = 0;
3493
3494 for (k = 0; k < nr; k++)
3495 - dst[k] = bitmap1[k] & ~bitmap2[k];
3496 + result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
3497 + return result != 0;
3498 }
3499 EXPORT_SYMBOL(__bitmap_andnot);
3500
3501 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3502 index 6bf3cc4..b91020e 100644
3503 --- a/mm/page_alloc.c
3504 +++ b/mm/page_alloc.c
3505 @@ -2342,7 +2342,6 @@ static void build_zonelists(pg_data_t *pgdat)
3506 prev_node = local_node;
3507 nodes_clear(used_mask);
3508
3509 - memset(node_load, 0, sizeof(node_load));
3510 memset(node_order, 0, sizeof(node_order));
3511 j = 0;
3512
3513 @@ -2451,6 +2450,9 @@ static int __build_all_zonelists(void *dummy)
3514 {
3515 int nid;
3516
3517 +#ifdef CONFIG_NUMA
3518 + memset(node_load, 0, sizeof(node_load));
3519 +#endif
3520 for_each_online_node(nid) {
3521 pg_data_t *pgdat = NODE_DATA(nid);
3522
3523 diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
3524 index d6a9243..e8e9bad 100644
3525 --- a/net/appletalk/ddp.c
3526 +++ b/net/appletalk/ddp.c
3527 @@ -1242,6 +1242,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
3528 return -ENOBUFS;
3529
3530 *uaddr_len = sizeof(struct sockaddr_at);
3531 + memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
3532
3533 if (peer) {
3534 if (sk->sk_state != TCP_ESTABLISHED)
3535 diff --git a/net/can/raw.c b/net/can/raw.c
3536 index 6aa154e..5df3bf6 100644
3537 --- a/net/can/raw.c
3538 +++ b/net/can/raw.c
3539 @@ -397,6 +397,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
3540 if (peer)
3541 return -EOPNOTSUPP;
3542
3543 + memset(addr, 0, sizeof(*addr));
3544 addr->can_family = AF_CAN;
3545 addr->can_ifindex = ro->ifindex;
3546
3547 diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
3548 index 6f479fa..3bafb21 100644
3549 --- a/net/econet/af_econet.c
3550 +++ b/net/econet/af_econet.c
3551 @@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
3552 if (peer)
3553 return -EOPNOTSUPP;
3554
3555 + memset(sec, 0, sizeof(*sec));
3556 mutex_lock(&econet_mutex);
3557
3558 sk = sock->sk;
3559 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
3560 index 3e7e910..d1d88e6 100644
3561 --- a/net/ipv4/ip_output.c
3562 +++ b/net/ipv4/ip_output.c
3563 @@ -814,6 +814,8 @@ int ip_append_data(struct sock *sk,
3564 inet->cork.addr = ipc->addr;
3565 }
3566 rt = *rtp;
3567 + if (unlikely(!rt))
3568 + return -EFAULT;
3569 /*
3570 * We steal reference to this route, caller should not release it
3571 */
3572 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
3573 index 61f5538..55e315a 100644
3574 --- a/net/ipv6/af_inet6.c
3575 +++ b/net/ipv6/af_inet6.c
3576 @@ -294,8 +294,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3577 v4addr != htonl(INADDR_ANY) &&
3578 chk_addr_ret != RTN_LOCAL &&
3579 chk_addr_ret != RTN_MULTICAST &&
3580 - chk_addr_ret != RTN_BROADCAST)
3581 + chk_addr_ret != RTN_BROADCAST) {
3582 + err = -EADDRNOTAVAIL;
3583 goto out;
3584 + }
3585 } else {
3586 if (addr_type != IPV6_ADDR_ANY) {
3587 struct net_device *dev = NULL;
3588 diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
3589 index e0fbcff..b06224b 100644
3590 --- a/net/irda/af_irda.c
3591 +++ b/net/irda/af_irda.c
3592 @@ -714,6 +714,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
3593 struct sock *sk = sock->sk;
3594 struct irda_sock *self = irda_sk(sk);
3595
3596 + memset(&saddr, 0, sizeof(saddr));
3597 if (peer) {
3598 if (sk->sk_state != TCP_ESTABLISHED)
3599 return -ENOTCONN;
3600 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
3601 index febae70..515d556 100644
3602 --- a/net/llc/af_llc.c
3603 +++ b/net/llc/af_llc.c
3604 @@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
3605 struct llc_sock *llc = llc_sk(sk);
3606 int rc = 0;
3607
3608 + memset(&sllc, 0, sizeof(sllc));
3609 lock_sock(sk);
3610 if (sock_flag(sk, SOCK_ZAPPED))
3611 goto out;
3612 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
3613 index 947aaaa..baf0f77 100644
3614 --- a/net/mac80211/agg-tx.c
3615 +++ b/net/mac80211/agg-tx.c
3616 @@ -376,6 +376,14 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
3617 &local->hw, queue,
3618 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
3619
3620 + if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
3621 + return;
3622 +
3623 + if (WARN(!sta->ampdu_mlme.tid_tx[tid],
3624 + "TID %d gone but expected when splicing aggregates from"
3625 + "the pending queue\n", tid))
3626 + return;
3627 +
3628 if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
3629 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
3630 /* mark queue as pending, it is stopped already */
3631 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
3632 index 3be0e01..0c3e755 100644
3633 --- a/net/netrom/af_netrom.c
3634 +++ b/net/netrom/af_netrom.c
3635 @@ -848,6 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
3636 sax->fsa_ax25.sax25_family = AF_NETROM;
3637 sax->fsa_ax25.sax25_ndigis = 1;
3638 sax->fsa_ax25.sax25_call = nr->user_addr;
3639 + memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
3640 sax->fsa_digipeater[0] = nr->dest_addr;
3641 *uaddr_len = sizeof(struct full_sockaddr_ax25);
3642 } else {
3643 diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
3644 index 877a7f6..ebe1cc9 100644
3645 --- a/net/rose/af_rose.c
3646 +++ b/net/rose/af_rose.c
3647 @@ -957,6 +957,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
3648 struct rose_sock *rose = rose_sk(sk);
3649 int n;
3650
3651 + memset(srose, 0, sizeof(*srose));
3652 if (peer != 0) {
3653 if (sk->sk_state != TCP_ESTABLISHED)
3654 return -ENOTCONN;
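
The run of getname() hunks above (AppleTalk, CAN, econet, IrDA, LLC, NET/ROM, ROSE) all close the same infoleak: the sockaddr is a stack local, only some fields get assigned, and the whole structure is later copied out, so padding and unset members leak stale kernel stack bytes. The shape of the fix, as a sketch with a deliberately generic address family:

    #include <linux/socket.h>
    #include <linux/string.h>

    /* Hypothetical getname-style helper: zero the entire sockaddr
     * before filling in the fields you actually mean to expose. */
    static void fill_addr(struct sockaddr_storage *out, int *len)
    {
            struct sockaddr *sa = (struct sockaddr *)out;

            memset(out, 0, sizeof(*out));   /* kills the stack leak */
            sa->sa_family = AF_UNSPEC;      /* illustrative value */
            *len = sizeof(struct sockaddr);
    }
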
3655 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
3656 index 5abab09..8d02e05 100644
3657 --- a/net/sunrpc/clnt.c
3658 +++ b/net/sunrpc/clnt.c
3659 @@ -876,6 +876,7 @@ static inline void
3660 rpc_task_force_reencode(struct rpc_task *task)
3661 {
3662 task->tk_rqstp->rq_snd_buf.len = 0;
3663 + task->tk_rqstp->rq_bytes_sent = 0;
3664 }
3665
3666 static inline void
3667 diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
3668 index 50d572b..2ae3aff 100644
3669 --- a/security/integrity/ima/ima_crypto.c
3670 +++ b/security/integrity/ima/ima_crypto.c
3671 @@ -45,9 +45,9 @@ int ima_calc_hash(struct file *file, char *digest)
3672 {
3673 struct hash_desc desc;
3674 struct scatterlist sg[1];
3675 - loff_t i_size;
3676 + loff_t i_size, offset = 0;
3677 char *rbuf;
3678 - int rc, offset = 0;
3679 + int rc;
3680
3681 rc = init_desc(&desc);
3682 if (rc != 0)
3683 @@ -67,6 +67,8 @@ int ima_calc_hash(struct file *file, char *digest)
3684 rc = rbuf_len;
3685 break;
3686 }
3687 + if (rbuf_len == 0)
3688 + break;
3689 offset += rbuf_len;
3690 sg_init_one(sg, rbuf, rbuf_len);
3691
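
The ima_calc_hash() hunk fixes two loop bugs at once: offset was an int, so hashing stalled past 2 GB, and a zero-length read (early EOF, for instance a file that shrank underneath the loop) never terminated it. A sketch of the corrected loop shape; read_chunk() and consume() are stand-ins for kernel_read() and the digest update:

    #include <linux/fs.h>
    #include <linux/mm.h>

    extern int read_chunk(struct file *f, loff_t off, void *buf, size_t n);
    extern void consume(const void *buf, size_t n);     /* hypothetical */

    static int hash_whole_file(struct file *file, void *buf)
    {
            loff_t offset = 0;              /* was int: 2 GB overflow */
            int rc;

            for (;;) {
                    rc = read_chunk(file, offset, buf, PAGE_SIZE);
                    if (rc < 0)
                            return rc;      /* read error */
                    if (rc == 0)
                            break;          /* EOF: loop must stop */
                    offset += rc;
                    consume(buf, rc);       /* feed the digest */
            }
            return 0;
    }
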
3692 diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
3693 index d659995..2a2c2ca 100644
3694 --- a/sound/core/pcm_lib.c
3695 +++ b/sound/core/pcm_lib.c
3696 @@ -876,47 +876,24 @@ static int snd_interval_ratden(struct snd_interval *i,
3697 int snd_interval_list(struct snd_interval *i, unsigned int count, unsigned int *list, unsigned int mask)
3698 {
3699 unsigned int k;
3700 - int changed = 0;
3701 + struct snd_interval list_range;
3702
3703 if (!count) {
3704 i->empty = 1;
3705 return -EINVAL;
3706 }
3707 + snd_interval_any(&list_range);
3708 + list_range.min = UINT_MAX;
3709 + list_range.max = 0;
3710 for (k = 0; k < count; k++) {
3711 if (mask && !(mask & (1 << k)))
3712 continue;
3713 - if (i->min == list[k] && !i->openmin)
3714 - goto _l1;
3715 - if (i->min < list[k]) {
3716 - i->min = list[k];
3717 - i->openmin = 0;
3718 - changed = 1;
3719 - goto _l1;
3720 - }
3721 - }
3722 - i->empty = 1;
3723 - return -EINVAL;
3724 - _l1:
3725 - for (k = count; k-- > 0;) {
3726 - if (mask && !(mask & (1 << k)))
3727 + if (!snd_interval_test(i, list[k]))
3728 continue;
3729 - if (i->max == list[k] && !i->openmax)
3730 - goto _l2;
3731 - if (i->max > list[k]) {
3732 - i->max = list[k];
3733 - i->openmax = 0;
3734 - changed = 1;
3735 - goto _l2;
3736 - }
3737 + list_range.min = min(list_range.min, list[k]);
3738 + list_range.max = max(list_range.max, list[k]);
3739 }
3740 - i->empty = 1;
3741 - return -EINVAL;
3742 - _l2:
3743 - if (snd_interval_checkempty(i)) {
3744 - i->empty = 1;
3745 - return -EINVAL;
3746 - }
3747 - return changed;
3748 + return snd_interval_refine(i, &list_range);
3749 }
3750
3751 EXPORT_SYMBOL(snd_interval_list);
3752 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3753 index 1df7692..c734840 100644
3754 --- a/sound/pci/hda/patch_realtek.c
3755 +++ b/sound/pci/hda/patch_realtek.c
3756 @@ -6186,9 +6186,9 @@ static struct hda_verb alc885_mbp_ch2_init[] = {
3757 };
3758
3759 /*
3760 - * 6ch mode
3761 + * 4ch mode
3762 */
3763 -static struct hda_verb alc885_mbp_ch6_init[] = {
3764 +static struct hda_verb alc885_mbp_ch4_init[] = {
3765 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
3766 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
3767 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
3768 @@ -6197,9 +6197,9 @@ static struct hda_verb alc885_mbp_ch6_init[] = {
3769 { } /* end */
3770 };
3771
3772 -static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
3773 +static struct hda_channel_mode alc885_mbp_4ch_modes[2] = {
3774 { 2, alc885_mbp_ch2_init },
3775 - { 6, alc885_mbp_ch6_init },
3776 + { 4, alc885_mbp_ch4_init },
3777 };
3778
3779
3780 @@ -6232,10 +6232,11 @@ static struct snd_kcontrol_new alc882_base_mixer[] = {
3781 };
3782
3783 static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
3784 - HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
3785 - HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
3786 - HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
3787 - HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
3788 + HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
3789 + HDA_BIND_MUTE ("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
3790 + HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
3791 + HDA_BIND_MUTE ("Headphone Playback Switch", 0x0e, 0x02, HDA_INPUT),
3792 + HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
3793 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
3794 HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
3795 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
3796 @@ -6481,14 +6482,18 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
3797 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
3798 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
3799 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
3800 + /* HP mixer */
3801 + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
3802 + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
3803 + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
3804 /* Front Pin: output 0 (0x0c) */
3805 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
3806 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
3807 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
3808 - /* HP Pin: output 0 (0x0d) */
3809 + /* HP Pin: output 0 (0x0e) */
3810 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4},
3811 - {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
3812 - {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
3813 + {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
3814 + {0x15, AC_VERB_SET_CONNECT_SEL, 0x02},
3815 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
3816 /* Mic (rear) pin: input vref at 80% */
3817 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
3818 @@ -6885,10 +6890,11 @@ static struct alc_config_preset alc882_presets[] = {
3819 .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
3820 .init_verbs = { alc885_mbp3_init_verbs,
3821 alc880_gpio1_init_verbs },
3822 - .num_dacs = ARRAY_SIZE(alc882_dac_nids),
3823 + .num_dacs = 2,
3824 .dac_nids = alc882_dac_nids,
3825 - .channel_mode = alc885_mbp_6ch_modes,
3826 - .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
3827 + .hp_nid = 0x04,
3828 + .channel_mode = alc885_mbp_4ch_modes,
3829 + .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
3830 .input_mux = &alc882_capture_source,
3831 .dig_out_nid = ALC882_DIGOUT_NID,
3832 .dig_in_nid = ALC882_DIGIN_NID,
3833 diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
3834 index 864ac54..8f2018a 100644
3835 --- a/virt/kvm/irq_comm.c
3836 +++ b/virt/kvm/irq_comm.c
3837 @@ -141,7 +141,8 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
3838 unsigned gsi = pin;
3839
3840 list_for_each_entry(e, &kvm->irq_routing, link)
3841 - if (e->irqchip.irqchip == irqchip &&
3842 + if (e->type == KVM_IRQ_ROUTING_IRQCHIP &&
3843 + e->irqchip.irqchip == irqchip &&
3844 e->irqchip.pin == pin) {
3845 gsi = e->gsi;
3846 break;
3847 @@ -240,6 +241,7 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
3848 int delta;
3849
3850 e->gsi = ue->gsi;
3851 + e->type = ue->type;
3852 switch (ue->type) {
3853 case KVM_IRQ_ROUTING_IRQCHIP:
3854 delta = 0;
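
The irq_comm.c hunks are a tagged-union fix: kvm_kernel_irq_routing_entry keeps per-type data in a union, and kvm_notify_acked_irq() was reading e->irqchip.* without checking which member was live; the new type field is the missing tag. A generic sketch of the rule, with made-up entry kinds:

    #include <linux/types.h>

    enum { ROUTE_IRQCHIP = 1, ROUTE_MSI = 2 };      /* illustrative */

    struct route {
            u32 type;               /* tag: which member is valid */
            union {
                    struct { u32 chip, pin; } irqchip;
                    struct { u64 addr; } msi;
            } u;
    };

    /* Check the tag before touching a union member, exactly as the
     * patch now does with e->type. */
    static int match_pin(const struct route *e, u32 chip, u32 pin)
    {
            if (e->type != ROUTE_IRQCHIP)
                    return 0;       /* msi fields would be garbage */
            return e->u.irqchip.chip == chip && e->u.irqchip.pin == pin;
    }
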
3855 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
3856 index 1489829..ad38135 100644
3857 --- a/virt/kvm/kvm_main.c
3858 +++ b/virt/kvm/kvm_main.c
3859 @@ -881,6 +881,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
3860 #endif
3861 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
3862 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
3863 +#else
3864 + kvm_arch_flush_shadow(kvm);
3865 #endif
3866 kvm_arch_destroy_vm(kvm);
3867 mmdrop(mm);
3868 @@ -1055,8 +1057,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
3869
3870 kvm_free_physmem_slot(&old, npages ? &new : NULL);
3871 /* Slot deletion case: we have to update the current slot */
3872 + spin_lock(&kvm->mmu_lock);
3873 if (!npages)
3874 *memslot = old;
3875 + spin_unlock(&kvm->mmu_lock);
3876 #ifdef CONFIG_DMAR
3877 /* map the pages in iommu page table */
3878 r = kvm_iommu_map_pages(kvm, base_gfn, npages);
