
Contents of /genpatches-2.6/trunk/3.6/1008_linux-3.6.9.patch



Revision 2239
Sat Dec 8 23:13:39 2012 UTC by mpagano
File size: 58273 byte(s)
Linux patch 3.6.9
1 diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
2 index 12d3952e..32bc56b 100755
3 --- a/Documentation/dvb/get_dvb_firmware
4 +++ b/Documentation/dvb/get_dvb_firmware
5 @@ -116,7 +116,7 @@ sub tda10045 {
6
7 sub tda10046 {
8 my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip";
9 - my $url = "http://www.tt-download.com/download/updates/219/$sourcefile";
10 + my $url = "http://technotrend.com.ua/download/software/219/$sourcefile";
11 my $hash = "6a7e1e2f2644b162ff0502367553c72d";
12 my $outfile = "dvb-fe-tda10046.fw";
13 my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
14 diff --git a/Makefile b/Makefile
15 index c5cc2f0..978af72 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 3
20 PATCHLEVEL = 6
21 -SUBLEVEL = 8
22 +SUBLEVEL = 9
23 EXTRAVERSION =
24 NAME = Terrified Chipmunk
25
26 diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
27 index fd49aed..5dede04 100644
28 --- a/arch/parisc/kernel/signal32.c
29 +++ b/arch/parisc/kernel/signal32.c
30 @@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
31 {
32 compat_sigset_t s;
33
34 - if (sz != sizeof *set) panic("put_sigset32()");
35 + if (sz != sizeof *set)
36 + return -EINVAL;
37 sigset_64to32(&s, set);
38
39 return copy_to_user(up, &s, sizeof s);
40 @@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
41 compat_sigset_t s;
42 int r;
43
44 - if (sz != sizeof *set) panic("put_sigset32()");
45 + if (sz != sizeof *set)
46 + return -EINVAL;
47
48 if ((r = copy_from_user(&s, up, sz)) == 0) {
49 sigset_32to64(set, &s);
50 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
51 index 7426e40..f76c108 100644
52 --- a/arch/parisc/kernel/sys_parisc.c
53 +++ b/arch/parisc/kernel/sys_parisc.c
54 @@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
55 struct vm_area_struct *vma;
56 int offset = mapping ? get_offset(mapping) : 0;
57
58 + offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
59 +
60 addr = DCACHE_ALIGN(addr - offset) + offset;
61
62 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
63 diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
64 index baf92cd..041e28d 100644
65 --- a/arch/powerpc/platforms/pseries/eeh_driver.c
66 +++ b/arch/powerpc/platforms/pseries/eeh_driver.c
67 @@ -25,6 +25,7 @@
68 #include <linux/delay.h>
69 #include <linux/interrupt.h>
70 #include <linux/irq.h>
71 +#include <linux/module.h>
72 #include <linux/pci.h>
73 #include <asm/eeh.h>
74 #include <asm/eeh_event.h>
75 @@ -47,6 +48,41 @@ static inline const char *eeh_pcid_name(struct pci_dev *pdev)
76 return "";
77 }
78
79 +/**
80 + * eeh_pcid_get - Get the PCI device driver
81 + * @pdev: PCI device
82 + *
83 + * The function is used to retrieve the PCI device driver for
84 + * the indicated PCI device. Besides, we will increase the reference
85 + * of the PCI device driver to prevent that being unloaded on
86 + * the fly. Otherwise, kernel crash would be seen.
87 + */
88 +static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
89 +{
90 + if (!pdev || !pdev->driver)
91 + return NULL;
92 +
93 + if (!try_module_get(pdev->driver->driver.owner))
94 + return NULL;
95 +
96 + return pdev->driver;
97 +}
98 +
99 +/**
100 + * eeh_pcid_put - Dereference on the PCI device driver
101 + * @pdev: PCI device
102 + *
103 + * The function is called to do dereference on the PCI device
104 + * driver of the indicated PCI device.
105 + */
106 +static inline void eeh_pcid_put(struct pci_dev *pdev)
107 +{
108 + if (!pdev || !pdev->driver)
109 + return;
110 +
111 + module_put(pdev->driver->driver.owner);
112 +}
113 +
114 #if 0
115 static void print_device_node_tree(struct pci_dn *pdn, int dent)
116 {
117 @@ -126,18 +162,20 @@ static void eeh_enable_irq(struct pci_dev *dev)
118 static int eeh_report_error(struct pci_dev *dev, void *userdata)
119 {
120 enum pci_ers_result rc, *res = userdata;
121 - struct pci_driver *driver = dev->driver;
122 + struct pci_driver *driver;
123
124 dev->error_state = pci_channel_io_frozen;
125
126 - if (!driver)
127 - return 0;
128 + driver = eeh_pcid_get(dev);
129 + if (!driver) return 0;
130
131 eeh_disable_irq(dev);
132
133 if (!driver->err_handler ||
134 - !driver->err_handler->error_detected)
135 + !driver->err_handler->error_detected) {
136 + eeh_pcid_put(dev);
137 return 0;
138 + }
139
140 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
141
142 @@ -145,6 +183,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
143 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
144 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
145
146 + eeh_pcid_put(dev);
147 return 0;
148 }
149
150 @@ -160,12 +199,16 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
151 static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
152 {
153 enum pci_ers_result rc, *res = userdata;
154 - struct pci_driver *driver = dev->driver;
155 + struct pci_driver *driver;
156 +
157 + driver = eeh_pcid_get(dev);
158 + if (!driver) return 0;
159
160 - if (!driver ||
161 - !driver->err_handler ||
162 - !driver->err_handler->mmio_enabled)
163 + if (!driver->err_handler ||
164 + !driver->err_handler->mmio_enabled) {
165 + eeh_pcid_put(dev);
166 return 0;
167 + }
168
169 rc = driver->err_handler->mmio_enabled(dev);
170
171 @@ -173,6 +216,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
172 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
173 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
174
175 + eeh_pcid_put(dev);
176 return 0;
177 }
178
179 @@ -189,18 +233,20 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
180 static int eeh_report_reset(struct pci_dev *dev, void *userdata)
181 {
182 enum pci_ers_result rc, *res = userdata;
183 - struct pci_driver *driver = dev->driver;
184 -
185 - if (!driver)
186 - return 0;
187 + struct pci_driver *driver;
188
189 dev->error_state = pci_channel_io_normal;
190
191 + driver = eeh_pcid_get(dev);
192 + if (!driver) return 0;
193 +
194 eeh_enable_irq(dev);
195
196 if (!driver->err_handler ||
197 - !driver->err_handler->slot_reset)
198 + !driver->err_handler->slot_reset) {
199 + eeh_pcid_put(dev);
200 return 0;
201 + }
202
203 rc = driver->err_handler->slot_reset(dev);
204 if ((*res == PCI_ERS_RESULT_NONE) ||
205 @@ -208,6 +254,7 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
206 if (*res == PCI_ERS_RESULT_DISCONNECT &&
207 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
208
209 + eeh_pcid_put(dev);
210 return 0;
211 }
212
213 @@ -222,21 +269,24 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
214 */
215 static int eeh_report_resume(struct pci_dev *dev, void *userdata)
216 {
217 - struct pci_driver *driver = dev->driver;
218 + struct pci_driver *driver;
219
220 dev->error_state = pci_channel_io_normal;
221
222 - if (!driver)
223 - return 0;
224 + driver = eeh_pcid_get(dev);
225 + if (!driver) return 0;
226
227 eeh_enable_irq(dev);
228
229 if (!driver->err_handler ||
230 - !driver->err_handler->resume)
231 + !driver->err_handler->resume) {
232 + eeh_pcid_put(dev);
233 return 0;
234 + }
235
236 driver->err_handler->resume(dev);
237
238 + eeh_pcid_put(dev);
239 return 0;
240 }
241
242 @@ -250,21 +300,24 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
243 */
244 static int eeh_report_failure(struct pci_dev *dev, void *userdata)
245 {
246 - struct pci_driver *driver = dev->driver;
247 + struct pci_driver *driver;
248
249 dev->error_state = pci_channel_io_perm_failure;
250
251 - if (!driver)
252 - return 0;
253 + driver = eeh_pcid_get(dev);
254 + if (!driver) return 0;
255
256 eeh_disable_irq(dev);
257
258 if (!driver->err_handler ||
259 - !driver->err_handler->error_detected)
260 + !driver->err_handler->error_detected) {
261 + eeh_pcid_put(dev);
262 return 0;
263 + }
264
265 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
266
267 + eeh_pcid_put(dev);
268 return 0;
269 }
270
271 diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
272 index 867de2f..689e1ba 100644
273 --- a/arch/sparc/kernel/signal_64.c
274 +++ b/arch/sparc/kernel/signal_64.c
275 @@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
276 err |= restore_fpu_state(regs, fpu_save);
277
278 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
279 - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
280 -
281 - if (err)
282 + if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
283 goto segv;
284
285 err |= __get_user(rwin_save, &sf->rwin_save);
286 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
287 index b3e0227..90201aa 100644
288 --- a/arch/x86/boot/compressed/eboot.c
289 +++ b/arch/x86/boot/compressed/eboot.c
290 @@ -12,6 +12,8 @@
291 #include <asm/setup.h>
292 #include <asm/desc.h>
293
294 +#undef memcpy /* Use memcpy from misc.c */
295 +
296 #include "eboot.h"
297
298 static efi_system_table_t *sys_table;
299 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
300 index dcfde52..19f16eb 100644
301 --- a/arch/x86/include/asm/ptrace.h
302 +++ b/arch/x86/include/asm/ptrace.h
303 @@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
304 }
305 #endif
306
307 -/*
308 - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
309 - * when it traps. The previous stack will be directly underneath the saved
310 - * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
311 - *
312 - * This is valid only for kernel mode traps.
313 - */
314 -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
315 -{
316 #ifdef CONFIG_X86_32
317 - return (unsigned long)(&regs->sp);
318 +extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
319 #else
320 +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
321 +{
322 return regs->sp;
323 -#endif
324 }
325 +#endif
326
327 #define GET_IP(regs) ((regs)->ip)
328 #define GET_FP(regs) ((regs)->bp)
329 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
330 index 82746f9..5d8cf0d 100644
331 --- a/arch/x86/kernel/microcode_amd.c
332 +++ b/arch/x86/kernel/microcode_amd.c
333 @@ -97,6 +97,7 @@ static unsigned int verify_ucode_size(int cpu, u32 patch_size,
334 #define F1XH_MPB_MAX_SIZE 2048
335 #define F14H_MPB_MAX_SIZE 1824
336 #define F15H_MPB_MAX_SIZE 4096
337 +#define F16H_MPB_MAX_SIZE 3458
338
339 switch (c->x86) {
340 case 0x14:
341 @@ -105,6 +106,9 @@ static unsigned int verify_ucode_size(int cpu, u32 patch_size,
342 case 0x15:
343 max_size = F15H_MPB_MAX_SIZE;
344 break;
345 + case 0x16:
346 + max_size = F16H_MPB_MAX_SIZE;
347 + break;
348 default:
349 max_size = F1XH_MPB_MAX_SIZE;
350 break;
351 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
352 index c4c6a5c..9ee1787 100644
353 --- a/arch/x86/kernel/ptrace.c
354 +++ b/arch/x86/kernel/ptrace.c
355 @@ -21,6 +21,7 @@
356 #include <linux/signal.h>
357 #include <linux/perf_event.h>
358 #include <linux/hw_breakpoint.h>
359 +#include <linux/module.h>
360
361 #include <asm/uaccess.h>
362 #include <asm/pgtable.h>
363 @@ -165,6 +166,35 @@ static inline bool invalid_selector(u16 value)
364
365 #define FLAG_MASK FLAG_MASK_32
366
367 +/*
368 + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
369 + * when it traps. The previous stack will be directly underneath the saved
370 + * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
371 + *
372 + * Now, if the stack is empty, '&regs->sp' is out of range. In this
373 + * case we try to take the previous stack. To always return a non-null
374 + * stack pointer we fall back to regs as stack if no previous stack
375 + * exists.
376 + *
377 + * This is valid only for kernel mode traps.
378 + */
379 +unsigned long kernel_stack_pointer(struct pt_regs *regs)
380 +{
381 + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
382 + unsigned long sp = (unsigned long)&regs->sp;
383 + struct thread_info *tinfo;
384 +
385 + if (context == (sp & ~(THREAD_SIZE - 1)))
386 + return sp;
387 +
388 + tinfo = (struct thread_info *)context;
389 + if (tinfo->previous_esp)
390 + return tinfo->previous_esp;
391 +
392 + return (unsigned long)regs;
393 +}
394 +EXPORT_SYMBOL_GPL(kernel_stack_pointer);
395 +
396 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
397 {
398 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
399 diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
400 index a10e460..58fc514 100644
401 --- a/arch/x86/kvm/cpuid.h
402 +++ b/arch/x86/kvm/cpuid.h
403 @@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
404 {
405 struct kvm_cpuid_entry2 *best;
406
407 + if (!static_cpu_has(X86_FEATURE_XSAVE))
408 + return 0;
409 +
410 best = kvm_find_cpuid_entry(vcpu, 1, 0);
411 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
412 }
413 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
414 index 2966c84..a201790 100644
415 --- a/arch/x86/kvm/x86.c
416 +++ b/arch/x86/kvm/x86.c
417 @@ -5762,6 +5762,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
418 int pending_vec, max_bits, idx;
419 struct desc_ptr dt;
420
421 + if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
422 + return -EINVAL;
423 +
424 dt.size = sregs->idt.limit;
425 dt.address = sregs->idt.base;
426 kvm_x86_ops->set_idt(vcpu, &dt);
427 diff --git a/block/blk-exec.c b/block/blk-exec.c
428 index 8b6dc5b..f71eac3 100644
429 --- a/block/blk-exec.c
430 +++ b/block/blk-exec.c
431 @@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
432 rq_end_io_fn *done)
433 {
434 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
435 + bool is_pm_resume;
436
437 WARN_ON(irqs_disabled());
438
439 rq->rq_disk = bd_disk;
440 rq->end_io = done;
441 + /*
442 + * need to check this before __blk_run_queue(), because rq can
443 + * be freed before that returns.
444 + */
445 + is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
446
447 spin_lock_irq(q->queue_lock);
448
449 @@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
450 __elv_add_request(q, rq, where);
451 __blk_run_queue(q);
452 /* the queue is stopped so it won't be run */
453 - if (rq->cmd_type == REQ_TYPE_PM_RESUME)
454 + if (is_pm_resume)
455 q->request_fn(q);
456 spin_unlock_irq(q->queue_lock);
457 }
458 diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
459 index 44a4256..08608de 100644
460 --- a/drivers/ata/sata_svw.c
461 +++ b/drivers/ata/sata_svw.c
462 @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
463 return 0;
464 }
465
466 +static int k2_sata_softreset(struct ata_link *link,
467 + unsigned int *class, unsigned long deadline)
468 +{
469 + u8 dmactl;
470 + void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
471 +
472 + dmactl = readb(mmio + ATA_DMA_CMD);
473 +
474 + /* Clear the start bit */
475 + if (dmactl & ATA_DMA_START) {
476 + dmactl &= ~ATA_DMA_START;
477 + writeb(dmactl, mmio + ATA_DMA_CMD);
478 + }
479 +
480 + return ata_sff_softreset(link, class, deadline);
481 +}
482 +
483 +static int k2_sata_hardreset(struct ata_link *link,
484 + unsigned int *class, unsigned long deadline)
485 +{
486 + u8 dmactl;
487 + void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
488 +
489 + dmactl = readb(mmio + ATA_DMA_CMD);
490 +
491 + /* Clear the start bit */
492 + if (dmactl & ATA_DMA_START) {
493 + dmactl &= ~ATA_DMA_START;
494 + writeb(dmactl, mmio + ATA_DMA_CMD);
495 + }
496 +
497 + return sata_sff_hardreset(link, class, deadline);
498 +}
499
500 static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
501 {
502 @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
503
504 static struct ata_port_operations k2_sata_ops = {
505 .inherits = &ata_bmdma_port_ops,
506 + .softreset = k2_sata_softreset,
507 + .hardreset = k2_sata_hardreset,
508 .sff_tf_load = k2_sata_tf_load,
509 .sff_tf_read = k2_sata_tf_read,
510 .sff_check_status = k2_stat_check_status,
511 diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
512 index 74a67e0..fbbd4ed 100644
513 --- a/drivers/base/power/qos.c
514 +++ b/drivers/base/power/qos.c
515 @@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
516 if (ancestor)
517 error = dev_pm_qos_add_request(ancestor, req, value);
518
519 - if (error)
520 + if (error < 0)
521 req->dev = NULL;
522
523 return error;
524 diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
525 index bd2f33e..bc6b64f 100644
526 --- a/drivers/gpu/drm/radeon/radeon_agp.c
527 +++ b/drivers/gpu/drm/radeon/radeon_agp.c
528 @@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
529 /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
530 { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
531 PCI_VENDOR_ID_DELL, 0x00e3, 2},
532 - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
533 + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
534 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
535 PCI_VENDOR_ID_DELL, 0x0149, 1},
536 + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
537 + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
538 + PCI_VENDOR_ID_IBM, 0x0531, 1},
539 /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
540 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
541 0x1025, 0x0061, 1},
542 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
543 index 1dcb76f..ab8ce9f 100644
544 --- a/drivers/hid/hid-ids.h
545 +++ b/drivers/hid/hid-ids.h
546 @@ -296,6 +296,9 @@
547 #define USB_VENDOR_ID_EZKEY 0x0518
548 #define USB_DEVICE_ID_BTC_8193 0x0002
549
550 +#define USB_VENDOR_ID_FREESCALE 0x15A2
551 +#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
552 +
553 #define USB_VENDOR_ID_FRUCTEL 0x25B6
554 #define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002
555
556 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
557 index 991e85c..8865fa3 100644
558 --- a/drivers/hid/usbhid/hid-quirks.c
559 +++ b/drivers/hid/usbhid/hid-quirks.c
560 @@ -70,6 +70,7 @@ static const struct hid_blacklist {
561 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
562 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
563 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
564 + { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
565 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
566 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
567 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
568 diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
569 index 5275887..c44950d 100644
570 --- a/drivers/isdn/gigaset/bas-gigaset.c
571 +++ b/drivers/isdn/gigaset/bas-gigaset.c
572 @@ -617,7 +617,13 @@ static void int_in_work(struct work_struct *work)
573 if (rc == 0)
574 /* success, resubmit interrupt read URB */
575 rc = usb_submit_urb(urb, GFP_ATOMIC);
576 - if (rc != 0 && rc != -ENODEV) {
577 +
578 + switch (rc) {
579 + case 0: /* success */
580 + case -ENODEV: /* device gone */
581 + case -EINVAL: /* URB already resubmitted, or terminal badness */
582 + break;
583 + default: /* failure: try to recover by resetting the device */
584 dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
585 rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
586 if (rc == 0) {
587 @@ -2442,7 +2448,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
588 }
589
590 /* gigaset_suspend
591 - * This function is called before the USB connection is suspended.
592 + * This function is called before the USB connection is suspended
593 + * or before the USB device is reset.
594 + * In the latter case, message == PMSG_ON.
595 */
596 static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
597 {
598 @@ -2498,7 +2506,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
599 del_timer_sync(&ucs->timer_atrdy);
600 del_timer_sync(&ucs->timer_cmd_in);
601 del_timer_sync(&ucs->timer_int_in);
602 - cancel_work_sync(&ucs->int_in_wq);
603 +
604 + /* don't try to cancel int_in_wq from within reset as it
605 + * might be the one requesting the reset
606 + */
607 + if (message.event != PM_EVENT_ON)
608 + cancel_work_sync(&ucs->int_in_wq);
609
610 gig_dbg(DEBUG_SUSPEND, "suspend complete");
611 return 0;
612 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
613 index 67ffa39..4256200 100644
614 --- a/drivers/md/dm.c
615 +++ b/drivers/md/dm.c
616 @@ -754,8 +754,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
617 if (!md_in_flight(md))
618 wake_up(&md->wait);
619
620 + /*
621 + * Run this off this callpath, as drivers could invoke end_io while
622 + * inside their request_fn (and holding the queue lock). Calling
623 + * back into ->request_fn() could deadlock attempting to grab the
624 + * queue lock again.
625 + */
626 if (run_queue)
627 - blk_run_queue(md->queue);
628 + blk_run_queue_async(md->queue);
629
630 /*
631 * dm_put() must be at the end of this function. See the comment above
632 diff --git a/drivers/md/md.c b/drivers/md/md.c
633 index 308e87b..c7b000f 100644
634 --- a/drivers/md/md.c
635 +++ b/drivers/md/md.c
636 @@ -1832,10 +1832,10 @@ retry:
637 memset(bbp, 0xff, PAGE_SIZE);
638
639 for (i = 0 ; i < bb->count ; i++) {
640 - u64 internal_bb = *p++;
641 + u64 internal_bb = p[i];
642 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
643 | BB_LEN(internal_bb));
644 - *bbp++ = cpu_to_le64(store_bb);
645 + bbp[i] = cpu_to_le64(store_bb);
646 }
647 bb->changed = 0;
648 if (read_seqretry(&bb->lock, seq))
649 @@ -7907,9 +7907,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
650 sector_t *first_bad, int *bad_sectors)
651 {
652 int hi;
653 - int lo = 0;
654 + int lo;
655 u64 *p = bb->page;
656 - int rv = 0;
657 + int rv;
658 sector_t target = s + sectors;
659 unsigned seq;
660
661 @@ -7924,7 +7924,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
662
663 retry:
664 seq = read_seqbegin(&bb->lock);
665 -
666 + lo = 0;
667 + rv = 0;
668 hi = bb->count;
669
670 /* Binary search between lo and hi for 'target'
671 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
672 index a48c215..c52d893 100644
673 --- a/drivers/md/raid10.c
674 +++ b/drivers/md/raid10.c
675 @@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
676 */
677 one_write_done(r10_bio);
678 if (dec_rdev)
679 - rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
680 + rdev_dec_pending(rdev, conf->mddev);
681 }
682
683 /*
684 @@ -1287,18 +1287,21 @@ retry_write:
685 blocked_rdev = rrdev;
686 break;
687 }
688 + if (rdev && (test_bit(Faulty, &rdev->flags)
689 + || test_bit(Unmerged, &rdev->flags)))
690 + rdev = NULL;
691 if (rrdev && (test_bit(Faulty, &rrdev->flags)
692 || test_bit(Unmerged, &rrdev->flags)))
693 rrdev = NULL;
694
695 r10_bio->devs[i].bio = NULL;
696 r10_bio->devs[i].repl_bio = NULL;
697 - if (!rdev || test_bit(Faulty, &rdev->flags) ||
698 - test_bit(Unmerged, &rdev->flags)) {
699 +
700 + if (!rdev && !rrdev) {
701 set_bit(R10BIO_Degraded, &r10_bio->state);
702 continue;
703 }
704 - if (test_bit(WriteErrorSeen, &rdev->flags)) {
705 + if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
706 sector_t first_bad;
707 sector_t dev_sector = r10_bio->devs[i].addr;
708 int bad_sectors;
709 @@ -1340,8 +1343,10 @@ retry_write:
710 max_sectors = good_sectors;
711 }
712 }
713 - r10_bio->devs[i].bio = bio;
714 - atomic_inc(&rdev->nr_pending);
715 + if (rdev) {
716 + r10_bio->devs[i].bio = bio;
717 + atomic_inc(&rdev->nr_pending);
718 + }
719 if (rrdev) {
720 r10_bio->devs[i].repl_bio = bio;
721 atomic_inc(&rrdev->nr_pending);
722 @@ -1397,58 +1402,57 @@ retry_write:
723 for (i = 0; i < conf->copies; i++) {
724 struct bio *mbio;
725 int d = r10_bio->devs[i].devnum;
726 - if (!r10_bio->devs[i].bio)
727 - continue;
728 -
729 - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
730 - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
731 - max_sectors);
732 - r10_bio->devs[i].bio = mbio;
733 -
734 - mbio->bi_sector = (r10_bio->devs[i].addr+
735 - choose_data_offset(r10_bio,
736 - conf->mirrors[d].rdev));
737 - mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
738 - mbio->bi_end_io = raid10_end_write_request;
739 - mbio->bi_rw = WRITE | do_sync | do_fua;
740 - mbio->bi_private = r10_bio;
741
742 - atomic_inc(&r10_bio->remaining);
743 - spin_lock_irqsave(&conf->device_lock, flags);
744 - bio_list_add(&conf->pending_bio_list, mbio);
745 - conf->pending_count++;
746 - spin_unlock_irqrestore(&conf->device_lock, flags);
747 - if (!mddev_check_plugged(mddev))
748 - md_wakeup_thread(mddev->thread);
749 -
750 - if (!r10_bio->devs[i].repl_bio)
751 - continue;
752 + if (r10_bio->devs[i].bio) {
753 + struct md_rdev *rdev = conf->mirrors[d].rdev;
754 + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
755 + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
756 + max_sectors);
757 + r10_bio->devs[i].bio = mbio;
758 +
759 + mbio->bi_sector = (r10_bio->devs[i].addr +
760 + choose_data_offset(r10_bio, rdev));
761 + mbio->bi_bdev = rdev->bdev;
762 + mbio->bi_end_io = raid10_end_write_request;
763 + mbio->bi_rw = WRITE | do_sync | do_fua;
764 + mbio->bi_private = r10_bio;
765
766 - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
767 - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
768 - max_sectors);
769 - r10_bio->devs[i].repl_bio = mbio;
770 + atomic_inc(&r10_bio->remaining);
771 + spin_lock_irqsave(&conf->device_lock, flags);
772 + bio_list_add(&conf->pending_bio_list, mbio);
773 + conf->pending_count++;
774 + spin_unlock_irqrestore(&conf->device_lock, flags);
775 + if (!mddev_check_plugged(mddev))
776 + md_wakeup_thread(mddev->thread);
777 + }
778
779 - /* We are actively writing to the original device
780 - * so it cannot disappear, so the replacement cannot
781 - * become NULL here
782 - */
783 - mbio->bi_sector = (r10_bio->devs[i].addr +
784 - choose_data_offset(
785 - r10_bio,
786 - conf->mirrors[d].replacement));
787 - mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
788 - mbio->bi_end_io = raid10_end_write_request;
789 - mbio->bi_rw = WRITE | do_sync | do_fua;
790 - mbio->bi_private = r10_bio;
791 + if (r10_bio->devs[i].repl_bio) {
792 + struct md_rdev *rdev = conf->mirrors[d].replacement;
793 + if (rdev == NULL) {
794 + /* Replacement just got moved to main 'rdev' */
795 + smp_mb();
796 + rdev = conf->mirrors[d].rdev;
797 + }
798 + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
799 + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
800 + max_sectors);
801 + r10_bio->devs[i].repl_bio = mbio;
802 +
803 + mbio->bi_sector = (r10_bio->devs[i].addr +
804 + choose_data_offset(r10_bio, rdev));
805 + mbio->bi_bdev = rdev->bdev;
806 + mbio->bi_end_io = raid10_end_write_request;
807 + mbio->bi_rw = WRITE | do_sync | do_fua;
808 + mbio->bi_private = r10_bio;
809
810 - atomic_inc(&r10_bio->remaining);
811 - spin_lock_irqsave(&conf->device_lock, flags);
812 - bio_list_add(&conf->pending_bio_list, mbio);
813 - conf->pending_count++;
814 - spin_unlock_irqrestore(&conf->device_lock, flags);
815 - if (!mddev_check_plugged(mddev))
816 - md_wakeup_thread(mddev->thread);
817 + atomic_inc(&r10_bio->remaining);
818 + spin_lock_irqsave(&conf->device_lock, flags);
819 + bio_list_add(&conf->pending_bio_list, mbio);
820 + conf->pending_count++;
821 + spin_unlock_irqrestore(&conf->device_lock, flags);
822 + if (!mddev_check_plugged(mddev))
823 + md_wakeup_thread(mddev->thread);
824 + }
825 }
826
827 /* Don't remove the bias on 'remaining' (one_write_done) until
828 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
829 index a50c205..02b7a4a 100644
830 --- a/drivers/mmc/host/sdhci-s3c.c
831 +++ b/drivers/mmc/host/sdhci-s3c.c
832 @@ -656,7 +656,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
833
834 pm_runtime_disable(&pdev->dev);
835
836 - for (ptr = 0; ptr < 3; ptr++) {
837 + for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
838 if (sc->clk_bus[ptr]) {
839 clk_disable(sc->clk_bus[ptr]);
840 clk_put(sc->clk_bus[ptr]);
841 diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
842 index 8f52fc8..5a5cd2a 100644
843 --- a/drivers/mtd/devices/slram.c
844 +++ b/drivers/mtd/devices/slram.c
845 @@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
846
847 if (*(szlength) != '+') {
848 devlength = simple_strtoul(szlength, &buffer, 0);
849 - devlength = handle_unit(devlength, buffer) - devstart;
850 + devlength = handle_unit(devlength, buffer);
851 if (devlength < devstart)
852 goto err_out;
853
854 diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
855 index 64be8f0..d9127e2 100644
856 --- a/drivers/mtd/ofpart.c
857 +++ b/drivers/mtd/ofpart.c
858 @@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
859 nr_parts = plen / sizeof(part[0]);
860
861 *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
862 - if (!pparts)
863 + if (!*pparts)
864 return -ENOMEM;
865
866 names = of_get_property(dp, "partition-names", &plen);
867 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
868 index 86f26a1..25723d8 100644
869 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c
870 +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
871 @@ -519,8 +519,10 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
872 mc->pdev->dev.can.state = new_state;
873
874 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
875 + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
876 +
877 peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
878 - skb->tstamp = timeval_to_ktime(tv);
879 + hwts->hwtstamp = timeval_to_ktime(tv);
880 }
881
882 netif_rx(skb);
883 @@ -605,6 +607,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
884 struct sk_buff *skb;
885 struct can_frame *cf;
886 struct timeval tv;
887 + struct skb_shared_hwtstamps *hwts;
888
889 skb = alloc_can_skb(mc->netdev, &cf);
890 if (!skb)
891 @@ -652,7 +655,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
892
893 /* convert timestamp into kernel time */
894 peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
895 - skb->tstamp = timeval_to_ktime(tv);
896 + hwts = skb_hwtstamps(skb);
897 + hwts->hwtstamp = timeval_to_ktime(tv);
898
899 /* push the skb */
900 netif_rx(skb);
901 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
902 index 629c4ba..c95913a 100644
903 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
904 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
905 @@ -532,6 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
906 struct can_frame *can_frame;
907 struct sk_buff *skb;
908 struct timeval tv;
909 + struct skb_shared_hwtstamps *hwts;
910
911 skb = alloc_can_skb(netdev, &can_frame);
912 if (!skb)
913 @@ -549,7 +550,8 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
914 memcpy(can_frame->data, rx->data, can_frame->can_dlc);
915
916 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
917 - skb->tstamp = timeval_to_ktime(tv);
918 + hwts = skb_hwtstamps(skb);
919 + hwts->hwtstamp = timeval_to_ktime(tv);
920
921 netif_rx(skb);
922 netdev->stats.rx_packets++;
923 @@ -570,6 +572,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
924 u8 err_mask = 0;
925 struct sk_buff *skb;
926 struct timeval tv;
927 + struct skb_shared_hwtstamps *hwts;
928
929 /* nothing should be sent while in BUS_OFF state */
930 if (dev->can.state == CAN_STATE_BUS_OFF)
931 @@ -664,7 +667,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
932 dev->can.state = new_state;
933
934 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
935 - skb->tstamp = timeval_to_ktime(tv);
936 + hwts = skb_hwtstamps(skb);
937 + hwts->hwtstamp = timeval_to_ktime(tv);
938 netif_rx(skb);
939 netdev->stats.rx_packets++;
940 netdev->stats.rx_bytes += can_frame->can_dlc;
941 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
942 index 90e41db..dbf37e4 100644
943 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
944 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
945 @@ -70,6 +70,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
946
947 switch (hw->device_id) {
948 case IXGBE_DEV_ID_X540T:
949 + case IXGBE_DEV_ID_X540T1:
950 return 0;
951 case IXGBE_DEV_ID_82599_T3_LOM:
952 return 0;
953 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
954 index 4326f74..1fff36d 100644
955 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
956 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
957 @@ -114,6 +114,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
958 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
959 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
960 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
961 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
962 /* required last entry */
963 {0, }
964 };
965 @@ -7010,6 +7011,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
966 is_wol_supported = 1;
967 break;
968 case IXGBE_DEV_ID_X540T:
969 + case IXGBE_DEV_ID_X540T1:
970 /* check eeprom to see if enabled wol */
971 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
972 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
973 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
974 index 400f86a..0722f33 100644
975 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
976 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
977 @@ -65,6 +65,7 @@
978 #define IXGBE_DEV_ID_82599_LS 0x154F
979 #define IXGBE_DEV_ID_X540T 0x1528
980 #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
981 +#define IXGBE_DEV_ID_X540T1 0x1560
982
983 /* VF Device IDs */
984 #define IXGBE_DEV_ID_82599_VF 0x10ED
985 diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
986 index a5f7bce..7a2cf52 100644
987 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
988 +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
989 @@ -1352,6 +1352,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
990 vif_priv->ctx = ctx;
991 ctx->vif = vif;
992
993 + /*
994 + * In SNIFFER device type, the firmware reports the FCS to
995 + * the host, rather than snipping it off. Unfortunately,
996 + * mac80211 doesn't (yet) provide a per-packet flag for
997 + * this, so that we have to set the hardware flag based
998 + * on the interfaces added. As the monitor interface can
999 + * only be present by itself, and will be removed before
1000 + * other interfaces are added, this is safe.
1001 + */
1002 + if (vif->type == NL80211_IFTYPE_MONITOR)
1003 + priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
1004 + else
1005 + priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
1006 +
1007 err = iwl_setup_interface(priv, ctx);
1008 if (!err || reset)
1009 goto out;
1010 diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
1011 index 6baf8de..b9d6152 100644
1012 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c
1013 +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
1014 @@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
1015 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
1016 {
1017 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1018 - u16 rd_ptr, wr_ptr;
1019 - int n_bd = trans_pcie->txq[txq_id].q.n_bd;
1020
1021 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
1022 WARN_ONCE(1, "queue %d not used", txq_id);
1023 return;
1024 }
1025
1026 - rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
1027 - wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
1028 -
1029 - WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
1030 - txq_id, rd_ptr, wr_ptr);
1031 -
1032 iwl_txq_set_inactive(trans, txq_id);
1033 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1034 }
1035 diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
1036 index 565527a..95382f1 100644
1037 --- a/drivers/net/wireless/mwifiex/cmdevt.c
1038 +++ b/drivers/net/wireless/mwifiex/cmdevt.c
1039 @@ -887,9 +887,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
1040 return;
1041 }
1042 cmd_node = adapter->curr_cmd;
1043 - if (cmd_node->wait_q_enabled)
1044 - adapter->cmd_wait_q.status = -ETIMEDOUT;
1045 -
1046 if (cmd_node) {
1047 adapter->dbg.timeout_cmd_id =
1048 adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
1049 @@ -935,6 +932,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
1050
1051 dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
1052 adapter->ps_mode, adapter->ps_state);
1053 +
1054 + if (cmd_node->wait_q_enabled) {
1055 + adapter->cmd_wait_q.status = -ETIMEDOUT;
1056 + wake_up_interruptible(&adapter->cmd_wait_q.wait);
1057 + mwifiex_cancel_pending_ioctl(adapter);
1058 + /* reset cmd_sent flag to unblock new commands */
1059 + adapter->cmd_sent = false;
1060 + }
1061 }
1062 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
1063 mwifiex_init_fw_complete(adapter);
1064 diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
1065 index fc8a9bf..82cf0fa 100644
1066 --- a/drivers/net/wireless/mwifiex/sdio.c
1067 +++ b/drivers/net/wireless/mwifiex/sdio.c
1068 @@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
1069 struct sdio_mmc_card *card;
1070 struct mwifiex_adapter *adapter;
1071 mmc_pm_flag_t pm_flag = 0;
1072 - int hs_actived = 0;
1073 int i;
1074 int ret = 0;
1075
1076 @@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
1077 adapter = card->adapter;
1078
1079 /* Enable the Host Sleep */
1080 - hs_actived = mwifiex_enable_hs(adapter);
1081 - if (hs_actived) {
1082 - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
1083 - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
1084 + if (!mwifiex_enable_hs(adapter)) {
1085 + dev_err(adapter->dev, "cmd: failed to suspend\n");
1086 + return -EFAULT;
1087 }
1088
1089 + dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
1090 + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
1091 +
1092 /* Indicate device suspended */
1093 adapter->is_suspended = true;
1094
1095 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
1096 index 9970c2b..b7e6607 100644
1097 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
1098 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
1099 @@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
1100 /*=== Customer ID ===*/
1101 /****** 8188CU ********/
1102 {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
1103 + {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
1104 {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
1105 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
1106 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
1107 diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
1108 index d606f52..83ba14e 100644
1109 --- a/drivers/nfc/pn533.c
1110 +++ b/drivers/nfc/pn533.c
1111 @@ -1618,11 +1618,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
1112 static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
1113 u8 *params, int params_len)
1114 {
1115 - struct pn533_cmd_jump_dep *cmd;
1116 struct pn533_cmd_jump_dep_response *resp;
1117 struct nfc_target nfc_target;
1118 u8 target_gt_len;
1119 int rc;
1120 + struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg;
1121 + u8 active = cmd->active;
1122 +
1123 + kfree(arg);
1124
1125 if (params_len == -ENOENT) {
1126 nfc_dev_dbg(&dev->interface->dev, "");
1127 @@ -1644,7 +1647,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
1128 }
1129
1130 resp = (struct pn533_cmd_jump_dep_response *) params;
1131 - cmd = (struct pn533_cmd_jump_dep *) arg;
1132 rc = resp->status & PN533_CMD_RET_MASK;
1133 if (rc != PN533_CMD_RET_SUCCESS) {
1134 nfc_dev_err(&dev->interface->dev,
1135 @@ -1674,7 +1676,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
1136 if (rc == 0)
1137 rc = nfc_dep_link_is_up(dev->nfc_dev,
1138 dev->nfc_dev->targets[0].idx,
1139 - !cmd->active, NFC_RF_INITIATOR);
1140 + !active, NFC_RF_INITIATOR);
1141
1142 return 0;
1143 }
1144 @@ -1759,12 +1761,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
1145 rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
1146 dev->in_maxlen, pn533_in_dep_link_up_complete,
1147 cmd, GFP_KERNEL);
1148 - if (rc)
1149 - goto out;
1150 -
1151 -
1152 -out:
1153 - kfree(cmd);
1154 + if (rc < 0)
1155 + kfree(cmd);
1156
1157 return rc;
1158 }
1159 @@ -2018,8 +2016,12 @@ error:
1160 static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
1161 u8 *params, int params_len)
1162 {
1163 + struct sk_buff *skb_out = arg;
1164 +
1165 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1166
1167 + dev_kfree_skb(skb_out);
1168 +
1169 if (params_len < 0) {
1170 nfc_dev_err(&dev->interface->dev,
1171 "Error %d when sending data",
1172 @@ -2057,7 +2059,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
1173
1174 rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
1175 dev->in_maxlen, pn533_tm_send_complete,
1176 - NULL, GFP_KERNEL);
1177 + skb, GFP_KERNEL);
1178 if (rc) {
1179 nfc_dev_err(&dev->interface->dev,
1180 "Error %d when trying to send data", rc);
1181 diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
1182 index 7a0431c..94483c9 100644
1183 --- a/drivers/scsi/isci/request.c
1184 +++ b/drivers/scsi/isci/request.c
1185 @@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1186 frame_index,
1187 (void **)&frame_buffer);
1188
1189 - sci_controller_copy_sata_response(&ireq->stp.req,
1190 + sci_controller_copy_sata_response(&ireq->stp.rsp,
1191 frame_header,
1192 frame_buffer);
1193
1194 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1195 index 101b41c..82e1fde3 100644
1196 --- a/fs/ext4/resize.c
1197 +++ b/fs/ext4/resize.c
1198 @@ -979,8 +979,6 @@ static void update_backups(struct super_block *sb,
1199 goto exit_err;
1200 }
1201
1202 - ext4_superblock_csum_set(sb);
1203 -
1204 while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
1205 struct buffer_head *bh;
1206
1207 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
1208 index 5602d73..af321a6 100644
1209 --- a/fs/fs-writeback.c
1210 +++ b/fs/fs-writeback.c
1211 @@ -228,6 +228,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1212 static void inode_sync_complete(struct inode *inode)
1213 {
1214 inode->i_state &= ~I_SYNC;
1215 + /* If inode is clean an unused, put it into LRU now... */
1216 + inode_add_lru(inode);
1217 /* Waiters must see I_SYNC cleared before being woken up */
1218 smp_mb();
1219 wake_up_bit(&inode->i_state, __I_SYNC);
1220 diff --git a/fs/inode.c b/fs/inode.c
1221 index ac8d904..7c14897 100644
1222 --- a/fs/inode.c
1223 +++ b/fs/inode.c
1224 @@ -408,6 +408,19 @@ static void inode_lru_list_add(struct inode *inode)
1225 spin_unlock(&inode->i_sb->s_inode_lru_lock);
1226 }
1227
1228 +/*
1229 + * Add inode to LRU if needed (inode is unused and clean).
1230 + *
1231 + * Needs inode->i_lock held.
1232 + */
1233 +void inode_add_lru(struct inode *inode)
1234 +{
1235 + if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
1236 + !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
1237 + inode_lru_list_add(inode);
1238 +}
1239 +
1240 +
1241 static void inode_lru_list_del(struct inode *inode)
1242 {
1243 spin_lock(&inode->i_sb->s_inode_lru_lock);
1244 @@ -1390,8 +1403,7 @@ static void iput_final(struct inode *inode)
1245
1246 if (!drop && (sb->s_flags & MS_ACTIVE)) {
1247 inode->i_state |= I_REFERENCED;
1248 - if (!(inode->i_state & (I_DIRTY|I_SYNC)))
1249 - inode_lru_list_add(inode);
1250 + inode_add_lru(inode);
1251 spin_unlock(&inode->i_lock);
1252 return;
1253 }
1254 diff --git a/fs/internal.h b/fs/internal.h
1255 index 371bcc4..52813bd 100644
1256 --- a/fs/internal.h
1257 +++ b/fs/internal.h
1258 @@ -110,6 +110,7 @@ extern int open_check_o_direct(struct file *f);
1259 * inode.c
1260 */
1261 extern spinlock_t inode_sb_list_lock;
1262 +extern void inode_add_lru(struct inode *inode);
1263
1264 /*
1265 * fs-writeback.c
1266 diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
1267 index 78b7f84..7f5120b 100644
1268 --- a/fs/jbd/transaction.c
1269 +++ b/fs/jbd/transaction.c
1270 @@ -1961,7 +1961,9 @@ retry:
1271 spin_unlock(&journal->j_list_lock);
1272 jbd_unlock_bh_state(bh);
1273 spin_unlock(&journal->j_state_lock);
1274 + unlock_buffer(bh);
1275 log_wait_commit(journal, tid);
1276 + lock_buffer(bh);
1277 goto retry;
1278 }
1279 /*
1280 diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
1281 index db3889b..8608f87 100644
1282 --- a/fs/jffs2/file.c
1283 +++ b/fs/jffs2/file.c
1284 @@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
1285 struct page *pg;
1286 struct inode *inode = mapping->host;
1287 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
1288 + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
1289 + struct jffs2_raw_inode ri;
1290 + uint32_t alloc_len = 0;
1291 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1292 uint32_t pageofs = index << PAGE_CACHE_SHIFT;
1293 int ret = 0;
1294
1295 + jffs2_dbg(1, "%s()\n", __func__);
1296 +
1297 + if (pageofs > inode->i_size) {
1298 + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
1299 + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
1300 + if (ret)
1301 + return ret;
1302 + }
1303 +
1304 + mutex_lock(&f->sem);
1305 pg = grab_cache_page_write_begin(mapping, index, flags);
1306 - if (!pg)
1307 + if (!pg) {
1308 + if (alloc_len)
1309 + jffs2_complete_reservation(c);
1310 + mutex_unlock(&f->sem);
1311 return -ENOMEM;
1312 + }
1313 *pagep = pg;
1314
1315 - jffs2_dbg(1, "%s()\n", __func__);
1316 -
1317 - if (pageofs > inode->i_size) {
1318 + if (alloc_len) {
1319 /* Make new hole frag from old EOF to new page */
1320 - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
1321 - struct jffs2_raw_inode ri;
1322 struct jffs2_full_dnode *fn;
1323 - uint32_t alloc_len;
1324
1325 jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
1326 (unsigned int)inode->i_size, pageofs);
1327
1328 - ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
1329 - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
1330 - if (ret)
1331 - goto out_page;
1332 -
1333 - mutex_lock(&f->sem);
1334 memset(&ri, 0, sizeof(ri));
1335
1336 ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1337 @@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
1338 if (IS_ERR(fn)) {
1339 ret = PTR_ERR(fn);
1340 jffs2_complete_reservation(c);
1341 - mutex_unlock(&f->sem);
1342 goto out_page;
1343 }
1344 ret = jffs2_add_full_dnode_to_inode(c, f, fn);
1345 @@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
1346 jffs2_mark_node_obsolete(c, fn->raw);
1347 jffs2_free_full_dnode(fn);
1348 jffs2_complete_reservation(c);
1349 - mutex_unlock(&f->sem);
1350 goto out_page;
1351 }
1352 jffs2_complete_reservation(c);
1353 inode->i_size = pageofs;
1354 - mutex_unlock(&f->sem);
1355 }
1356
1357 /*
1358 @@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
1359 * case of a short-copy.
1360 */
1361 if (!PageUptodate(pg)) {
1362 - mutex_lock(&f->sem);
1363 ret = jffs2_do_readpage_nolock(inode, pg);
1364 - mutex_unlock(&f->sem);
1365 if (ret)
1366 goto out_page;
1367 }
1368 + mutex_unlock(&f->sem);
1369 jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
1370 return ret;
1371
1372 out_page:
1373 unlock_page(pg);
1374 page_cache_release(pg);
1375 + mutex_unlock(&f->sem);
1376 return ret;
1377 }
1378
1379 diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
1380 index 0b311bc..6a37656 100644
1381 --- a/fs/pstore/ram.c
1382 +++ b/fs/pstore/ram.c
1383 @@ -406,7 +406,7 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
1384 goto fail_init_fprz;
1385
1386 if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
1387 - pr_err("memory size too small, minimum is %lu\n",
1388 + pr_err("memory size too small, minimum is %zu\n",
1389 cxt->console_size + cxt->record_size +
1390 cxt->ftrace_size);
1391 goto fail_cnt;
1392 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
1393 index af1cbaf..c5c35e6 100644
1394 --- a/include/drm/drm_pciids.h
1395 +++ b/include/drm/drm_pciids.h
1396 @@ -210,6 +210,7 @@
1397 {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
1398 {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
1399 {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
1400 + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
1401 {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
1402 {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
1403 {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1404 diff --git a/kernel/futex.c b/kernel/futex.c
1405 index 20ef219..19eb089 100644
1406 --- a/kernel/futex.c
1407 +++ b/kernel/futex.c
1408 @@ -843,6 +843,9 @@ static void wake_futex(struct futex_q *q)
1409 {
1410 struct task_struct *p = q->task;
1411
1412 + if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1413 + return;
1414 +
1415 /*
1416 * We set q->lock_ptr = NULL _before_ we wake up the task. If
1417 * a non-futex wake up happens on another CPU then the task
1418 @@ -1078,6 +1081,10 @@ retry_private:
1419
1420 plist_for_each_entry_safe(this, next, head, list) {
1421 if (match_futex (&this->key, &key1)) {
1422 + if (this->pi_state || this->rt_waiter) {
1423 + ret = -EINVAL;
1424 + goto out_unlock;
1425 + }
1426 wake_futex(this);
1427 if (++ret >= nr_wake)
1428 break;
1429 @@ -1090,6 +1097,10 @@ retry_private:
1430 op_ret = 0;
1431 plist_for_each_entry_safe(this, next, head, list) {
1432 if (match_futex (&this->key, &key2)) {
1433 + if (this->pi_state || this->rt_waiter) {
1434 + ret = -EINVAL;
1435 + goto out_unlock;
1436 + }
1437 wake_futex(this);
1438 if (++op_ret >= nr_wake2)
1439 break;
1440 @@ -1098,6 +1109,7 @@ retry_private:
1441 ret += op_ret;
1442 }
1443
1444 +out_unlock:
1445 double_unlock_hb(hb1, hb2);
1446 out_put_keys:
1447 put_futex_key(&key2);
1448 @@ -1387,9 +1399,13 @@ retry_private:
1449 /*
1450 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1451 * be paired with each other and no other futex ops.
1452 + *
1453 + * We should never be requeueing a futex_q with a pi_state,
1454 + * which is awaiting a futex_unlock_pi().
1455 */
1456 if ((requeue_pi && !this->rt_waiter) ||
1457 - (!requeue_pi && this->rt_waiter)) {
1458 + (!requeue_pi && this->rt_waiter) ||
1459 + this->pi_state) {
1460 ret = -EINVAL;
1461 break;
1462 }
1463 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
1464 index 4b1dfba..775fa0f 100644
1465 --- a/kernel/watchdog.c
1466 +++ b/kernel/watchdog.c
1467 @@ -113,7 +113,7 @@ static unsigned long get_timestamp(int this_cpu)
1468 return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
1469 }
1470
1471 -static unsigned long get_sample_period(void)
1472 +static u64 get_sample_period(void)
1473 {
1474 /*
1475 * convert watchdog_thresh from seconds to ns
1476 @@ -122,7 +122,7 @@ static unsigned long get_sample_period(void)
1477 * and hard thresholds) to increment before the
1478 * hardlockup detector generates a warning
1479 */
1480 - return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
1481 + return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
1482 }
1483
1484 /* Commands for resetting the watchdog */
1485 diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
1486 index 29f9862..280405b 100644
1487 --- a/lib/mpi/longlong.h
1488 +++ b/lib/mpi/longlong.h
1489 @@ -703,7 +703,14 @@ do { \
1490 ************** MIPS *****************
1491 ***************************************/
1492 #if defined(__mips__) && W_TYPE_SIZE == 32
1493 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
1494 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
1495 +#define umul_ppmm(w1, w0, u, v) \
1496 +do { \
1497 + UDItype __ll = (UDItype)(u) * (v); \
1498 + w1 = __ll >> 32; \
1499 + w0 = __ll; \
1500 +} while (0)
1501 +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
1502 #define umul_ppmm(w1, w0, u, v) \
1503 __asm__ ("multu %2,%3" \
1504 : "=l" ((USItype)(w0)), \
1505 @@ -728,7 +735,15 @@ do { \
1506 ************** MIPS/64 **************
1507 ***************************************/
1508 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
1509 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
1510 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
1511 +#define umul_ppmm(w1, w0, u, v) \
1512 +do { \
1513 + typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
1514 + __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
1515 + w1 = __ll >> 64; \
1516 + w0 = __ll; \
1517 +} while (0)
1518 +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7
1519 #define umul_ppmm(w1, w0, u, v) \
1520 __asm__ ("dmultu %2,%3" \
1521 : "=l" ((UDItype)(w0)), \
1522 diff --git a/mm/vmscan.c b/mm/vmscan.c
1523 index a018dfc..40db7d1 100644
1524 --- a/mm/vmscan.c
1525 +++ b/mm/vmscan.c
1526 @@ -2176,9 +2176,12 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
1527 * Throttle direct reclaimers if backing storage is backed by the network
1528 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
1529 * depleted. kswapd will continue to make progress and wake the processes
1530 - * when the low watermark is reached
1531 + * when the low watermark is reached.
1532 + *
1533 + * Returns true if a fatal signal was delivered during throttling. If this
1534 + * happens, the page allocator should not consider triggering the OOM killer.
1535 */
1536 -static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
1537 +static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
1538 nodemask_t *nodemask)
1539 {
1540 struct zone *zone;
1541 @@ -2193,13 +2196,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
1542 * processes to block on log_wait_commit().
1543 */
1544 if (current->flags & PF_KTHREAD)
1545 - return;
1546 + goto out;
1547 +
1548 + /*
1549 + * If a fatal signal is pending, this process should not throttle.
1550 + * It should return quickly so it can exit and free its memory
1551 + */
1552 + if (fatal_signal_pending(current))
1553 + goto out;
1554
1555 /* Check if the pfmemalloc reserves are ok */
1556 first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
1557 pgdat = zone->zone_pgdat;
1558 if (pfmemalloc_watermark_ok(pgdat))
1559 - return;
1560 + goto out;
1561
1562 /* Account for the throttling */
1563 count_vm_event(PGSCAN_DIRECT_THROTTLE);
1564 @@ -2215,12 +2225,20 @@ static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
1565 if (!(gfp_mask & __GFP_FS)) {
1566 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
1567 pfmemalloc_watermark_ok(pgdat), HZ);
1568 - return;
1569 +
1570 + goto check_pending;
1571 }
1572
1573 /* Throttle until kswapd wakes the process */
1574 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
1575 pfmemalloc_watermark_ok(pgdat));
1576 +
1577 +check_pending:
1578 + if (fatal_signal_pending(current))
1579 + return true;
1580 +
1581 +out:
1582 + return false;
1583 }
1584
1585 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1586 @@ -2242,13 +2260,12 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1587 .gfp_mask = sc.gfp_mask,
1588 };
1589
1590 - throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
1591 -
1592 /*
1593 - * Do not enter reclaim if fatal signal is pending. 1 is returned so
1594 - * that the page allocator does not consider triggering OOM
1595 + * Do not enter reclaim if fatal signal was delivered while throttled.
1596 + * 1 is returned so that the page allocator does not OOM kill at this
1597 + * point.
1598 */
1599 - if (fatal_signal_pending(current))
1600 + if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
1601 return 1;
1602
1603 trace_mm_vmscan_direct_reclaim_begin(order,
1604 diff --git a/net/can/bcm.c b/net/can/bcm.c
1605 index 151b773..3910c1f 100644
1606 --- a/net/can/bcm.c
1607 +++ b/net/can/bcm.c
1608 @@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1609 op->sk = sk;
1610 op->ifindex = ifindex;
1611
1612 + /* ifindex for timeout events w/o previous frame reception */
1613 + op->rx_ifindex = ifindex;
1614 +
1615 /* initialize uninitialized (kzalloc) structure */
1616 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1617 op->timer.function = bcm_rx_timeout_handler;
1618 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
1619 index 7260717..20bb371 100644
1620 --- a/net/core/net-sysfs.c
1621 +++ b/net/core/net-sysfs.c
1622 @@ -417,6 +417,17 @@ static struct attribute_group netstat_group = {
1623 .name = "statistics",
1624 .attrs = netstat_attrs,
1625 };
1626 +
1627 +#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
1628 +static struct attribute *wireless_attrs[] = {
1629 + NULL
1630 +};
1631 +
1632 +static struct attribute_group wireless_group = {
1633 + .name = "wireless",
1634 + .attrs = wireless_attrs,
1635 +};
1636 +#endif
1637 #endif /* CONFIG_SYSFS */
1638
1639 #ifdef CONFIG_RPS
1640 @@ -1397,6 +1408,15 @@ int netdev_register_kobject(struct net_device *net)
1641 groups++;
1642
1643 *groups++ = &netstat_group;
1644 +
1645 +#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
1646 + if (net->ieee80211_ptr)
1647 + *groups++ = &wireless_group;
1648 +#if IS_ENABLED(CONFIG_WIRELESS_EXT)
1649 + else if (net->wireless_handlers)
1650 + *groups++ = &wireless_group;
1651 +#endif
1652 +#endif
1653 #endif /* CONFIG_SYSFS */
1654
1655 error = device_add(dev);
1656 diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
1657 index 327aa07..a5894dd 100644
1658 --- a/net/mac80211/ibss.c
1659 +++ b/net/mac80211/ibss.c
1660 @@ -1117,10 +1117,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1661
1662 mutex_lock(&sdata->u.ibss.mtx);
1663
1664 - sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
1665 - memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
1666 - sdata->u.ibss.ssid_len = 0;
1667 -
1668 active_ibss = ieee80211_sta_active_ibss(sdata);
1669
1670 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
1671 @@ -1141,6 +1137,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1672 }
1673 }
1674
1675 + ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
1676 + memset(ifibss->bssid, 0, ETH_ALEN);
1677 + ifibss->ssid_len = 0;
1678 +
1679 sta_info_flush(sdata->local, sdata);
1680
1681 spin_lock_bh(&ifibss->incomplete_lock);
1682 diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
1683 index 7dd983a..83a3592 100644
1684 --- a/net/nfc/llcp/llcp.c
1685 +++ b/net/nfc/llcp/llcp.c
1686 @@ -1190,7 +1190,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
1687 local->remote_miu = LLCP_DEFAULT_MIU;
1688 local->remote_lto = LLCP_DEFAULT_LTO;
1689
1690 - list_add(&llcp_devices, &local->list);
1691 + list_add(&local->list, &llcp_devices);
1692
1693 return 0;
1694
1695 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
1696 index 2bb9bee..10fc710 100644
1697 --- a/sound/pci/hda/patch_cirrus.c
1698 +++ b/sound/pci/hda/patch_cirrus.c
1699 @@ -461,6 +461,7 @@ static int parse_output(struct hda_codec *codec)
1700 memcpy(cfg->speaker_pins, cfg->line_out_pins,
1701 sizeof(cfg->speaker_pins));
1702 cfg->line_outs = 0;
1703 + memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins));
1704 }
1705
1706 return 0;
1707 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1708 index f6b5995..e1b7061 100644
1709 --- a/sound/pci/hda/patch_realtek.c
1710 +++ b/sound/pci/hda/patch_realtek.c
1711 @@ -4280,6 +4280,7 @@ static void alc_auto_init_std(struct hda_codec *codec)
1712 ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir))
1713
1714 static const struct snd_pci_quirk beep_white_list[] = {
1715 + SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
1716 SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
1717 SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
1718 SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
1719 @@ -7089,6 +7090,9 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
1720 { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
1721 { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
1722 { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
1723 + { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
1724 + { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
1725 + { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
1726 { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
1727 .patch = patch_alc861 },
1728 { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
1729 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
1730 index c83f614..eeefbce 100644
1731 --- a/sound/usb/midi.c
1732 +++ b/sound/usb/midi.c
1733 @@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint {
1734 struct snd_usb_midi_out_endpoint* ep;
1735 struct snd_rawmidi_substream *substream;
1736 int active;
1737 + bool autopm_reference;
1738 uint8_t cable; /* cable number << 4 */
1739 uint8_t state;
1740 #define STATE_UNKNOWN 0
1741 @@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
1742 return -ENXIO;
1743 }
1744 err = usb_autopm_get_interface(umidi->iface);
1745 - if (err < 0)
1746 + port->autopm_reference = err >= 0;
1747 + if (err < 0 && err != -EACCES)
1748 return -EIO;
1749 substream->runtime->private_data = port;
1750 port->state = STATE_UNKNOWN;
1751 @@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
1752 static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
1753 {
1754 struct snd_usb_midi* umidi = substream->rmidi->private_data;
1755 + struct usbmidi_out_port *port = substream->runtime->private_data;
1756
1757 substream_open(substream, 0);
1758 - usb_autopm_put_interface(umidi->iface);
1759 + if (port->autopm_reference)
1760 + usb_autopm_put_interface(umidi->iface);
1761 return 0;
1762 }
1763
