Contents of /genpatches-2.6/tags/2.6.32-47/1028_linux-2.6.32.29.patch (Gentoo linux-patches repository)


Revision 2037 - Wed Dec 28 14:38:55 2011 UTC by psomas
File size: 156368 byte(s)
2.6.32-47 release
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index 5f6aa11..c840e7d 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -878,6 +878,7 @@ and is between 256 and 4096 characters. It is defined in the file
6 i8042.panicblink=
7 [HW] Frequency with which keyboard LEDs should blink
8 when kernel panics (default is 0.5 sec)
9 + i8042.notimeout [HW] Ignore timeout condition signalled by controller
10 i8042.reset [HW] Reset the controller during init and cleanup
11 i8042.unlock [HW] Unlock (ignore) the keylock
12
13 @@ -2577,6 +2578,10 @@ and is between 256 and 4096 characters. It is defined in the file
14 disables clocksource verification at runtime.
15 Used to enable high-resolution timer mode on older
16 hardware, and in virtualized environment.
17 + [x86] noirqtime: Do not use TSC to do irq accounting.
18 + Used to disable IRQ_TIME_ACCOUNTING at runtime on any
19 + platforms where RDTSC is slow and this accounting
20 + can add overhead.
21
22 turbografx.map[2|3]= [HW,JOY]
23 TurboGraFX parallel port interface
24 diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
25 index 927a381..1ff461e 100644
26 --- a/arch/ia64/include/asm/system.h
27 +++ b/arch/ia64/include/asm/system.h
28 @@ -281,10 +281,6 @@ void cpu_idle_wait(void);
29
30 void default_idle(void);
31
32 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING
33 -extern void account_system_vtime(struct task_struct *);
34 -#endif
35 -
36 #endif /* __KERNEL__ */
37
38 #endif /* __ASSEMBLY__ */
39 diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
40 index df971fa..4896ed0 100644
41 --- a/arch/parisc/kernel/firmware.c
42 +++ b/arch/parisc/kernel/firmware.c
43 @@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
44 unsigned int i;
45 unsigned long flags;
46
47 - for (i = 0; i < count && i < 79;) {
48 + for (i = 0; i < count;) {
49 switch(str[i]) {
50 case '\n':
51 iodc_dbuf[i+0] = '\r';
52 iodc_dbuf[i+1] = '\n';
53 i += 2;
54 goto print;
55 - case '\b': /* BS */
56 - i--; /* overwrite last */
57 default:
58 iodc_dbuf[i] = str[i];
59 i++;
60 @@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
61 }
62 }
63
64 - /* if we're at the end of line, and not already inserting a newline,
65 - * insert one anyway. iodc console doesn't claim to support >79 char
66 - * lines. don't account for this in the return value.
67 - */
68 - if (i == 79 && iodc_dbuf[i-1] != '\n') {
69 - iodc_dbuf[i+0] = '\r';
70 - iodc_dbuf[i+1] = '\n';
71 - }
72 -
73 print:
74 spin_lock_irqsave(&pdc_lock, flags);
75 real32_call(PAGE0->mem_cons.iodc_io,
76 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
77 index bb8e006..094a12a 100644
78 --- a/arch/powerpc/include/asm/system.h
79 +++ b/arch/powerpc/include/asm/system.h
80 @@ -540,10 +540,6 @@ extern void reloc_got2(unsigned long);
81
82 #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
83
84 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING
85 -extern void account_system_vtime(struct task_struct *);
86 -#endif
87 -
88 extern struct dentry *powerpc_debugfs_root;
89
90 #endif /* __KERNEL__ */
91 diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
92 index 55cba4a..f8cd9fb 100644
93 --- a/arch/powerpc/kernel/cpu_setup_6xx.S
94 +++ b/arch/powerpc/kernel/cpu_setup_6xx.S
95 @@ -18,7 +18,7 @@
96 #include <asm/mmu.h>
97
98 _GLOBAL(__setup_cpu_603)
99 - mflr r4
100 + mflr r5
101 BEGIN_MMU_FTR_SECTION
102 li r10,0
103 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
104 @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
105 bl __init_fpu_registers
106 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
107 bl setup_common_caches
108 - mtlr r4
109 + mtlr r5
110 blr
111 _GLOBAL(__setup_cpu_604)
112 - mflr r4
113 + mflr r5
114 bl setup_common_caches
115 bl setup_604_hid0
116 - mtlr r4
117 + mtlr r5
118 blr
119 _GLOBAL(__setup_cpu_750)
120 - mflr r4
121 + mflr r5
122 bl __init_fpu_registers
123 bl setup_common_caches
124 bl setup_750_7400_hid0
125 - mtlr r4
126 + mtlr r5
127 blr
128 _GLOBAL(__setup_cpu_750cx)
129 - mflr r4
130 + mflr r5
131 bl __init_fpu_registers
132 bl setup_common_caches
133 bl setup_750_7400_hid0
134 bl setup_750cx
135 - mtlr r4
136 + mtlr r5
137 blr
138 _GLOBAL(__setup_cpu_750fx)
139 - mflr r4
140 + mflr r5
141 bl __init_fpu_registers
142 bl setup_common_caches
143 bl setup_750_7400_hid0
144 bl setup_750fx
145 - mtlr r4
146 + mtlr r5
147 blr
148 _GLOBAL(__setup_cpu_7400)
149 - mflr r4
150 + mflr r5
151 bl __init_fpu_registers
152 bl setup_7400_workarounds
153 bl setup_common_caches
154 bl setup_750_7400_hid0
155 - mtlr r4
156 + mtlr r5
157 blr
158 _GLOBAL(__setup_cpu_7410)
159 - mflr r4
160 + mflr r5
161 bl __init_fpu_registers
162 bl setup_7410_workarounds
163 bl setup_common_caches
164 bl setup_750_7400_hid0
165 li r3,0
166 mtspr SPRN_L2CR2,r3
167 - mtlr r4
168 + mtlr r5
169 blr
170 _GLOBAL(__setup_cpu_745x)
171 - mflr r4
172 + mflr r5
173 bl setup_common_caches
174 bl setup_745x_specifics
175 - mtlr r4
176 + mtlr r5
177 blr
178
179 /* Enable caches for 603's, 604, 750 & 7400 */
180 @@ -194,10 +194,10 @@ setup_750cx:
181 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
182 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
183 bnelr
184 - lwz r6,CPU_SPEC_FEATURES(r5)
185 + lwz r6,CPU_SPEC_FEATURES(r4)
186 li r7,CPU_FTR_CAN_NAP
187 andc r6,r6,r7
188 - stw r6,CPU_SPEC_FEATURES(r5)
189 + stw r6,CPU_SPEC_FEATURES(r4)
190 blr
191
192 /* 750fx specific
193 @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
194 andis. r11,r11,L3CR_L3E@h
195 beq 1f
196 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
197 - lwz r6,CPU_SPEC_FEATURES(r5)
198 + lwz r6,CPU_SPEC_FEATURES(r4)
199 andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
200 beq 1f
201 li r7,CPU_FTR_CAN_NAP
202 andc r6,r6,r7
203 - stw r6,CPU_SPEC_FEATURES(r5)
204 + stw r6,CPU_SPEC_FEATURES(r4)
205 1:
206 mfspr r11,SPRN_HID0
207
208 diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
209 index 757a83f..fa79af5 100644
210 --- a/arch/powerpc/sysdev/fsl_rio.c
211 +++ b/arch/powerpc/sysdev/fsl_rio.c
212 @@ -832,7 +832,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
213 if (dsr & DOORBELL_DSR_QFI) {
214 pr_info("RIO: doorbell queue full\n");
215 out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
216 - goto out;
217 }
218
219 /* XXX Need to check/dispatch until queue empty */
220 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
221 index 379661d..6b3a2e2 100644
222 --- a/arch/s390/include/asm/system.h
223 +++ b/arch/s390/include/asm/system.h
224 @@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
225
226 extern void account_vtime(struct task_struct *, struct task_struct *);
227 extern void account_tick_vtime(struct task_struct *);
228 -extern void account_system_vtime(struct task_struct *);
229
230 #ifdef CONFIG_PFAULT
231 extern void pfault_irq_init(void);
232 diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
233 index 7bdd7c8..4a76d94 100644
234 --- a/arch/s390/include/asm/vdso.h
235 +++ b/arch/s390/include/asm/vdso.h
236 @@ -7,7 +7,7 @@
237 #define VDSO32_LBASE 0
238 #define VDSO64_LBASE 0
239
240 -#define VDSO_VERSION_STRING LINUX_2.6.26
241 +#define VDSO_VERSION_STRING LINUX_2.6.29
242
243 #ifndef __ASSEMBLY__
244
245 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
246 index cb5a57c..73ae02a 100644
247 --- a/arch/x86/Kconfig
248 +++ b/arch/x86/Kconfig
249 @@ -753,6 +753,17 @@ config SCHED_MC
250 making when dealing with multi-core CPU chips at a cost of slightly
251 increased overhead in some places. If unsure say N here.
252
253 +config IRQ_TIME_ACCOUNTING
254 + bool "Fine granularity task level IRQ time accounting"
255 + default n
256 + ---help---
257 + Select this option to enable fine granularity task irq time
258 + accounting. This is done by reading a timestamp on each
259 + transition between softirq and hardirq state, so there can be a
260 + small performance impact.
261 +
262 + If in doubt, say N here.
263 +
264 source "kernel/Kconfig.preempt"
265
266 config X86_UP_APIC
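
The help text above describes the mechanism: a timestamp is read on every hardirq/softirq transition and the elapsed delta is charged to IRQ time. A minimal userspace sketch of that bookkeeping, using CLOCK_MONOTONIC as a stand-in for the TSC-backed sched_clock(); all names here are illustrative, not the kernel's:

    /* Sketch: accumulate time spent in "hardirq" context by taking a
     * timestamp on every transition, as the Kconfig help text describes.
     * Hypothetical userspace model; the kernel reads RDTSC/sched_clock(). */
    #include <stdio.h>
    #include <time.h>

    static long long irq_time_ns;   /* total time accounted to IRQs */
    static long long irq_enter_ts;  /* timestamp taken on entry */

    static long long now_ns(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    static void irq_enter(void) { irq_enter_ts = now_ns(); }
    static void irq_exit(void)  { irq_time_ns += now_ns() - irq_enter_ts; }

    int main(void)
    {
            irq_enter();
            for (volatile int i = 0; i < 1000000; i++)
                    ;               /* pretend to service an interrupt */
            irq_exit();
            printf("accounted irq time: %lld ns\n", irq_time_ns);
            return 0;
    }
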
267 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
268 index 4a2d4e0..8b5393e 100644
269 --- a/arch/x86/include/asm/mmu_context.h
270 +++ b/arch/x86/include/asm/mmu_context.h
271 @@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
272 unsigned cpu = smp_processor_id();
273
274 if (likely(prev != next)) {
275 - /* stop flush ipis for the previous mm */
276 - cpumask_clear_cpu(cpu, mm_cpumask(prev));
277 #ifdef CONFIG_SMP
278 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
279 percpu_write(cpu_tlbstate.active_mm, next);
280 @@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
281 /* Re-load page tables */
282 load_cr3(next->pgd);
283
284 + /* stop flush ipis for the previous mm */
285 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
286 +
287 /*
288 * load the LDT, if the LDT is different:
289 */
290 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
291 index 84e83de..419e328 100644
292 --- a/arch/x86/kernel/cpu/mtrr/main.c
293 +++ b/arch/x86/kernel/cpu/mtrr/main.c
294 @@ -762,13 +762,21 @@ void set_mtrr_aps_delayed_init(void)
295 }
296
297 /*
298 - * MTRR initialization for all AP's
299 + * Delayed MTRR initialization for all AP's
300 */
301 void mtrr_aps_init(void)
302 {
303 if (!use_intel())
304 return;
305
306 + /*
307 + * Check if someone has requested the delay of AP MTRR initialization,
308 + * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
309 + * then we are done.
310 + */
311 + if (!mtrr_aps_delayed_init)
312 + return;
313 +
314 set_mtrr(~0U, 0, 0, 0);
315 mtrr_aps_delayed_init = false;
316 }
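
The early return added above turns mtrr_aps_init() into a no-op unless set_mtrr_aps_delayed_init() was called first, as the suspend/CPU-hotplug paths do. A standalone model of that gate; only the two function names mirror the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    static bool mtrr_aps_delayed_init;

    static void set_mtrr_aps_delayed_init(void) { mtrr_aps_delayed_init = true; }

    static void mtrr_aps_init(void)
    {
            if (!mtrr_aps_delayed_init)
                    return;                 /* nobody requested delayed init */
            printf("programming MTRRs on all APs\n");
            mtrr_aps_delayed_init = false;
    }

    int main(void)
    {
            mtrr_aps_init();                /* no-op: delay was never requested */
            set_mtrr_aps_delayed_init();    /* e.g. from the resume path */
            mtrr_aps_init();                /* now actually runs */
            return 0;
    }
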
317 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
318 index aaefa71..bc07543 100644
319 --- a/arch/x86/kernel/tsc.c
320 +++ b/arch/x86/kernel/tsc.c
321 @@ -104,10 +104,14 @@ int __init notsc_setup(char *str)
322
323 __setup("notsc", notsc_setup);
324
325 +static int no_sched_irq_time;
326 +
327 static int __init tsc_setup(char *str)
328 {
329 if (!strcmp(str, "reliable"))
330 tsc_clocksource_reliable = 1;
331 + if (!strncmp(str, "noirqtime", 9))
332 + no_sched_irq_time = 1;
333 return 1;
334 }
335
336 @@ -802,6 +806,7 @@ void mark_tsc_unstable(char *reason)
337 if (!tsc_unstable) {
338 tsc_unstable = 1;
339 sched_clock_stable = 0;
340 + disable_sched_clock_irqtime();
341 printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
342 /* Change only the rating, when not registered */
343 if (clocksource_tsc.mult)
344 @@ -990,6 +995,9 @@ void __init tsc_init(void)
345 /* now allow native_sched_clock() to use rdtsc */
346 tsc_disabled = 0;
347
348 + if (!no_sched_irq_time)
349 + enable_sched_clock_irqtime();
350 +
351 lpj = ((u64)tsc_khz * 1000);
352 do_div(lpj, HZ);
353 lpj_fine = lpj;
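
With the hunks above, booting with tsc=noirqtime keeps IRQ_TIME_ACCOUNTING compiled in but stops tsc_init() from enabling it, and an unstable TSC disables it later via mark_tsc_unstable(). A userspace sketch of just the option parsing and the init-time gate; only the two flag names mirror the kernel's:

    #include <stdio.h>
    #include <string.h>

    static int tsc_clocksource_reliable;
    static int no_sched_irq_time;

    static int tsc_setup(const char *str)
    {
            if (!strcmp(str, "reliable"))
                    tsc_clocksource_reliable = 1;
            if (!strncmp(str, "noirqtime", 9))
                    no_sched_irq_time = 1;
            return 1;
    }

    int main(void)
    {
            tsc_setup("noirqtime");         /* as if booted with tsc=noirqtime */
            if (!no_sched_irq_time)
                    printf("would call enable_sched_clock_irqtime()\n");
            else
                    printf("IRQ time accounting stays off\n");
            return 0;
    }
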
354 diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
355 index 2bc2dbe..99d41be 100644
356 --- a/drivers/ata/pata_mpc52xx.c
357 +++ b/drivers/ata/pata_mpc52xx.c
358 @@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
359 };
360
361 static struct ata_port_operations mpc52xx_ata_port_ops = {
362 - .inherits = &ata_sff_port_ops,
363 + .inherits = &ata_bmdma_port_ops,
364 .sff_dev_select = mpc52xx_ata_dev_select,
365 .set_piomode = mpc52xx_ata_set_piomode,
366 .set_dmamode = mpc52xx_ata_set_dmamode,
367 diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
368 index b8a5d65..b0e168f 100644
369 --- a/drivers/char/hvc_iucv.c
370 +++ b/drivers/char/hvc_iucv.c
371 @@ -139,6 +139,8 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
372 *
373 * This function allocates a new struct iucv_tty_buffer element and, optionally,
374 * allocates an internal data buffer with the specified size @size.
375 + * The internal data buffer is always allocated with GFP_DMA which is
376 + * required for receiving and sending data with IUCV.
377 * Note: The total message size arises from the internal buffer size and the
378 * members of the iucv_tty_msg structure.
379 * The function returns NULL if memory allocation has failed.
380 @@ -154,7 +156,7 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
381
382 if (size > 0) {
383 bufp->msg.length = MSG_SIZE(size);
384 - bufp->mbuf = kmalloc(bufp->msg.length, flags);
385 + bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
386 if (!bufp->mbuf) {
387 mempool_free(bufp, hvc_iucv_mempool);
388 return NULL;
389 @@ -237,7 +239,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
390 if (!rb->mbuf) { /* message not yet received ... */
391 /* allocate mem to store msg data; if no memory is available
392 * then leave the buffer on the list and re-try later */
393 - rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
394 + rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
395 if (!rb->mbuf)
396 return -ENOMEM;
397
398 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
399 index 47c2d27..8548ae7 100644
400 --- a/drivers/char/tpm/tpm.c
401 +++ b/drivers/char/tpm/tpm.c
402 @@ -353,12 +353,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
403 tpm_protected_ordinal_duration[ordinal &
404 TPM_PROTECTED_ORDINAL_MASK];
405
406 - if (duration_idx != TPM_UNDEFINED)
407 + if (duration_idx != TPM_UNDEFINED) {
408 duration = chip->vendor.duration[duration_idx];
409 - if (duration <= 0)
410 + /* if duration is 0, it's because chip->vendor.duration wasn't */
411 + /* filled yet, so we set the lowest timeout just to give enough */
412 + /* time for tpm_get_timeouts() to succeed */
413 + return (duration <= 0 ? HZ : duration);
414 + } else
415 return 2 * 60 * HZ;
416 - else
417 - return duration;
418 }
419 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
420
421 @@ -564,9 +566,11 @@ duration:
422 if (rc)
423 return;
424
425 - if (be32_to_cpu(tpm_cmd.header.out.return_code)
426 - != 3 * sizeof(u32))
427 + if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
428 + be32_to_cpu(tpm_cmd.header.out.length)
429 + != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
430 return;
431 +
432 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
433 chip->vendor.duration[TPM_SHORT] =
434 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
435 @@ -910,6 +914,18 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
436 }
437 EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
438
439 +ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
440 + char *buf)
441 +{
442 + struct tpm_chip *chip = dev_get_drvdata(dev);
443 +
444 + return sprintf(buf, "%d %d %d\n",
445 + jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
446 + jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
447 + jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
448 +}
449 +EXPORT_SYMBOL_GPL(tpm_show_timeouts);
450 +
451 ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
452 const char *buf, size_t count)
453 {
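
The rewritten tpm_calc_ordinal_duration() above distinguishes a known ordinal whose duration table is still empty (a short one-second timeout, enough for tpm_get_timeouts() to populate the table) from an unknown ordinal (the conservative two-minute default). The new timeouts sysfs attribute then reports the three populated durations in microseconds. A standalone model of the decision; HZ and the table contents here are stand-ins:

    #include <stdio.h>

    #define HZ 250
    #define TPM_UNDEFINED -1

    static long duration_table[3];          /* not yet filled in: all zero */

    static unsigned long calc_duration(int duration_idx)
    {
            if (duration_idx != TPM_UNDEFINED) {
                    long duration = duration_table[duration_idx];
                    return duration <= 0 ? HZ : duration;
            }
            return 2 * 60 * HZ;
    }

    int main(void)
    {
            printf("known ordinal, empty table: %lu jiffies\n", calc_duration(0));
            duration_table[0] = 3 * HZ;     /* as if tpm_get_timeouts() succeeded */
            printf("known ordinal, filled:     %lu jiffies\n", calc_duration(0));
            printf("unknown ordinal:           %lu jiffies\n", calc_duration(TPM_UNDEFINED));
            return 0;
    }
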
454 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
455 index 792868d..ba1779c 100644
456 --- a/drivers/char/tpm/tpm.h
457 +++ b/drivers/char/tpm/tpm.h
458 @@ -56,6 +56,8 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
459 char *);
460 extern ssize_t tpm_show_temp_deactivated(struct device *,
461 struct device_attribute *attr, char *);
462 +extern ssize_t tpm_show_timeouts(struct device *,
463 + struct device_attribute *attr, char *);
464
465 struct tpm_chip;
466
467 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
468 index ca15c04..2a7af69 100644
469 --- a/drivers/char/tpm/tpm_tis.c
470 +++ b/drivers/char/tpm/tpm_tis.c
471 @@ -354,6 +354,7 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
472 NULL);
473 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
474 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
475 +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
476
477 static struct attribute *tis_attrs[] = {
478 &dev_attr_pubek.attr,
479 @@ -363,7 +364,8 @@ static struct attribute *tis_attrs[] = {
480 &dev_attr_owned.attr,
481 &dev_attr_temp_deactivated.attr,
482 &dev_attr_caps.attr,
483 - &dev_attr_cancel.attr, NULL,
484 + &dev_attr_cancel.attr,
485 + &dev_attr_timeouts.attr, NULL,
486 };
487
488 static struct attribute_group tis_attr_grp = {
489 diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
490 index 96eddd1..020cf28 100644
491 --- a/drivers/gpu/drm/Kconfig
492 +++ b/drivers/gpu/drm/Kconfig
493 @@ -92,7 +92,10 @@ config DRM_I830
494 config DRM_I915
495 tristate "i915 driver"
496 depends on AGP_INTEL
497 + # we need shmfs for the swappable backing store, and in particular
498 + # the shmem_readpage() which depends upon tmpfs
499 select SHMEM
500 + select TMPFS
501 select DRM_KMS_HELPER
502 select FB_CFB_FILLRECT
503 select FB_CFB_COPYAREA
504 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
505 index d5b7361..44626bc 100644
506 --- a/drivers/gpu/drm/i915/intel_lvds.c
507 +++ b/drivers/gpu/drm/i915/intel_lvds.c
508 @@ -884,6 +884,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
509 },
510 {
511 .callback = intel_no_lvds_dmi_callback,
512 + .ident = "AOpen i915GMm-HFS",
513 + .matches = {
514 + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
515 + DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
516 + },
517 + },
518 + {
519 + .callback = intel_no_lvds_dmi_callback,
520 .ident = "Aopen i945GTt-VFA",
521 .matches = {
522 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
523 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
524 index e5e22b1..4e928b9 100644
525 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
526 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
527 @@ -152,6 +152,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
528 return false;
529 }
530
531 + /* mac rv630, rv730, others */
532 + if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
533 + (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
534 + *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
535 + *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
536 + }
537 +
538 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
539 if ((dev->pdev->device == 0x9598) &&
540 (dev->pdev->subsystem_vendor == 0x1043) &&
541 @@ -1210,7 +1217,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
542 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
543
544 /* tell the bios not to handle mode switching */
545 - bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
546 + bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
547
548 if (rdev->family >= CHIP_R600) {
549 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
550 @@ -1261,10 +1268,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
551 else
552 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
553
554 - if (lock)
555 + if (lock) {
556 bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
557 - else
558 + bios_6_scratch &= ~ATOM_S6_ACC_MODE;
559 + } else {
560 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
561 + bios_6_scratch |= ATOM_S6_ACC_MODE;
562 + }
563
564 if (rdev->family >= CHIP_R600)
565 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
566 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
567 index 6f68315..083a181 100644
568 --- a/drivers/gpu/drm/radeon/radeon_display.c
569 +++ b/drivers/gpu/drm/radeon/radeon_display.c
570 @@ -540,6 +540,10 @@ void radeon_compute_pll(struct radeon_pll *pll,
571 *frac_fb_div_p = best_frac_feedback_div;
572 *ref_div_p = best_ref_div;
573 *post_div_p = best_post_div;
574 + DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
575 + freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
576 + best_ref_div, best_post_div);
577 +
578 }
579
580 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
581 diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
582 index 39e82a4..ccdf559 100644
583 --- a/drivers/hwmon/via686a.c
584 +++ b/drivers/hwmon/via686a.c
585 @@ -687,6 +687,13 @@ static int __devexit via686a_remove(struct platform_device *pdev)
586 return 0;
587 }
588
589 +static void via686a_update_fan_div(struct via686a_data *data)
590 +{
591 + int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
592 + data->fan_div[0] = (reg >> 4) & 0x03;
593 + data->fan_div[1] = reg >> 6;
594 +}
595 +
596 static void __devinit via686a_init_device(struct via686a_data *data)
597 {
598 u8 reg;
599 @@ -700,6 +707,9 @@ static void __devinit via686a_init_device(struct via686a_data *data)
600 via686a_write_value(data, VIA686A_REG_TEMP_MODE,
601 (reg & ~VIA686A_TEMP_MODE_MASK)
602 | VIA686A_TEMP_MODE_CONTINUOUS);
603 +
604 + /* Pre-read fan clock divisor values */
605 + via686a_update_fan_div(data);
606 }
607
608 static struct via686a_data *via686a_update_device(struct device *dev)
609 @@ -751,9 +761,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
610 (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
611 0xc0) >> 6;
612
613 - i = via686a_read_value(data, VIA686A_REG_FANDIV);
614 - data->fan_div[0] = (i >> 4) & 0x03;
615 - data->fan_div[1] = i >> 6;
616 + via686a_update_fan_div(data);
617 data->alarms =
618 via686a_read_value(data,
619 VIA686A_REG_ALARM1) |
620 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
621 index 8066db7..71a5f89 100644
622 --- a/drivers/i2c/i2c-core.c
623 +++ b/drivers/i2c/i2c-core.c
624 @@ -745,6 +745,14 @@ static int i2c_do_del_adapter(struct device_driver *d, void *data)
625 static int __unregister_client(struct device *dev, void *dummy)
626 {
627 struct i2c_client *client = i2c_verify_client(dev);
628 + if (client && strcmp(client->name, "dummy"))
629 + i2c_unregister_device(client);
630 + return 0;
631 +}
632 +
633 +static int __unregister_dummy(struct device *dev, void *dummy)
634 +{
635 + struct i2c_client *client = i2c_verify_client(dev);
636 if (client)
637 i2c_unregister_device(client);
638 return 0;
639 @@ -793,8 +801,12 @@ int i2c_del_adapter(struct i2c_adapter *adap)
640 }
641
642 /* Detach any active clients. This can't fail, thus we do not
643 - checking the returned value. */
644 + * check the returned value. This is a two-pass process, because
645 + * we can't remove the dummy devices during the first pass: they
646 + * could have been instantiated by real devices wishing to clean
647 + * them up properly, so we give them a chance to do that first. */
648 res = device_for_each_child(&adap->dev, NULL, __unregister_client);
649 + res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
650
651 #ifdef CONFIG_I2C_COMPAT
652 class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
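
The comment above explains the two-pass teardown: pass one removes real clients, which may unregister their own dummy children; pass two sweeps any dummies still left. A minimal model with strings standing in for struct i2c_client:

    #include <stdio.h>
    #include <string.h>

    static const char *clients[] = { "rtc", "dummy", "eeprom", "dummy" };
    #define N_CLIENTS (sizeof(clients) / sizeof(clients[0]))
    static int removed[N_CLIENTS];

    static void unregister_clients(int want_dummy)
    {
            for (size_t i = 0; i < N_CLIENTS; i++) {
                    int is_dummy = !strcmp(clients[i], "dummy");
                    if (removed[i] || is_dummy != want_dummy)
                            continue;
                    printf("unregister %s\n", clients[i]);
                    removed[i] = 1;
            }
    }

    int main(void)
    {
            unregister_clients(0);  /* pass 1: real clients (__unregister_client) */
            unregister_clients(1);  /* pass 2: leftover dummies (__unregister_dummy) */
            return 0;
    }
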
653 diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
654 index 0d1d334..bbedd57 100644
655 --- a/drivers/input/mouse/bcm5974.c
656 +++ b/drivers/input/mouse/bcm5974.c
657 @@ -55,6 +55,14 @@
658 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
659 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
660 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
661 +/* MacbookAir3,2 (unibody), aka wellspring5 */
662 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f
663 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240
664 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241
665 +/* MacbookAir3,1 (unibody), aka wellspring4 */
666 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242
667 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243
668 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244
669
670 #define BCM5974_DEVICE(prod) { \
671 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
672 @@ -80,6 +88,14 @@ static const struct usb_device_id bcm5974_table[] = {
673 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
674 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
675 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
676 + /* MacbookAir3,2 */
677 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
678 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
679 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
680 + /* MacbookAir3,1 */
681 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
682 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
683 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
684 /* Terminating entry */
685 {}
686 };
687 @@ -233,6 +249,30 @@ static const struct bcm5974_config bcm5974_config_table[] = {
688 { DIM_X, DIM_X / SN_COORD, -4460, 5166 },
689 { DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
690 },
691 + {
692 + USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
693 + USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
694 + USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
695 + HAS_INTEGRATED_BUTTON,
696 + 0x84, sizeof(struct bt_data),
697 + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
698 + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
699 + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
700 + { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
701 + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
702 + },
703 + {
704 + USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
705 + USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
706 + USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
707 + HAS_INTEGRATED_BUTTON,
708 + 0x84, sizeof(struct bt_data),
709 + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
710 + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
711 + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
712 + { DIM_X, DIM_X / SN_COORD, -4616, 5112 },
713 + { DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
714 + },
715 {}
716 };
717
718 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
719 index 21ef4b5..fc58fba 100644
720 --- a/drivers/input/serio/i8042-x86ia64io.h
721 +++ b/drivers/input/serio/i8042-x86ia64io.h
722 @@ -416,6 +416,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
723 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
724 },
725 },
726 + {
727 + /* Dell Vostro V13 */
728 + .matches = {
729 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
730 + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
731 + },
732 + },
733 { }
734 };
735
736 @@ -537,6 +544,17 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
737 };
738 #endif
739
740 +static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
741 + {
742 + /* Dell Vostro V13 */
743 + .matches = {
744 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
745 + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
746 + },
747 + },
748 + { }
749 +};
750 +
751 /*
752 * Some Wistron based laptops need us to explicitly enable the 'Dritek
753 * keyboard extension' to make their extra keys start generating scancodes.
754 @@ -866,6 +884,9 @@ static int __init i8042_platform_init(void)
755 if (dmi_check_system(i8042_dmi_nomux_table))
756 i8042_nomux = true;
757
758 + if (dmi_check_system(i8042_dmi_notimeout_table))
759 + i8042_notimeout = true;
760 +
761 if (dmi_check_system(i8042_dmi_dritek_table))
762 i8042_dritek = true;
763 #endif /* CONFIG_X86 */
764 diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
765 index 16f5ab2..db9d1ea 100644
766 --- a/drivers/input/serio/i8042.c
767 +++ b/drivers/input/serio/i8042.c
768 @@ -64,6 +64,10 @@ static unsigned int i8042_blink_frequency = 500;
769 module_param_named(panicblink, i8042_blink_frequency, uint, 0600);
770 MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics");
771
772 +static bool i8042_notimeout;
773 +module_param_named(notimeout, i8042_notimeout, bool, 0);
774 +MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
775 +
776 #ifdef CONFIG_X86
777 static bool i8042_dritek;
778 module_param_named(dritek, i8042_dritek, bool, 0);
779 @@ -434,7 +438,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
780 } else {
781
782 dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
783 - ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
784 + ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
785
786 port_no = (str & I8042_STR_AUXDATA) ?
787 I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
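
The interrupt-path change above masks the controller's timeout bit out of the serio flags when notimeout is set, either via the module parameter or the Vostro V13 DMI quirk. A sketch of that flag computation; the constant values below are placeholders, not the real i8042/serio bit definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define I8042_STR_PARITY  0x80  /* placeholder bit values */
    #define I8042_STR_TIMEOUT 0x40
    #define SERIO_PARITY      0x01
    #define SERIO_TIMEOUT     0x02

    static bool i8042_notimeout;

    static unsigned int dfl_from_status(unsigned char str)
    {
            return ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
                   (((str & I8042_STR_TIMEOUT) && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
    }

    int main(void)
    {
            unsigned char str = I8042_STR_TIMEOUT;  /* controller flags a timeout */
            printf("default:   dfl=%#x\n", dfl_from_status(str));
            i8042_notimeout = true;                 /* i8042.notimeout or DMI quirk */
            printf("notimeout: dfl=%#x\n", dfl_from_status(str));
            return 0;
    }
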
788 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
789 index f336c69..fcf717c 100644
790 --- a/drivers/md/dm-mpath.c
791 +++ b/drivers/md/dm-mpath.c
792 @@ -33,7 +33,6 @@ struct pgpath {
793 unsigned fail_count; /* Cumulative failure count */
794
795 struct dm_path path;
796 - struct work_struct deactivate_path;
797 struct work_struct activate_path;
798 };
799
800 @@ -113,7 +112,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
801 static void process_queued_ios(struct work_struct *work);
802 static void trigger_event(struct work_struct *work);
803 static void activate_path(struct work_struct *work);
804 -static void deactivate_path(struct work_struct *work);
805
806
807 /*-----------------------------------------------
808 @@ -126,7 +124,6 @@ static struct pgpath *alloc_pgpath(void)
809
810 if (pgpath) {
811 pgpath->is_active = 1;
812 - INIT_WORK(&pgpath->deactivate_path, deactivate_path);
813 INIT_WORK(&pgpath->activate_path, activate_path);
814 }
815
816 @@ -138,14 +135,6 @@ static void free_pgpath(struct pgpath *pgpath)
817 kfree(pgpath);
818 }
819
820 -static void deactivate_path(struct work_struct *work)
821 -{
822 - struct pgpath *pgpath =
823 - container_of(work, struct pgpath, deactivate_path);
824 -
825 - blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
826 -}
827 -
828 static struct priority_group *alloc_priority_group(void)
829 {
830 struct priority_group *pg;
831 @@ -949,7 +938,6 @@ static int fail_path(struct pgpath *pgpath)
832 pgpath->path.dev->name, m->nr_valid_paths);
833
834 schedule_work(&m->trigger_event);
835 - queue_work(kmultipathd, &pgpath->deactivate_path);
836
837 out:
838 spin_unlock_irqrestore(&m->lock, flags);
839 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
840 index d7786e3..d186687 100644
841 --- a/drivers/md/dm.c
842 +++ b/drivers/md/dm.c
843 @@ -1925,13 +1925,14 @@ static void event_callback(void *context)
844 wake_up(&md->eventq);
845 }
846
847 +/*
848 + * Protected by md->suspend_lock obtained by dm_swap_table().
849 + */
850 static void __set_size(struct mapped_device *md, sector_t size)
851 {
852 set_capacity(md->disk, size);
853
854 - mutex_lock(&md->bdev->bd_inode->i_mutex);
855 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
856 - mutex_unlock(&md->bdev->bd_inode->i_mutex);
857 }
858
859 static int __bind(struct mapped_device *md, struct dm_table *t,
860 diff --git a/drivers/md/md.c b/drivers/md/md.c
861 index 2c66c7e..68bfb68 100644
862 --- a/drivers/md/md.c
863 +++ b/drivers/md/md.c
864 @@ -4802,9 +4802,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
865 /* set saved_raid_disk if appropriate */
866 if (!mddev->persistent) {
867 if (info->state & (1<<MD_DISK_SYNC) &&
868 - info->raid_disk < mddev->raid_disks)
869 + info->raid_disk < mddev->raid_disks) {
870 rdev->raid_disk = info->raid_disk;
871 - else
872 + set_bit(In_sync, &rdev->flags);
873 + } else
874 rdev->raid_disk = -1;
875 } else
876 super_types[mddev->major_version].
877 diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
878 index 35edee0..30052db 100644
879 --- a/drivers/media/radio/radio-aimslab.c
880 +++ b/drivers/media/radio/radio-aimslab.c
881 @@ -31,7 +31,6 @@
882 #include <linux/module.h> /* Modules */
883 #include <linux/init.h> /* Initdata */
884 #include <linux/ioport.h> /* request_region */
885 -#include <linux/delay.h> /* udelay */
886 #include <linux/videodev2.h> /* kernel radio structs */
887 #include <linux/version.h> /* for KERNEL_VERSION MACRO */
888 #include <linux/io.h> /* outb, outb_p */
889 @@ -71,27 +70,17 @@ static struct rtrack rtrack_card;
890
891 /* local things */
892
893 -static void sleep_delay(long n)
894 -{
895 - /* Sleep nicely for 'n' uS */
896 - int d = n / msecs_to_jiffies(1000);
897 - if (!d)
898 - udelay(n);
899 - else
900 - msleep(jiffies_to_msecs(d));
901 -}
902 -
903 static void rt_decvol(struct rtrack *rt)
904 {
905 outb(0x58, rt->io); /* volume down + sigstr + on */
906 - sleep_delay(100000);
907 + msleep(100);
908 outb(0xd8, rt->io); /* volume steady + sigstr + on */
909 }
910
911 static void rt_incvol(struct rtrack *rt)
912 {
913 outb(0x98, rt->io); /* volume up + sigstr + on */
914 - sleep_delay(100000);
915 + msleep(100);
916 outb(0xd8, rt->io); /* volume steady + sigstr + on */
917 }
918
919 @@ -120,7 +109,7 @@ static int rt_setvol(struct rtrack *rt, int vol)
920
921 if (vol == 0) { /* volume = 0 means mute the card */
922 outb(0x48, rt->io); /* volume down but still "on" */
923 - sleep_delay(2000000); /* make sure it's totally down */
924 + msleep(2000); /* make sure it's totally down */
925 outb(0xd0, rt->io); /* volume steady, off */
926 rt->curvol = 0; /* track the volume state! */
927 mutex_unlock(&rt->lock);
928 @@ -155,7 +144,7 @@ static void send_0_byte(struct rtrack *rt)
929 outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */
930 outb_p(128+64+16+8+2+1, rt->io); /* clock */
931 }
932 - sleep_delay(1000);
933 + msleep(1);
934 }
935
936 static void send_1_byte(struct rtrack *rt)
937 @@ -169,7 +158,7 @@ static void send_1_byte(struct rtrack *rt)
938 outb_p(128+64+16+8+4+2+1, rt->io); /* clock */
939 }
940
941 - sleep_delay(1000);
942 + msleep(1);
943 }
944
945 static int rt_setfreq(struct rtrack *rt, unsigned long freq)
946 @@ -423,7 +412,7 @@ static int __init rtrack_init(void)
947
948 /* this ensures that the volume is all the way down */
949 outb(0x48, rt->io); /* volume down but still "on" */
950 - sleep_delay(2000000); /* make sure it's totally down */
951 + msleep(2000); /* make sure it's totally down */
952 outb(0xc0, rt->io); /* steady volume, mute card */
953
954 return 0;
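
The deleted sleep_delay() took microseconds but computed n / msecs_to_jiffies(1000), i.e. divided by jiffies-per-second, and then fed the quotient to jiffies_to_msecs(), which only comes out right when HZ is 1000. Hence the straight msleep() conversions above (100000 us -> 100 ms, 2000000 us -> 2000 ms, 1000 us -> 1 ms). A demo of the old math with HZ as a variable; the two helpers are simplified models of the kernel's:

    #include <stdio.h>

    static long HZ;
    static long msecs_to_jiffies(long ms) { return ms * HZ / 1000; }
    static long jiffies_to_msecs(long j)  { return j * 1000 / HZ; }

    static long old_sleep_ms(long n_usec)
    {
            long d = n_usec / msecs_to_jiffies(1000);   /* divides by jiffies/sec */
            return d ? jiffies_to_msecs(d) : 0;         /* 0 meant the udelay() path */
    }

    int main(void)
    {
            for (HZ = 100; HZ <= 1000; HZ *= 10)
                    printf("HZ=%-4ld sleep_delay(100000us) slept ~%ld ms (wanted 100)\n",
                           HZ, old_sleep_ms(100000));
            return 0;
    }
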
955 diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
956 index c0fd5c6..331d1ec 100644
957 --- a/drivers/media/video/em28xx/em28xx-cards.c
958 +++ b/drivers/media/video/em28xx/em28xx-cards.c
959 @@ -1525,11 +1525,11 @@ struct em28xx_board em28xx_boards[] = {
960 .input = { {
961 .type = EM28XX_VMUX_COMPOSITE1,
962 .vmux = SAA7115_COMPOSITE0,
963 - .amux = EM28XX_AMUX_VIDEO2,
964 + .amux = EM28XX_AMUX_LINE_IN,
965 }, {
966 .type = EM28XX_VMUX_SVIDEO,
967 .vmux = SAA7115_SVIDEO3,
968 - .amux = EM28XX_AMUX_VIDEO2,
969 + .amux = EM28XX_AMUX_LINE_IN,
970 } },
971 },
972 [EM2860_BOARD_TERRATEC_AV350] = {
973 diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
974 index ad11969..8a51256 100644
975 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
976 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
977 @@ -214,8 +214,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
978
979 /* returns delimiter padding required given the packet length */
980 #define ATH_AGGR_GET_NDELIM(_len) \
981 - (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
982 - (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
983 + (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
984 + DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
985
986 #define BAW_WITHIN(_start, _bawsz, _seqno) \
987 ((((_seqno) - (_start)) & 4095) < (_bawsz))
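
The old ATH_AGGR_GET_NDELIM truncated the deficit (>> 2) and subtracted one delimiter's worth too much, so frames just under the minimum aggregate length could get no padding at all; the new form rounds the deficit up in whole 4-byte delimiters. A comparison of both formulas; the MINPLEN value below is only an example, the real constant is defined elsewhere in ath9k.h:

    #include <stdio.h>

    #define ATH_AGGR_DELIM_SZ 4
    #define ATH_AGGR_MINPLEN  256                    /* example value */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    #define NDELIM_OLD(_len) \
            (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
              (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
    #define NDELIM_NEW(_len) \
            (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
             DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))

    int main(void)
    {
            int lens[] = { 100, 250, 252, 255 };
            for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
                    printf("len=%3d old=%2d new=%2d\n",
                           lens[i], NDELIM_OLD(lens[i]), NDELIM_NEW(lens[i]));
            return 0;
    }
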
988 diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
989 index b4ff1dc..6992f8f 100644
990 --- a/drivers/net/wireless/hostap/hostap_cs.c
991 +++ b/drivers/net/wireless/hostap/hostap_cs.c
992 @@ -662,12 +662,6 @@ static int prism2_config(struct pcmcia_device *link)
993 link->dev_node = &hw_priv->node;
994
995 /*
996 - * Make sure the IRQ handler cannot proceed until at least
997 - * dev->base_addr is initialized.
998 - */
999 - spin_lock_irqsave(&local->irq_init_lock, flags);
1000 -
1001 - /*
1002 * Allocate an interrupt line. Note that this does not assign a
1003 * handler to the interrupt, unless the 'Handler' member of the
1004 * irq structure is initialized.
1005 @@ -690,9 +684,10 @@ static int prism2_config(struct pcmcia_device *link)
1006 CS_CHECK(RequestConfiguration,
1007 pcmcia_request_configuration(link, &link->conf));
1008
1009 + /* IRQ handler cannot proceed until at least dev->base_addr is initialized */
1010 + spin_lock_irqsave(&local->irq_init_lock, flags);
1011 dev->irq = link->irq.AssignedIRQ;
1012 dev->base_addr = link->io.BasePort1;
1013 -
1014 spin_unlock_irqrestore(&local->irq_init_lock, flags);
1015
1016 /* Finally, report what we've done */
1017 @@ -724,7 +719,6 @@ static int prism2_config(struct pcmcia_device *link)
1018 return ret;
1019
1020 cs_failed:
1021 - spin_unlock_irqrestore(&local->irq_init_lock, flags);
1022 cs_error(link, last_fn, last_ret);
1023
1024 failed:
1025 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
1026 index 166bedd..0e56d78 100644
1027 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
1028 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
1029 @@ -1044,6 +1044,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1030 /* only Re-enable if diabled by irq */
1031 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1032 iwl_enable_interrupts(priv);
1033 + /* Re-enable RF_KILL if it occurred */
1034 + else if (handled & CSR_INT_BIT_RF_KILL)
1035 + iwl_enable_rfkill_int(priv);
1036
1037 #ifdef CONFIG_IWLWIFI_DEBUG
1038 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1039 @@ -1245,6 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1040 /* only Re-enable if diabled by irq */
1041 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1042 iwl_enable_interrupts(priv);
1043 + /* Re-enable RF_KILL if it occurred */
1044 + else if (handled & CSR_INT_BIT_RF_KILL)
1045 + iwl_enable_rfkill_int(priv);
1046
1047 spin_unlock_irqrestore(&priv->lock, flags);
1048
1049 @@ -2358,9 +2364,10 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
1050
1051 flush_workqueue(priv->workqueue);
1052
1053 - /* enable interrupts again in order to receive rfkill changes */
1054 + /* User space software may expect to get rfkill changes
1055 + * even if interface is down */
1056 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
1057 - iwl_enable_interrupts(priv);
1058 + iwl_enable_rfkill_int(priv);
1059
1060 IWL_DEBUG_MAC80211(priv, "leave\n");
1061 }
1062 @@ -3060,14 +3067,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1063 * 8. Setup and register mac80211
1064 **********************************/
1065
1066 - /* enable interrupts if needed: hw bug w/a */
1067 + /* enable rfkill interrupt: hw bug w/a */
1068 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
1069 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1070 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1071 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
1072 }
1073
1074 - iwl_enable_interrupts(priv);
1075 + iwl_enable_rfkill_int(priv);
1076
1077 err = iwl_setup_mac(priv);
1078 if (err)
1079 diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
1080 index f8481e8..bf2a33f 100644
1081 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
1082 +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
1083 @@ -160,6 +160,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv)
1084 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
1085 }
1086
1087 +static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
1088 +{
1089 + IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
1090 + iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
1091 +}
1092 +
1093 static inline void iwl_enable_interrupts(struct iwl_priv *priv)
1094 {
1095 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
1096 diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
1097 index 0edd7b4..9000787 100644
1098 --- a/drivers/net/wireless/p54/txrx.c
1099 +++ b/drivers/net/wireless/p54/txrx.c
1100 @@ -617,7 +617,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
1101 else
1102 *burst_possible = false;
1103
1104 - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
1105 + if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
1106 *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
1107
1108 if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
1109 diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
1110 index 14e7bb2..1585577 100644
1111 --- a/drivers/net/wireless/rt2x00/rt73usb.c
1112 +++ b/drivers/net/wireless/rt2x00/rt73usb.c
1113 @@ -2400,6 +2400,7 @@ static struct usb_device_id rt73usb_device_table[] = {
1114 { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
1115 { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
1116 { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
1117 + { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
1118 /* Qcom */
1119 { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
1120 { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
1121 diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
1122 index f7b68ca..4ae494b 100644
1123 --- a/drivers/pci/pci-stub.c
1124 +++ b/drivers/pci/pci-stub.c
1125 @@ -54,6 +54,9 @@ static int __init pci_stub_init(void)
1126 subdevice = PCI_ANY_ID, class=0, class_mask=0;
1127 int fields;
1128
1129 + if (!strlen(id))
1130 + continue;
1131 +
1132 fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
1133 &vendor, &device, &subvendor, &subdevice,
1134 &class, &class_mask);
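
The strlen() guard above skips empty elements of the comma-separated ids module parameter (for example a bare pci-stub.ids= on the command line, or a trailing comma), for which sscanf() would find no fields and the driver would log a spurious invalid-ID warning. A userspace demo of the parse loop, with strsep() standing in for the kernel's splitter:

    #define _DEFAULT_SOURCE         /* for strsep() */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char ids[] = "8086:10f5,";      /* note the trailing comma */
            char *p = ids, *id;

            while ((id = strsep(&p, ","))) {
                    unsigned vendor, device;
                    int fields;

                    if (!strlen(id))
                            continue;       /* the patched behaviour: skip it */
                    fields = sscanf(id, "%x:%x", &vendor, &device);
                    printf("token '%s': %d fields\n", id, fields);
            }
            return 0;
    }
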
1135 diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
1136 index 6f1dba5..ad31df1 100644
1137 --- a/drivers/power/ds2760_battery.c
1138 +++ b/drivers/power/ds2760_battery.c
1139 @@ -211,7 +211,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
1140 if (di->rem_capacity > 100)
1141 di->rem_capacity = 100;
1142
1143 - if (di->current_uA >= 100L)
1144 + if (di->current_uA < -100L)
1145 di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
1146 / (di->current_uA / 100L);
1147 else
1148 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
1149 index 66c2d6a..2ac43f0 100644
1150 --- a/drivers/rtc/rtc-cmos.c
1151 +++ b/drivers/rtc/rtc-cmos.c
1152 @@ -36,6 +36,7 @@
1153 #include <linux/platform_device.h>
1154 #include <linux/mod_devicetable.h>
1155 #include <linux/log2.h>
1156 +#include <linux/pm.h>
1157
1158 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
1159 #include <asm-generic/rtc.h>
1160 @@ -855,7 +856,7 @@ static void __exit cmos_do_remove(struct device *dev)
1161
1162 #ifdef CONFIG_PM
1163
1164 -static int cmos_suspend(struct device *dev, pm_message_t mesg)
1165 +static int cmos_suspend(struct device *dev)
1166 {
1167 struct cmos_rtc *cmos = dev_get_drvdata(dev);
1168 unsigned char tmp;
1169 @@ -902,7 +903,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
1170 */
1171 static inline int cmos_poweroff(struct device *dev)
1172 {
1173 - return cmos_suspend(dev, PMSG_HIBERNATE);
1174 + return cmos_suspend(dev);
1175 }
1176
1177 static int cmos_resume(struct device *dev)
1178 @@ -949,9 +950,9 @@ static int cmos_resume(struct device *dev)
1179 return 0;
1180 }
1181
1182 +static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
1183 +
1184 #else
1185 -#define cmos_suspend NULL
1186 -#define cmos_resume NULL
1187
1188 static inline int cmos_poweroff(struct device *dev)
1189 {
1190 @@ -1087,7 +1088,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
1191
1192 static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
1193 {
1194 - return cmos_suspend(&pnp->dev, mesg);
1195 + return cmos_suspend(&pnp->dev);
1196 }
1197
1198 static int cmos_pnp_resume(struct pnp_dev *pnp)
1199 @@ -1167,8 +1168,9 @@ static struct platform_driver cmos_platform_driver = {
1200 .shutdown = cmos_platform_shutdown,
1201 .driver = {
1202 .name = (char *) driver_name,
1203 - .suspend = cmos_suspend,
1204 - .resume = cmos_resume,
1205 +#ifdef CONFIG_PM
1206 + .pm = &cmos_pm_ops,
1207 +#endif
1208 }
1209 };
1210
1211 diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
1212 index 39fb9aa..974f462 100644
1213 --- a/drivers/scsi/libsas/sas_scsi_host.c
1214 +++ b/drivers/scsi/libsas/sas_scsi_host.c
1215 @@ -648,6 +648,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
1216
1217 spin_lock_irqsave(shost->host_lock, flags);
1218 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1219 + shost->host_eh_scheduled = 0;
1220 spin_unlock_irqrestore(shost->host_lock, flags);
1221
1222 SAS_DPRINTK("Enter %s\n", __func__);
1223 diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
1224 index 670241e..4381bfa 100644
1225 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c
1226 +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
1227 @@ -1947,9 +1947,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
1228 /* adjust hba_queue_depth, reply_free_queue_depth,
1229 * and queue_size
1230 */
1231 - ioc->hba_queue_depth -= queue_diff;
1232 - ioc->reply_free_queue_depth -= queue_diff;
1233 - queue_size -= queue_diff;
1234 + ioc->hba_queue_depth -= (queue_diff / 2);
1235 + ioc->reply_free_queue_depth -= (queue_diff / 2);
1236 + queue_size = facts->MaxReplyDescriptorPostQueueDepth;
1237 }
1238 ioc->reply_post_queue_depth = queue_size;
1239
1240 @@ -3595,6 +3595,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
1241 static void
1242 _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
1243 {
1244 + mpt2sas_scsih_reset_handler(ioc, reset_phase);
1245 + mpt2sas_ctl_reset_handler(ioc, reset_phase);
1246 switch (reset_phase) {
1247 case MPT2_IOC_PRE_RESET:
1248 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
1249 @@ -3625,8 +3627,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
1250 "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
1251 break;
1252 }
1253 - mpt2sas_scsih_reset_handler(ioc, reset_phase);
1254 - mpt2sas_ctl_reset_handler(ioc, reset_phase);
1255 }
1256
1257 /**
1258 @@ -3680,6 +3680,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
1259 {
1260 int r;
1261 unsigned long flags;
1262 + u8 pe_complete = ioc->wait_for_port_enable_to_complete;
1263
1264 dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
1265 __func__));
1266 @@ -3701,6 +3702,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
1267 if (r)
1268 goto out;
1269 _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
1270 +
1271 + /* If this hard reset is called while port enable is active, then
1272 + * there is no reason to call make_ioc_operational
1273 + */
1274 + if (pe_complete) {
1275 + r = -EFAULT;
1276 + goto out;
1277 + }
1278 r = _base_make_ioc_operational(ioc, sleep_flag);
1279 if (!r)
1280 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
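
In the queue-sizing hunk earlier in this file's diff, the reply post queue size tracks roughly hba_queue_depth plus reply_free_queue_depth (an assumption based on the surrounding code). Subtracting the full overshoot from both contributors, as the old code did, shrank the sum by twice the overshoot; splitting it in half per queue and pinning queue_size to the reported hardware maximum keeps the three values consistent. With made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            int hba_queue_depth = 600, reply_free_queue_depth = 664;
            int queue_size = hba_queue_depth + reply_free_queue_depth;  /* 1264 */
            int hw_max = 1024;      /* facts->MaxReplyDescriptorPostQueueDepth */
            int queue_diff = queue_size - hw_max;                       /* 240 */

            hba_queue_depth        -= queue_diff / 2;
            reply_free_queue_depth -= queue_diff / 2;
            queue_size              = hw_max;

            printf("hba=%d free=%d sum=%d post=%d\n",
                   hba_queue_depth, reply_free_queue_depth,
                   hba_queue_depth + reply_free_queue_depth, queue_size);
            return 0;
    }
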
1281 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1282 index f10bf70..9e75206 100644
1283 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1284 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
1285 @@ -2585,9 +2585,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
1286 u16 handle;
1287
1288 for (i = 0 ; i < event_data->NumEntries; i++) {
1289 - if (event_data->PHY[i].PhyStatus &
1290 - MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
1291 - continue;
1292 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
1293 if (!handle)
1294 continue;
1295 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1296 index 81a9d25..568d363 100644
1297 --- a/drivers/scsi/sd.c
1298 +++ b/drivers/scsi/sd.c
1299 @@ -1040,6 +1040,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1300 u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
1301 u64 bad_lba;
1302 int info_valid;
1303 + /*
1304 + * resid is optional but mostly filled in. When it's unused,
1305 + * its value is zero, so we assume the whole buffer transferred
1306 + */
1307 + unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
1308 + unsigned int good_bytes;
1309
1310 if (!blk_fs_request(scmd->request))
1311 return 0;
1312 @@ -1073,7 +1079,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
1313 /* This computation should always be done in terms of
1314 * the resolution of the device's medium.
1315 */
1316 - return (bad_lba - start_lba) * scmd->device->sector_size;
1317 + good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
1318 + return min(good_bytes, transferred);
1319 }
1320
1321 /**
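
The clamp added above keeps sd_completed_bytes() from reporting more "known good" data than the HBA actually moved: the bad-LBA arithmetic from the sense data is bounded by bufflen minus resid, where a resid of 0 means the whole buffer transferred. A worked example with made-up values:

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long long start_lba = 1000, bad_lba = 1016;
            unsigned int sector_size = 512;
            unsigned int bufflen = 16 * 512;
            unsigned int resid = 12 * 512;          /* only 4 sectors moved */

            unsigned int transferred = bufflen - resid;
            unsigned int good_bytes = (bad_lba - start_lba) * sector_size;

            printf("sense says %u good, transferred %u -> report %u\n",
                   good_bytes, transferred, min_u(good_bytes, transferred));
            return 0;
    }
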
1322 diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
1323 index 5ed1b82..6a451e8 100644
1324 --- a/drivers/serial/8250.c
1325 +++ b/drivers/serial/8250.c
1326 @@ -255,7 +255,8 @@ static const struct serial8250_config uart_config[] = {
1327 .fifo_size = 128,
1328 .tx_loadsz = 128,
1329 .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
1330 - .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
1331 + /* UART_CAP_EFR breaks Billionton CF bluetooth card. */
1332 + .flags = UART_CAP_FIFO | UART_CAP_SLEEP,
1333 },
1334 [PORT_RSA] = {
1335 .name = "RSA",
1336 diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
1337 index 0d2c2eb..59a6106 100644
1338 --- a/drivers/staging/comedi/drivers/jr3_pci.c
1339 +++ b/drivers/staging/comedi/drivers/jr3_pci.c
1340 @@ -52,6 +52,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci)
1341
1342 #define PCI_VENDOR_ID_JR3 0x1762
1343 #define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
1344 +#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
1345 #define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
1346 #define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
1347 #define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
1348 @@ -71,6 +72,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
1349 {
1350 PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
1351 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
1352 + PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
1353 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
1354 PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
1355 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
1356 PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
1357 @@ -807,6 +810,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
1358 devpriv->n_channels = 1;
1359 }
1360 break;
1361 + case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
1362 + devpriv->n_channels = 1;
1363 + }
1364 + break;
1365 case PCI_DEVICE_ID_JR3_2_CHANNEL:{
1366 devpriv->n_channels = 2;
1367 }
1368 diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
1369 index dc3f398..4ac745a 100644
1370 --- a/drivers/staging/comedi/drivers/ni_labpc.c
1371 +++ b/drivers/staging/comedi/drivers/ni_labpc.c
1372 @@ -528,7 +528,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
1373 /* grab our IRQ */
1374 if (irq) {
1375 isr_flags = 0;
1376 - if (thisboard->bustype == pci_bustype)
1377 + if (thisboard->bustype == pci_bustype
1378 + || thisboard->bustype == pcmcia_bustype)
1379 isr_flags |= IRQF_SHARED;
1380 if (request_irq(irq, labpc_interrupt, isr_flags,
1381 driver_labpc.driver_name, dev)) {
1382 diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
1383 index 62b2828..871a202 100644
1384 --- a/drivers/staging/hv/blkvsc_drv.c
1385 +++ b/drivers/staging/hv/blkvsc_drv.c
1386 @@ -378,6 +378,7 @@ static int blkvsc_probe(struct device *device)
1387 blkdev->gd->first_minor = 0;
1388 blkdev->gd->fops = &block_ops;
1389 blkdev->gd->private_data = blkdev;
1390 + blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
1391 sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
1392
1393 blkvsc_do_inquiry(blkdev);
1394 diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
1395 index 547261d..a5101e3 100644
1396 --- a/drivers/staging/hv/netvsc_drv.c
1397 +++ b/drivers/staging/hv/netvsc_drv.c
1398 @@ -296,6 +296,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj,
1399 if (status == 1) {
1400 netif_carrier_on(net);
1401 netif_wake_queue(net);
1402 + netif_notify_peers(net);
1403 } else {
1404 netif_carrier_off(net);
1405 netif_stop_queue(net);
1406 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
1407 index c201802..5ea5d57 100644
1408 --- a/drivers/staging/usbip/vhci_hcd.c
1409 +++ b/drivers/staging/usbip/vhci_hcd.c
1410 @@ -798,20 +798,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1411 spin_unlock_irqrestore(&vdev->priv_lock, flags2);
1412 }
1413
1414 -
1415 - if (!vdev->ud.tcp_socket) {
1416 - /* tcp connection is closed */
1417 - usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
1418 - urb);
1419 -
1420 - usb_hcd_unlink_urb_from_ep(hcd, urb);
1421 -
1422 - spin_unlock_irqrestore(&the_controller->lock, flags);
1423 - usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
1424 - urb->status);
1425 - spin_lock_irqsave(&the_controller->lock, flags);
1426 - }
1427 -
1428 spin_unlock_irqrestore(&the_controller->lock, flags);
1429
1430 usbip_dbg_vhci_hc("leave\n");
1431 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1432 index e3017c4..399dd67 100644
1433 --- a/drivers/usb/class/cdc-acm.c
1434 +++ b/drivers/usb/class/cdc-acm.c
1435 @@ -1596,6 +1596,7 @@ static struct usb_device_id acm_ids[] = {
1436 { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
1437 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
1438 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
1439 + { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
1440 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
1441
1442 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1443 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1444 index 12254e1..fc722a0 100644
1445 --- a/drivers/usb/core/hub.c
1446 +++ b/drivers/usb/core/hub.c
1447 @@ -648,6 +648,8 @@ static void hub_init_func3(struct work_struct *ws);
1448 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1449 {
1450 struct usb_device *hdev = hub->hdev;
1451 + struct usb_hcd *hcd;
1452 + int ret;
1453 int port1;
1454 int status;
1455 bool need_debounce_delay = false;
1456 @@ -686,6 +688,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1457 atomic_set(&to_usb_interface(hub->intfdev)->
1458 pm_usage_cnt, 1);
1459 return; /* Continues at init2: below */
1460 + } else if (type == HUB_RESET_RESUME) {
1461 + /* The internal host controller state for the hub device
1462 + * may be gone after a host power loss on system resume.
1463 + * Update the device's info so the HW knows it's a hub.
1464 + */
1465 + hcd = bus_to_hcd(hdev->bus);
1466 + if (hcd->driver->update_hub_device) {
1467 + ret = hcd->driver->update_hub_device(hcd, hdev,
1468 + &hub->tt, GFP_NOIO);
1469 + if (ret < 0) {
1470 + dev_err(hub->intfdev, "Host not "
1471 + "accepting hub info "
1472 + "update.\n");
1473 + dev_err(hub->intfdev, "LS/FS devices "
1474 + "and hubs may not work "
1475 + "under this hub\n.");
1476 + }
1477 + }
1478 + hub_power_on(hub, true);
1479 } else {
1480 hub_power_on(hub, true);
1481 }
1482 @@ -2683,6 +2704,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
1483 udev->ttport = hdev->ttport;
1484 } else if (udev->speed != USB_SPEED_HIGH
1485 && hdev->speed == USB_SPEED_HIGH) {
1486 + if (!hub->tt.hub) {
1487 + dev_err(&udev->dev, "parent hub has no TT\n");
1488 + retval = -EINVAL;
1489 + goto fail;
1490 + }
1491 udev->tt = &hub->tt;
1492 udev->ttport = port1;
1493 }
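
The update_hub_device call added above is an optional host-controller driver op: it is invoked only if the driver provides it, and a failure is merely logged before hub_power_on() proceeds. A minimal standalone model of that optional-callback pattern; the names are illustrative:

    #include <stdio.h>

    struct hc_driver {
            int (*update_hub_device)(void *hub_info);   /* may be NULL */
    };

    static int fake_update(void *hub_info)
    {
            (void)hub_info;
            return 0;                                   /* success */
    }

    static void reset_resume(const struct hc_driver *drv)
    {
            if (drv->update_hub_device &&
                drv->update_hub_device(NULL) < 0)
                    fprintf(stderr, "host not accepting hub info update\n");
            printf("hub_power_on()\n");                 /* done in either case */
    }

    int main(void)
    {
            struct hc_driver no_op = { 0 }, with_op = { fake_update };
            reset_resume(&no_op);
            reset_resume(&with_op);
            return 0;
    }
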
1494 diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
1495 index 2d867fd..8966d5d 100644
1496 --- a/drivers/usb/gadget/printer.c
1497 +++ b/drivers/usb/gadget/printer.c
1498 @@ -130,31 +130,31 @@ static struct printer_dev usb_printer_gadget;
1499 * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
1500 */
1501
1502 -static ushort __initdata idVendor;
1503 +static ushort idVendor;
1504 module_param(idVendor, ushort, S_IRUGO);
1505 MODULE_PARM_DESC(idVendor, "USB Vendor ID");
1506
1507 -static ushort __initdata idProduct;
1508 +static ushort idProduct;
1509 module_param(idProduct, ushort, S_IRUGO);
1510 MODULE_PARM_DESC(idProduct, "USB Product ID");
1511
1512 -static ushort __initdata bcdDevice;
1513 +static ushort bcdDevice;
1514 module_param(bcdDevice, ushort, S_IRUGO);
1515 MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
1516
1517 -static char *__initdata iManufacturer;
1518 +static char *iManufacturer;
1519 module_param(iManufacturer, charp, S_IRUGO);
1520 MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
1521
1522 -static char *__initdata iProduct;
1523 +static char *iProduct;
1524 module_param(iProduct, charp, S_IRUGO);
1525 MODULE_PARM_DESC(iProduct, "USB Product string");
1526
1527 -static char *__initdata iSerialNum;
1528 +static char *iSerialNum;
1529 module_param(iSerialNum, charp, S_IRUGO);
1530 MODULE_PARM_DESC(iSerialNum, "1");
1531
1532 -static char *__initdata iPNPstring;
1533 +static char *iPNPstring;
1534 module_param(iPNPstring, charp, S_IRUGO);
1535 MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
1536
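
The hunk above fixes a lifetime bug rather than a style issue: parameters registered with module_param(..., S_IRUGO) stay readable through /sys/module/<name>/parameters/ for as long as the module is loaded, so their backing variables must not be __initdata, which is discarded once init completes. A hypothetical minimal module illustrating the correct pattern:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static ushort vendor_id;	/* correct: lives for the module lifetime */
module_param(vendor_id, ushort, S_IRUGO);
MODULE_PARM_DESC(vendor_id, "example vendor id");

/* Buggy variant (what the patch removes):
 *	static ushort __initdata vendor_id;
 * The storage is freed with the init sections, so a later sysfs read
 * of the parameter would touch freed memory. */

static int __init example_init(void) { return 0; }
static void __exit example_exit(void) { }
module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
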
1537 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
1538 index 8198fc0..7b2e99c 100644
1539 --- a/drivers/usb/host/ehci-hcd.c
1540 +++ b/drivers/usb/host/ehci-hcd.c
1541 @@ -103,6 +103,9 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
1542
1543 #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
1544
1545 +/* for the ASPM quirk affecting isochronous (ISOC) transfers on AMD SB800 */
1546 +static struct pci_dev *amd_nb_dev;
1547 +
1548 /*-------------------------------------------------------------------------*/
1549
1550 #include "ehci.h"
1551 @@ -502,6 +505,11 @@ static void ehci_stop (struct usb_hcd *hcd)
1552 spin_unlock_irq (&ehci->lock);
1553 ehci_mem_cleanup (ehci);
1554
1555 + if (amd_nb_dev) {
1556 + pci_dev_put(amd_nb_dev);
1557 + amd_nb_dev = NULL;
1558 + }
1559 +
1560 #ifdef EHCI_STATS
1561 ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
1562 ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
1563 @@ -537,6 +545,8 @@ static int ehci_init(struct usb_hcd *hcd)
1564 ehci->iaa_watchdog.function = ehci_iaa_watchdog;
1565 ehci->iaa_watchdog.data = (unsigned long) ehci;
1566
1567 + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
1568 +
1569 /*
1570 * hw default: 1K periodic list heads, one per frame.
1571 * periodic_size can shrink by USBCMD update if hcc_params allows.
1572 @@ -544,11 +554,20 @@ static int ehci_init(struct usb_hcd *hcd)
1573 ehci->periodic_size = DEFAULT_I_TDPS;
1574 INIT_LIST_HEAD(&ehci->cached_itd_list);
1575 INIT_LIST_HEAD(&ehci->cached_sitd_list);
1576 +
1577 + if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
1578 + /* periodic schedule size can be smaller than default */
1579 + switch (EHCI_TUNE_FLS) {
1580 + case 0: ehci->periodic_size = 1024; break;
1581 + case 1: ehci->periodic_size = 512; break;
1582 + case 2: ehci->periodic_size = 256; break;
1583 + default: BUG();
1584 + }
1585 + }
1586 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
1587 return retval;
1588
1589 /* controllers may cache some of the periodic schedule ... */
1590 - hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
1591 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
1592 ehci->i_thresh = 8;
1593 else // N microframes cached
1594 @@ -597,12 +616,6 @@ static int ehci_init(struct usb_hcd *hcd)
1595 /* periodic schedule size can be smaller than default */
1596 temp &= ~(3 << 2);
1597 temp |= (EHCI_TUNE_FLS << 2);
1598 - switch (EHCI_TUNE_FLS) {
1599 - case 0: ehci->periodic_size = 1024; break;
1600 - case 1: ehci->periodic_size = 512; break;
1601 - case 2: ehci->periodic_size = 256; break;
1602 - default: BUG();
1603 - }
1604 }
1605 ehci->command = temp;
1606
1607 diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
1608 index ead5f4f..d72b121 100644
1609 --- a/drivers/usb/host/ehci-pci.c
1610 +++ b/drivers/usb/host/ehci-pci.c
1611 @@ -41,6 +41,42 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
1612 return 0;
1613 }
1614
1615 +static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
1616 +{
1617 + struct pci_dev *amd_smbus_dev;
1618 + u8 rev = 0;
1619 +
1620 + amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
1621 + if (amd_smbus_dev) {
1622 + pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
1623 + if (rev < 0x40) {
1624 + pci_dev_put(amd_smbus_dev);
1625 + amd_smbus_dev = NULL;
1626 + return 0;
1627 + }
1628 + } else {
1629 + amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
1630 + if (!amd_smbus_dev)
1631 + return 0;
1632 + pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
1633 + if (rev < 0x11 || rev > 0x18) {
1634 + pci_dev_put(amd_smbus_dev);
1635 + amd_smbus_dev = NULL;
1636 + return 0;
1637 + }
1638 + }
1639 +
1640 + if (!amd_nb_dev)
1641 + amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
1642 +
1643 + ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
1644 +
1645 + pci_dev_put(amd_smbus_dev);
1646 + amd_smbus_dev = NULL;
1647 +
1648 + return 1;
1649 +}
1650 +
1651 /* called during probe() after chip reset completes */
1652 static int ehci_pci_setup(struct usb_hcd *hcd)
1653 {
1654 @@ -99,6 +135,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
1655 /* cache this readonly data; minimize chip reads */
1656 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
1657
1658 + if (ehci_quirk_amd_hudson(ehci))
1659 + ehci->amd_l1_fix = 1;
1660 +
1661 retval = ehci_halt(ehci);
1662 if (retval)
1663 return retval;
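
Condensed, the detection above keys the quirk off the southbridge SMBus function and its revision: ATI/AMD SB800 (device 0x4385, revision 0x40 and up) or AMD Hudson (device 0x780b, revisions 0x11 through 0x18). A kernel-context sketch of that shape, with an illustrative function name:

#include <linux/pci.h>

static bool example_is_affected_amd_chipset(void)
{
	struct pci_dev *smbus;
	u8 rev = 0;
	bool hit = false;

	smbus = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
	if (smbus) {
		pci_read_config_byte(smbus, PCI_REVISION_ID, &rev);
		hit = (rev >= 0x40);			/* SB800 and later */
	} else {
		smbus = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
		if (smbus) {
			pci_read_config_byte(smbus, PCI_REVISION_ID, &rev);
			hit = (rev >= 0x11 && rev <= 0x18);	/* Hudson */
		}
	}
	pci_dev_put(smbus);	/* pci_dev_put(NULL) is a no-op */
	return hit;
}
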
1664 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
1665 index 6746a8a..072f368 100644
1666 --- a/drivers/usb/host/ehci-sched.c
1667 +++ b/drivers/usb/host/ehci-sched.c
1668 @@ -1576,6 +1576,63 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1669 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1670 }
1671
1672 +#define AB_REG_BAR_LOW 0xe0
1673 +#define AB_REG_BAR_HIGH 0xe1
1674 +#define AB_INDX(addr) ((addr) + 0x00)
1675 +#define AB_DATA(addr) ((addr) + 0x04)
1676 +#define NB_PCIE_INDX_ADDR 0xe0
1677 +#define NB_PCIE_INDX_DATA 0xe4
1678 +#define NB_PIF0_PWRDOWN_0 0x01100012
1679 +#define NB_PIF0_PWRDOWN_1 0x01100013
1680 +
1681 +static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
1682 +{
1683 + u32 addr, addr_low, addr_high, val;
1684 +
1685 + outb_p(AB_REG_BAR_LOW, 0xcd6);
1686 + addr_low = inb_p(0xcd7);
1687 + outb_p(AB_REG_BAR_HIGH, 0xcd6);
1688 + addr_high = inb_p(0xcd7);
1689 + addr = addr_high << 8 | addr_low;
1690 + outl_p(0x30, AB_INDX(addr));
1691 + outl_p(0x40, AB_DATA(addr));
1692 + outl_p(0x34, AB_INDX(addr));
1693 + val = inl_p(AB_DATA(addr));
1694 +
1695 + if (disable) {
1696 + val &= ~0x8;
1697 + val |= (1 << 4) | (1 << 9);
1698 + } else {
1699 + val |= 0x8;
1700 + val &= ~((1 << 4) | (1 << 9));
1701 + }
1702 + outl_p(val, AB_DATA(addr));
1703 +
1704 + if (amd_nb_dev) {
1705 + addr = NB_PIF0_PWRDOWN_0;
1706 + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
1707 + pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
1708 + if (disable)
1709 + val &= ~(0x3f << 7);
1710 + else
1711 + val |= 0x3f << 7;
1712 +
1713 + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
1714 +
1715 + addr = NB_PIF0_PWRDOWN_1;
1716 + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
1717 + pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
1718 + if (disable)
1719 + val &= ~(0x3f << 7);
1720 + else
1721 + val |= 0x3f << 7;
1722 +
1723 + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
1724 + }
1725 +
1726 + return;
1727 +}
1728 +
1729 /* fit urb's itds into the selected schedule slot; activate as needed */
1730 static int
1731 itd_link_urb (
1732 @@ -1603,6 +1660,12 @@ itd_link_urb (
1733 next_uframe >> 3, next_uframe & 0x7);
1734 stream->start = jiffies;
1735 }
1736 +
1737 + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1738 + if (ehci->amd_l1_fix == 1)
1739 + ehci_quirk_amd_L1(ehci, 1);
1740 + }
1741 +
1742 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1743
1744 /* fill iTDs uframe by uframe */
1745 @@ -1729,6 +1792,11 @@ itd_complete (
1746 (void) disable_periodic(ehci);
1747 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1748
1749 + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1750 + if (ehci->amd_l1_fix == 1)
1751 + ehci_quirk_amd_L1(ehci, 0);
1752 + }
1753 +
1754 if (unlikely(list_is_singular(&stream->td_list))) {
1755 ehci_to_hcd(ehci)->self.bandwidth_allocated
1756 -= stream->bandwidth;
1757 @@ -2016,6 +2084,12 @@ sitd_link_urb (
1758 stream->interval, hc32_to_cpu(ehci, stream->splits));
1759 stream->start = jiffies;
1760 }
1761 +
1762 + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1763 + if (ehci->amd_l1_fix == 1)
1764 + ehci_quirk_amd_L1(ehci, 1);
1765 + }
1766 +
1767 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1768
1769 /* fill sITDs frame by frame */
1770 @@ -2118,6 +2192,11 @@ sitd_complete (
1771 (void) disable_periodic(ehci);
1772 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1773
1774 + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1775 + if (ehci->amd_l1_fix == 1)
1776 + ehci_quirk_amd_L1(ehci, 0);
1777 + }
1778 +
1779 if (list_is_singular(&stream->td_list)) {
1780 ehci_to_hcd(ehci)->self.bandwidth_allocated
1781 -= stream->bandwidth;
1782 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
1783 index 556c0b4..ac321ef 100644
1784 --- a/drivers/usb/host/ehci.h
1785 +++ b/drivers/usb/host/ehci.h
1786 @@ -130,6 +130,7 @@ struct ehci_hcd { /* one per controller */
1787 unsigned has_amcc_usb23:1;
1788 unsigned need_io_watchdog:1;
1789 unsigned broken_periodic:1;
1790 + unsigned amd_l1_fix:1;
1791
1792 /* required for usb32 quirk */
1793 #define OHCI_CTRL_HCFS (3 << 6)
1794 diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
1795 index 59eff72..e50823a 100644
1796 --- a/drivers/usb/serial/ch341.c
1797 +++ b/drivers/usb/serial/ch341.c
1798 @@ -479,12 +479,22 @@ static void ch341_read_int_callback(struct urb *urb)
1799 if (actual_length >= 4) {
1800 struct ch341_private *priv = usb_get_serial_port_data(port);
1801 unsigned long flags;
1802 + u8 prev_line_status = priv->line_status;
1803
1804 spin_lock_irqsave(&priv->lock, flags);
1805 priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
1806 if ((data[1] & CH341_MULT_STAT))
1807 priv->multi_status_change = 1;
1808 spin_unlock_irqrestore(&priv->lock, flags);
1809 +
1810 + if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
1811 + struct tty_struct *tty = tty_port_tty_get(&port->port);
1812 + if (tty)
1813 + usb_serial_handle_dcd_change(port, tty,
1814 + priv->line_status & CH341_BIT_DCD);
1815 + tty_kref_put(tty);
1816 + }
1817 +
1818 wake_up_interruptible(&priv->delta_msr_wait);
1819 }
1820
1821 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1822 index 9f8f0d0..05afb5c 100644
1823 --- a/drivers/usb/serial/cp210x.c
1824 +++ b/drivers/usb/serial/cp210x.c
1825 @@ -51,7 +51,6 @@ static void cp210x_break_ctl(struct tty_struct *, int);
1826 static int cp210x_startup(struct usb_serial *);
1827 static void cp210x_disconnect(struct usb_serial *);
1828 static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
1829 -static int cp210x_carrier_raised(struct usb_serial_port *p);
1830
1831 static int debug;
1832
1833 @@ -88,7 +87,6 @@ static struct usb_device_id id_table [] = {
1834 { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
1835 { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
1836 { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
1837 - { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
1838 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
1839 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
1840 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
1841 @@ -111,7 +109,9 @@ static struct usb_device_id id_table [] = {
1842 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
1843 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
1844 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
1845 + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
1846 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
1847 + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
1848 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
1849 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
1850 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1851 @@ -165,8 +165,7 @@ static struct usb_serial_driver cp210x_device = {
1852 .tiocmset = cp210x_tiocmset,
1853 .attach = cp210x_startup,
1854 .disconnect = cp210x_disconnect,
1855 - .dtr_rts = cp210x_dtr_rts,
1856 - .carrier_raised = cp210x_carrier_raised
1857 + .dtr_rts = cp210x_dtr_rts
1858 };
1859
1860 /* Config request types */
1861 @@ -800,15 +799,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
1862 return result;
1863 }
1864
1865 -static int cp210x_carrier_raised(struct usb_serial_port *p)
1866 -{
1867 - unsigned int control;
1868 - cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
1869 - if (control & CONTROL_DCD)
1870 - return 1;
1871 - return 0;
1872 -}
1873 -
1874 static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
1875 {
1876 struct usb_serial_port *port = tty->driver_data;
1877 diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
1878 index 68e80be..2acfb38 100644
1879 --- a/drivers/usb/serial/digi_acceleport.c
1880 +++ b/drivers/usb/serial/digi_acceleport.c
1881 @@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty);
1882 static int digi_chars_in_buffer(struct tty_struct *tty);
1883 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
1884 static void digi_close(struct usb_serial_port *port);
1885 -static int digi_carrier_raised(struct usb_serial_port *port);
1886 static void digi_dtr_rts(struct usb_serial_port *port, int on);
1887 static int digi_startup_device(struct usb_serial *serial);
1888 static int digi_startup(struct usb_serial *serial);
1889 @@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = {
1890 .open = digi_open,
1891 .close = digi_close,
1892 .dtr_rts = digi_dtr_rts,
1893 - .carrier_raised = digi_carrier_raised,
1894 .write = digi_write,
1895 .write_room = digi_write_room,
1896 .write_bulk_callback = digi_write_bulk_callback,
1897 @@ -1338,14 +1336,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on)
1898 digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
1899 }
1900
1901 -static int digi_carrier_raised(struct usb_serial_port *port)
1902 -{
1903 - struct digi_port *priv = usb_get_serial_port_data(port);
1904 - if (priv->dp_modem_signals & TIOCM_CD)
1905 - return 1;
1906 - return 0;
1907 -}
1908 -
1909 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
1910 {
1911 int ret;
1912 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1913 index df9c632..e371888 100644
1914 --- a/drivers/usb/serial/ftdi_sio.c
1915 +++ b/drivers/usb/serial/ftdi_sio.c
1916 @@ -104,6 +104,7 @@ struct ftdi_sio_quirk {
1917 static int ftdi_jtag_probe(struct usb_serial *serial);
1918 static int ftdi_mtxorb_hack_setup(struct usb_serial *serial);
1919 static int ftdi_NDI_device_setup(struct usb_serial *serial);
1920 +static int ftdi_stmclite_probe(struct usb_serial *serial);
1921 static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
1922 static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
1923
1924 @@ -127,6 +128,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
1925 .port_probe = ftdi_HE_TIRA1_setup,
1926 };
1927
1928 +static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
1929 + .probe = ftdi_stmclite_probe,
1930 +};
1931 +
1932 /*
1933 * The 8U232AM has the same API as the sio except for:
1934 * - it can support MUCH higher baudrates; up to:
1935 @@ -620,6 +625,7 @@ static struct usb_device_id id_table_combined [] = {
1936 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
1937 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
1938 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
1939 + { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
1940 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
1941 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
1942 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
1943 @@ -681,7 +687,17 @@ static struct usb_device_id id_table_combined [] = {
1944 { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
1945 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
1946 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
1947 - { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
1948 + { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
1949 + { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
1950 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
1951 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
1952 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
1953 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
1954 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
1955 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
1956 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
1957 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
1958 + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
1959 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
1960 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
1961 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
1962 @@ -805,6 +821,8 @@ static struct usb_device_id id_table_combined [] = {
1963 { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
1964 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
1965 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1966 + { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
1967 + .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
1968 { }, /* Optional parameter entry */
1969 { } /* Terminating entry */
1970 };
1971 @@ -1738,6 +1756,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1972 }
1973
1974 /*
1975 + * The first and second ports on STMCLite adaptors are reserved for the
1976 + * JTAG interface, and the fourth port is for PIO.
1977 + */
1978 +static int ftdi_stmclite_probe(struct usb_serial *serial)
1979 +{
1980 + struct usb_device *udev = serial->dev;
1981 + struct usb_interface *interface = serial->interface;
1982 +
1983 + dbg("%s", __func__);
1984 +
1985 + if (interface == udev->actconfig->interface[2])
1986 + return 0;
1987 +
1988 + dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
1989 +
1990 + return -ENODEV;
1991 +}
1992 +
1993 +/*
1994 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
1995 * We have to correct it if we want to read from it.
1996 */
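
The probe filter added above is a general technique for composite devices: the driver claims only the interfaces that really are serial ports and returns -ENODEV for the rest, leaving them free for other drivers. A kernel-context sketch of the idea with an illustrative name (the interface index is device-specific):

#include <linux/usb.h>
#include <linux/usb/serial.h>

static int example_probe_third_interface_only(struct usb_serial *serial)
{
	struct usb_device *udev = serial->dev;

	/* actconfig->interface[2] is the third interface (index 2) */
	if (serial->interface == udev->actconfig->interface[2])
		return 0;	/* bind: this interface is the UART */

	return -ENODEV;		/* skip: reserved (e.g. for JTAG) */
}
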
1997 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1998 index 7d28f1c..c8d0fec 100644
1999 --- a/drivers/usb/serial/ftdi_sio_ids.h
2000 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2001 @@ -518,6 +518,12 @@
2002 #define RATOC_PRODUCT_ID_USB60F 0xb020
2003
2004 /*
2005 + * Acton Research Corp.
2006 + */
2007 +#define ACTON_VID 0x0647 /* Vendor ID */
2008 +#define ACTON_SPECTRAPRO_PID 0x0100
2009 +
2010 +/*
2011 * Contec products (http://www.contec.com)
2012 * Submitted by Daniel Sangorrin
2013 */
2014 @@ -576,11 +582,23 @@
2015 #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
2016
2017 /*
2018 - * Icom ID-1 digital transceiver
2019 + * Definitions for Icom Inc. devices
2020 */
2021 -
2022 -#define ICOM_ID1_VID 0x0C26
2023 -#define ICOM_ID1_PID 0x0004
2024 +#define ICOM_VID 0x0C26 /* Icom vendor ID */
2025 +/* Note: ID-1 is a communications transceiver for HAM-radio operators */
2026 +#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */
2027 +/* Note: OPC is an optional cable to connect an Icom transceiver */
2028 +#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */
2029 +/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
2030 +#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */
2031 +#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */
2032 +#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port */
2033 +#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */
2034 +#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */
2035 +#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */
2036 +#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */
2037 +#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */
2038 +#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */
2039
2040 /*
2041 * GN Otometrics (http://www.otometrics.com)
2042 @@ -1029,6 +1047,12 @@
2043 #define WHT_PID 0x0004 /* Wireless Handheld Terminal */
2044
2045 /*
2046 + * STMicroelectronics
2047 + */
2048 +#define ST_VID 0x0483
2049 +#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
2050 +
2051 +/*
2052 * Papouch products (http://www.papouch.com/)
2053 * Submitted by Folkert van Heusden
2054 */
2055 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
2056 index e0fb294..a7b8a55 100644
2057 --- a/drivers/usb/serial/generic.c
2058 +++ b/drivers/usb/serial/generic.c
2059 @@ -578,6 +578,26 @@ int usb_serial_handle_break(struct usb_serial_port *port)
2060 }
2061 EXPORT_SYMBOL_GPL(usb_serial_handle_break);
2062
2063 +/**
2064 + * usb_serial_handle_dcd_change - handle a change of carrier detect state
2065 + * @usb_port: usb_serial_port structure for the open port
2066 + * @tty: tty_struct structure for the port
2067 + * @status: new carrier detect status, nonzero if active
2068 + */
2069 +void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
2070 + struct tty_struct *tty, unsigned int status)
2071 +{
2072 + struct tty_port *port = &usb_port->port;
2073 +
2074 + dbg("%s - port %d, status %d", __func__, usb_port->number, status);
2075 +
2076 + if (status)
2077 + wake_up_interruptible(&port->open_wait);
2078 + else if (tty && !C_CLOCAL(tty))
2079 + tty_hangup(tty);
2080 +}
2081 +EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
2082 +
2083 int usb_serial_generic_resume(struct usb_serial *serial)
2084 {
2085 struct usb_serial_port *port;
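
Callers such as the ch341 and pl2303 changes in this patch follow the same recipe: remember the previous modem-status byte, XOR it with the new one to find changed bits, and call the helper only on a DCD transition. A standalone sketch of that edge-detection recipe (the bit position is illustrative):

#include <stdio.h>

#define MSR_DCD 0x01	/* assumed DCD bit position for the sketch */

static void on_dcd_change(unsigned int raised)
{
	/* what usb_serial_handle_dcd_change() does: wake blocked opens
	 * on carrier-up, hang the line up on carrier-down (unless the
	 * termios CLOCAL flag says to ignore carrier). */
	puts(raised ? "carrier up: wake open_wait"
		    : "carrier down: hangup unless CLOCAL");
}

static void update_line_status(unsigned char prev, unsigned char curr)
{
	if ((prev ^ curr) & MSR_DCD)	/* did DCD change at all? */
		on_dcd_change(curr & MSR_DCD);
}

int main(void)
{
	update_line_status(0x00, MSR_DCD);	/* carrier appears */
	update_line_status(MSR_DCD, 0x00);	/* carrier drops */
	return 0;
}
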
2086 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
2087 index b97960a..72b256c 100644
2088 --- a/drivers/usb/serial/io_edgeport.c
2089 +++ b/drivers/usb/serial/io_edgeport.c
2090 @@ -2935,8 +2935,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2091
2092 dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
2093
2094 - edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
2095 - edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
2096 + edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
2097 + edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
2098 edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
2099
2100 for (rec = ihex_next_binrec(rec); rec;
2101 diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
2102 index 1296a09..0761b5a 100644
2103 --- a/drivers/usb/serial/keyspan_pda.c
2104 +++ b/drivers/usb/serial/keyspan_pda.c
2105 @@ -663,22 +663,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on)
2106 }
2107 }
2108
2109 -static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
2110 -{
2111 - struct usb_serial *serial = port->serial;
2112 - unsigned char modembits;
2113 -
2114 - /* If we can read the modem status and the DCD is low then
2115 - carrier is not raised yet */
2116 - if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
2117 - if (!(modembits & (1>>6)))
2118 - return 0;
2119 - }
2120 - /* Carrier raised, or we failed (eg disconnected) so
2121 - progress accordingly */
2122 - return 1;
2123 -}
2124 -
2125
2126 static int keyspan_pda_open(struct tty_struct *tty,
2127 struct usb_serial_port *port)
2128 @@ -854,7 +838,6 @@ static struct usb_serial_driver keyspan_pda_device = {
2129 .id_table = id_table_std,
2130 .num_ports = 1,
2131 .dtr_rts = keyspan_pda_dtr_rts,
2132 - .carrier_raised = keyspan_pda_carrier_raised,
2133 .open = keyspan_pda_open,
2134 .close = keyspan_pda_close,
2135 .write = keyspan_pda_write,
2136 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2137 index c2e6983..cf5ff7d 100644
2138 --- a/drivers/usb/serial/option.c
2139 +++ b/drivers/usb/serial/option.c
2140 @@ -369,7 +369,16 @@ static int option_resume(struct usb_serial *serial);
2141 #define HAIER_VENDOR_ID 0x201e
2142 #define HAIER_PRODUCT_CE100 0x2009
2143
2144 -#define CINTERION_VENDOR_ID 0x0681
2145 +/* Cinterion (formerly Siemens) products */
2146 +#define SIEMENS_VENDOR_ID 0x0681
2147 +#define CINTERION_VENDOR_ID 0x1e2d
2148 +#define CINTERION_PRODUCT_HC25_MDM 0x0047
2149 +#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
2150 +#define CINTERION_PRODUCT_HC28_MDM 0x004C
2151 +#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
2152 +#define CINTERION_PRODUCT_EU3_E 0x0051
2153 +#define CINTERION_PRODUCT_EU3_P 0x0052
2154 +#define CINTERION_PRODUCT_PH8 0x0053
2155
2156 /* Olivetti products */
2157 #define OLIVETTI_VENDOR_ID 0x0b3c
2158 @@ -895,7 +904,17 @@ static struct usb_device_id option_ids[] = {
2159 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
2160 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
2161 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
2162 - { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
2163 + /* Cinterion */
2164 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
2165 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
2166 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
2167 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
2168 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
2169 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
2170 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
2171 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
2172 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
2173 +
2174 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
2175 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
2176 { } /* Terminating entry */
2177 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
2178 index ecb1708..b336017 100644
2179 --- a/drivers/usb/serial/pl2303.c
2180 +++ b/drivers/usb/serial/pl2303.c
2181 @@ -59,6 +59,8 @@ static struct usb_device_id id_table [] = {
2182 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
2183 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
2184 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
2185 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
2186 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
2187 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
2188 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
2189 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
2190 @@ -955,9 +957,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
2191 {
2192
2193 struct pl2303_private *priv = usb_get_serial_port_data(port);
2194 + struct tty_struct *tty;
2195 unsigned long flags;
2196 u8 status_idx = UART_STATE;
2197 u8 length = UART_STATE + 1;
2198 + u8 prev_line_status;
2199 u16 idv, idp;
2200
2201 idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
2202 @@ -979,11 +983,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
2203
2204 /* Save off the uart status for others to look at */
2205 spin_lock_irqsave(&priv->lock, flags);
2206 + prev_line_status = priv->line_status;
2207 priv->line_status = data[status_idx];
2208 spin_unlock_irqrestore(&priv->lock, flags);
2209 if (priv->line_status & UART_BREAK_ERROR)
2210 usb_serial_handle_break(port);
2211 wake_up_interruptible(&priv->delta_msr_wait);
2212 +
2213 + tty = tty_port_tty_get(&port->port);
2214 + if (!tty)
2215 + return;
2216 + if ((priv->line_status ^ prev_line_status) & UART_DCD)
2217 + usb_serial_handle_dcd_change(port, tty,
2218 + priv->line_status & UART_DCD);
2219 + tty_kref_put(tty);
2220 }
2221
2222 static void pl2303_read_int_callback(struct urb *urb)
2223 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
2224 index 01bc64b..4d043e4 100644
2225 --- a/drivers/usb/serial/pl2303.h
2226 +++ b/drivers/usb/serial/pl2303.h
2227 @@ -20,6 +20,8 @@
2228 #define PL2303_PRODUCT_ID_ALDIGA 0x0611
2229 #define PL2303_PRODUCT_ID_MMX 0x0612
2230 #define PL2303_PRODUCT_ID_GPRS 0x0609
2231 +#define PL2303_PRODUCT_ID_HCR331 0x331a
2232 +#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
2233
2234 #define ATEN_VENDOR_ID 0x0557
2235 #define ATEN_VENDOR_ID2 0x0547
2236 diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
2237 index 1e58220..f863d2d 100644
2238 --- a/drivers/usb/serial/spcp8x5.c
2239 +++ b/drivers/usb/serial/spcp8x5.c
2240 @@ -137,7 +137,7 @@ struct spcp8x5_usb_ctrl_arg {
2241
2242 /* how come ??? */
2243 #define UART_STATE 0x08
2244 -#define UART_STATE_TRANSIENT_MASK 0x74
2245 +#define UART_STATE_TRANSIENT_MASK 0x75
2246 #define UART_DCD 0x01
2247 #define UART_DSR 0x02
2248 #define UART_BREAK_ERROR 0x04
2249 @@ -734,6 +734,10 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
2250 tty_insert_flip_char(tty, data[i], tty_flag);
2251 tty_flip_buffer_push(tty);
2252 }
2253 +
2254 + if (status & UART_DCD)
2255 + usb_serial_handle_dcd_change(port, tty,
2256 + priv->line_status & MSR_STATUS_LINE_DCD);
2257 tty_kref_put(tty);
2258
2259 /* Schedule the next read _if_ we are still open */
2260 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
2261 index 1e9dc88..18de038 100644
2262 --- a/drivers/usb/serial/ti_usb_3410_5052.c
2263 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
2264 @@ -366,9 +366,9 @@ failed_1port:
2265
2266 static void __exit ti_exit(void)
2267 {
2268 + usb_deregister(&ti_usb_driver);
2269 usb_serial_deregister(&ti_1port_device);
2270 usb_serial_deregister(&ti_2port_device);
2271 - usb_deregister(&ti_usb_driver);
2272 }
2273
2274
2275 diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
2276 index 44be6d7..fba2824 100644
2277 --- a/drivers/usb/storage/unusual_cypress.h
2278 +++ b/drivers/usb/storage/unusual_cypress.h
2279 @@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
2280 "Cypress ISD-300LP",
2281 US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
2282
2283 +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
2284 + "Super Top",
2285 + "USB 2.0 SATA BRIDGE",
2286 + US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0),
2287 +
2288 #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
2289 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2290 index 2ca0298..6ca33f2 100644
2291 --- a/drivers/usb/storage/unusual_devs.h
2292 +++ b/drivers/usb/storage/unusual_devs.h
2293 @@ -1043,6 +1043,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
2294 US_SC_DEVICE, US_PR_DEVICE, NULL,
2295 US_FL_BULK32),
2296
2297 +/* Reported by <ttkspam@free.fr>
2298 + * The device reports a vendor-specific device class, requiring an
2299 + * explicit vendor/product match.
2300 + */
2301 +UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
2302 + "MagicPixel",
2303 + "FW_Omega2",
2304 + US_SC_DEVICE, US_PR_DEVICE, NULL, 0),
2305 +
2306 /* Andrew Lunn <andrew@lunn.ch>
2307 * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
2308 * on LUN 4.
2309 @@ -1401,6 +1410,13 @@ UNUSUAL_DEV( 0x0fca, 0x0006, 0x0001, 0x0001,
2310 US_FL_IGNORE_DEVICE ),
2311 #endif
2312
2313 +/* Submitted by Nick Holloway */
2314 +UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
2315 + "VTech",
2316 + "Kidizoom",
2317 + US_SC_DEVICE, US_PR_DEVICE, NULL,
2318 + US_FL_FIX_CAPACITY ),
2319 +
2320 /* Reported by Michael Stattmann <michael@stattmann.com> */
2321 UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
2322 "Sony Ericsson",
2323 @@ -1880,6 +1896,22 @@ UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000,
2324 US_SC_DEVICE, US_PR_DEVICE, NULL,
2325 US_FL_BAD_SENSE ),
2326
2327 +/* Patch by Richard Schütz <r.schtz@t-online.de>
2328 + * This external hard drive enclosure uses a JMicron chip which
2329 + * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
2330 +UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
2331 + "TrekStor GmbH & Co. KG",
2332 + "DataStation maxi g.u",
2333 + US_SC_DEVICE, US_PR_DEVICE, NULL,
2334 + US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
2335 +
2336 +/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
2337 +UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
2338 + "Coby Electronics",
2339 + "MP3 Player",
2340 + US_SC_DEVICE, US_PR_DEVICE, NULL,
2341 + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
2342 +
2343 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
2344 "ST",
2345 "2A",
2346 diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
2347 index d43859f..5fed283 100644
2348 --- a/drivers/virtio/virtio_pci.c
2349 +++ b/drivers/virtio/virtio_pci.c
2350 @@ -95,11 +95,6 @@ static struct pci_device_id virtio_pci_id_table[] = {
2351
2352 MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
2353
2354 -/* A PCI device has it's own struct device and so does a virtio device so
2355 - * we create a place for the virtio devices to show up in sysfs. I think it
2356 - * would make more sense for virtio to not insist on having it's own device. */
2357 -static struct device *virtio_pci_root;
2358 -
2359 /* Convert a generic virtio device to our structure */
2360 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
2361 {
2362 @@ -628,7 +623,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
2363 if (vp_dev == NULL)
2364 return -ENOMEM;
2365
2366 - vp_dev->vdev.dev.parent = virtio_pci_root;
2367 + vp_dev->vdev.dev.parent = &pci_dev->dev;
2368 vp_dev->vdev.dev.release = virtio_pci_release_dev;
2369 vp_dev->vdev.config = &virtio_pci_config_ops;
2370 vp_dev->pci_dev = pci_dev;
2371 @@ -715,17 +710,7 @@ static struct pci_driver virtio_pci_driver = {
2372
2373 static int __init virtio_pci_init(void)
2374 {
2375 - int err;
2376 -
2377 - virtio_pci_root = root_device_register("virtio-pci");
2378 - if (IS_ERR(virtio_pci_root))
2379 - return PTR_ERR(virtio_pci_root);
2380 -
2381 - err = pci_register_driver(&virtio_pci_driver);
2382 - if (err)
2383 - root_device_unregister(virtio_pci_root);
2384 -
2385 - return err;
2386 + return pci_register_driver(&virtio_pci_driver);
2387 }
2388
2389 module_init(virtio_pci_init);
2390 @@ -733,7 +718,6 @@ module_init(virtio_pci_init);
2391 static void __exit virtio_pci_exit(void)
2392 {
2393 pci_unregister_driver(&virtio_pci_driver);
2394 - root_device_unregister(virtio_pci_root);
2395 }
2396
2397 module_exit(virtio_pci_exit);
2398 diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
2399 index 0d28982..e74a670 100644
2400 --- a/fs/nfs/direct.c
2401 +++ b/fs/nfs/direct.c
2402 @@ -401,15 +401,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
2403 pos += vec->iov_len;
2404 }
2405
2406 + /*
2407 + * If no bytes were started, return the error, and let the
2408 + * generic layer handle the completion.
2409 + */
2410 + if (requested_bytes == 0) {
2411 + nfs_direct_req_release(dreq);
2412 + return result < 0 ? result : -EIO;
2413 + }
2414 +
2415 if (put_dreq(dreq))
2416 nfs_direct_complete(dreq);
2417 -
2418 - if (requested_bytes != 0)
2419 - return 0;
2420 -
2421 - if (result < 0)
2422 - return result;
2423 - return -EIO;
2424 + return 0;
2425 }
2426
2427 static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
2428 @@ -829,15 +832,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
2429 pos += vec->iov_len;
2430 }
2431
2432 + /*
2433 + * If no bytes were started, return the error, and let the
2434 + * generic layer handle the completion.
2435 + */
2436 + if (requested_bytes == 0) {
2437 + nfs_direct_req_release(dreq);
2438 + return result < 0 ? result : -EIO;
2439 + }
2440 +
2441 if (put_dreq(dreq))
2442 nfs_direct_write_complete(dreq, dreq->inode);
2443 -
2444 - if (requested_bytes != 0)
2445 - return 0;
2446 -
2447 - if (result < 0)
2448 - return result;
2449 - return -EIO;
2450 + return 0;
2451 }
2452
2453 static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
2454 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2455 index 3d016e9..d2674f9 100644
2456 --- a/include/drm/drm_pciids.h
2457 +++ b/include/drm/drm_pciids.h
2458 @@ -28,7 +28,6 @@
2459 {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
2460 {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
2461 {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
2462 - {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
2463 {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
2464 {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
2465 {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
2466 diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
2467 index 6d527ee..a75d3a0 100644
2468 --- a/include/linux/hardirq.h
2469 +++ b/include/linux/hardirq.h
2470 @@ -64,6 +64,8 @@
2471 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
2472 #define NMI_OFFSET (1UL << NMI_SHIFT)
2473
2474 +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
2475 +
2476 #ifndef PREEMPT_ACTIVE
2477 #define PREEMPT_ACTIVE_BITS 1
2478 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
2479 @@ -82,10 +84,13 @@
2480 /*
2481 * Are we doing bottom half or hardware interrupt processing?
2482 * Are we in a softirq context? Interrupt context?
2483 + * in_softirq - Are we currently processing softirq or have bh disabled?
2484 + * in_serving_softirq - Are we currently processing softirq?
2485 */
2486 #define in_irq() (hardirq_count())
2487 #define in_softirq() (softirq_count())
2488 #define in_interrupt() (irq_count())
2489 +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
2490
2491 /*
2492 * Are we in NMI context?
2493 @@ -132,10 +137,12 @@ extern void synchronize_irq(unsigned int irq);
2494
2495 struct task_struct;
2496
2497 -#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2498 +#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
2499 static inline void account_system_vtime(struct task_struct *tsk)
2500 {
2501 }
2502 +#else
2503 +extern void account_system_vtime(struct task_struct *tsk);
2504 #endif
2505
2506 #if defined(CONFIG_NO_HZ)
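
The distinction the new macro draws: softirq_count() is nonzero both while a softirq handler runs and while bottom halves are merely disabled. In this series local_bh_disable() accounts in units of SOFTIRQ_DISABLE_OFFSET (two SOFTIRQ_OFFSET units) while actually serving a softirq adds a single SOFTIRQ_OFFSET, so the low softirq bit identifies real softirq execution. A standalone arithmetic sketch using the kernel's shift of 8:

#include <stdio.h>

#define SOFTIRQ_SHIFT		8
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
#define SOFTIRQ_MASK		(255UL << SOFTIRQ_SHIFT)

int main(void)
{
	unsigned long preempt_count = 0;

	preempt_count += SOFTIRQ_DISABLE_OFFSET;	/* local_bh_disable() */
	printf("bh disabled: in_softirq=%lx in_serving_softirq=%lx\n",
	       preempt_count & SOFTIRQ_MASK, preempt_count & SOFTIRQ_OFFSET);

	preempt_count += SOFTIRQ_OFFSET;		/* enter __do_softirq() */
	printf("serving:     in_softirq=%lx in_serving_softirq=%lx\n",
	       preempt_count & SOFTIRQ_MASK, preempt_count & SOFTIRQ_OFFSET);
	return 0;
}
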
2507 diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
2508 index e766072..a1c9e21 100644
2509 --- a/include/linux/ieee80211.h
2510 +++ b/include/linux/ieee80211.h
2511 @@ -872,7 +872,7 @@ struct ieee80211_ht_info {
2512 /* block-ack parameters */
2513 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
2514 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
2515 -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
2516 +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
2517 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
2518 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
2519
2520 diff --git a/include/linux/klist.h b/include/linux/klist.h
2521 index e91a4e5..a370ce5 100644
2522 --- a/include/linux/klist.h
2523 +++ b/include/linux/klist.h
2524 @@ -22,7 +22,7 @@ struct klist {
2525 struct list_head k_list;
2526 void (*get)(struct klist_node *);
2527 void (*put)(struct klist_node *);
2528 -} __attribute__ ((aligned (4)));
2529 +} __attribute__ ((aligned (sizeof(void *))));
2530
2531 #define KLIST_INIT(_name, _get, _put) \
2532 { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
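
The alignment matters because klist tags the low bit of a node's back-pointer to mark dead entries; that only works if every struct klist address leaves bit 0 free, and a hard-coded aligned(4) does not track pointer size on 64-bit builds the way aligned(sizeof(void *)) does. A standalone sketch of the pointer-tagging trick:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Like struct klist, this object must leave bit 0 of its address free
 * so the bit can be used as an in-pointer flag (KNODE_DEAD in klist). */
struct tagged_target {
	int payload;
} __attribute__((aligned(sizeof(void *))));

#define FLAG_DEAD 1UL

static void *tag(struct tagged_target *t)
{
	return (void *)((uintptr_t)t | FLAG_DEAD);
}

static struct tagged_target *untag(void *p)
{
	return (struct tagged_target *)((uintptr_t)p & ~FLAG_DEAD);
}

int main(void)
{
	static struct tagged_target t = { .payload = 42 };
	void *p;

	assert(((uintptr_t)&t & FLAG_DEAD) == 0); /* bit 0 free by alignment */
	p = tag(&t);
	printf("dead=%d payload=%d\n",
	       (int)((uintptr_t)p & FLAG_DEAD), untag(p)->payload);
	return 0;
}
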
2533 diff --git a/include/linux/sched.h b/include/linux/sched.h
2534 index 957a25f..71849bf 100644
2535 --- a/include/linux/sched.h
2536 +++ b/include/linux/sched.h
2537 @@ -728,14 +728,6 @@ struct user_struct {
2538 uid_t uid;
2539 struct user_namespace *user_ns;
2540
2541 -#ifdef CONFIG_USER_SCHED
2542 - struct task_group *tg;
2543 -#ifdef CONFIG_SYSFS
2544 - struct kobject kobj;
2545 - struct delayed_work work;
2546 -#endif
2547 -#endif
2548 -
2549 #ifdef CONFIG_PERF_EVENTS
2550 atomic_long_t locked_vm;
2551 #endif
2552 @@ -902,6 +894,7 @@ struct sched_group {
2553 * single CPU.
2554 */
2555 unsigned int cpu_power;
2556 + unsigned int group_weight;
2557
2558 /*
2559 * The CPUs this group covers.
2560 @@ -1121,7 +1114,7 @@ struct sched_class {
2561 struct task_struct *task);
2562
2563 #ifdef CONFIG_FAIR_GROUP_SCHED
2564 - void (*moved_group) (struct task_struct *p, int on_rq);
2565 + void (*task_move_group) (struct task_struct *p, int on_rq);
2566 #endif
2567 };
2568
2569 @@ -1736,8 +1729,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
2570 /*
2571 * Per process flags
2572 */
2573 -#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
2574 - /* Not implemented yet, only for 486*/
2575 +#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
2576 #define PF_STARTING 0x00000002 /* being created */
2577 #define PF_EXITING 0x00000004 /* getting shut down */
2578 #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
2579 @@ -1874,6 +1866,19 @@ extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2580 */
2581 extern unsigned long long cpu_clock(int cpu);
2582
2583 +#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2584 +/*
2585 + * An interface for opting in to irq time accounting at runtime, based
2586 + * on sched_clock. The opt-in is explicit so that platforms with slow
2587 + * sched_clocks do not pay a performance penalty by default.
2588 + */
2589 +extern void enable_sched_clock_irqtime(void);
2590 +extern void disable_sched_clock_irqtime(void);
2591 +#else
2592 +static inline void enable_sched_clock_irqtime(void) {}
2593 +static inline void disable_sched_clock_irqtime(void) {}
2594 +#endif
2595 +
2596 extern unsigned long long
2597 task_sched_runtime(struct task_struct *task);
2598 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
2599 @@ -2409,9 +2414,9 @@ extern int __cond_resched_lock(spinlock_t *lock);
2600
2601 extern int __cond_resched_softirq(void);
2602
2603 -#define cond_resched_softirq() ({ \
2604 - __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
2605 - __cond_resched_softirq(); \
2606 +#define cond_resched_softirq() ({ \
2607 + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2608 + __cond_resched_softirq(); \
2609 })
2610
2611 /*
2612 @@ -2500,13 +2505,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2613
2614 extern void normalize_rt_tasks(void);
2615
2616 -#ifdef CONFIG_GROUP_SCHED
2617 +#ifdef CONFIG_CGROUP_SCHED
2618
2619 extern struct task_group init_task_group;
2620 -#ifdef CONFIG_USER_SCHED
2621 -extern struct task_group root_task_group;
2622 -extern void set_tg_uid(struct user_struct *user);
2623 -#endif
2624
2625 extern struct task_group *sched_create_group(struct task_group *parent);
2626 extern void sched_destroy_group(struct task_group *tg);
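
How the opt-in is meant to be used: an architecture calls enable_sched_clock_irqtime() only when it knows its sched_clock is cheap (in this series, x86 does so from its TSC setup when the TSC is usable). A hypothetical platform-init fragment, kernel context, with the feature check left as an assumption:

#include <linux/init.h>
#include <linux/sched.h>

static void __init example_platform_clock_init(void)
{
	bool clock_is_fast = true;	/* assumed platform-specific check */

	if (clock_is_fast)
		enable_sched_clock_irqtime();
	/* otherwise stay opted out: account_system_vtime() returns
	 * immediately and adds no per-interrupt overhead. */
}
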
2627 diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
2628 index ce911eb..bb911e3 100644
2629 --- a/include/linux/usb/serial.h
2630 +++ b/include/linux/usb/serial.h
2631 @@ -326,6 +326,9 @@ extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
2632 struct usb_serial_port *port,
2633 unsigned int ch);
2634 extern int usb_serial_handle_break(struct usb_serial_port *port);
2635 +extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
2636 + struct tty_struct *tty,
2637 + unsigned int status);
2638
2639
2640 extern int usb_serial_bus_register(struct usb_serial_driver *device);
2641 diff --git a/init/Kconfig b/init/Kconfig
2642 index eb4b337..0d6388a 100644
2643 --- a/init/Kconfig
2644 +++ b/init/Kconfig
2645 @@ -426,57 +426,6 @@ config LOG_BUF_SHIFT
2646 config HAVE_UNSTABLE_SCHED_CLOCK
2647 bool
2648
2649 -config GROUP_SCHED
2650 - bool "Group CPU scheduler"
2651 - depends on EXPERIMENTAL
2652 - default n
2653 - help
2654 - This feature lets CPU scheduler recognize task groups and control CPU
2655 - bandwidth allocation to such task groups.
2656 - In order to create a group from arbitrary set of processes, use
2657 - CONFIG_CGROUPS. (See Control Group support.)
2658 -
2659 -config FAIR_GROUP_SCHED
2660 - bool "Group scheduling for SCHED_OTHER"
2661 - depends on GROUP_SCHED
2662 - default GROUP_SCHED
2663 -
2664 -config RT_GROUP_SCHED
2665 - bool "Group scheduling for SCHED_RR/FIFO"
2666 - depends on EXPERIMENTAL
2667 - depends on GROUP_SCHED
2668 - default n
2669 - help
2670 - This feature lets you explicitly allocate real CPU bandwidth
2671 - to users or control groups (depending on the "Basis for grouping tasks"
2672 - setting below. If enabled, it will also make it impossible to
2673 - schedule realtime tasks for non-root users until you allocate
2674 - realtime bandwidth for them.
2675 - See Documentation/scheduler/sched-rt-group.txt for more information.
2676 -
2677 -choice
2678 - depends on GROUP_SCHED
2679 - prompt "Basis for grouping tasks"
2680 - default USER_SCHED
2681 -
2682 -config USER_SCHED
2683 - bool "user id"
2684 - help
2685 - This option will choose userid as the basis for grouping
2686 - tasks, thus providing equal CPU bandwidth to each user.
2687 -
2688 -config CGROUP_SCHED
2689 - bool "Control groups"
2690 - depends on CGROUPS
2691 - help
2692 - This option allows you to create arbitrary task groups
2693 - using the "cgroup" pseudo filesystem and control
2694 - the cpu bandwidth allocated to each such task group.
2695 - Refer to Documentation/cgroups/cgroups.txt for more
2696 - information on "cgroup" pseudo filesystem.
2697 -
2698 -endchoice
2699 -
2700 menuconfig CGROUPS
2701 boolean "Control Group support"
2702 help
2703 @@ -597,6 +546,35 @@ config CGROUP_MEM_RES_CTLR_SWAP
2704 Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
2705 size is 4096bytes, 512k per 1Gbytes of swap.
2706
2707 +menuconfig CGROUP_SCHED
2708 + bool "Group CPU scheduler"
2709 + depends on EXPERIMENTAL && CGROUPS
2710 + default n
2711 + help
2712 + This feature lets CPU scheduler recognize task groups and control CPU
2713 + bandwidth allocation to such task groups. It uses cgroups to group
2714 + tasks.
2715 +
2716 +if CGROUP_SCHED
2717 +config FAIR_GROUP_SCHED
2718 + bool "Group scheduling for SCHED_OTHER"
2719 + depends on CGROUP_SCHED
2720 + default CGROUP_SCHED
2721 +
2722 +config RT_GROUP_SCHED
2723 + bool "Group scheduling for SCHED_RR/FIFO"
2724 + depends on EXPERIMENTAL
2725 + depends on CGROUP_SCHED
2726 + default n
2727 + help
2728 + This feature lets you explicitly allocate real CPU bandwidth
2729 + to task groups. If enabled, it will also make it impossible to
2730 + schedule realtime tasks for non-root users until you allocate
2731 + realtime bandwidth for them.
2732 + See Documentation/scheduler/sched-rt-group.txt for more information.
2733 +
2734 +endif #CGROUP_SCHED
2735 +
2736 endif # CGROUPS
2737
2738 config MM_OWNER
2739 diff --git a/init/calibrate.c b/init/calibrate.c
2740 index 6eb48e5..24fe022 100644
2741 --- a/init/calibrate.c
2742 +++ b/init/calibrate.c
2743 @@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
2744 pre_start = 0;
2745 read_current_timer(&start);
2746 start_jiffies = jiffies;
2747 - while (jiffies <= (start_jiffies + 1)) {
2748 + while (time_before_eq(jiffies, start_jiffies + 1)) {
2749 pre_start = start;
2750 read_current_timer(&start);
2751 }
2752 @@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
2753
2754 pre_end = 0;
2755 end = post_start;
2756 - while (jiffies <=
2757 - (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
2758 + while (time_before_eq(jiffies, start_jiffies + 1 +
2759 + DELAY_CALIBRATION_TICKS)) {
2760 pre_end = end;
2761 read_current_timer(&end);
2762 }
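
The switch to time_before_eq() is about jiffies wraparound: a raw unsigned comparison fails when start_jiffies sits just below the wrap point, making the calibration loop exit before a tick has elapsed, while the jiffies.h idiom compares via a signed difference and keeps working across the wrap. A standalone sketch of both comparisons:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Same idiom as time_before_eq() in include/linux/jiffies.h:
 * compare through a signed difference so wraparound cancels out. */
static int sketch_time_before_eq(jiffies_t a, jiffies_t b)
{
	return (long)(a - b) <= 0;
}

int main(void)
{
	jiffies_t start = (jiffies_t)-1;	/* counter about to wrap */
	jiffies_t now = start;			/* no tick has passed yet */
	jiffies_t limit = start + 1;		/* wraps around to 0 */

	printf("raw:       now <= limit is %d (loop would exit too early)\n",
	       now <= limit);
	printf("wrap-safe: time_before_eq(now, limit) is %d (loop keeps waiting)\n",
	       sketch_time_before_eq(now, limit));
	return 0;
}
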
2763 diff --git a/kernel/capability.c b/kernel/capability.c
2764 index 4e17041..8a944f5 100644
2765 --- a/kernel/capability.c
2766 +++ b/kernel/capability.c
2767 @@ -15,7 +15,6 @@
2768 #include <linux/syscalls.h>
2769 #include <linux/pid_namespace.h>
2770 #include <asm/uaccess.h>
2771 -#include "cred-internals.h"
2772
2773 /*
2774 * Leveraged for setting/resetting capabilities
2775 diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
2776 deleted file mode 100644
2777 index 2dc4fc2..0000000
2778 --- a/kernel/cred-internals.h
2779 +++ /dev/null
2780 @@ -1,21 +0,0 @@
2781 -/* Internal credentials stuff
2782 - *
2783 - * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
2784 - * Written by David Howells (dhowells@redhat.com)
2785 - *
2786 - * This program is free software; you can redistribute it and/or
2787 - * modify it under the terms of the GNU General Public Licence
2788 - * as published by the Free Software Foundation; either version
2789 - * 2 of the Licence, or (at your option) any later version.
2790 - */
2791 -
2792 -/*
2793 - * user.c
2794 - */
2795 -static inline void sched_switch_user(struct task_struct *p)
2796 -{
2797 -#ifdef CONFIG_USER_SCHED
2798 - sched_move_task(p);
2799 -#endif /* CONFIG_USER_SCHED */
2800 -}
2801 -
2802 diff --git a/kernel/cred.c b/kernel/cred.c
2803 index 099f5e6..5fce398 100644
2804 --- a/kernel/cred.c
2805 +++ b/kernel/cred.c
2806 @@ -16,7 +16,6 @@
2807 #include <linux/init_task.h>
2808 #include <linux/security.h>
2809 #include <linux/cn_proc.h>
2810 -#include "cred-internals.h"
2811
2812 #if 0
2813 #define kdebug(FMT, ...) \
2814 @@ -553,8 +552,6 @@ int commit_creds(struct cred *new)
2815 atomic_dec(&old->user->processes);
2816 alter_cred_subscribers(old, -2);
2817
2818 - sched_switch_user(task);
2819 -
2820 /* send notifications */
2821 if (new->uid != old->uid ||
2822 new->euid != old->euid ||
2823 diff --git a/kernel/exit.c b/kernel/exit.c
2824 index d890628..0f8fae3 100644
2825 --- a/kernel/exit.c
2826 +++ b/kernel/exit.c
2827 @@ -54,7 +54,6 @@
2828 #include <asm/unistd.h>
2829 #include <asm/pgtable.h>
2830 #include <asm/mmu_context.h>
2831 -#include "cred-internals.h"
2832
2833 static void exit_mm(struct task_struct * tsk);
2834
2835 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
2836 index 528dd78..9cd2b1c 100644
2837 --- a/kernel/ksysfs.c
2838 +++ b/kernel/ksysfs.c
2839 @@ -176,16 +176,8 @@ static int __init ksysfs_init(void)
2840 goto group_exit;
2841 }
2842
2843 - /* create the /sys/kernel/uids/ directory */
2844 - error = uids_sysfs_init();
2845 - if (error)
2846 - goto notes_exit;
2847 -
2848 return 0;
2849
2850 -notes_exit:
2851 - if (notes_size > 0)
2852 - sysfs_remove_bin_file(kernel_kobj, &notes_attr);
2853 group_exit:
2854 sysfs_remove_group(kernel_kobj, &kernel_attr_group);
2855 kset_exit:
2856 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2857 index 23bd09c..05625f6 100644
2858 --- a/kernel/ptrace.c
2859 +++ b/kernel/ptrace.c
2860 @@ -314,7 +314,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
2861 child->exit_code = data;
2862 dead = __ptrace_detach(current, child);
2863 if (!child->exit_state)
2864 - wake_up_process(child);
2865 + wake_up_state(child, TASK_TRACED | TASK_STOPPED);
2866 }
2867 write_unlock_irq(&tasklist_lock);
2868
2869 diff --git a/kernel/sched.c b/kernel/sched.c
2870 index 9652eca..df16a0a 100644
2871 --- a/kernel/sched.c
2872 +++ b/kernel/sched.c
2873 @@ -233,7 +233,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
2874 */
2875 static DEFINE_MUTEX(sched_domains_mutex);
2876
2877 -#ifdef CONFIG_GROUP_SCHED
2878 +#ifdef CONFIG_CGROUP_SCHED
2879
2880 #include <linux/cgroup.h>
2881
2882 @@ -243,13 +243,7 @@ static LIST_HEAD(task_groups);
2883
2884 /* task group related information */
2885 struct task_group {
2886 -#ifdef CONFIG_CGROUP_SCHED
2887 struct cgroup_subsys_state css;
2888 -#endif
2889 -
2890 -#ifdef CONFIG_USER_SCHED
2891 - uid_t uid;
2892 -#endif
2893
2894 #ifdef CONFIG_FAIR_GROUP_SCHED
2895 /* schedulable entities of this group on each cpu */
2896 @@ -274,35 +268,7 @@ struct task_group {
2897 struct list_head children;
2898 };
2899
2900 -#ifdef CONFIG_USER_SCHED
2901 -
2902 -/* Helper function to pass uid information to create_sched_user() */
2903 -void set_tg_uid(struct user_struct *user)
2904 -{
2905 - user->tg->uid = user->uid;
2906 -}
2907 -
2908 -/*
2909 - * Root task group.
2910 - * Every UID task group (including init_task_group aka UID-0) will
2911 - * be a child to this group.
2912 - */
2913 -struct task_group root_task_group;
2914 -
2915 -#ifdef CONFIG_FAIR_GROUP_SCHED
2916 -/* Default task group's sched entity on each cpu */
2917 -static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
2918 -/* Default task group's cfs_rq on each cpu */
2919 -static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
2920 -#endif /* CONFIG_FAIR_GROUP_SCHED */
2921 -
2922 -#ifdef CONFIG_RT_GROUP_SCHED
2923 -static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
2924 -static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
2925 -#endif /* CONFIG_RT_GROUP_SCHED */
2926 -#else /* !CONFIG_USER_SCHED */
2927 #define root_task_group init_task_group
2928 -#endif /* CONFIG_USER_SCHED */
2929
2930 /* task_group_lock serializes add/remove of task groups and also changes to
2931 * a task group's cpu shares.
2932 @@ -318,11 +284,7 @@ static int root_task_group_empty(void)
2933 }
2934 #endif
2935
2936 -#ifdef CONFIG_USER_SCHED
2937 -# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
2938 -#else /* !CONFIG_USER_SCHED */
2939 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
2940 -#endif /* CONFIG_USER_SCHED */
2941
2942 /*
2943 * A weight of 0 or 1 can cause arithmetics problems.
2944 @@ -348,11 +310,7 @@ static inline struct task_group *task_group(struct task_struct *p)
2945 {
2946 struct task_group *tg;
2947
2948 -#ifdef CONFIG_USER_SCHED
2949 - rcu_read_lock();
2950 - tg = __task_cred(p)->user->tg;
2951 - rcu_read_unlock();
2952 -#elif defined(CONFIG_CGROUP_SCHED)
2953 +#ifdef CONFIG_CGROUP_SCHED
2954 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
2955 struct task_group, css);
2956 #else
2957 @@ -383,7 +341,7 @@ static inline struct task_group *task_group(struct task_struct *p)
2958 return NULL;
2959 }
2960
2961 -#endif /* CONFIG_GROUP_SCHED */
2962 +#endif /* CONFIG_CGROUP_SCHED */
2963
2964 /* CFS-related fields in a runqueue */
2965 struct cfs_rq {
2966 @@ -567,6 +525,7 @@ struct rq {
2967 struct mm_struct *prev_mm;
2968
2969 u64 clock;
2970 + u64 clock_task;
2971
2972 atomic_t nr_iowait;
2973
2974 @@ -574,6 +533,8 @@ struct rq {
2975 struct root_domain *rd;
2976 struct sched_domain *sd;
2977
2978 + unsigned long cpu_power;
2979 +
2980 unsigned char idle_at_tick;
2981 /* For active balancing */
2982 int post_schedule;
2983 @@ -594,6 +555,10 @@ struct rq {
2984 u64 avg_idle;
2985 #endif
2986
2987 +#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2988 + u64 prev_irq_time;
2989 +#endif
2990 +
2991 /* calc_load related fields */
2992 unsigned long calc_load_update;
2993 long calc_load_active;
2994 @@ -631,11 +596,7 @@ struct rq {
2995
2996 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
2997
2998 -static inline
2999 -void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
3000 -{
3001 - rq->curr->sched_class->check_preempt_curr(rq, p, flags);
3002 -}
3003 +static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
3004
3005 static inline int cpu_of(struct rq *rq)
3006 {
3007 @@ -662,9 +623,20 @@ static inline int cpu_of(struct rq *rq)
3008 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
3009 #define raw_rq() (&__raw_get_cpu_var(runqueues))
3010
3011 +static u64 irq_time_cpu(int cpu);
3012 +static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
3013 +
3014 inline void update_rq_clock(struct rq *rq)
3015 {
3016 + int cpu = cpu_of(rq);
3017 + u64 irq_time;
3018 +
3019 rq->clock = sched_clock_cpu(cpu_of(rq));
3020 + irq_time = irq_time_cpu(cpu);
3021 + if (rq->clock - irq_time > rq->clock_task)
3022 + rq->clock_task = rq->clock - irq_time;
3023 +
3024 + sched_irq_time_avg_update(rq, irq_time);
3025 }
3026
3027 /*
3028 @@ -1297,6 +1269,10 @@ static void resched_task(struct task_struct *p)
3029 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
3030 {
3031 }
3032 +
3033 +static void sched_avg_update(struct rq *rq)
3034 +{
3035 +}
3036 #endif /* CONFIG_SMP */
3037
3038 #if BITS_PER_LONG == 32
3039 @@ -1546,24 +1522,9 @@ static unsigned long target_load(int cpu, int type)
3040 return max(rq->cpu_load[type-1], total);
3041 }
3042
3043 -static struct sched_group *group_of(int cpu)
3044 -{
3045 - struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
3046 -
3047 - if (!sd)
3048 - return NULL;
3049 -
3050 - return sd->groups;
3051 -}
3052 -
3053 static unsigned long power_of(int cpu)
3054 {
3055 - struct sched_group *group = group_of(cpu);
3056 -
3057 - if (!group)
3058 - return SCHED_LOAD_SCALE;
3059 -
3060 - return group->cpu_power;
3061 + return cpu_rq(cpu)->cpu_power;
3062 }
3063
3064 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
3065 @@ -1845,6 +1806,94 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
3066 #endif
3067 }
3068
3069 +#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3070 +
3071 +/*
3072 + * There are no locks covering percpu hardirq/softirq time.
3073 + * They are only modified in account_system_vtime, on corresponding CPU
3074 + * with interrupts disabled. So, writes are safe.
3075 + * They are read and saved off onto struct rq in update_rq_clock().
3076 + * This may result in other CPU reading this CPU's irq time and can
3077 + * race with irq/account_system_vtime on this CPU. We would either get old
3078 + * or new value (or semi updated value on 32 bit) with a side effect of
3079 + * accounting a slice of irq time to wrong task when irq is in progress
3080 + * while we read rq->clock. That is a worthy compromise in place of having
3081 + * locks on each irq in account_system_time.
3082 + */
3083 +static DEFINE_PER_CPU(u64, cpu_hardirq_time);
3084 +static DEFINE_PER_CPU(u64, cpu_softirq_time);
3085 +
3086 +static DEFINE_PER_CPU(u64, irq_start_time);
3087 +static int sched_clock_irqtime;
3088 +
3089 +void enable_sched_clock_irqtime(void)
3090 +{
3091 + sched_clock_irqtime = 1;
3092 +}
3093 +
3094 +void disable_sched_clock_irqtime(void)
3095 +{
3096 + sched_clock_irqtime = 0;
3097 +}
3098 +
3099 +static u64 irq_time_cpu(int cpu)
3100 +{
3101 + if (!sched_clock_irqtime)
3102 + return 0;
3103 +
3104 + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
3105 +}
3106 +
3107 +void account_system_vtime(struct task_struct *curr)
3108 +{
3109 + unsigned long flags;
3110 + int cpu;
3111 + u64 now, delta;
3112 +
3113 + if (!sched_clock_irqtime)
3114 + return;
3115 +
3116 + local_irq_save(flags);
3117 +
3118 + cpu = smp_processor_id();
3119 + now = sched_clock_cpu(cpu);
3120 + delta = now - per_cpu(irq_start_time, cpu);
3121 + per_cpu(irq_start_time, cpu) = now;
3122 + /*
3123 + * We do not account for softirq time from ksoftirqd here.
3124 + * We want to continue accounting softirq time to the ksoftirqd thread
3125 + * in that case, so as not to confuse the scheduler with a special task
3126 + * that does not consume any time, but still wants to run.
3127 + */
3128 + if (hardirq_count())
3129 + per_cpu(cpu_hardirq_time, cpu) += delta;
3130 + else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
3131 + per_cpu(cpu_softirq_time, cpu) += delta;
3132 +
3133 + local_irq_restore(flags);
3134 +}
3135 +EXPORT_SYMBOL_GPL(account_system_vtime);
3136 +
3137 +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
3138 +{
3139 + if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
3140 + u64 delta_irq = curr_irq_time - rq->prev_irq_time;
3141 + rq->prev_irq_time = curr_irq_time;
3142 + sched_rt_avg_update(rq, delta_irq);
3143 + }
3144 +}
3145 +
3146 +#else
3147 +
3148 +static u64 irq_time_cpu(int cpu)
3149 +{
3150 + return 0;
3151 +}
3152 +
3153 +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
3154 +
3155 +#endif
3156 +
3157 #include "sched_stats.h"
3158 #include "sched_idletask.c"
3159 #include "sched_fair.c"
3160 @@ -1870,8 +1919,8 @@ static void dec_nr_running(struct rq *rq)
3161 static void set_load_weight(struct task_struct *p)
3162 {
3163 if (task_has_rt_policy(p)) {
3164 - p->se.load.weight = prio_to_weight[0] * 2;
3165 - p->se.load.inv_weight = prio_to_wmult[0] >> 1;
3166 + p->se.load.weight = 0;
3167 + p->se.load.inv_weight = WMULT_CONST;
3168 return;
3169 }
3170
3171 @@ -2052,6 +2101,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3172 if (p->sched_class != &fair_sched_class)
3173 return 0;
3174
3175 + if (unlikely(p->policy == SCHED_IDLE))
3176 + return 0;
3177 +
3178 /*
3179 * Buddy candidates are cache hot:
3180 */
3181 @@ -2323,6 +2375,24 @@ void task_oncpu_function_call(struct task_struct *p,
3182 preempt_enable();
3183 }
3184
3185 +static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
3186 +{
3187 + const struct sched_class *class;
3188 +
3189 + if (p->sched_class == rq->curr->sched_class) {
3190 + rq->curr->sched_class->check_preempt_curr(rq, p, flags);
3191 + } else {
3192 + for_each_class(class) {
3193 + if (class == rq->curr->sched_class)
3194 + break;
3195 + if (class == p->sched_class) {
3196 + resched_task(rq->curr);
3197 + break;
3198 + }
3199 + }
3200 + }
3201 +}
3202 +
3203 #ifdef CONFIG_SMP
3204 /*
3205 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
3206 @@ -3144,6 +3214,8 @@ static void update_cpu_load(struct rq *this_rq)
3207 this_rq->calc_load_update += LOAD_FREQ;
3208 calc_load_account_active(this_rq);
3209 }
3210 +
3211 + sched_avg_update(this_rq);
3212 }
3213
3214 #ifdef CONFIG_SMP
3215 @@ -3275,7 +3347,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
3216 * 2) too many balance attempts have failed.
3217 */
3218
3219 - tsk_cache_hot = task_hot(p, rq->clock, sd);
3220 + tsk_cache_hot = task_hot(p, rq->clock_task, sd);
3221 if (!tsk_cache_hot ||
3222 sd->nr_balance_failed > sd->cache_nice_tries) {
3223 #ifdef CONFIG_SCHEDSTATS
3224 @@ -3458,12 +3530,17 @@ struct sd_lb_stats {
3225 unsigned long this_load;
3226 unsigned long this_load_per_task;
3227 unsigned long this_nr_running;
3228 + unsigned long this_has_capacity;
3229 + unsigned int this_idle_cpus;
3230
3231 /* Statistics of the busiest group */
3232 + unsigned int busiest_idle_cpus;
3233 unsigned long max_load;
3234 unsigned long busiest_load_per_task;
3235 unsigned long busiest_nr_running;
3236 unsigned long busiest_group_capacity;
3237 + unsigned long busiest_has_capacity;
3238 + unsigned int busiest_group_weight;
3239
3240 int group_imb; /* Is there imbalance in this sd */
3241 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3242 @@ -3485,7 +3562,10 @@ struct sg_lb_stats {
3243 unsigned long sum_nr_running; /* Nr tasks running in the group */
3244 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3245 unsigned long group_capacity;
3246 + unsigned long idle_cpus;
3247 + unsigned long group_weight;
3248 int group_imb; /* Is there an imbalance in the group ? */
3249 + int group_has_capacity; /* Is there extra capacity in the group? */
3250 };
3251
3252 /**
3253 @@ -3695,10 +3775,14 @@ unsigned long scale_rt_power(int cpu)
3254 struct rq *rq = cpu_rq(cpu);
3255 u64 total, available;
3256
3257 - sched_avg_update(rq);
3258 -
3259 total = sched_avg_period() + (rq->clock - rq->age_stamp);
3260 - available = total - rq->rt_avg;
3261 +
3262 + if (unlikely(total < rq->rt_avg)) {
3263 + /* Ensures that power won't end up being negative */
3264 + available = 0;
3265 + } else {
3266 + available = total - rq->rt_avg;
3267 + }
3268
3269 if (unlikely((s64)total < SCHED_LOAD_SCALE))
3270 total = SCHED_LOAD_SCALE;
3271 @@ -3736,6 +3820,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
3272 if (!power)
3273 power = 1;
3274
3275 + cpu_rq(cpu)->cpu_power = power;
3276 sdg->cpu_power = power;
3277 }
3278
3279 @@ -3780,7 +3865,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3280 int local_group, const struct cpumask *cpus,
3281 int *balance, struct sg_lb_stats *sgs)
3282 {
3283 - unsigned long load, max_cpu_load, min_cpu_load;
3284 + unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
3285 int i;
3286 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3287 unsigned long avg_load_per_task = 0;
3288 @@ -3794,6 +3879,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3289 /* Tally up the load of all CPUs in the group */
3290 max_cpu_load = 0;
3291 min_cpu_load = ~0UL;
3292 + max_nr_running = 0;
3293
3294 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3295 struct rq *rq = cpu_rq(i);
3296 @@ -3811,8 +3897,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3297 load = target_load(i, load_idx);
3298 } else {
3299 load = source_load(i, load_idx);
3300 - if (load > max_cpu_load)
3301 + if (load > max_cpu_load) {
3302 max_cpu_load = load;
3303 + max_nr_running = rq->nr_running;
3304 + }
3305 if (min_cpu_load > load)
3306 min_cpu_load = load;
3307 }
3308 @@ -3820,7 +3908,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3309 sgs->group_load += load;
3310 sgs->sum_nr_running += rq->nr_running;
3311 sgs->sum_weighted_load += weighted_cpuload(i);
3312 -
3313 + if (idle_cpu(i))
3314 + sgs->idle_cpus++;
3315 }
3316
3317 /*
3318 @@ -3850,11 +3939,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
3319 if (sgs->sum_nr_running)
3320 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
3321
3322 - if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3323 + if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
3324 sgs->group_imb = 1;
3325
3326 - sgs->group_capacity =
3327 - DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
3328 + sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
3329 + sgs->group_weight = group->group_weight;
3330 +
3331 + if (sgs->group_capacity > sgs->sum_nr_running)
3332 + sgs->group_has_capacity = 1;
3333 }
3334
3335 /**
3336 @@ -3901,9 +3993,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3337 /*
3338 * In case the child domain prefers tasks go to siblings
3339 * first, lower the group capacity to one so that we'll try
3340 - * and move all the excess tasks away.
3341 + * and move all the excess tasks away. We lower the capacity
3342 + * of a group only if the local group has the capacity to fit
3343 + * these excess tasks, i.e. nr_running < group_capacity. The
3344 + * extra check prevents the case where you always pull from the
3345 + * heaviest group when it is already under-utilized (possible
3346 + * when a large weight task outweighs the tasks on the system).
3347 */
3348 - if (prefer_sibling)
3349 + if (prefer_sibling && !local_group && sds->this_has_capacity)
3350 sgs.group_capacity = min(sgs.group_capacity, 1UL);
3351
3352 if (local_group) {
3353 @@ -3911,14 +4008,19 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3354 sds->this = group;
3355 sds->this_nr_running = sgs.sum_nr_running;
3356 sds->this_load_per_task = sgs.sum_weighted_load;
3357 + sds->this_has_capacity = sgs.group_has_capacity;
3358 + sds->this_idle_cpus = sgs.idle_cpus;
3359 } else if (sgs.avg_load > sds->max_load &&
3360 (sgs.sum_nr_running > sgs.group_capacity ||
3361 sgs.group_imb)) {
3362 sds->max_load = sgs.avg_load;
3363 sds->busiest = group;
3364 sds->busiest_nr_running = sgs.sum_nr_running;
3365 + sds->busiest_idle_cpus = sgs.idle_cpus;
3366 sds->busiest_group_capacity = sgs.group_capacity;
3367 + sds->busiest_group_weight = sgs.group_weight;
3368 sds->busiest_load_per_task = sgs.sum_weighted_load;
3369 + sds->busiest_has_capacity = sgs.group_has_capacity;
3370 sds->group_imb = sgs.group_imb;
3371 }
3372
3373 @@ -4064,6 +4166,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3374 return fix_small_imbalance(sds, this_cpu, imbalance);
3375
3376 }
3377 +
3378 /******* find_busiest_group() helpers end here *********************/
3379
3380 /**
3381 @@ -4115,6 +4218,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3382 * 4) This group is more busy than the avg busieness at this
3383 * sched_domain.
3384 * 5) The imbalance is within the specified limit.
3385 + *
3386 + * Note: when doing newidle balance, if the local group has excess
3387 + * capacity (i.e. nr_running < group_capacity) and the busiest group
3388 + * does not have any capacity, we force a load balance to pull tasks
3389 + * to the local group. In this case, we skip past checks 3, 4 and 5.
3390 */
3391 if (balance && !(*balance))
3392 goto ret;
3393 @@ -4122,6 +4230,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3394 if (!sds.busiest || sds.busiest_nr_running == 0)
3395 goto out_balanced;
3396
3397 + /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
3398 + if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
3399 + !sds.busiest_has_capacity)
3400 + goto force_balance;
3401 +
3402 if (sds.this_load >= sds.max_load)
3403 goto out_balanced;
3404
3405 @@ -4130,9 +4243,28 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3406 if (sds.this_load >= sds.avg_load)
3407 goto out_balanced;
3408
3409 - if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3410 - goto out_balanced;
3411 + /*
3412 + * In the CPU_NEWLY_IDLE case, use imbalance_pct to be conservative.
3413 + * And to check for busy balance use !idle_cpu instead of
3414 + * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
3415 + * even when they are idle.
3416 + */
3417 + if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
3418 + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3419 + goto out_balanced;
3420 + } else {
3421 + /*
3422 + * This cpu is idle. If the busiest group doesn't have
3423 + * more tasks than the number of available cpus and there
3424 + * is no imbalance between this and the busiest group
3425 + * wrt idle cpus, it is balanced.
3426 + */
3427 + if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
3428 + sds.busiest_nr_running <= sds.busiest_group_weight)
3429 + goto out_balanced;
3430 + }
3431
3432 +force_balance:
3433 /* Looks like there is an imbalance. Compute it */
3434 calculate_imbalance(&sds, this_cpu, imbalance);
3435 return sds.busiest;
3436 @@ -4288,7 +4420,14 @@ redo:
3437
3438 if (!ld_moved) {
3439 schedstat_inc(sd, lb_failed[idle]);
3440 - sd->nr_balance_failed++;
3441 + /*
3442 + * Increment the failure counter only on periodic balance.
3443 + * We do not want newidle balance, which can be very
3444 + * frequent, to pollute the failure counter, causing
3445 + * excessive cache_hot migrations and active balances.
3446 + */
3447 + if (idle != CPU_NEWLY_IDLE)
3448 + sd->nr_balance_failed++;
3449
3450 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
3451
3452 @@ -5033,7 +5172,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3453
3454 if (task_current(rq, p)) {
3455 update_rq_clock(rq);
3456 - ns = rq->clock - p->se.exec_start;
3457 + ns = rq->clock_task - p->se.exec_start;
3458 if ((s64)ns < 0)
3459 ns = 0;
3460 }
3461 @@ -5177,7 +5316,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
3462 tmp = cputime_to_cputime64(cputime);
3463 if (hardirq_count() - hardirq_offset)
3464 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3465 - else if (softirq_count())
3466 + else if (in_serving_softirq())
3467 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3468 else
3469 cpustat->system = cputime64_add(cpustat->system, tmp);
3470 @@ -7121,7 +7260,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
3471 idle->se.exec_start = sched_clock();
3472
3473 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
3474 + /*
3475 + * We're having a chicken and egg problem: even though we are
3476 + * holding rq->lock, the cpu isn't yet set to this cpu, so the
3477 + * lockdep check in task_group() will fail.
3478 + *
3479 + * Similar case to sched_fork(). / Alternatively we could
3480 + * use task_rq_lock() here and obtain the other rq->lock.
3481 + *
3482 + * Silence PROVE_RCU
3483 + */
3484 + rcu_read_lock();
3485 __set_task_cpu(idle, cpu);
3486 + rcu_read_unlock();
3487
3488 rq->curr = rq->idle = idle;
3489 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
3490 @@ -8628,6 +8779,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
3491 if (cpu != group_first_cpu(sd->groups))
3492 return;
3493
3494 + sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
3495 +
3496 child = sd->child;
3497
3498 sd->groups->cpu_power = 0;
3499 @@ -9511,9 +9664,6 @@ void __init sched_init(void)
3500 #ifdef CONFIG_RT_GROUP_SCHED
3501 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
3502 #endif
3503 -#ifdef CONFIG_USER_SCHED
3504 - alloc_size *= 2;
3505 -#endif
3506 #ifdef CONFIG_CPUMASK_OFFSTACK
3507 alloc_size += num_possible_cpus() * cpumask_size();
3508 #endif
3509 @@ -9531,13 +9681,6 @@ void __init sched_init(void)
3510 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
3511 ptr += nr_cpu_ids * sizeof(void **);
3512
3513 -#ifdef CONFIG_USER_SCHED
3514 - root_task_group.se = (struct sched_entity **)ptr;
3515 - ptr += nr_cpu_ids * sizeof(void **);
3516 -
3517 - root_task_group.cfs_rq = (struct cfs_rq **)ptr;
3518 - ptr += nr_cpu_ids * sizeof(void **);
3519 -#endif /* CONFIG_USER_SCHED */
3520 #endif /* CONFIG_FAIR_GROUP_SCHED */
3521 #ifdef CONFIG_RT_GROUP_SCHED
3522 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
3523 @@ -9546,13 +9689,6 @@ void __init sched_init(void)
3524 init_task_group.rt_rq = (struct rt_rq **)ptr;
3525 ptr += nr_cpu_ids * sizeof(void **);
3526
3527 -#ifdef CONFIG_USER_SCHED
3528 - root_task_group.rt_se = (struct sched_rt_entity **)ptr;
3529 - ptr += nr_cpu_ids * sizeof(void **);
3530 -
3531 - root_task_group.rt_rq = (struct rt_rq **)ptr;
3532 - ptr += nr_cpu_ids * sizeof(void **);
3533 -#endif /* CONFIG_USER_SCHED */
3534 #endif /* CONFIG_RT_GROUP_SCHED */
3535 #ifdef CONFIG_CPUMASK_OFFSTACK
3536 for_each_possible_cpu(i) {
3537 @@ -9572,22 +9708,13 @@ void __init sched_init(void)
3538 #ifdef CONFIG_RT_GROUP_SCHED
3539 init_rt_bandwidth(&init_task_group.rt_bandwidth,
3540 global_rt_period(), global_rt_runtime());
3541 -#ifdef CONFIG_USER_SCHED
3542 - init_rt_bandwidth(&root_task_group.rt_bandwidth,
3543 - global_rt_period(), RUNTIME_INF);
3544 -#endif /* CONFIG_USER_SCHED */
3545 #endif /* CONFIG_RT_GROUP_SCHED */
3546
3547 -#ifdef CONFIG_GROUP_SCHED
3548 +#ifdef CONFIG_CGROUP_SCHED
3549 list_add(&init_task_group.list, &task_groups);
3550 INIT_LIST_HEAD(&init_task_group.children);
3551
3552 -#ifdef CONFIG_USER_SCHED
3553 - INIT_LIST_HEAD(&root_task_group.children);
3554 - init_task_group.parent = &root_task_group;
3555 - list_add(&init_task_group.siblings, &root_task_group.children);
3556 -#endif /* CONFIG_USER_SCHED */
3557 -#endif /* CONFIG_GROUP_SCHED */
3558 +#endif /* CONFIG_CGROUP_SCHED */
3559
3560 #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
3561 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
3562 @@ -9627,25 +9754,6 @@ void __init sched_init(void)
3563 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
3564 */
3565 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
3566 -#elif defined CONFIG_USER_SCHED
3567 - root_task_group.shares = NICE_0_LOAD;
3568 - init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
3569 - /*
3570 - * In case of task-groups formed thr' the user id of tasks,
3571 - * init_task_group represents tasks belonging to root user.
3572 - * Hence it forms a sibling of all subsequent groups formed.
3573 - * In this case, init_task_group gets only a fraction of overall
3574 - * system cpu resource, based on the weight assigned to root
3575 - * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
3576 - * by letting tasks of init_task_group sit in a separate cfs_rq
3577 - * (init_tg_cfs_rq) and having one entity represent this group of
3578 - * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
3579 - */
3580 - init_tg_cfs_entry(&init_task_group,
3581 - &per_cpu(init_tg_cfs_rq, i),
3582 - &per_cpu(init_sched_entity, i), i, 1,
3583 - root_task_group.se[i]);
3584 -
3585 #endif
3586 #endif /* CONFIG_FAIR_GROUP_SCHED */
3587
3588 @@ -9668,6 +9776,7 @@ void __init sched_init(void)
3589 #ifdef CONFIG_SMP
3590 rq->sd = NULL;
3591 rq->rd = NULL;
3592 + rq->cpu_power = SCHED_LOAD_SCALE;
3593 rq->post_schedule = 0;
3594 rq->active_balance = 0;
3595 rq->next_balance = jiffies;
3596 @@ -10051,7 +10160,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
3597 }
3598 #endif /* CONFIG_RT_GROUP_SCHED */
3599
3600 -#ifdef CONFIG_GROUP_SCHED
3601 +#ifdef CONFIG_CGROUP_SCHED
3602 static void free_sched_group(struct task_group *tg)
3603 {
3604 free_fair_sched_group(tg);
3605 @@ -10146,12 +10255,12 @@ void sched_move_task(struct task_struct *tsk)
3606 if (unlikely(running))
3607 tsk->sched_class->put_prev_task(rq, tsk);
3608
3609 - set_task_rq(tsk, task_cpu(tsk));
3610 -
3611 #ifdef CONFIG_FAIR_GROUP_SCHED
3612 - if (tsk->sched_class->moved_group)
3613 - tsk->sched_class->moved_group(tsk, on_rq);
3614 + if (tsk->sched_class->task_move_group)
3615 + tsk->sched_class->task_move_group(tsk, on_rq);
3616 + else
3617 #endif
3618 + set_task_rq(tsk, task_cpu(tsk));
3619
3620 if (unlikely(running))
3621 tsk->sched_class->set_curr_task(rq);
3622 @@ -10160,7 +10269,7 @@ void sched_move_task(struct task_struct *tsk)
3623
3624 task_rq_unlock(rq, &flags);
3625 }
3626 -#endif /* CONFIG_GROUP_SCHED */
3627 +#endif /* CONFIG_CGROUP_SCHED */
3628
3629 #ifdef CONFIG_FAIR_GROUP_SCHED
3630 static void __set_se_shares(struct sched_entity *se, unsigned long shares)
3631 @@ -10302,13 +10411,6 @@ static int tg_schedulable(struct task_group *tg, void *data)
3632 runtime = d->rt_runtime;
3633 }
3634
3635 -#ifdef CONFIG_USER_SCHED
3636 - if (tg == &root_task_group) {
3637 - period = global_rt_period();
3638 - runtime = global_rt_runtime();
3639 - }
3640 -#endif
3641 -
3642 /*
3643 * Cannot have more runtime than the period.
3644 */
3645 diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
3646 index 6f836a8..f9724c0 100644
3647 --- a/kernel/sched_debug.c
3648 +++ b/kernel/sched_debug.c
3649 @@ -173,11 +173,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
3650 task_group_path(tg, path, sizeof(path));
3651
3652 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
3653 -#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
3654 - {
3655 - uid_t uid = cfs_rq->tg->uid;
3656 - SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
3657 - }
3658 #else
3659 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
3660 #endif
3661 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
3662 index 01e311e..cd9a40b 100644
3663 --- a/kernel/sched_fair.c
3664 +++ b/kernel/sched_fair.c
3665 @@ -496,7 +496,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
3666 static void update_curr(struct cfs_rq *cfs_rq)
3667 {
3668 struct sched_entity *curr = cfs_rq->curr;
3669 - u64 now = rq_of(cfs_rq)->clock;
3670 + u64 now = rq_of(cfs_rq)->clock_task;
3671 unsigned long delta_exec;
3672
3673 if (unlikely(!curr))
3674 @@ -579,7 +579,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
3675 /*
3676 * We are starting a new run period:
3677 */
3678 - se->exec_start = rq_of(cfs_rq)->clock;
3679 + se->exec_start = rq_of(cfs_rq)->clock_task;
3680 }
3681
3682 /**************************************************
3683 @@ -1222,7 +1222,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3684 unsigned long this_load, load;
3685 int idx, this_cpu, prev_cpu;
3686 unsigned long tl_per_task;
3687 - unsigned int imbalance;
3688 struct task_group *tg;
3689 unsigned long weight;
3690 int balanced;
3691 @@ -1262,8 +1261,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3692 tg = task_group(p);
3693 weight = p->se.load.weight;
3694
3695 - imbalance = 100 + (sd->imbalance_pct - 100) / 2;
3696 -
3697 /*
3698 * In low-load situations, where prev_cpu is idle and this_cpu is idle
3699 * due to the sync cause above having dropped this_load to 0, we'll
3700 @@ -1273,9 +1270,22 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
3701 * Otherwise check if either cpus are near enough in load to allow this
3702 * task to be woken on this_cpu.
3703 */
3704 - balanced = !this_load ||
3705 - 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
3706 - imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
3707 + if (this_load) {
3708 + unsigned long this_eff_load, prev_eff_load;
3709 +
3710 + this_eff_load = 100;
3711 + this_eff_load *= power_of(prev_cpu);
3712 + this_eff_load *= this_load +
3713 + effective_load(tg, this_cpu, weight, weight);
3714 +
3715 + prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3716 + prev_eff_load *= power_of(this_cpu);
3717 + prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3718 +
3719 + balanced = this_eff_load <= prev_eff_load;
3720 + } else
3721 + balanced = true;
3722 +
3723 rcu_read_unlock();
3724
3725 /*
3726 @@ -1992,8 +2002,11 @@ static void task_fork_fair(struct task_struct *p)
3727
3728 update_rq_clock(rq);
3729
3730 - if (unlikely(task_cpu(p) != this_cpu))
3731 + if (unlikely(task_cpu(p) != this_cpu)) {
3732 + rcu_read_lock();
3733 __set_task_cpu(p, this_cpu);
3734 + rcu_read_unlock();
3735 + }
3736
3737 update_curr(cfs_rq);
3738
3739 @@ -2065,13 +2078,26 @@ static void set_curr_task_fair(struct rq *rq)
3740 }
3741
3742 #ifdef CONFIG_FAIR_GROUP_SCHED
3743 -static void moved_group_fair(struct task_struct *p, int on_rq)
3744 +static void task_move_group_fair(struct task_struct *p, int on_rq)
3745 {
3746 - struct cfs_rq *cfs_rq = task_cfs_rq(p);
3747 -
3748 - update_curr(cfs_rq);
3749 + /*
3750 + * If the task was not on the rq at the time of this cgroup movement
3751 + * it must have been asleep; sleeping tasks keep their ->vruntime
3752 + * absolute on their old rq until wakeup (needed for the fair sleeper
3753 + * bonus in place_entity()).
3754 + *
3755 + * If it was on the rq, we've just 'preempted' it, which does convert
3756 + * ->vruntime to a relative base.
3757 + *
3758 + * Make sure both cases convert their relative position when migrating
3759 + * to another cgroup's rq. This does somewhat interfere with the
3760 + * fair sleeper stuff for the first placement, but who cares.
3761 + */
3762 + if (!on_rq)
3763 + p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
3764 + set_task_rq(p, task_cpu(p));
3765 if (!on_rq)
3766 - place_entity(cfs_rq, &p->se, 1);
3767 + p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
3768 }
3769 #endif
3770
3771 @@ -2125,7 +2151,7 @@ static const struct sched_class fair_sched_class = {
3772 .get_rr_interval = get_rr_interval_fair,
3773
3774 #ifdef CONFIG_FAIR_GROUP_SCHED
3775 - .moved_group = moved_group_fair,
3776 + .task_move_group = task_move_group_fair,
3777 #endif
3778 };
3779
3780 diff --git a/kernel/sched_features.h b/kernel/sched_features.h
3781 index 0d94083..f8df3ee 100644
3782 --- a/kernel/sched_features.h
3783 +++ b/kernel/sched_features.h
3784 @@ -121,3 +121,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1)
3785 * release the lock. Decreases scheduling overhead.
3786 */
3787 SCHED_FEAT(OWNER_SPIN, 1)
3788 +
3789 +/*
3790 + * Decrement CPU power based on irq activity
3791 + */
3792 +SCHED_FEAT(NONIRQ_POWER, 1)
3793 diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
3794 index af24fab..9d9a7b1 100644
3795 --- a/kernel/sched_rt.c
3796 +++ b/kernel/sched_rt.c
3797 @@ -603,7 +603,7 @@ static void update_curr_rt(struct rq *rq)
3798 if (!task_has_rt_policy(curr))
3799 return;
3800
3801 - delta_exec = rq->clock - curr->se.exec_start;
3802 + delta_exec = rq->clock_task - curr->se.exec_start;
3803 if (unlikely((s64)delta_exec < 0))
3804 delta_exec = 0;
3805
3806 @@ -612,7 +612,7 @@ static void update_curr_rt(struct rq *rq)
3807 curr->se.sum_exec_runtime += delta_exec;
3808 account_group_exec_runtime(curr, delta_exec);
3809
3810 - curr->se.exec_start = rq->clock;
3811 + curr->se.exec_start = rq->clock_task;
3812 cpuacct_charge(curr, delta_exec);
3813
3814 sched_rt_avg_update(rq, delta_exec);
3815 @@ -954,18 +954,19 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
3816 * runqueue. Otherwise simply start this RT task
3817 * on its current runqueue.
3818 *
3819 - * We want to avoid overloading runqueues. Even if
3820 - * the RT task is of higher priority than the current RT task.
3821 - * RT tasks behave differently than other tasks. If
3822 - * one gets preempted, we try to push it off to another queue.
3823 - * So trying to keep a preempting RT task on the same
3824 - * cache hot CPU will force the running RT task to
3825 - * a cold CPU. So we waste all the cache for the lower
3826 - * RT task in hopes of saving some of a RT task
3827 - * that is just being woken and probably will have
3828 - * cold cache anyway.
3829 + * We want to avoid overloading runqueues. If the woken
3830 + * task is of higher priority, then it will stay on this CPU
3831 + * and the lower prio task should be moved to another CPU.
3832 + * Even though this will probably make the lower prio task
3833 + * lose its cache, we do not want to bounce a higher prio task
3834 + * around just because it gave up its CPU, perhaps for a
3835 + * lock?
3836 + *
3837 + * For equal prio tasks, we just let the scheduler sort it out.
3838 */
3839 if (unlikely(rt_task(rq->curr)) &&
3840 + (rq->curr->rt.nr_cpus_allowed < 2 ||
3841 + rq->curr->prio < p->prio) &&
3842 (p->rt.nr_cpus_allowed > 1)) {
3843 int cpu = find_lowest_rq(p);
3844
3845 @@ -1068,7 +1069,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
3846 } while (rt_rq);
3847
3848 p = rt_task_of(rt_se);
3849 - p->se.exec_start = rq->clock;
3850 + p->se.exec_start = rq->clock_task;
3851
3852 return p;
3853 }
3854 @@ -1493,7 +1494,10 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
3855 if (!task_running(rq, p) &&
3856 !test_tsk_need_resched(rq->curr) &&
3857 has_pushable_tasks(rq) &&
3858 - p->rt.nr_cpus_allowed > 1)
3859 + p->rt.nr_cpus_allowed > 1 &&
3860 + rt_task(rq->curr) &&
3861 + (rq->curr->rt.nr_cpus_allowed < 2 ||
3862 + rq->curr->prio < p->prio))
3863 push_rt_tasks(rq);
3864 }
3865
3866 @@ -1731,7 +1735,7 @@ static void set_curr_task_rt(struct rq *rq)
3867 {
3868 struct task_struct *p = rq->curr;
3869
3870 - p->se.exec_start = rq->clock;
3871 + p->se.exec_start = rq->clock_task;
3872
3873 /* The running task is never eligible for pushing */
3874 dequeue_pushable_task(rq, p);
3875 diff --git a/kernel/smp.c b/kernel/smp.c
3876 index c9d1c78..ea5dc8f 100644
3877 --- a/kernel/smp.c
3878 +++ b/kernel/smp.c
3879 @@ -193,6 +193,24 @@ void generic_smp_call_function_interrupt(void)
3880 list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
3881 int refs;
3882
3883 + /*
3884 + * Since we walk the list without any locks, we might
3885 + * see an entry that was completed, removed from the
3886 + * list and is in the process of being reused.
3887 + *
3888 + * We must check that the cpu is in the cpumask before
3889 + * checking the refs, and both must be set before
3890 + * executing the callback on this cpu.
3891 + */
3892 +
3893 + if (!cpumask_test_cpu(cpu, data->cpumask))
3894 + continue;
3895 +
3896 + smp_rmb();
3897 +
3898 + if (atomic_read(&data->refs) == 0)
3899 + continue;
3900 +
3901 if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
3902 continue;
3903
3904 @@ -201,6 +219,8 @@ void generic_smp_call_function_interrupt(void)
3905 refs = atomic_dec_return(&data->refs);
3906 WARN_ON(refs < 0);
3907 if (!refs) {
3908 + WARN_ON(!cpumask_empty(data->cpumask));
3909 +
3910 spin_lock(&call_function.lock);
3911 list_del_rcu(&data->csd.list);
3912 spin_unlock(&call_function.lock);
3913 @@ -401,11 +421,21 @@ void smp_call_function_many(const struct cpumask *mask,
3914
3915 data = &__get_cpu_var(cfd_data);
3916 csd_lock(&data->csd);
3917 + BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
3918
3919 data->csd.func = func;
3920 data->csd.info = info;
3921 cpumask_and(data->cpumask, mask, cpu_online_mask);
3922 cpumask_clear_cpu(this_cpu, data->cpumask);
3923 +
3924 + /*
3925 + * To ensure the interrupt handler gets a complete view
3926 + * we order the cpumask and refs writes and order the read
3927 + * of them in the interrupt handler. In addition we may
3928 + * only clear our own cpu bit from the mask.
3929 + */
3930 + smp_wmb();
3931 +
3932 atomic_set(&data->refs, cpumask_weight(data->cpumask));
3933
3934 spin_lock_irqsave(&call_function.lock, flags);
3935 diff --git a/kernel/softirq.c b/kernel/softirq.c
3936 index f8749e5..04a0252 100644
3937 --- a/kernel/softirq.c
3938 +++ b/kernel/softirq.c
3939 @@ -77,11 +77,21 @@ void wakeup_softirqd(void)
3940 }
3941
3942 /*
3943 + * preempt_count and SOFTIRQ_OFFSET usage:
3944 + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
3945 + * softirq processing.
3946 + * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
3947 + * on local_bh_disable or local_bh_enable.
3948 + * This lets us distinguish between whether we are currently processing
3949 + * softirq and whether we just have bh disabled.
3950 + */
3951 +
3952 +/*
3953 * This one is for softirq.c-internal use,
3954 * where hardirqs are disabled legitimately:
3955 */
3956 #ifdef CONFIG_TRACE_IRQFLAGS
3957 -static void __local_bh_disable(unsigned long ip)
3958 +static void __local_bh_disable(unsigned long ip, unsigned int cnt)
3959 {
3960 unsigned long flags;
3961
3962 @@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip)
3963 * We must manually increment preempt_count here and manually
3964 * call the trace_preempt_off later.
3965 */
3966 - preempt_count() += SOFTIRQ_OFFSET;
3967 + preempt_count() += cnt;
3968 /*
3969 * Were softirqs turned off above:
3970 */
3971 - if (softirq_count() == SOFTIRQ_OFFSET)
3972 + if (softirq_count() == cnt)
3973 trace_softirqs_off(ip);
3974 raw_local_irq_restore(flags);
3975
3976 - if (preempt_count() == SOFTIRQ_OFFSET)
3977 + if (preempt_count() == cnt)
3978 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3979 }
3980 #else /* !CONFIG_TRACE_IRQFLAGS */
3981 -static inline void __local_bh_disable(unsigned long ip)
3982 +static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
3983 {
3984 - add_preempt_count(SOFTIRQ_OFFSET);
3985 + add_preempt_count(cnt);
3986 barrier();
3987 }
3988 #endif /* CONFIG_TRACE_IRQFLAGS */
3989
3990 void local_bh_disable(void)
3991 {
3992 - __local_bh_disable((unsigned long)__builtin_return_address(0));
3993 + __local_bh_disable((unsigned long)__builtin_return_address(0),
3994 + SOFTIRQ_DISABLE_OFFSET);
3995 }
3996
3997 EXPORT_SYMBOL(local_bh_disable);
3998
3999 +static void __local_bh_enable(unsigned int cnt)
4000 +{
4001 + WARN_ON_ONCE(in_irq());
4002 + WARN_ON_ONCE(!irqs_disabled());
4003 +
4004 + if (softirq_count() == cnt)
4005 + trace_softirqs_on((unsigned long)__builtin_return_address(0));
4006 + sub_preempt_count(cnt);
4007 +}
4008 +
4009 /*
4010 * Special-case - softirqs can safely be enabled in
4011 * cond_resched_softirq(), or by __do_softirq(),
4012 @@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable);
4013 */
4014 void _local_bh_enable(void)
4015 {
4016 - WARN_ON_ONCE(in_irq());
4017 - WARN_ON_ONCE(!irqs_disabled());
4018 -
4019 - if (softirq_count() == SOFTIRQ_OFFSET)
4020 - trace_softirqs_on((unsigned long)__builtin_return_address(0));
4021 - sub_preempt_count(SOFTIRQ_OFFSET);
4022 + __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
4023 }
4024
4025 EXPORT_SYMBOL(_local_bh_enable);
4026 @@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
4027 /*
4028 * Are softirqs going to be turned on now:
4029 */
4030 - if (softirq_count() == SOFTIRQ_OFFSET)
4031 + if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
4032 trace_softirqs_on(ip);
4033 /*
4034 * Keep preemption disabled until we are done with
4035 * softirq processing:
4036 */
4037 - sub_preempt_count(SOFTIRQ_OFFSET - 1);
4038 + sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
4039
4040 if (unlikely(!in_interrupt() && local_softirq_pending()))
4041 do_softirq();
4042 @@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void)
4043 pending = local_softirq_pending();
4044 account_system_vtime(current);
4045
4046 - __local_bh_disable((unsigned long)__builtin_return_address(0));
4047 + __local_bh_disable((unsigned long)__builtin_return_address(0),
4048 + SOFTIRQ_OFFSET);
4049 lockdep_softirq_enter();
4050
4051 cpu = smp_processor_id();
4052 @@ -245,7 +262,7 @@ restart:
4053 lockdep_softirq_exit();
4054
4055 account_system_vtime(current);
4056 - _local_bh_enable();
4057 + __local_bh_enable(SOFTIRQ_OFFSET);
4058 }
4059
4060 #ifndef __ARCH_HAS_DO_SOFTIRQ
4061 @@ -279,10 +296,16 @@ void irq_enter(void)
4062
4063 rcu_irq_enter();
4064 if (idle_cpu(cpu) && !in_interrupt()) {
4065 - __irq_enter();
4066 + /*
4067 + * Prevent raise_softirq from needlessly waking up ksoftirqd
4068 + * here, as softirq will be serviced on return from interrupt.
4069 + */
4070 + local_bh_disable();
4071 tick_check_idle(cpu);
4072 - } else
4073 - __irq_enter();
4074 + _local_bh_enable();
4075 + }
4076 +
4077 + __irq_enter();
4078 }
4079
4080 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
4081 @@ -701,6 +724,7 @@ static int ksoftirqd(void * __bind_cpu)
4082 {
4083 set_current_state(TASK_INTERRUPTIBLE);
4084
4085 + current->flags |= PF_KSOFTIRQD;
4086 while (!kthread_should_stop()) {
4087 preempt_disable();
4088 if (!local_softirq_pending()) {
4089 diff --git a/kernel/sys.c b/kernel/sys.c
4090 index 440ca69..e9512b1 100644
4091 --- a/kernel/sys.c
4092 +++ b/kernel/sys.c
4093 @@ -567,11 +567,6 @@ static int set_user(struct cred *new)
4094 if (!new_user)
4095 return -EAGAIN;
4096
4097 - if (!task_can_switch_user(new_user, current)) {
4098 - free_uid(new_user);
4099 - return -EINVAL;
4100 - }
4101 -
4102 if (atomic_read(&new_user->processes) >=
4103 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
4104 new_user != INIT_USER) {
4105 diff --git a/kernel/user.c b/kernel/user.c
4106 index 46d0165..1b91701 100644
4107 --- a/kernel/user.c
4108 +++ b/kernel/user.c
4109 @@ -16,7 +16,6 @@
4110 #include <linux/interrupt.h>
4111 #include <linux/module.h>
4112 #include <linux/user_namespace.h>
4113 -#include "cred-internals.h"
4114
4115 struct user_namespace init_user_ns = {
4116 .kref = {
4117 @@ -56,9 +55,6 @@ struct user_struct root_user = {
4118 .sigpending = ATOMIC_INIT(0),
4119 .locked_shm = 0,
4120 .user_ns = &init_user_ns,
4121 -#ifdef CONFIG_USER_SCHED
4122 - .tg = &init_task_group,
4123 -#endif
4124 };
4125
4126 /*
4127 @@ -75,268 +71,6 @@ static void uid_hash_remove(struct user_struct *up)
4128 put_user_ns(up->user_ns);
4129 }
4130
4131 -#ifdef CONFIG_USER_SCHED
4132 -
4133 -static void sched_destroy_user(struct user_struct *up)
4134 -{
4135 - sched_destroy_group(up->tg);
4136 -}
4137 -
4138 -static int sched_create_user(struct user_struct *up)
4139 -{
4140 - int rc = 0;
4141 -
4142 - up->tg = sched_create_group(&root_task_group);
4143 - if (IS_ERR(up->tg))
4144 - rc = -ENOMEM;
4145 -
4146 - set_tg_uid(up);
4147 -
4148 - return rc;
4149 -}
4150 -
4151 -#else /* CONFIG_USER_SCHED */
4152 -
4153 -static void sched_destroy_user(struct user_struct *up) { }
4154 -static int sched_create_user(struct user_struct *up) { return 0; }
4155 -
4156 -#endif /* CONFIG_USER_SCHED */
4157 -
4158 -#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
4159 -
4160 -static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
4161 -{
4162 - struct user_struct *user;
4163 - struct hlist_node *h;
4164 -
4165 - hlist_for_each_entry(user, h, hashent, uidhash_node) {
4166 - if (user->uid == uid) {
4167 - /* possibly resurrect an "almost deleted" object */
4168 - if (atomic_inc_return(&user->__count) == 1)
4169 - cancel_delayed_work(&user->work);
4170 - return user;
4171 - }
4172 - }
4173 -
4174 - return NULL;
4175 -}
4176 -
4177 -static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
4178 -static DEFINE_MUTEX(uids_mutex);
4179 -
4180 -static inline void uids_mutex_lock(void)
4181 -{
4182 - mutex_lock(&uids_mutex);
4183 -}
4184 -
4185 -static inline void uids_mutex_unlock(void)
4186 -{
4187 - mutex_unlock(&uids_mutex);
4188 -}
4189 -
4190 -/* uid directory attributes */
4191 -#ifdef CONFIG_FAIR_GROUP_SCHED
4192 -static ssize_t cpu_shares_show(struct kobject *kobj,
4193 - struct kobj_attribute *attr,
4194 - char *buf)
4195 -{
4196 - struct user_struct *up = container_of(kobj, struct user_struct, kobj);
4197 -
4198 - return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
4199 -}
4200 -
4201 -static ssize_t cpu_shares_store(struct kobject *kobj,
4202 - struct kobj_attribute *attr,
4203 - const char *buf, size_t size)
4204 -{
4205 - struct user_struct *up = container_of(kobj, struct user_struct, kobj);
4206 - unsigned long shares;
4207 - int rc;
4208 -
4209 - sscanf(buf, "%lu", &shares);
4210 -
4211 - rc = sched_group_set_shares(up->tg, shares);
4212 -
4213 - return (rc ? rc : size);
4214 -}
4215 -
4216 -static struct kobj_attribute cpu_share_attr =
4217 - __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
4218 -#endif
4219 -
4220 -#ifdef CONFIG_RT_GROUP_SCHED
4221 -static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
4222 - struct kobj_attribute *attr,
4223 - char *buf)
4224 -{
4225 - struct user_struct *up = container_of(kobj, struct user_struct, kobj);
4226 -
4227 - return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
4228 -}
4229 -
4230 -static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
4231 - struct kobj_attribute *attr,
4232 - const char *buf, size_t size)
4233 -{
4234 - struct user_struct *up = container_of(kobj, struct user_struct, kobj);
4235 - unsigned long rt_runtime;
4236 - int rc;
4237 -
4238 - sscanf(buf, "%ld", &rt_runtime);
4239 -
4240 - rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
4241 -
4242 - return (rc ? rc : size);
4243 -}
4244 -
4245 -static struct kobj_attribute cpu_rt_runtime_attr =
4246 - __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
4247 -
4248 -static ssize_t cpu_rt_period_show(struct kobject *kobj,
4249 - struct kobj_attribute *attr,
4250 - char *buf)
4251 -{
4252 - struct user_struct *up = container_of(kobj, struct user_struct, kobj);
4253 -
4254 - return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
4255 -}
4256 -
4257 -static ssize_t cpu_rt_period_store(struct kobject *kobj,
4258 - struct kobj_attribute *attr,
4259 - const char *buf, size_t size)
4260 -{
4261 - struct user_struct *up = container_of(kobj, struct user_struct, kobj);
4262 - unsigned long rt_period;
4263 - int rc;
4264 -
4265 - sscanf(buf, "%lu", &rt_period);
4266 -
4267 - rc = sched_group_set_rt_period(up->tg, rt_period);
4268 -
4269 - return (rc ? rc : size);
4270 -}
4271 -
4272 -static struct kobj_attribute cpu_rt_period_attr =
4273 - __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
4274 -#endif
4275 -
4276 -/* default attributes per uid directory */
4277 -static struct attribute *uids_attributes[] = {
4278 -#ifdef CONFIG_FAIR_GROUP_SCHED
4279 - &cpu_share_attr.attr,
4280 -#endif
4281 -#ifdef CONFIG_RT_GROUP_SCHED
4282 - &cpu_rt_runtime_attr.attr,
4283 - &cpu_rt_period_attr.attr,
4284 -#endif
4285 - NULL
4286 -};
4287 -
4288 -/* the lifetime of user_struct is not managed by the core (now) */
4289 -static void uids_release(struct kobject *kobj)
4290 -{
4291 - return;
4292 -}
4293 -
4294 -static struct kobj_type uids_ktype = {
4295 - .sysfs_ops = &kobj_sysfs_ops,
4296 - .default_attrs = uids_attributes,
4297 - .release = uids_release,
4298 -};
4299 -
4300 -/*
4301 - * Create /sys/kernel/uids/<uid>/cpu_share file for this user
4302 - * We do not create this file for users in a user namespace (until
4303 - * sysfs tagging is implemented).
4304 - *
4305 - * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
4306 - */
4307 -static int uids_user_create(struct user_struct *up)
4308 -{
4309 - struct kobject *kobj = &up->kobj;
4310 - int error;
4311 -
4312 - memset(kobj, 0, sizeof(struct kobject));
4313 - if (up->user_ns != &init_user_ns)
4314 - return 0;
4315 - kobj->kset = uids_kset;
4316 - error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
4317 - if (error) {
4318 - kobject_put(kobj);
4319 - goto done;
4320 - }
4321 -
4322 - kobject_uevent(kobj, KOBJ_ADD);
4323 -done:
4324 - return error;
4325 -}
4326 -
4327 -/* create these entries in sysfs:
4328 - * "/sys/kernel/uids" directory
4329 - * "/sys/kernel/uids/0" directory (for root user)
4330 - * "/sys/kernel/uids/0/cpu_share" file (for root user)
4331 - */
4332 -int __init uids_sysfs_init(void)
4333 -{
4334 - uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
4335 - if (!uids_kset)
4336 - return -ENOMEM;
4337 -
4338 - return uids_user_create(&root_user);
4339 -}
4340 -
4341 -/* delayed work function to remove sysfs directory for a user and free up
4342 - * corresponding structures.
4343 - */
4344 -static void cleanup_user_struct(struct work_struct *w)
4345 -{
4346 - struct user_struct *up = container_of(w, struct user_struct, work.work);
4347 - unsigned long flags;
4348 - int remove_user = 0;
4349 -
4350 - /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
4351 - * atomic.
4352 - */
4353 - uids_mutex_lock();
4354 -
4355 - spin_lock_irqsave(&uidhash_lock, flags);
4356 - if (atomic_read(&up->__count) == 0) {
4357 - uid_hash_remove(up);
4358 - remove_user = 1;
4359 - }
4360 - spin_unlock_irqrestore(&uidhash_lock, flags);
4361 -
4362 - if (!remove_user)
4363 - goto done;
4364 -
4365 - if (up->user_ns == &init_user_ns) {
4366 - kobject_uevent(&up->kobj, KOBJ_REMOVE);
4367 - kobject_del(&up->kobj);
4368 - kobject_put(&up->kobj);
4369 - }
4370 -
4371 - sched_destroy_user(up);
4372 - key_put(up->uid_keyring);
4373 - key_put(up->session_keyring);
4374 - kmem_cache_free(uid_cachep, up);
4375 -
4376 -done:
4377 - uids_mutex_unlock();
4378 -}
4379 -
4380 -/* IRQs are disabled and uidhash_lock is held upon function entry.
4381 - * IRQ state (as stored in flags) is restored and uidhash_lock released
4382 - * upon function exit.
4383 - */
4384 -static void free_user(struct user_struct *up, unsigned long flags)
4385 -{
4386 - INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
4387 - schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
4388 - spin_unlock_irqrestore(&uidhash_lock, flags);
4389 -}
4390 -
4391 -#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
4392 -
4393 static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
4394 {
4395 struct user_struct *user;
4396 @@ -352,45 +86,20 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
4397 return NULL;
4398 }
4399
4400 -int uids_sysfs_init(void) { return 0; }
4401 -static inline int uids_user_create(struct user_struct *up) { return 0; }
4402 -static inline void uids_mutex_lock(void) { }
4403 -static inline void uids_mutex_unlock(void) { }
4404 -
4405 /* IRQs are disabled and uidhash_lock is held upon function entry.
4406 * IRQ state (as stored in flags) is restored and uidhash_lock released
4407 * upon function exit.
4408 */
4409 static void free_user(struct user_struct *up, unsigned long flags)
4410 + __releases(&uidhash_lock)
4411 {
4412 uid_hash_remove(up);
4413 spin_unlock_irqrestore(&uidhash_lock, flags);
4414 - sched_destroy_user(up);
4415 key_put(up->uid_keyring);
4416 key_put(up->session_keyring);
4417 kmem_cache_free(uid_cachep, up);
4418 }
4419
4420 -#endif
4421 -
4422 -#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
4423 -/*
4424 - * We need to check if a setuid can take place. This function should be called
4425 - * before successfully completing the setuid.
4426 - */
4427 -int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
4428 -{
4429 -
4430 - return sched_rt_can_attach(up->tg, tsk);
4431 -
4432 -}
4433 -#else
4434 -int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
4435 -{
4436 - return 1;
4437 -}
4438 -#endif
4439 -
4440 /*
4441 * Locate the user_struct for the passed UID. If found, take a ref on it. The
4442 * caller must undo that ref with free_uid().
4443 @@ -428,11 +137,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
4444 struct hlist_head *hashent = uidhashentry(ns, uid);
4445 struct user_struct *up, *new;
4446
4447 - /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
4448 - * atomic.
4449 - */
4450 - uids_mutex_lock();
4451 -
4452 + /* Make uid_hash_find() + uid_hash_insert() atomic. */
4453 spin_lock_irq(&uidhash_lock);
4454 up = uid_hash_find(uid, hashent);
4455 spin_unlock_irq(&uidhash_lock);
4456 @@ -445,14 +150,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
4457 new->uid = uid;
4458 atomic_set(&new->__count, 1);
4459
4460 - if (sched_create_user(new) < 0)
4461 - goto out_free_user;
4462 -
4463 new->user_ns = get_user_ns(ns);
4464
4465 - if (uids_user_create(new))
4466 - goto out_destoy_sched;
4467 -
4468 /*
4469 * Before adding this, check whether we raced
4470 * on adding the same user already..
4471 @@ -460,11 +159,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
4472 spin_lock_irq(&uidhash_lock);
4473 up = uid_hash_find(uid, hashent);
4474 if (up) {
4475 - /* This case is not possible when CONFIG_USER_SCHED
4476 - * is defined, since we serialize alloc_uid() using
4477 - * uids_mutex. Hence no need to call
4478 - * sched_destroy_user() or remove_user_sysfs_dir().
4479 - */
4480 key_put(new->uid_keyring);
4481 key_put(new->session_keyring);
4482 kmem_cache_free(uid_cachep, new);
4483 @@ -475,17 +169,9 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
4484 spin_unlock_irq(&uidhash_lock);
4485 }
4486
4487 - uids_mutex_unlock();
4488 -
4489 return up;
4490
4491 -out_destoy_sched:
4492 - sched_destroy_user(new);
4493 - put_user_ns(new->user_ns);
4494 -out_free_user:
4495 - kmem_cache_free(uid_cachep, new);
4496 out_unlock:
4497 - uids_mutex_unlock();
4498 return NULL;
4499 }
4500
4501 diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
4502 index e4877ca..f4f0231 100644
4503 --- a/net/sched/cls_cgroup.c
4504 +++ b/net/sched/cls_cgroup.c
4505 @@ -110,7 +110,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
4506 * calls by looking at the number of nested bh disable calls because
4507 * softirqs always disables bh.
4508 */
4509 - if (softirq_count() != SOFTIRQ_OFFSET)
4510 + if (in_serving_softirq())
4511 return -1;
4512
4513 rcu_read_lock();
4514 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4515 index 36d9e25..0b90dc9 100644
4516 --- a/security/selinux/hooks.c
4517 +++ b/security/selinux/hooks.c
4518 @@ -2601,7 +2601,10 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
4519 sid = tsec->sid;
4520 newsid = tsec->create_sid;
4521
4522 - if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
4523 + if ((sbsec->flags & SE_SBINITIALIZED) &&
4524 + (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
4525 + newsid = sbsec->mntpoint_sid;
4526 + else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
4527 rc = security_transition_sid(sid, dsec->sid,
4528 inode_mode_to_security_class(inode->i_mode),
4529 &newsid);
4530 diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
4531 index dd7cc6d..24b958c 100644
4532 --- a/security/selinux/nlmsgtab.c
4533 +++ b/security/selinux/nlmsgtab.c
4534 @@ -66,6 +66,8 @@ static struct nlmsg_perm nlmsg_route_perms[] =
4535 { RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
4536 { RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
4537 { RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
4538 + { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
4539 + { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
4540 };
4541
4542 static struct nlmsg_perm nlmsg_firewall_perms[] =
4543 diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
4544 index 7f4d744..2475bda 100644
4545 --- a/sound/core/hrtimer.c
4546 +++ b/sound/core/hrtimer.c
4547 @@ -44,12 +44,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
4548 {
4549 struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
4550 struct snd_timer *t = stime->timer;
4551 + unsigned long oruns;
4552
4553 if (!atomic_read(&stime->running))
4554 return HRTIMER_NORESTART;
4555
4556 - hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
4557 - snd_timer_interrupt(stime->timer, t->sticks);
4558 + oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
4559 + snd_timer_interrupt(stime->timer, t->sticks * oruns);
4560
4561 if (!atomic_read(&stime->running))
4562 return HRTIMER_NORESTART;
4563 diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
4564 index b9d2f20..5439d66 100644
4565 --- a/sound/pci/au88x0/au88x0_pcm.c
4566 +++ b/sound/pci/au88x0/au88x0_pcm.c
4567 @@ -42,11 +42,7 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = {
4568 .rate_min = 5000,
4569 .rate_max = 48000,
4570 .channels_min = 1,
4571 -#ifdef CHIP_AU8830
4572 - .channels_max = 4,
4573 -#else
4574 .channels_max = 2,
4575 -#endif
4576 .buffer_bytes_max = 0x10000,
4577 .period_bytes_min = 0x1,
4578 .period_bytes_max = 0x1000,
4579 @@ -115,6 +111,17 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_wt = {
4580 .periods_max = 64,
4581 };
4582 #endif
4583 +#ifdef CHIP_AU8830
4584 +static unsigned int au8830_channels[3] = {
4585 + 1, 2, 4,
4586 +};
4587 +
4588 +static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
4589 + .count = ARRAY_SIZE(au8830_channels),
4590 + .list = au8830_channels,
4591 + .mask = 0,
4592 +};
4593 +#endif
4594 /* open callback */
4595 static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
4596 {
4597 @@ -156,6 +163,15 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
4598 if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB
4599 || VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S)
4600 runtime->hw = snd_vortex_playback_hw_adb;
4601 +#ifdef CHIP_AU8830
4602 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
4603 + VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
4604 + runtime->hw.channels_max = 4;
4605 + snd_pcm_hw_constraint_list(runtime, 0,
4606 + SNDRV_PCM_HW_PARAM_CHANNELS,
4607 + &hw_constraints_au8830_channels);
4608 + }
4609 +#endif
4610 substream->runtime->private_data = NULL;
4611 }
4612 #ifndef CHIP_AU8810
4613 diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
4614 index 9446a5a..634c604 100644
4615 --- a/sound/pci/hda/hda_eld.c
4616 +++ b/sound/pci/hda/hda_eld.c
4617 @@ -383,7 +383,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a)
4618 snd_print_pcm_rates(a->rates, buf, sizeof(buf));
4619
4620 if (a->format == AUDIO_CODING_TYPE_LPCM)
4621 - snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
4622 + snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
4623 else if (a->max_bitrate)
4624 snprintf(buf2, sizeof(buf2),
4625 ", max bitrate = %d", a->max_bitrate);
4626 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
4627 index 9d855f4..d68aaf4 100644
4628 --- a/sound/pci/hda/patch_conexant.c
4629 +++ b/sound/pci/hda/patch_conexant.c
4630 @@ -366,10 +366,16 @@ static int conexant_add_jack(struct hda_codec *codec,
4631 struct conexant_spec *spec;
4632 struct conexant_jack *jack;
4633 const char *name;
4634 - int err;
4635 + int i, err;
4636
4637 spec = codec->spec;
4638 snd_array_init(&spec->jacks, sizeof(*jack), 32);
4639 +
4640 + jack = spec->jacks.list;
4641 + for (i = 0; i < spec->jacks.used; i++, jack++)
4642 + if (jack->nid == nid)
4643 + return 0; /* already present */
4644 +
4645 jack = snd_array_new(&spec->jacks);
4646 name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
4647
4648 diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
4649 index e693229..488593f 100644
4650 --- a/sound/soc/blackfin/bf5xx-ac97.c
4651 +++ b/sound/soc/blackfin/bf5xx-ac97.c
4652 @@ -260,9 +260,9 @@ static int bf5xx_ac97_suspend(struct snd_soc_dai *dai)
4653 pr_debug("%s : sport %d\n", __func__, dai->id);
4654 if (!dai->active)
4655 return 0;
4656 - if (dai->capture.active)
4657 + if (dai->capture_active)
4658 sport_rx_stop(sport);
4659 - if (dai->playback.active)
4660 + if (dai->playback_active)
4661 sport_tx_stop(sport);
4662 return 0;
4663 }
4664 diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
4665 index 253159c..5b47d39 100644
4666 --- a/sound/soc/codecs/wm8990.c
4667 +++ b/sound/soc/codecs/wm8990.c
4668 @@ -1185,7 +1185,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
4669 WM8990_VMIDTOG);
4670
4671 /* Delay to allow output caps to discharge */
4672 - msleep(msecs_to_jiffies(300));
4673 + msleep(300);
4674
4675 /* Disable VMIDTOG */
4676 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
4677 @@ -1197,17 +1197,17 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
4678 /* Enable outputs */
4679 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
4680
4681 - msleep(msecs_to_jiffies(50));
4682 + msleep(50);
4683
4684 /* Enable VMID at 2x50k */
4685 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
4686
4687 - msleep(msecs_to_jiffies(100));
4688 + msleep(100);
4689
4690 /* Enable VREF */
4691 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
4692
4693 - msleep(msecs_to_jiffies(600));
4694 + msleep(600);
4695
4696 /* Enable BUFIOEN */
4697 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
4698 @@ -1252,7 +1252,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
4699 /* Disable VMID */
4700 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
4701
4702 - msleep(msecs_to_jiffies(300));
4703 + msleep(300);
4704
4705 /* Enable all output discharge bits */
4706 snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
4707 diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
4708 index 99f3376..74078d9 100644
4709 --- a/sound/usb/usx2y/us122l.c
4710 +++ b/sound/usb/usx2y/us122l.c
4711 @@ -234,29 +234,26 @@ static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
4712 struct file *file, poll_table *wait)
4713 {
4714 struct us122l *us122l = hw->private_data;
4715 - struct usb_stream *s = us122l->sk.s;
4716 unsigned *polled;
4717 unsigned int mask;
4718
4719 poll_wait(file, &us122l->sk.sleep, wait);
4720
4721 - switch (s->state) {
4722 - case usb_stream_ready:
4723 - if (us122l->first == file)
4724 - polled = &s->periods_polled;
4725 - else
4726 - polled = &us122l->second_periods_polled;
4727 - if (*polled != s->periods_done) {
4728 - *polled = s->periods_done;
4729 - mask = POLLIN | POLLOUT | POLLWRNORM;
4730 - break;
4731 + mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
4732 + if (mutex_trylock(&us122l->mutex)) {
4733 + struct usb_stream *s = us122l->sk.s;
4734 + if (s && s->state == usb_stream_ready) {
4735 + if (us122l->first == file)
4736 + polled = &s->periods_polled;
4737 + else
4738 + polled = &us122l->second_periods_polled;
4739 + if (*polled != s->periods_done) {
4740 + *polled = s->periods_done;
4741 + mask = POLLIN | POLLOUT | POLLWRNORM;
4742 + } else
4743 + mask = 0;
4744 }
4745 - /* Fall through */
4746 - mask = 0;
4747 - break;
4748 - default:
4749 - mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
4750 - break;
4751 + mutex_unlock(&us122l->mutex);
4752 }
4753 return mask;
4754 }
4755 @@ -342,6 +339,7 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
4756 {
4757 struct usb_stream_config *cfg;
4758 struct us122l *us122l = hw->private_data;
4759 + struct usb_stream *s;
4760 unsigned min_period_frames;
4761 int err = 0;
4762 bool high_speed;
4763 @@ -387,18 +385,18 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
4764 snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
4765
4766 mutex_lock(&us122l->mutex);
4767 + s = us122l->sk.s;
4768 if (!us122l->master)
4769 us122l->master = file;
4770 else if (us122l->master != file) {
4771 - if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
4772 + if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
4773 err = -EIO;
4774 goto unlock;
4775 }
4776 us122l->slave = file;
4777 }
4778 - if (!us122l->sk.s ||
4779 - memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
4780 - us122l->sk.s->state == usb_stream_xrun) {
4781 + if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
4782 + s->state == usb_stream_xrun) {
4783 us122l_stop(us122l);
4784 if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
4785 err = -EIO;
4786 @@ -409,6 +407,7 @@ unlock:
4787 mutex_unlock(&us122l->mutex);
4788 free:
4789 kfree(cfg);
4790 + wake_up_all(&us122l->sk.sleep);
4791 return err;
4792 }
4793
