/[linux-patches]/genpatches-2.6/tags/2.6.34-10/1005_linux-2.6.34.6.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.34-10/1005_linux-2.6.34.6.patch

Parent Directory | Revision Log


Revision 1771 - (show annotations) (download)
Tue Aug 31 14:13:10 2010 UTC (4 years, 8 months ago) by mpagano
File size: 99997 byte(s)
2.6.34-10 release
1 diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
2 index 9dcb11e..bf62c44 100644
3 --- a/arch/arm/include/asm/ptrace.h
4 +++ b/arch/arm/include/asm/ptrace.h
5 @@ -158,15 +158,24 @@ struct pt_regs {
6 */
7 static inline int valid_user_regs(struct pt_regs *regs)
8 {
9 - if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) {
10 - regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
11 - return 1;
12 + unsigned long mode = regs->ARM_cpsr & MODE_MASK;
13 +
14 + /*
15 + * Always clear the F (FIQ) and A (delayed abort) bits
16 + */
17 + regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);
18 +
19 + if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
20 + if (mode == USR_MODE)
21 + return 1;
22 + if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
23 + return 1;
24 }
25
26 /*
27 * Force CPSR to something logical...
28 */
29 - regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT;
30 + regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
31 if (!(elf_hwcap & HWCAP_26BIT))
32 regs->ARM_cpsr |= USR_MODE;
33
34 diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
35 index 827cbc4..ea9ee4e 100644
36 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
37 +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
38 @@ -100,6 +100,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
39
40 static struct platform_nand_data ixdp425_flash_nand_data = {
41 .chip = {
42 + .nr_chips = 1,
43 .chip_delay = 30,
44 .options = NAND_NO_AUTOINCR,
45 #ifdef CONFIG_MTD_PARTITIONS
46 diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c
47 index e5b5b83..1f9363f 100644
48 --- a/arch/arm/mach-mx3/mach-qong.c
49 +++ b/arch/arm/mach-mx3/mach-qong.c
50 @@ -169,6 +169,7 @@ static void qong_nand_select_chip(struct mtd_info *mtd, int chip)
51
52 static struct platform_nand_data qong_nand_data = {
53 .chip = {
54 + .nr_chips = 1,
55 .chip_delay = 20,
56 .options = 0,
57 },
58 diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
59 index 5041d1b..696b1a9 100644
60 --- a/arch/arm/mach-orion5x/ts78xx-setup.c
61 +++ b/arch/arm/mach-orion5x/ts78xx-setup.c
62 @@ -216,6 +216,7 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = {
63
64 static struct platform_nand_data ts78xx_ts_nand_data = {
65 .chip = {
66 + .nr_chips = 1,
67 .part_probe_types = ts_nand_part_probes,
68 .partitions = ts78xx_ts_nand_parts,
69 .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts),
70 diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
71 index 9eaf5b0..68a27bc 100644
72 --- a/arch/blackfin/mach-bf537/boards/stamp.c
73 +++ b/arch/blackfin/mach-bf537/boards/stamp.c
74 @@ -400,6 +400,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
75
76 static struct platform_nand_data bfin_plat_nand_data = {
77 .chip = {
78 + .nr_chips = 1,
79 .chip_delay = 30,
80 #ifdef CONFIG_MTD_PARTITIONS
81 .part_probe_types = part_probes,
82 diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
83 index 5163e2c..7aa6a22 100644
84 --- a/arch/blackfin/mach-bf561/boards/acvilon.c
85 +++ b/arch/blackfin/mach-bf561/boards/acvilon.c
86 @@ -283,6 +283,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
87
88 static struct platform_nand_data bfin_plat_nand_data = {
89 .chip = {
90 + .nr_chips = 1,
91 .chip_delay = 30,
92 #ifdef CONFIG_MTD_PARTITIONS
93 .part_probe_types = part_probes,
94 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
95 index 7546e2c..c107b74 100644
96 --- a/arch/powerpc/Makefile
97 +++ b/arch/powerpc/Makefile
98 @@ -159,7 +159,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
99 all: zImage
100
101 # With make 3.82 we cannot mix normal and wildcard targets
102 -BOOT_TARGETS1 := zImage zImage.initrd uImaged
103 +BOOT_TARGETS1 := zImage zImage.initrd uImage
104 BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
105
106 PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
107 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
108 index f2e4800..f5cc06f 100644
109 --- a/arch/sparc/include/asm/atomic_64.h
110 +++ b/arch/sparc/include/asm/atomic_64.h
111 @@ -20,14 +20,14 @@
112 #define atomic64_set(v, i) (((v)->counter) = i)
113
114 extern void atomic_add(int, atomic_t *);
115 -extern void atomic64_add(int, atomic64_t *);
116 +extern void atomic64_add(long, atomic64_t *);
117 extern void atomic_sub(int, atomic_t *);
118 -extern void atomic64_sub(int, atomic64_t *);
119 +extern void atomic64_sub(long, atomic64_t *);
120
121 extern int atomic_add_ret(int, atomic_t *);
122 -extern int atomic64_add_ret(int, atomic64_t *);
123 +extern long atomic64_add_ret(long, atomic64_t *);
124 extern int atomic_sub_ret(int, atomic_t *);
125 -extern int atomic64_sub_ret(int, atomic64_t *);
126 +extern long atomic64_sub_ret(long, atomic64_t *);
127
128 #define atomic_dec_return(v) atomic_sub_ret(1, v)
129 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
130 @@ -91,7 +91,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
131 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
132 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
133
134 -static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
135 +static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
136 {
137 long c, old;
138 c = atomic64_read(v);
139 diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
140 index ff9ead6..43cf002 100644
141 --- a/arch/sparc/include/asm/parport.h
142 +++ b/arch/sparc/include/asm/parport.h
143 @@ -228,6 +228,10 @@ static const struct of_device_id ecpp_match[] = {
144 .name = "parallel",
145 .compatible = "ns87317-ecpp",
146 },
147 + {
148 + .name = "parallel",
149 + .compatible = "pnpALI,1533,3",
150 + },
151 {},
152 };
153
154 diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h
155 index a303c9d..e4c61a1 100644
156 --- a/arch/sparc/include/asm/rwsem-const.h
157 +++ b/arch/sparc/include/asm/rwsem-const.h
158 @@ -5,7 +5,7 @@
159 #define RWSEM_UNLOCKED_VALUE 0x00000000
160 #define RWSEM_ACTIVE_BIAS 0x00000001
161 #define RWSEM_ACTIVE_MASK 0x0000ffff
162 -#define RWSEM_WAITING_BIAS 0xffff0000
163 +#define RWSEM_WAITING_BIAS (-0x00010000)
164 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
165 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
166
167 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
168 index 9458685..b99909c 100644
169 --- a/arch/x86/Kconfig
170 +++ b/arch/x86/Kconfig
171 @@ -240,6 +240,11 @@ config X86_32_LAZY_GS
172
173 config KTIME_SCALAR
174 def_bool X86_32
175 +
176 +config ARCH_CPU_PROBE_RELEASE
177 + def_bool y
178 + depends on HOTPLUG_CPU
179 +
180 source "init/Kconfig"
181 source "kernel/Kconfig.freezer"
182
183 diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
184 index a6d360e..6dcb69b 100644
185 --- a/arch/x86/include/asm/cmpxchg_32.h
186 +++ b/arch/x86/include/asm/cmpxchg_32.h
187 @@ -53,60 +53,33 @@ struct __xchg_dummy {
188 __xchg((v), (ptr), sizeof(*ptr))
189
190 /*
191 - * The semantics of XCHGCMP8B are a bit strange, this is why
192 - * there is a loop and the loading of %%eax and %%edx has to
193 - * be inside. This inlines well in most cases, the cached
194 - * cost is around ~38 cycles. (in the future we might want
195 - * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
196 - * might have an implicit FPU-save as a cost, so it's not
197 - * clear which path to go.)
198 + * CMPXCHG8B only writes to the target if we had the previous
199 + * value in registers, otherwise it acts as a read and gives us the
200 + * "new previous" value. That is why there is a loop. Preloading
201 + * EDX:EAX is a performance optimization: in the common case it means
202 + * we need only one locked operation.
203 *
204 - * cmpxchg8b must be used with the lock prefix here to allow
205 - * the instruction to be executed atomically, see page 3-102
206 - * of the instruction set reference 24319102.pdf. We need
207 - * the reader side to see the coherent 64bit value.
208 + * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
209 + * least an FPU save and/or %cr0.ts manipulation.
210 + *
211 + * cmpxchg8b must be used with the lock prefix here to allow the
212 + * instruction to be executed atomically. We need to have the reader
213 + * side to see the coherent 64bit value.
214 */
215 -static inline void __set_64bit(unsigned long long *ptr,
216 - unsigned int low, unsigned int high)
217 +static inline void set_64bit(volatile u64 *ptr, u64 value)
218 {
219 + u32 low = value;
220 + u32 high = value >> 32;
221 + u64 prev = *ptr;
222 +
223 asm volatile("\n1:\t"
224 - "movl (%1), %%eax\n\t"
225 - "movl 4(%1), %%edx\n\t"
226 - LOCK_PREFIX "cmpxchg8b (%1)\n\t"
227 + LOCK_PREFIX "cmpxchg8b %0\n\t"
228 "jnz 1b"
229 - : "=m" (*ptr)
230 - : "D" (ptr),
231 - "b" (low),
232 - "c" (high)
233 - : "ax", "dx", "memory");
234 -}
235 -
236 -static inline void __set_64bit_constant(unsigned long long *ptr,
237 - unsigned long long value)
238 -{
239 - __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
240 -}
241 -
242 -#define ll_low(x) *(((unsigned int *)&(x)) + 0)
243 -#define ll_high(x) *(((unsigned int *)&(x)) + 1)
244 -
245 -static inline void __set_64bit_var(unsigned long long *ptr,
246 - unsigned long long value)
247 -{
248 - __set_64bit(ptr, ll_low(value), ll_high(value));
249 + : "=m" (*ptr), "+A" (prev)
250 + : "b" (low), "c" (high)
251 + : "memory");
252 }
253
254 -#define set_64bit(ptr, value) \
255 - (__builtin_constant_p((value)) \
256 - ? __set_64bit_constant((ptr), (value)) \
257 - : __set_64bit_var((ptr), (value)))
258 -
259 -#define _set_64bit(ptr, value) \
260 - (__builtin_constant_p(value) \
261 - ? __set_64bit(ptr, (unsigned int)(value), \
262 - (unsigned int)((value) >> 32)) \
263 - : __set_64bit(ptr, ll_low((value)), ll_high((value))))
264 -
265 extern void __cmpxchg_wrong_size(void);
266
267 /*
268 diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
269 index b92f147..9596e7c 100644
270 --- a/arch/x86/include/asm/cmpxchg_64.h
271 +++ b/arch/x86/include/asm/cmpxchg_64.h
272 @@ -5,13 +5,11 @@
273
274 #define __xg(x) ((volatile long *)(x))
275
276 -static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
277 +static inline void set_64bit(volatile u64 *ptr, u64 val)
278 {
279 *ptr = val;
280 }
281
282 -#define _set_64bit set_64bit
283 -
284 extern void __xchg_wrong_size(void);
285 extern void __cmpxchg_wrong_size(void);
286
287 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
288 index 93e2a13..a96489e 100644
289 --- a/arch/x86/kernel/apic/apic.c
290 +++ b/arch/x86/kernel/apic/apic.c
291 @@ -51,6 +51,7 @@
292 #include <asm/smp.h>
293 #include <asm/mce.h>
294 #include <asm/kvm_para.h>
295 +#include <asm/tsc.h>
296
297 unsigned int num_processors;
298
299 @@ -1151,8 +1152,13 @@ static void __cpuinit lapic_setup_esr(void)
300 */
301 void __cpuinit setup_local_APIC(void)
302 {
303 - unsigned int value;
304 - int i, j;
305 + unsigned int value, queued;
306 + int i, j, acked = 0;
307 + unsigned long long tsc = 0, ntsc;
308 + long long max_loops = cpu_khz;
309 +
310 + if (cpu_has_tsc)
311 + rdtscll(tsc);
312
313 if (disable_apic) {
314 arch_disable_smp_support();
315 @@ -1204,13 +1210,32 @@ void __cpuinit setup_local_APIC(void)
316 * the interrupt. Hence a vector might get locked. It was noticed
317 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
318 */
319 - for (i = APIC_ISR_NR - 1; i >= 0; i--) {
320 - value = apic_read(APIC_ISR + i*0x10);
321 - for (j = 31; j >= 0; j--) {
322 - if (value & (1<<j))
323 - ack_APIC_irq();
324 + do {
325 + queued = 0;
326 + for (i = APIC_ISR_NR - 1; i >= 0; i--)
327 + queued |= apic_read(APIC_IRR + i*0x10);
328 +
329 + for (i = APIC_ISR_NR - 1; i >= 0; i--) {
330 + value = apic_read(APIC_ISR + i*0x10);
331 + for (j = 31; j >= 0; j--) {
332 + if (value & (1<<j)) {
333 + ack_APIC_irq();
334 + acked++;
335 + }
336 + }
337 }
338 - }
339 + if (acked > 256) {
340 + printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
341 + acked);
342 + break;
343 + }
344 + if (cpu_has_tsc) {
345 + rdtscll(ntsc);
346 + max_loops = (cpu_khz << 10) - (ntsc - tsc);
347 + } else
348 + max_loops--;
349 + } while (queued && max_loops > 0);
350 + WARN_ON(max_loops <= 0);
351
352 /*
353 * Now that we are all set up, enable the APIC
354 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
355 index eb2789c..c64499c 100644
356 --- a/arch/x86/kernel/apic/io_apic.c
357 +++ b/arch/x86/kernel/apic/io_apic.c
358 @@ -1732,6 +1732,8 @@ __apicdebuginit(void) print_IO_APIC(void)
359 struct irq_pin_list *entry;
360
361 cfg = desc->chip_data;
362 + if (!cfg)
363 + continue;
364 entry = cfg->irq_2_pin;
365 if (!entry)
366 continue;
367 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
368 index 763d815..8931d05 100644
369 --- a/arch/x86/kernel/smpboot.c
370 +++ b/arch/x86/kernel/smpboot.c
371 @@ -91,6 +91,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
372 static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
373 #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
374 #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
375 +
376 +/*
377 + * We need this for trampoline_base protection from concurrent accesses when
378 + * off- and onlining cores wildly.
379 + */
380 +static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
381 +
382 +void cpu_hotplug_driver_lock()
383 +{
384 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
385 +}
386 +
387 +void cpu_hotplug_driver_unlock()
388 +{
389 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
390 +}
391 +
392 +ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
393 +ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
394 #else
395 static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
396 #define get_idle_for_cpu(x) (idle_thread_array[(x)])
397 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
398 index f1fb411..c41ad50 100644
399 --- a/arch/x86/oprofile/nmi_int.c
400 +++ b/arch/x86/oprofile/nmi_int.c
401 @@ -584,6 +584,18 @@ static int __init ppro_init(char **cpu_type)
402 if (force_arch_perfmon && cpu_has_arch_perfmon)
403 return 0;
404
405 + /*
406 + * Documentation on identifying Intel processors by CPU family
407 + * and model can be found in the Intel Software Developer's
408 + * Manuals (SDM):
409 + *
410 + * http://www.intel.com/products/processor/manuals/
411 + *
412 + * As of May 2010 the documentation for this was in the:
413 + * "Intel 64 and IA-32 Architectures Software Developer's
414 + * Manual Volume 3B: System Programming Guide", "Table B-1
415 + * CPUID Signature Values of DisplayFamily_DisplayModel".
416 + */
417 switch (cpu_model) {
418 case 0 ... 2:
419 *cpu_type = "i386/ppro";
420 @@ -605,12 +617,13 @@ static int __init ppro_init(char **cpu_type)
421 case 15: case 23:
422 *cpu_type = "i386/core_2";
423 break;
424 + case 0x1a:
425 + case 0x1e:
426 case 0x2e:
427 - case 26:
428 spec = &op_arch_perfmon_spec;
429 *cpu_type = "i386/core_i7";
430 break;
431 - case 28:
432 + case 0x1c:
433 *cpu_type = "i386/atom";
434 break;
435 default:
436 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
437 index 4a66201..c9736ed 100644
438 --- a/drivers/gpu/drm/drm_drv.c
439 +++ b/drivers/gpu/drm/drm_drv.c
440 @@ -502,7 +502,9 @@ long drm_ioctl(struct file *filp,
441 retcode = -EFAULT;
442 goto err_i1;
443 }
444 - }
445 + } else
446 + memset(kdata, 0, _IOC_SIZE(cmd));
447 +
448 if (ioctl->flags & DRM_UNLOCKED)
449 retcode = func(dev, kdata, file_priv);
450 else {
451 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
452 index 44c07f8..66eb458 100644
453 --- a/drivers/gpu/drm/i915/intel_display.c
454 +++ b/drivers/gpu/drm/i915/intel_display.c
455 @@ -1470,6 +1470,7 @@ static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
456 dpa_ctl = I915_READ(DP_A);
457 dpa_ctl |= DP_PLL_ENABLE;
458 I915_WRITE(DP_A, dpa_ctl);
459 + POSTING_READ(DP_A);
460 udelay(200);
461 }
462
463 @@ -4290,14 +4291,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
464 work->pending_flip_obj = obj;
465
466 if (intel_crtc->plane)
467 - flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
468 + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
469 else
470 - flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
471 + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
472
473 - /* Wait for any previous flip to finish */
474 - if (IS_GEN3(dev))
475 - while (I915_READ(ISR) & flip_mask)
476 - ;
477 + if (IS_GEN3(dev) || IS_GEN2(dev)) {
478 + BEGIN_LP_RING(2);
479 + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
480 + OUT_RING(0);
481 + ADVANCE_LP_RING();
482 + }
483
484 /* Offset into the new buffer for cases of shared fbs between CRTCs */
485 offset = obj_priv->gtt_offset;
486 @@ -4311,12 +4314,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
487 OUT_RING(offset | obj_priv->tiling_mode);
488 pipesrc = I915_READ(pipesrc_reg);
489 OUT_RING(pipesrc & 0x0fff0fff);
490 - } else {
491 + } else if (IS_GEN3(dev)) {
492 OUT_RING(MI_DISPLAY_FLIP_I915 |
493 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
494 OUT_RING(fb->pitch);
495 OUT_RING(offset);
496 OUT_RING(MI_NOOP);
497 + } else {
498 + OUT_RING(MI_DISPLAY_FLIP |
499 + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
500 + OUT_RING(fb->pitch);
501 + OUT_RING(offset);
502 + OUT_RING(MI_NOOP);
503 }
504 ADVANCE_LP_RING();
505
506 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
507 index 0fb8fc1..05ef0c4 100644
508 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
509 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
510 @@ -206,6 +206,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
511 uint16_t *line_mux,
512 struct radeon_hpd *hpd)
513 {
514 + struct radeon_device *rdev = dev->dev_private;
515
516 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
517 if ((dev->pdev->device == 0x791e) &&
518 @@ -308,13 +309,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
519 }
520 }
521
522 - /* Acer laptop reports DVI-D as DVI-I */
523 + /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
524 if ((dev->pdev->device == 0x95c4) &&
525 (dev->pdev->subsystem_vendor == 0x1025) &&
526 (dev->pdev->subsystem_device == 0x013c)) {
527 + struct radeon_gpio_rec gpio;
528 +
529 if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
530 - (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
531 + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
532 + gpio = radeon_lookup_gpio(rdev, 6);
533 + *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
534 *connector_type = DRM_MODE_CONNECTOR_DVID;
535 + } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
536 + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
537 + gpio = radeon_lookup_gpio(rdev, 7);
538 + *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
539 + }
540 }
541
542 /* XFX Pine Group device rv730 reports no VGA DDC lines
543 @@ -1038,7 +1048,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
544 return true;
545 break;
546 case 2:
547 - if (igp_info->info_2.ucMemoryType & 0x0f)
548 + if (igp_info->info_2.ulBootUpSidePortClock)
549 return true;
550 break;
551 default:
552 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
553 index ed6a724..2008481 100644
554 --- a/drivers/gpu/drm/radeon/radeon_device.c
555 +++ b/drivers/gpu/drm/radeon/radeon_device.c
556 @@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
557 mc->mc_vram_size = mc->aper_size;
558 }
559 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
560 - if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
561 + if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
562 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
563 mc->real_vram_size = mc->aper_size;
564 mc->mc_vram_size = mc->aper_size;
565 diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
566 index 5def6f5..0cd2704 100644
567 --- a/drivers/gpu/drm/radeon/radeon_i2c.c
568 +++ b/drivers/gpu/drm/radeon/radeon_i2c.c
569 @@ -95,6 +95,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
570 }
571 }
572
573 + /* switch the pads to ddc mode */
574 + if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
575 + temp = RREG32(rec->mask_clk_reg);
576 + temp &= ~(1 << 16);
577 + WREG32(rec->mask_clk_reg, temp);
578 + }
579 +
580 /* clear the output pin values */
581 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
582 WREG32(rec->a_clk_reg, temp);
583 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
584 index a212041..b05051f 100644
585 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
586 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
587 @@ -118,11 +118,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
588 * chips. Disable MSI on them for now.
589 */
590 if ((rdev->family >= CHIP_RV380) &&
591 - (!(rdev->flags & RADEON_IS_IGP))) {
592 + (!(rdev->flags & RADEON_IS_IGP)) &&
593 + (!(rdev->flags & RADEON_IS_AGP))) {
594 int ret = pci_enable_msi(rdev->pdev);
595 if (!ret) {
596 rdev->msi_enabled = 1;
597 - DRM_INFO("radeon: using MSI.\n");
598 + dev_info(rdev->dev, "radeon: using MSI.\n");
599 }
600 }
601 rdev->irq.installed = true;
602 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
603 index 88865e3..d6871ea 100644
604 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
605 +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
606 @@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
607 if (!ref_div)
608 return 1;
609
610 - vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
611 + vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
612
613 /*
614 * This is horribly crude: the VCO frequency range is divided into
615 diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
616 index 4a64b85..68e69a4 100644
617 --- a/drivers/hwmon/pc87360.c
618 +++ b/drivers/hwmon/pc87360.c
619 @@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
620
621 static int __init pc87360_device_add(unsigned short address)
622 {
623 - struct resource res = {
624 - .name = "pc87360",
625 - .flags = IORESOURCE_IO,
626 - };
627 - int err, i;
628 + struct resource res[3];
629 + int err, i, res_count;
630
631 pdev = platform_device_alloc("pc87360", address);
632 if (!pdev) {
633 @@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address)
634 goto exit;
635 }
636
637 + memset(res, 0, 3 * sizeof(struct resource));
638 + res_count = 0;
639 for (i = 0; i < 3; i++) {
640 if (!extra_isa[i])
641 continue;
642 - res.start = extra_isa[i];
643 - res.end = extra_isa[i] + PC87360_EXTENT - 1;
644 + res[res_count].start = extra_isa[i];
645 + res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1;
646 + res[res_count].name = "pc87360",
647 + res[res_count].flags = IORESOURCE_IO,
648
649 - err = acpi_check_resource_conflict(&res);
650 + err = acpi_check_resource_conflict(&res[res_count]);
651 if (err)
652 goto exit_device_put;
653
654 - err = platform_device_add_resources(pdev, &res, 1);
655 - if (err) {
656 - printk(KERN_ERR "pc87360: Device resource[%d] "
657 - "addition failed (%d)\n", i, err);
658 - goto exit_device_put;
659 - }
660 + res_count++;
661 + }
662 +
663 + err = platform_device_add_resources(pdev, res, res_count);
664 + if (err) {
665 + printk(KERN_ERR "pc87360: Device resources addition failed "
666 + "(%d)\n", err);
667 + goto exit_device_put;
668 }
669
670 err = platform_device_add(pdev);
671 diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
672 index bedc8b0..b4093f2 100644
673 --- a/drivers/isdn/gigaset/capi.c
674 +++ b/drivers/isdn/gigaset/capi.c
675 @@ -389,13 +389,13 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
676 ++bcs->trans_up;
677
678 if (!ap) {
679 - dev_err(cs->dev, "%s: no application\n", __func__);
680 + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
681 return;
682 }
683
684 /* don't send further B3 messages if disconnected */
685 if (bcs->apconnstate < APCONN_ACTIVE) {
686 - gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
687 + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
688 return;
689 }
690
691 @@ -433,13 +433,14 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
692 bcs->trans_down++;
693
694 if (!ap) {
695 - dev_err(cs->dev, "%s: no application\n", __func__);
696 + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__);
697 + dev_kfree_skb_any(skb);
698 return;
699 }
700
701 /* don't send further B3 messages if disconnected */
702 if (bcs->apconnstate < APCONN_ACTIVE) {
703 - gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
704 + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__);
705 dev_kfree_skb_any(skb);
706 return;
707 }
708 @@ -758,7 +759,7 @@ void gigaset_isdn_connD(struct bc_state *bcs)
709 ap = bcs->ap;
710 if (!ap) {
711 spin_unlock_irqrestore(&bcs->aplock, flags);
712 - dev_err(cs->dev, "%s: no application\n", __func__);
713 + gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
714 return;
715 }
716 if (bcs->apconnstate == APCONN_NONE) {
717 @@ -854,7 +855,7 @@ void gigaset_isdn_connB(struct bc_state *bcs)
718 ap = bcs->ap;
719 if (!ap) {
720 spin_unlock_irqrestore(&bcs->aplock, flags);
721 - dev_err(cs->dev, "%s: no application\n", __func__);
722 + gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
723 return;
724 }
725 if (!bcs->apconnstate) {
726 @@ -912,13 +913,12 @@ void gigaset_isdn_connB(struct bc_state *bcs)
727 */
728 void gigaset_isdn_hupB(struct bc_state *bcs)
729 {
730 - struct cardstate *cs = bcs->cs;
731 struct gigaset_capi_appl *ap = bcs->ap;
732
733 /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
734
735 if (!ap) {
736 - dev_err(cs->dev, "%s: no application\n", __func__);
737 + gig_dbg(DEBUG_CMD, "%s: application gone", __func__);
738 return;
739 }
740
741 @@ -1055,6 +1055,7 @@ static inline void remove_appl_from_channel(struct bc_state *bcs,
742 do {
743 if (bcap->bcnext == ap) {
744 bcap->bcnext = bcap->bcnext->bcnext;
745 + spin_unlock_irqrestore(&bcs->aplock, flags);
746 return;
747 }
748 bcap = bcap->bcnext;
749 diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
750 index 1081091..2655e3a 100644
751 --- a/drivers/isdn/sc/ioctl.c
752 +++ b/drivers/isdn/sc/ioctl.c
753 @@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data)
754 pr_debug("%s: SCIOGETSPID: ioctl received\n",
755 sc_adapter[card]->devicename);
756
757 - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
758 + spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL);
759 if (!spid) {
760 kfree(rcvmsg);
761 return -ENOMEM;
762 @@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data)
763 kfree(rcvmsg);
764 return status;
765 }
766 - strcpy(spid, rcvmsg->msg_data.byte_array);
767 + strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE);
768
769 /*
770 * Package the switch type and send to user space
771 @@ -272,12 +272,12 @@ int sc_ioctl(int card, scs_ioctl *data)
772 return status;
773 }
774
775 - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL);
776 + dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL);
777 if (!dn) {
778 kfree(rcvmsg);
779 return -ENOMEM;
780 }
781 - strcpy(dn, rcvmsg->msg_data.byte_array);
782 + strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE);
783 kfree(rcvmsg);
784
785 /*
786 @@ -348,7 +348,7 @@ int sc_ioctl(int card, scs_ioctl *data)
787 pr_debug("%s: SCIOSTAT: ioctl received\n",
788 sc_adapter[card]->devicename);
789
790 - bi = kmalloc (sizeof(boardInfo), GFP_KERNEL);
791 + bi = kzalloc(sizeof(boardInfo), GFP_KERNEL);
792 if (!bi) {
793 kfree(rcvmsg);
794 return -ENOMEM;
795 diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
796 index 2b7907b..0bdb201 100644
797 --- a/drivers/md/dm-exception-store.c
798 +++ b/drivers/md/dm-exception-store.c
799 @@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
800
801 /* Validate the chunk size against the device block size */
802 if (chunk_size %
803 - (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
804 + (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
805 + chunk_size %
806 + (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
807 *error = "Chunk size is not a multiple of device blocksize";
808 return -EINVAL;
809 }
810 diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
811 index e8dfa06..0b25362 100644
812 --- a/drivers/md/dm-exception-store.h
813 +++ b/drivers/md/dm-exception-store.h
814 @@ -126,8 +126,9 @@ struct dm_exception_store {
815 };
816
817 /*
818 - * Obtain the cow device used by a given snapshot.
819 + * Obtain the origin or cow device used by a given snapshot.
820 */
821 +struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
822 struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
823
824 /*
825 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
826 index d7500e1..f81c536 100644
827 --- a/drivers/md/dm-ioctl.c
828 +++ b/drivers/md/dm-ioctl.c
829 @@ -249,40 +249,46 @@ static void __hash_remove(struct hash_cell *hc)
830
831 static void dm_hash_remove_all(int keep_open_devices)
832 {
833 - int i, dev_skipped, dev_removed;
834 + int i, dev_skipped;
835 struct hash_cell *hc;
836 - struct list_head *tmp, *n;
837 + struct mapped_device *md;
838 +
839 +retry:
840 + dev_skipped = 0;
841
842 down_write(&_hash_lock);
843
844 -retry:
845 - dev_skipped = dev_removed = 0;
846 for (i = 0; i < NUM_BUCKETS; i++) {
847 - list_for_each_safe (tmp, n, _name_buckets + i) {
848 - hc = list_entry(tmp, struct hash_cell, name_list);
849 + list_for_each_entry(hc, _name_buckets + i, name_list) {
850 + md = hc->md;
851 + dm_get(md);
852
853 - if (keep_open_devices &&
854 - dm_lock_for_deletion(hc->md)) {
855 + if (keep_open_devices && dm_lock_for_deletion(md)) {
856 + dm_put(md);
857 dev_skipped++;
858 continue;
859 }
860 +
861 __hash_remove(hc);
862 - dev_removed = 1;
863 - }
864 - }
865
866 - /*
867 - * Some mapped devices may be using other mapped devices, so if any
868 - * still exist, repeat until we make no further progress.
869 - */
870 - if (dev_skipped) {
871 - if (dev_removed)
872 - goto retry;
873 + up_write(&_hash_lock);
874
875 - DMWARN("remove_all left %d open device(s)", dev_skipped);
876 + dm_put(md);
877 +
878 + /*
879 + * Some mapped devices may be using other mapped
880 + * devices, so repeat until we make no further
881 + * progress. If a new mapped device is created
882 + * here it will also get removed.
883 + */
884 + goto retry;
885 + }
886 }
887
888 up_write(&_hash_lock);
889 +
890 + if (dev_skipped)
891 + DMWARN("remove_all left %d open device(s)", dev_skipped);
892 }
893
894 static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
895 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
896 index 826bce7..da2223a 100644
897 --- a/drivers/md/dm-mpath.c
898 +++ b/drivers/md/dm-mpath.c
899 @@ -706,6 +706,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
900
901 if (as->argc < nr_params) {
902 ti->error = "not enough path parameters";
903 + r = -EINVAL;
904 goto bad;
905 }
906
907 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
908 index 5485377..a1f2ab5 100644
909 --- a/drivers/md/dm-snap.c
910 +++ b/drivers/md/dm-snap.c
911 @@ -148,6 +148,12 @@ struct dm_snapshot {
912 #define RUNNING_MERGE 0
913 #define SHUTDOWN_MERGE 1
914
915 +struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
916 +{
917 + return s->origin;
918 +}
919 +EXPORT_SYMBOL(dm_snap_origin);
920 +
921 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
922 {
923 return s->cow;
924 @@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
925 origin_mode = FMODE_WRITE;
926 }
927
928 - origin_path = argv[0];
929 - argv++;
930 - argc--;
931 -
932 s = kmalloc(sizeof(*s), GFP_KERNEL);
933 if (!s) {
934 ti->error = "Cannot allocate snapshot context private "
935 @@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
936 goto bad;
937 }
938
939 + origin_path = argv[0];
940 + argv++;
941 + argc--;
942 +
943 + r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
944 + if (r) {
945 + ti->error = "Cannot get origin device";
946 + goto bad_origin;
947 + }
948 +
949 cow_path = argv[0];
950 argv++;
951 argc--;
952 @@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
953 argv += args_used;
954 argc -= args_used;
955
956 - r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
957 - if (r) {
958 - ti->error = "Cannot get origin device";
959 - goto bad_origin;
960 - }
961 -
962 s->ti = ti;
963 s->valid = 1;
964 s->active = 0;
965 @@ -1212,15 +1218,15 @@ bad_kcopyd:
966 dm_exception_table_exit(&s->complete, exception_cache);
967
968 bad_hash_tables:
969 - dm_put_device(ti, s->origin);
970 -
971 -bad_origin:
972 dm_exception_store_destroy(s->store);
973
974 bad_store:
975 dm_put_device(ti, s->cow);
976
977 bad_cow:
978 + dm_put_device(ti, s->origin);
979 +
980 +bad_origin:
981 kfree(s);
982
983 bad:
984 @@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti)
985
986 mempool_destroy(s->pending_pool);
987
988 - dm_put_device(ti, s->origin);
989 -
990 dm_exception_store_destroy(s->store);
991
992 dm_put_device(ti, s->cow);
993
994 + dm_put_device(ti, s->origin);
995 +
996 kfree(s);
997 }
998
999 @@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti,
1000 iterate_devices_callout_fn fn, void *data)
1001 {
1002 struct dm_snapshot *snap = ti->private;
1003 + int r;
1004 +
1005 + r = fn(ti, snap->origin, 0, ti->len, data);
1006
1007 - return fn(ti, snap->origin, 0, ti->len, data);
1008 + if (!r)
1009 + r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
1010 +
1011 + return r;
1012 }
1013
1014
1015 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1016 index d21e128..56d192d 100644
1017 --- a/drivers/md/dm.c
1018 +++ b/drivers/md/dm.c
1019 @@ -2141,6 +2141,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
1020 md = idr_find(&_minor_idr, minor);
1021 if (md && (md == MINOR_ALLOCED ||
1022 (MINOR(disk_devt(dm_disk(md))) != minor) ||
1023 + dm_deleting_md(md) ||
1024 test_bit(DMF_FREEING, &md->flags))) {
1025 md = NULL;
1026 goto out;
1027 diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
1028 index 8327e24..300ec15 100644
1029 --- a/drivers/memstick/core/mspro_block.c
1030 +++ b/drivers/memstick/core/mspro_block.c
1031 @@ -1040,6 +1040,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
1032 snprintf(s_attr->name, sizeof(s_attr->name),
1033 "attr_x%02x", attr->entries[cnt].id);
1034
1035 + sysfs_attr_init(&s_attr->dev_attr.attr);
1036 s_attr->dev_attr.attr.name = s_attr->name;
1037 s_attr->dev_attr.attr.mode = S_IRUGO;
1038 s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
1039 @@ -1330,13 +1331,14 @@ static void mspro_block_remove(struct memstick_dev *card)
1040 struct mspro_block_data *msb = memstick_get_drvdata(card);
1041 unsigned long flags;
1042
1043 - del_gendisk(msb->disk);
1044 - dev_dbg(&card->dev, "mspro block remove\n");
1045 spin_lock_irqsave(&msb->q_lock, flags);
1046 msb->eject = 1;
1047 blk_start_queue(msb->queue);
1048 spin_unlock_irqrestore(&msb->q_lock, flags);
1049
1050 + del_gendisk(msb->disk);
1051 + dev_dbg(&card->dev, "mspro block remove\n");
1052 +
1053 blk_cleanup_queue(msb->queue);
1054 msb->queue = NULL;
1055
1056 diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
1057 index 5d55152..33f1165 100644
1058 --- a/drivers/mtd/nand/pxa3xx_nand.c
1059 +++ b/drivers/mtd/nand/pxa3xx_nand.c
1060 @@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
1061 #define tAR_NDTR1(r) (((r) >> 0) & 0xf)
1062
1063 /* convert nano-seconds to nand flash controller clock cycles */
1064 -#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1)
1065 +#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
1066
1067 /* convert nand flash controller clock cycles to nano-seconds */
1068 #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
1069 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
1070 index 9015555..4c2a31f 100644
1071 --- a/drivers/net/e1000e/82571.c
1072 +++ b/drivers/net/e1000e/82571.c
1073 @@ -926,12 +926,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1074 ew32(IMC, 0xffffffff);
1075 icr = er32(ICR);
1076
1077 - /* Install any alternate MAC address into RAR0 */
1078 - ret_val = e1000_check_alt_mac_addr_generic(hw);
1079 - if (ret_val)
1080 - return ret_val;
1081 + if (hw->mac.type == e1000_82571) {
1082 + /* Install any alternate MAC address into RAR0 */
1083 + ret_val = e1000_check_alt_mac_addr_generic(hw);
1084 + if (ret_val)
1085 + return ret_val;
1086
1087 - e1000e_set_laa_state_82571(hw, true);
1088 + e1000e_set_laa_state_82571(hw, true);
1089 + }
1090
1091 /* Reinitialize the 82571 serdes link state machine */
1092 if (hw->phy.media_type == e1000_media_type_internal_serdes)
1093 @@ -1609,14 +1611,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1094 {
1095 s32 ret_val = 0;
1096
1097 - /*
1098 - * If there's an alternate MAC address place it in RAR0
1099 - * so that it will override the Si installed default perm
1100 - * address.
1101 - */
1102 - ret_val = e1000_check_alt_mac_addr_generic(hw);
1103 - if (ret_val)
1104 - goto out;
1105 + if (hw->mac.type == e1000_82571) {
1106 + /*
1107 + * If there's an alternate MAC address place it in RAR0
1108 + * so that it will override the Si installed default perm
1109 + * address.
1110 + */
1111 + ret_val = e1000_check_alt_mac_addr_generic(hw);
1112 + if (ret_val)
1113 + goto out;
1114 + }
1115
1116 ret_val = e1000_read_mac_addr_generic(hw);
1117
1118 @@ -1826,6 +1830,7 @@ struct e1000_info e1000_82573_info = {
1119 | FLAG_HAS_SMART_POWER_DOWN
1120 | FLAG_HAS_AMT
1121 | FLAG_HAS_SWSM_ON_LOAD,
1122 + .flags2 = FLAG2_DISABLE_ASPM_L1,
1123 .pba = 20,
1124 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1125 .get_variants = e1000_get_variants_82571,
1126 diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
1127 index e301e26..57d7a76 100644
1128 --- a/drivers/net/e1000e/defines.h
1129 +++ b/drivers/net/e1000e/defines.h
1130 @@ -613,6 +613,7 @@
1131 #define E1000_FLASH_UPDATES 2000
1132
1133 /* NVM Word Offsets */
1134 +#define NVM_COMPAT 0x0003
1135 #define NVM_ID_LED_SETTINGS 0x0004
1136 #define NVM_INIT_CONTROL2_REG 0x000F
1137 #define NVM_INIT_CONTROL3_PORT_B 0x0014
1138 @@ -633,6 +634,9 @@
1139 /* Mask bits for fields in Word 0x1a of the NVM */
1140 #define NVM_WORD1A_ASPM_MASK 0x000C
1141
1142 +/* Mask bits for fields in Word 0x03 of the EEPROM */
1143 +#define NVM_COMPAT_LOM 0x0800
1144 +
1145 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
1146 #define NVM_SUM 0xBABA
1147
1148 diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
1149 index a8b2c0d..02f57f9 100644
1150 --- a/drivers/net/e1000e/lib.c
1151 +++ b/drivers/net/e1000e/lib.c
1152 @@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
1153 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
1154 u8 alt_mac_addr[ETH_ALEN];
1155
1156 + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
1157 + if (ret_val)
1158 + goto out;
1159 +
1160 + /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
1161 + if (!((nvm_data & NVM_COMPAT_LOM) ||
1162 + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
1163 + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
1164 + goto out;
1165 +
1166 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
1167 &nvm_alt_mac_addr_offset);
1168 if (ret_val) {
1169 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1170 index ea90997..a350147 100644
1171 --- a/drivers/net/wireless/ath/ath5k/base.c
1172 +++ b/drivers/net/wireless/ath/ath5k/base.c
1173 @@ -48,6 +48,7 @@
1174 #include <linux/netdevice.h>
1175 #include <linux/cache.h>
1176 #include <linux/pci.h>
1177 +#include <linux/pci-aspm.h>
1178 #include <linux/ethtool.h>
1179 #include <linux/uaccess.h>
1180 #include <linux/slab.h>
1181 @@ -469,6 +470,26 @@ ath5k_pci_probe(struct pci_dev *pdev,
1182 int ret;
1183 u8 csz;
1184
1185 + /*
1186 + * L0s needs to be disabled on all ath5k cards.
1187 + *
1188 + * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
1189 + * by default in the future in 2.6.36) this will also mean both L1 and
1190 + * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
1191 + * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
1192 + * though but cannot currently undue the effect of a blacklist, for
1193 + * details you can read pcie_aspm_sanity_check() and see how it adjusts
1194 + * the device link capability.
1195 + *
1196 + * It may be possible in the future to implement some PCI API to allow
1197 + * drivers to override blacklists for pre 1.1 PCIe but for now it is
1198 + * best to accept that both L0s and L1 will be disabled completely for
1199 + * distributions shipping with CONFIG_PCIEASPM rather than having this
1200 + * issue present. Motivation for adding this new API will be to help
1201 + * with power consumption for some of these devices.
1202 + */
1203 + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
1204 +
1205 ret = pci_enable_device(pdev);
1206 if (ret) {
1207 dev_err(&pdev->dev, "can't enable device\n");
1208 diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
1209 index a37b30c..ce3722f 100644
1210 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
1211 +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
1212 @@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
1213
1214 cmd->timeout = timeout;
1215
1216 - ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
1217 + ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
1218 if (ret < 0) {
1219 wl1251_error("cmd trigger scan to failed: %d", ret);
1220 goto out;
1221 diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
1222 index 7c3d03b..cfcf868 100644
1223 --- a/drivers/pcmcia/pcmcia_resource.c
1224 +++ b/drivers/pcmcia/pcmcia_resource.c
1225 @@ -41,7 +41,7 @@ module_param(io_speed, int, 0444);
1226 #ifdef CONFIG_PCMCIA_PROBE
1227 #include <asm/irq.h>
1228 /* mask of IRQs already reserved by other cards, we should avoid using them */
1229 -static u8 pcmcia_used_irq[NR_IRQS];
1230 +static u8 pcmcia_used_irq[32];
1231 #endif
1232
1233 static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
1234 @@ -768,6 +768,9 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
1235 for (try = 0; try < 64; try++) {
1236 irq = try % 32;
1237
1238 + if (irq > NR_IRQS)
1239 + continue;
1240 +
1241 /* marked as available by driver, and not blocked by userspace? */
1242 if (!((mask >> irq) & 1))
1243 continue;
1244 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
1245 index 71ff154..90111d7 100644
1246 --- a/drivers/platform/x86/compal-laptop.c
1247 +++ b/drivers/platform/x86/compal-laptop.c
1248 @@ -259,6 +259,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
1249 .callback = dmi_check_cb
1250 },
1251 {
1252 + .ident = "Dell Mini 1012",
1253 + .matches = {
1254 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1255 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
1256 + },
1257 + .callback = dmi_check_cb
1258 + },
1259 + {
1260 .ident = "Dell Inspiron 11z",
1261 .matches = {
1262 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1263 @@ -375,5 +383,6 @@ MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*");
1264 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
1265 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
1266 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
1267 +MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
1268 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
1269 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
1270 diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
1271 index 661e3ac..6110601 100644
1272 --- a/drivers/platform/x86/dell-laptop.c
1273 +++ b/drivers/platform/x86/dell-laptop.c
1274 @@ -116,6 +116,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
1275 },
1276 },
1277 {
1278 + .ident = "Dell Mini 1012",
1279 + .matches = {
1280 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1281 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
1282 + },
1283 + },
1284 + {
1285 .ident = "Dell Inspiron 11z",
1286 .matches = {
1287 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1288 diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
1289 index 5a1dc8a..03713bc 100644
1290 --- a/drivers/regulator/wm8994-regulator.c
1291 +++ b/drivers/regulator/wm8994-regulator.c
1292 @@ -219,8 +219,6 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
1293
1294 ldo->wm8994 = wm8994;
1295
1296 - ldo->is_enabled = true;
1297 -
1298 if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
1299 ldo->enable = pdata->ldo[id].enable;
1300
1301 @@ -237,7 +235,8 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
1302 ret);
1303 goto err_gpio;
1304 }
1305 - }
1306 + } else
1307 + ldo->is_enabled = true;
1308
1309 ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
1310 pdata->ldo[id].init_data, ldo);
1311 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
1312 index 40cba25..632a7b6 100644
1313 --- a/drivers/usb/host/xhci-ring.c
1314 +++ b/drivers/usb/host/xhci-ring.c
1315 @@ -125,7 +125,7 @@ static void next_trb(struct xhci_hcd *xhci,
1316 *seg = (*seg)->next;
1317 *trb = ((*seg)->trbs);
1318 } else {
1319 - *trb = (*trb)++;
1320 + (*trb)++;
1321 }
1322 }
1323
1324 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1325 index 2128d2c..2d36ed2 100644
1326 --- a/drivers/usb/serial/cp210x.c
1327 +++ b/drivers/usb/serial/cp210x.c
1328 @@ -223,8 +223,8 @@ static struct usb_serial_driver cp210x_device = {
1329 #define BITS_STOP_2 0x0002
1330
1331 /* CP210X_SET_BREAK */
1332 -#define BREAK_ON 0x0000
1333 -#define BREAK_OFF 0x0001
1334 +#define BREAK_ON 0x0001
1335 +#define BREAK_OFF 0x0000
1336
1337 /* CP210X_(SET_MHS|GET_MDMSTS) */
1338 #define CONTROL_DTR 0x0001
1339 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1340 index 738383a..ce929e4 100644
1341 --- a/drivers/usb/serial/ftdi_sio.c
1342 +++ b/drivers/usb/serial/ftdi_sio.c
1343 @@ -185,6 +185,7 @@ static struct usb_device_id id_table_combined [] = {
1344 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
1345 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
1346 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
1347 + { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
1348 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
1349 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
1350 { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
1351 @@ -756,6 +757,8 @@ static struct usb_device_id id_table_combined [] = {
1352 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
1353 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1354 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
1355 + { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
1356 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1357 { }, /* Optional parameter entry */
1358 { } /* Terminating entry */
1359 };
1360 @@ -1399,7 +1402,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
1361 }
1362
1363 /* set max packet size based on descriptor */
1364 - priv->max_packet_size = ep_desc->wMaxPacketSize;
1365 + priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
1366
1367 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
1368 }
1369 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1370 index c101821..792f005 100644
1371 --- a/drivers/usb/serial/ftdi_sio_ids.h
1372 +++ b/drivers/usb/serial/ftdi_sio_ids.h
1373 @@ -110,6 +110,9 @@
1374 /* Propox devices */
1375 #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
1376
1377 +/* Lenz LI-USB Computer Interface. */
1378 +#define FTDI_LENZ_LIUSB_PID 0xD780
1379 +
1380 /*
1381 * Xsens Technologies BV products (http://www.xsens.com).
1382 */
1383 @@ -996,6 +999,12 @@
1384 #define ALTI2_N3_PID 0x6001 /* Neptune 3 */
1385
1386 /*
1387 + * Ionics PlugComputer
1388 + */
1389 +#define IONICS_VID 0x1c0c
1390 +#define IONICS_PLUGCOMPUTER_PID 0x0102
1391 +
1392 +/*
1393 * Dresden Elektronik Sensor Terminal Board
1394 */
1395 #define DE_VID 0x1cf1 /* Vendor ID */
1396 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
1397 index aa876f7..b6e8908 100644
1398 --- a/drivers/usb/serial/io_ti.c
1399 +++ b/drivers/usb/serial/io_ti.c
1400 @@ -1174,7 +1174,7 @@ static int download_fw(struct edgeport_serial *serial)
1401
1402 /* Check if we have an old version in the I2C and
1403 update if necessary */
1404 - if (download_cur_ver != download_new_ver) {
1405 + if (download_cur_ver < download_new_ver) {
1406 dbg("%s - Update I2C dld from %d.%d to %d.%d",
1407 __func__,
1408 firmware_version->Ver_Major,
1409 diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
1410 index a6b207c..1f00f24 100644
1411 --- a/drivers/usb/serial/navman.c
1412 +++ b/drivers/usb/serial/navman.c
1413 @@ -25,6 +25,7 @@ static int debug;
1414
1415 static const struct usb_device_id id_table[] = {
1416 { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
1417 + { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
1418 { },
1419 };
1420 MODULE_DEVICE_TABLE(usb, id_table);
1421 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1422 index 20d1585..0e8abb5 100644
1423 --- a/drivers/usb/serial/option.c
1424 +++ b/drivers/usb/serial/option.c
1425 @@ -389,6 +389,10 @@ static int option_resume(struct usb_serial *serial);
1426 #define OLIVETTI_VENDOR_ID 0x0b3c
1427 #define OLIVETTI_PRODUCT_OLICARD100 0xc000
1428
1429 +/* Celot products */
1430 +#define CELOT_VENDOR_ID 0x211f
1431 +#define CELOT_PRODUCT_CT680M 0x6801
1432 +
1433 /* some devices interfaces need special handling due to a number of reasons */
1434 enum option_blacklist_reason {
1435 OPTION_BLACKLIST_NONE = 0,
1436 @@ -912,10 +916,9 @@ static const struct usb_device_id option_ids[] = {
1437 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
1438 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
1439 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
1440 -
1441 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
1442 -
1443 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
1444 + { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1445 { } /* Terminating entry */
1446 };
1447 MODULE_DEVICE_TABLE(usb, option_ids);
1448 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
1449 index c28b160..89609e1 100644
1450 --- a/drivers/usb/serial/pl2303.c
1451 +++ b/drivers/usb/serial/pl2303.c
1452 @@ -96,6 +96,7 @@ static const struct usb_device_id id_table[] = {
1453 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
1454 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
1455 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
1456 + { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
1457 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
1458 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
1459 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
1460 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
1461 index 23c09b3..e8b7c5b 100644
1462 --- a/drivers/usb/serial/pl2303.h
1463 +++ b/drivers/usb/serial/pl2303.h
1464 @@ -128,6 +128,10 @@
1465 #define CRESSI_VENDOR_ID 0x04b8
1466 #define CRESSI_EDY_PRODUCT_ID 0x0521
1467
1468 +/* Zeagle dive computer interface */
1469 +#define ZEAGLE_VENDOR_ID 0x04b8
1470 +#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
1471 +
1472 /* Sony, USB data cable for CMD-Jxx mobile phones */
1473 #define SONY_VENDOR_ID 0x054c
1474 #define SONY_QN3USB_PRODUCT_ID 0x0437
1475 diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
1476 index f3a4e15..f96a471 100644
1477 --- a/drivers/video/matrox/matroxfb_base.h
1478 +++ b/drivers/video/matrox/matroxfb_base.h
1479 @@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
1480 static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
1481 #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
1482 /*
1483 - * memcpy_toio works for us if:
1484 + * iowrite32_rep works for us if:
1485 * (1) Copies data as 32bit quantities, not byte after byte,
1486 * (2) Performs LE ordered stores, and
1487 * (3) It copes with unaligned source (destination is guaranteed to be page
1488 * aligned and length is guaranteed to be multiple of 4).
1489 */
1490 - memcpy_toio(va.vaddr, src, len);
1491 + iowrite32_rep(va.vaddr, src, len >> 2);
1492 #else
1493 u_int32_t __iomem* addr = va.vaddr;
1494
1495 diff --git a/firmware/Makefile b/firmware/Makefile
1496 index 8af0fc7..955c7e7 100644
1497 --- a/firmware/Makefile
1498 +++ b/firmware/Makefile
1499 @@ -141,7 +141,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
1500 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
1501
1502 # Directories which we _might_ need to create, so we have a rule for them.
1503 -firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
1504 +firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
1505
1506 quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@)
1507 cmd_mkdir = mkdir -p $@
1508 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
1509 index f35316c..ae570b0 100644
1510 --- a/fs/nfs/super.c
1511 +++ b/fs/nfs/super.c
1512 @@ -648,6 +648,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
1513
1514 if (nfss->options & NFS_OPTION_FSCACHE)
1515 seq_printf(m, ",fsc");
1516 +
1517 + if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
1518 + if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
1519 + seq_printf(m, ",lookupcache=none");
1520 + else
1521 + seq_printf(m, ",lookupcache=pos");
1522 + }
1523 }
1524
1525 /*
1526 diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
1527 index 48145f5..fadefe1 100644
1528 --- a/fs/nilfs2/super.c
1529 +++ b/fs/nilfs2/super.c
1530 @@ -360,9 +360,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
1531 list_add(&sbi->s_list, &nilfs->ns_supers);
1532 up_write(&nilfs->ns_super_sem);
1533
1534 + err = -ENOMEM;
1535 sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
1536 if (!sbi->s_ifile)
1537 - return -ENOMEM;
1538 + goto delist;
1539
1540 down_read(&nilfs->ns_segctor_sem);
1541 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
1542 @@ -393,6 +394,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
1543 nilfs_mdt_destroy(sbi->s_ifile);
1544 sbi->s_ifile = NULL;
1545
1546 + delist:
1547 down_write(&nilfs->ns_super_sem);
1548 list_del_init(&sbi->s_list);
1549 up_write(&nilfs->ns_super_sem);
1550 diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
1551 index e13fc9e..9ad30db 100644
1552 --- a/fs/ocfs2/acl.c
1553 +++ b/fs/ocfs2/acl.c
1554 @@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle,
1555
1556 int ocfs2_check_acl(struct inode *inode, int mask)
1557 {
1558 - struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
1559 + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1560 + struct buffer_head *di_bh = NULL;
1561 + struct posix_acl *acl;
1562 + int ret = -EAGAIN;
1563
1564 - if (IS_ERR(acl))
1565 + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
1566 + return ret;
1567 +
1568 + ret = ocfs2_read_inode_block(inode, &di_bh);
1569 + if (ret < 0) {
1570 + mlog_errno(ret);
1571 + return ret;
1572 + }
1573 +
1574 + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
1575 +
1576 + brelse(di_bh);
1577 +
1578 + if (IS_ERR(acl)) {
1579 + mlog_errno(PTR_ERR(acl));
1580 return PTR_ERR(acl);
1581 + }
1582 if (acl) {
1583 - int ret = posix_acl_permission(inode, acl, mask);
1584 + ret = posix_acl_permission(inode, acl, mask);
1585 posix_acl_release(acl);
1586 return ret;
1587 }
1588 @@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle,
1589 {
1590 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1591 struct posix_acl *acl = NULL;
1592 - int ret = 0;
1593 + int ret = 0, ret2;
1594 mode_t mode;
1595
1596 if (!S_ISLNK(inode->i_mode)) {
1597 @@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle,
1598 mode = inode->i_mode;
1599 ret = posix_acl_create_masq(clone, &mode);
1600 if (ret >= 0) {
1601 - ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
1602 + ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
1603 + if (ret2) {
1604 + mlog_errno(ret2);
1605 + ret = ret2;
1606 + goto cleanup;
1607 + }
1608 if (ret > 0) {
1609 ret = ocfs2_set_acl(handle, inode,
1610 di_bh, ACL_TYPE_ACCESS,
1611 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
1612 index 9289b43..f9ea9d0 100644
1613 --- a/fs/ocfs2/dlm/dlmmaster.c
1614 +++ b/fs/ocfs2/dlm/dlmmaster.c
1615 @@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref)
1616
1617 atomic_dec(&dlm->res_cur_count);
1618
1619 - dlm_put(dlm);
1620 -
1621 if (!hlist_unhashed(&res->hash_node) ||
1622 !list_empty(&res->granted) ||
1623 !list_empty(&res->converting) ||
1624 @@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
1625 res->migration_pending = 0;
1626 res->inflight_locks = 0;
1627
1628 - /* put in dlm_lockres_release */
1629 - dlm_grab(dlm);
1630 res->dlm = dlm;
1631
1632 kref_init(&res->refs);
1633 @@ -3044,8 +3040,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
1634 /* check for pre-existing lock */
1635 spin_lock(&dlm->spinlock);
1636 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1637 - spin_lock(&dlm->master_lock);
1638 -
1639 if (res) {
1640 spin_lock(&res->spinlock);
1641 if (res->state & DLM_LOCK_RES_RECOVERING) {
1642 @@ -3063,14 +3057,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
1643 spin_unlock(&res->spinlock);
1644 }
1645
1646 + spin_lock(&dlm->master_lock);
1647 /* ignore status. only nonzero status would BUG. */
1648 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
1649 name, namelen,
1650 migrate->new_master,
1651 migrate->master);
1652
1653 -unlock:
1654 spin_unlock(&dlm->master_lock);
1655 +unlock:
1656 spin_unlock(&dlm->spinlock);
1657
1658 if (oldmle) {
1659 diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
1660 index b4f99de..0cadae2 100644
1661 --- a/fs/ocfs2/dlm/dlmrecovery.c
1662 +++ b/fs/ocfs2/dlm/dlmrecovery.c
1663 @@ -1991,6 +1991,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1664 struct list_head *queue;
1665 struct dlm_lock *lock, *next;
1666
1667 + assert_spin_locked(&dlm->spinlock);
1668 + assert_spin_locked(&res->spinlock);
1669 res->state |= DLM_LOCK_RES_RECOVERING;
1670 if (!list_empty(&res->recovering)) {
1671 mlog(0,
1672 @@ -2320,19 +2322,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
1673 /* zero the lvb if necessary */
1674 dlm_revalidate_lvb(dlm, res, dead_node);
1675 if (res->owner == dead_node) {
1676 - if (res->state & DLM_LOCK_RES_DROPPING_REF)
1677 - mlog(0, "%s:%.*s: owned by "
1678 - "dead node %u, this node was "
1679 - "dropping its ref when it died. "
1680 - "continue, dropping the flag.\n",
1681 - dlm->name, res->lockname.len,
1682 - res->lockname.name, dead_node);
1683 -
1684 - /* the wake_up for this will happen when the
1685 - * RECOVERING flag is dropped later */
1686 - res->state &= ~DLM_LOCK_RES_DROPPING_REF;
1687 + if (res->state & DLM_LOCK_RES_DROPPING_REF) {
1688 + mlog(ML_NOTICE, "Ignore %.*s for "
1689 + "recovery as it is being freed\n",
1690 + res->lockname.len,
1691 + res->lockname.name);
1692 + } else
1693 + dlm_move_lockres_to_recovery_list(dlm,
1694 + res);
1695
1696 - dlm_move_lockres_to_recovery_list(dlm, res);
1697 } else if (res->owner == dlm->node_num) {
1698 dlm_free_dead_locks(dlm, res, dead_node);
1699 __dlm_lockres_calc_usage(dlm, res);
1700 diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
1701 index 11a6d1f..47e32c0 100644
1702 --- a/fs/ocfs2/dlm/dlmthread.c
1703 +++ b/fs/ocfs2/dlm/dlmthread.c
1704 @@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
1705 * truly ready to be freed. */
1706 int __dlm_lockres_unused(struct dlm_lock_resource *res)
1707 {
1708 - if (!__dlm_lockres_has_locks(res) &&
1709 - (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
1710 - /* try not to scan the bitmap unless the first two
1711 - * conditions are already true */
1712 - int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
1713 - if (bit >= O2NM_MAX_NODES) {
1714 - /* since the bit for dlm->node_num is not
1715 - * set, inflight_locks better be zero */
1716 - BUG_ON(res->inflight_locks != 0);
1717 - return 1;
1718 - }
1719 - }
1720 - return 0;
1721 + int bit;
1722 +
1723 + if (__dlm_lockres_has_locks(res))
1724 + return 0;
1725 +
1726 + if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
1727 + return 0;
1728 +
1729 + if (res->state & DLM_LOCK_RES_RECOVERING)
1730 + return 0;
1731 +
1732 + bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
1733 + if (bit < O2NM_MAX_NODES)
1734 + return 0;
1735 +
1736 + /*
1737 + * since the bit for dlm->node_num is not set, inflight_locks better
1738 + * be zero
1739 + */
1740 + BUG_ON(res->inflight_locks != 0);
1741 + return 1;
1742 }
1743
1744
1745 @@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
1746 spin_unlock(&dlm->spinlock);
1747 }
1748
1749 -static int dlm_purge_lockres(struct dlm_ctxt *dlm,
1750 +static void dlm_purge_lockres(struct dlm_ctxt *dlm,
1751 struct dlm_lock_resource *res)
1752 {
1753 int master;
1754 int ret = 0;
1755
1756 - spin_lock(&res->spinlock);
1757 - if (!__dlm_lockres_unused(res)) {
1758 - mlog(0, "%s:%.*s: tried to purge but not unused\n",
1759 - dlm->name, res->lockname.len, res->lockname.name);
1760 - __dlm_print_one_lock_resource(res);
1761 - spin_unlock(&res->spinlock);
1762 - BUG();
1763 - }
1764 -
1765 - if (res->state & DLM_LOCK_RES_MIGRATING) {
1766 - mlog(0, "%s:%.*s: Delay dropref as this lockres is "
1767 - "being remastered\n", dlm->name, res->lockname.len,
1768 - res->lockname.name);
1769 - /* Re-add the lockres to the end of the purge list */
1770 - if (!list_empty(&res->purge)) {
1771 - list_del_init(&res->purge);
1772 - list_add_tail(&res->purge, &dlm->purge_list);
1773 - }
1774 - spin_unlock(&res->spinlock);
1775 - return 0;
1776 - }
1777 + assert_spin_locked(&dlm->spinlock);
1778 + assert_spin_locked(&res->spinlock);
1779
1780 master = (res->owner == dlm->node_num);
1781
1782 - if (!master)
1783 - res->state |= DLM_LOCK_RES_DROPPING_REF;
1784 - spin_unlock(&res->spinlock);
1785
1786 mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
1787 res->lockname.name, master);
1788
1789 if (!master) {
1790 + res->state |= DLM_LOCK_RES_DROPPING_REF;
1791 /* drop spinlock... retake below */
1792 + spin_unlock(&res->spinlock);
1793 spin_unlock(&dlm->spinlock);
1794
1795 spin_lock(&res->spinlock);
1796 @@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
1797 mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
1798 dlm->name, res->lockname.len, res->lockname.name, ret);
1799 spin_lock(&dlm->spinlock);
1800 + spin_lock(&res->spinlock);
1801 }
1802
1803 - spin_lock(&res->spinlock);
1804 if (!list_empty(&res->purge)) {
1805 mlog(0, "removing lockres %.*s:%p from purgelist, "
1806 "master = %d\n", res->lockname.len, res->lockname.name,
1807 res, master);
1808 list_del_init(&res->purge);
1809 - spin_unlock(&res->spinlock);
1810 dlm_lockres_put(res);
1811 dlm->purge_count--;
1812 - } else
1813 - spin_unlock(&res->spinlock);
1814 + }
1815 +
1816 + if (!__dlm_lockres_unused(res)) {
1817 + mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
1818 + dlm->name, res->lockname.len, res->lockname.name);
1819 + __dlm_print_one_lock_resource(res);
1820 + BUG();
1821 + }
1822
1823 __dlm_unhash_lockres(res);
1824
1825 /* lockres is not in the hash now. drop the flag and wake up
1826 * any processes waiting in dlm_get_lock_resource. */
1827 if (!master) {
1828 - spin_lock(&res->spinlock);
1829 res->state &= ~DLM_LOCK_RES_DROPPING_REF;
1830 spin_unlock(&res->spinlock);
1831 wake_up(&res->wq);
1832 - }
1833 - return 0;
1834 + } else
1835 + spin_unlock(&res->spinlock);
1836 }
1837
1838 static void dlm_run_purge_list(struct dlm_ctxt *dlm,
1839 @@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
1840 lockres = list_entry(dlm->purge_list.next,
1841 struct dlm_lock_resource, purge);
1842
1843 - /* Status of the lockres *might* change so double
1844 - * check. If the lockres is unused, holding the dlm
1845 - * spinlock will prevent people from getting and more
1846 - * refs on it -- there's no need to keep the lockres
1847 - * spinlock. */
1848 spin_lock(&lockres->spinlock);
1849 - unused = __dlm_lockres_unused(lockres);
1850 - spin_unlock(&lockres->spinlock);
1851 -
1852 - if (!unused)
1853 - continue;
1854
1855 purge_jiffies = lockres->last_used +
1856 msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
1857 @@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
1858 * in tail order, we can stop at the first
1859 * unpurgable resource -- anyone added after
1860 * him will have a greater last_used value */
1861 + spin_unlock(&lockres->spinlock);
1862 break;
1863 }
1864
1865 + /* Status of the lockres *might* change so double
1866 + * check. If the lockres is unused, holding the dlm
1867 + * spinlock will prevent people from getting and more
1868 + * refs on it. */
1869 + unused = __dlm_lockres_unused(lockres);
1870 + if (!unused ||
1871 + (lockres->state & DLM_LOCK_RES_MIGRATING)) {
1872 + mlog(0, "lockres %s:%.*s: is in use or "
1873 + "being remastered, used %d, state %d\n",
1874 + dlm->name, lockres->lockname.len,
1875 + lockres->lockname.name, !unused, lockres->state);
1876 + list_move_tail(&dlm->purge_list, &lockres->purge);
1877 + spin_unlock(&lockres->spinlock);
1878 + continue;
1879 + }
1880 +
1881 dlm_lockres_get(lockres);
1882
1883 - /* This may drop and reacquire the dlm spinlock if it
1884 - * has to do migration. */
1885 - if (dlm_purge_lockres(dlm, lockres))
1886 - BUG();
1887 + dlm_purge_lockres(dlm, lockres);
1888
1889 dlm_lockres_put(lockres);
1890
1891 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
1892 index 5cbcd0f..9e6dc1d 100644
1893 --- a/fs/ocfs2/refcounttree.c
1894 +++ b/fs/ocfs2/refcounttree.c
1895 @@ -2437,16 +2437,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
1896 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
1897 le32_to_cpu(rec.r_clusters)) - cpos;
1898 /*
1899 - * If the refcount rec already exist, cool. We just need
1900 - * to check whether there is a split. Otherwise we just need
1901 - * to increase the refcount.
1902 - * If we will insert one, increases recs_add.
1903 - *
1904 * We record all the records which will be inserted to the
1905 * same refcount block, so that we can tell exactly whether
1906 * we need a new refcount block or not.
1907 + *
1908 + * If we will insert a new one, this is easy and only happens
1909 + * during adding refcounted flag to the extent, so we don't
1910 + * have a chance of spliting. We just need one record.
1911 + *
1912 + * If the refcount rec already exists, that would be a little
1913 + * complicated. we may have to:
1914 + * 1) split at the beginning if the start pos isn't aligned.
1915 + * we need 1 more record in this case.
1916 + * 2) split int the end if the end pos isn't aligned.
1917 + * we need 1 more record in this case.
1918 + * 3) split in the middle because of file system fragmentation.
1919 + * we need 2 more records in this case(we can't detect this
1920 + * beforehand, so always think of the worst case).
1921 */
1922 if (rec.r_refcount) {
1923 + recs_add += 2;
1924 /* Check whether we need a split at the beginning. */
1925 if (cpos == start_cpos &&
1926 cpos != le64_to_cpu(rec.r_cpos))
1927 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
1928 index b8bb9a6..ee7e258 100644
1929 --- a/include/linux/mm_types.h
1930 +++ b/include/linux/mm_types.h
1931 @@ -134,7 +134,7 @@ struct vm_area_struct {
1932 within vm_mm. */
1933
1934 /* linked list of VM areas per task, sorted by address */
1935 - struct vm_area_struct *vm_next;
1936 + struct vm_area_struct *vm_next, *vm_prev;
1937
1938 pgprot_t vm_page_prot; /* Access permissions of this VMA. */
1939 unsigned long vm_flags; /* Flags, see mm.h. */
1940 diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
1941 index 6a664c3..7dc97d1 100644
1942 --- a/include/sound/emu10k1.h
1943 +++ b/include/sound/emu10k1.h
1944 @@ -1707,6 +1707,7 @@ struct snd_emu10k1 {
1945 unsigned int card_type; /* EMU10K1_CARD_* */
1946 unsigned int ecard_ctrl; /* ecard control bits */
1947 unsigned long dma_mask; /* PCI DMA mask */
1948 + unsigned int delay_pcm_irq; /* in samples */
1949 int max_cache_pages; /* max memory size / PAGE_SIZE */
1950 struct snd_dma_buffer silent_page; /* silent page */
1951 struct snd_dma_buffer ptb_pages; /* page table pages */
1952 diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
1953 index 9496b96..fa8223a 100644
1954 --- a/include/trace/events/timer.h
1955 +++ b/include/trace/events/timer.h
1956 @@ -74,14 +74,16 @@ TRACE_EVENT(timer_expire_entry,
1957 TP_STRUCT__entry(
1958 __field( void *, timer )
1959 __field( unsigned long, now )
1960 + __field( void *, function)
1961 ),
1962
1963 TP_fast_assign(
1964 __entry->timer = timer;
1965 __entry->now = jiffies;
1966 + __entry->function = timer->function;
1967 ),
1968
1969 - TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
1970 + TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
1971 );
1972
1973 /**
1974 @@ -213,14 +215,16 @@ TRACE_EVENT(hrtimer_expire_entry,
1975 TP_STRUCT__entry(
1976 __field( void *, hrtimer )
1977 __field( s64, now )
1978 + __field( void *, function)
1979 ),
1980
1981 TP_fast_assign(
1982 __entry->hrtimer = hrtimer;
1983 __entry->now = now->tv64;
1984 + __entry->function = hrtimer->function;
1985 ),
1986
1987 - TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
1988 + TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
1989 (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
1990 );
1991
1992 diff --git a/kernel/fork.c b/kernel/fork.c
1993 index 4c14942..32fdbd4 100644
1994 --- a/kernel/fork.c
1995 +++ b/kernel/fork.c
1996 @@ -287,7 +287,7 @@ out:
1997 #ifdef CONFIG_MMU
1998 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
1999 {
2000 - struct vm_area_struct *mpnt, *tmp, **pprev;
2001 + struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
2002 struct rb_node **rb_link, *rb_parent;
2003 int retval;
2004 unsigned long charge;
2005 @@ -315,6 +315,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
2006 if (retval)
2007 goto out;
2008
2009 + prev = NULL;
2010 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
2011 struct file *file;
2012
2013 @@ -346,7 +347,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
2014 goto fail_nomem_anon_vma_fork;
2015 tmp->vm_flags &= ~VM_LOCKED;
2016 tmp->vm_mm = mm;
2017 - tmp->vm_next = NULL;
2018 + tmp->vm_next = tmp->vm_prev = NULL;
2019 file = tmp->vm_file;
2020 if (file) {
2021 struct inode *inode = file->f_path.dentry->d_inode;
2022 @@ -379,6 +380,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
2023 */
2024 *pprev = tmp;
2025 pprev = &tmp->vm_next;
2026 + tmp->vm_prev = prev;
2027 + prev = tmp;
2028
2029 __vma_link_rb(mm, tmp, rb_link, rb_parent);
2030 rb_link = &tmp->vm_rb.rb_right;
2031 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2032 index 41ca394..1b6197a 100644
2033 --- a/kernel/trace/ring_buffer.c
2034 +++ b/kernel/trace/ring_buffer.c
2035 @@ -3757,6 +3757,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
2036 rpos = reader->read;
2037 pos += size;
2038
2039 + if (rpos >= commit)
2040 + break;
2041 +
2042 event = rb_reader_event(cpu_buffer);
2043 size = rb_event_length(event);
2044 } while (len > size);
2045 diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
2046 index 9aed1a5..72a0d96 100644
2047 --- a/kernel/trace/trace_functions_graph.c
2048 +++ b/kernel/trace/trace_functions_graph.c
2049 @@ -506,7 +506,15 @@ get_return_for_leaf(struct trace_iterator *iter,
2050 * if the output fails.
2051 */
2052 data->ent = *curr;
2053 - data->ret = *next;
2054 + /*
2055 + * If the next event is not a return type, then
2056 + * we only care about what type it is. Otherwise we can
2057 + * safely copy the entire event.
2058 + */
2059 + if (next->ent.type == TRACE_GRAPH_RET)
2060 + data->ret = *next;
2061 + else
2062 + data->ret.ent.type = next->ent.type;
2063 }
2064 }
2065
2066 diff --git a/mm/memory.c b/mm/memory.c
2067 index e48eb86..47fb0a0 100644
2068 --- a/mm/memory.c
2069 +++ b/mm/memory.c
2070 @@ -2761,11 +2761,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
2071 {
2072 address &= PAGE_MASK;
2073 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
2074 - address -= PAGE_SIZE;
2075 - if (find_vma(vma->vm_mm, address) != vma)
2076 - return -ENOMEM;
2077 + struct vm_area_struct *prev = vma->vm_prev;
2078 +
2079 + /*
2080 + * Is there a mapping abutting this one below?
2081 + *
2082 + * That's only ok if it's the same stack mapping
2083 + * that has gotten split..
2084 + */
2085 + if (prev && prev->vm_end == address)
2086 + return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
2087
2088 - expand_stack(vma, address);
2089 + expand_stack(vma, address - PAGE_SIZE);
2090 }
2091 return 0;
2092 }
2093 diff --git a/mm/mlock.c b/mm/mlock.c
2094 index 8268859..5e57ebc 100644
2095 --- a/mm/mlock.c
2096 +++ b/mm/mlock.c
2097 @@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
2098 }
2099 }
2100
2101 +/* Is the vma a continuation of the stack vma above it? */
2102 +static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
2103 +{
2104 + return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
2105 +}
2106 +
2107 +static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
2108 +{
2109 + return (vma->vm_flags & VM_GROWSDOWN) &&
2110 + (vma->vm_start == addr) &&
2111 + !vma_stack_continue(vma->vm_prev, addr);
2112 +}
2113 +
2114 /**
2115 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
2116 * @vma: target vma
2117 @@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
2118 gup_flags |= FOLL_WRITE;
2119
2120 /* We don't try to access the guard page of a stack vma */
2121 - if (vma->vm_flags & VM_GROWSDOWN) {
2122 - if (start == vma->vm_start) {
2123 - start += PAGE_SIZE;
2124 - nr_pages--;
2125 - }
2126 + if (stack_guard_page(vma, start)) {
2127 + addr += PAGE_SIZE;
2128 + nr_pages--;
2129 }
2130
2131 while (nr_pages > 0) {
2132 diff --git a/mm/mmap.c b/mm/mmap.c
2133 index 456ec6f..3867cfc 100644
2134 --- a/mm/mmap.c
2135 +++ b/mm/mmap.c
2136 @@ -388,17 +388,23 @@ static inline void
2137 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
2138 struct vm_area_struct *prev, struct rb_node *rb_parent)
2139 {
2140 + struct vm_area_struct *next;
2141 +
2142 + vma->vm_prev = prev;
2143 if (prev) {
2144 - vma->vm_next = prev->vm_next;
2145 + next = prev->vm_next;
2146 prev->vm_next = vma;
2147 } else {
2148 mm->mmap = vma;
2149 if (rb_parent)
2150 - vma->vm_next = rb_entry(rb_parent,
2151 + next = rb_entry(rb_parent,
2152 struct vm_area_struct, vm_rb);
2153 else
2154 - vma->vm_next = NULL;
2155 + next = NULL;
2156 }
2157 + vma->vm_next = next;
2158 + if (next)
2159 + next->vm_prev = vma;
2160 }
2161
2162 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
2163 @@ -485,7 +491,11 @@ static inline void
2164 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
2165 struct vm_area_struct *prev)
2166 {
2167 - prev->vm_next = vma->vm_next;
2168 + struct vm_area_struct *next = vma->vm_next;
2169 +
2170 + prev->vm_next = next;
2171 + if (next)
2172 + next->vm_prev = prev;
2173 rb_erase(&vma->vm_rb, &mm->mm_rb);
2174 if (mm->mmap_cache == vma)
2175 mm->mmap_cache = prev;
2176 @@ -1900,6 +1910,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2177 unsigned long addr;
2178
2179 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2180 + vma->vm_prev = NULL;
2181 do {
2182 rb_erase(&vma->vm_rb, &mm->mm_rb);
2183 mm->map_count--;
2184 @@ -1907,6 +1918,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2185 vma = vma->vm_next;
2186 } while (vma && vma->vm_start < end);
2187 *insertion_point = vma;
2188 + if (vma)
2189 + vma->vm_prev = prev;
2190 tail_vma->vm_next = NULL;
2191 if (mm->unmap_area == arch_unmap_area)
2192 addr = prev ? prev->vm_end : mm->mmap_base;
2193 diff --git a/mm/nommu.c b/mm/nommu.c
2194 index 63fa17d..28994ee 100644
2195 --- a/mm/nommu.c
2196 +++ b/mm/nommu.c
2197 @@ -609,7 +609,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
2198 */
2199 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
2200 {
2201 - struct vm_area_struct *pvma, **pp;
2202 + struct vm_area_struct *pvma, **pp, *next;
2203 struct address_space *mapping;
2204 struct rb_node **p, *parent;
2205
2206 @@ -669,8 +669,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
2207 break;
2208 }
2209
2210 - vma->vm_next = *pp;
2211 + next = *pp;
2212 *pp = vma;
2213 + vma->vm_next = next;
2214 + if (next)
2215 + next->vm_prev = vma;
2216 }
2217
2218 /*
2219 diff --git a/mm/slab.c b/mm/slab.c
2220 index bac0f4f..9ec5de0 100644
2221 --- a/mm/slab.c
2222 +++ b/mm/slab.c
2223 @@ -2262,8 +2262,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2224 }
2225 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2226 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2227 - && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2228 - cachep->obj_offset += PAGE_SIZE - size;
2229 + && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
2230 + cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
2231 size = PAGE_SIZE;
2232 }
2233 #endif
2234 diff --git a/mm/vmscan.c b/mm/vmscan.c
2235 index 3ff3311..5c46206 100644
2236 --- a/mm/vmscan.c
2237 +++ b/mm/vmscan.c
2238 @@ -1118,6 +1118,48 @@ static int too_many_isolated(struct zone *zone, int file,
2239 }
2240
2241 /*
2242 + * Returns true if the caller should wait to clean dirty/writeback pages.
2243 + *
2244 + * If we are direct reclaiming for contiguous pages and we do not reclaim
2245 + * everything in the list, try again and wait for writeback IO to complete.
2246 + * This will stall high-order allocations noticeably. Only do that when really
2247 + * need to free the pages under high memory pressure.
2248 + */
2249 +static inline bool should_reclaim_stall(unsigned long nr_taken,
2250 + unsigned long nr_freed,
2251 + int priority,
2252 + int lumpy_reclaim,
2253 + struct scan_control *sc)
2254 +{
2255 + int lumpy_stall_priority;
2256 +
2257 + /* kswapd should not stall on sync IO */
2258 + if (current_is_kswapd())
2259 + return false;
2260 +
2261 + /* Only stall on lumpy reclaim */
2262 + if (!lumpy_reclaim)
2263 + return false;
2264 +
2265 + /* If we have relaimed everything on the isolated list, no stall */
2266 + if (nr_freed == nr_taken)
2267 + return false;
2268 +
2269 + /*
2270 + * For high-order allocations, there are two stall thresholds.
2271 + * High-cost allocations stall immediately where as lower
2272 + * order allocations such as stacks require the scanning
2273 + * priority to be much higher before stalling.
2274 + */
2275 + if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
2276 + lumpy_stall_priority = DEF_PRIORITY;
2277 + else
2278 + lumpy_stall_priority = DEF_PRIORITY / 3;
2279 +
2280 + return priority <= lumpy_stall_priority;
2281 +}
2282 +
2283 +/*
2284 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
2285 * of reclaimed pages
2286 */
2287 @@ -1209,14 +1251,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
2288 nr_scanned += nr_scan;
2289 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
2290
2291 - /*
2292 - * If we are direct reclaiming for contiguous pages and we do
2293 - * not reclaim everything in the list, try again and wait
2294 - * for IO to complete. This will stall high-order allocations
2295 - * but that should be acceptable to the caller
2296 - */
2297 - if (nr_freed < nr_taken && !current_is_kswapd() &&
2298 - lumpy_reclaim) {
2299 + /* Check if we should syncronously wait for writeback */
2300 + if (should_reclaim_stall(nr_taken, nr_freed, priority,
2301 + lumpy_reclaim, sc)) {
2302 congestion_wait(BLK_RW_ASYNC, HZ/10);
2303
2304 /*
2305 diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
2306 index 90a9024..5c8ee83 100644
2307 --- a/net/bridge/br_device.c
2308 +++ b/net/bridge/br_device.c
2309 @@ -19,7 +19,7 @@
2310 #include <asm/uaccess.h>
2311 #include "br_private.h"
2312
2313 -/* net device transmit always called with no BH (preempt_disabled) */
2314 +/* net device transmit always called with BH disabled */
2315 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
2316 {
2317 struct net_bridge *br = netdev_priv(dev);
2318 @@ -35,9 +35,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
2319 skb_reset_mac_header(skb);
2320 skb_pull(skb, ETH_HLEN);
2321
2322 + rcu_read_lock();
2323 if (dest[0] & 1) {
2324 - if (br_multicast_rcv(br, NULL, skb))
2325 + if (br_multicast_rcv(br, NULL, skb)) {
2326 + kfree_skb(skb);
2327 goto out;
2328 + }
2329
2330 mdst = br_mdb_get(br, skb);
2331 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
2332 @@ -50,6 +53,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
2333 br_flood_deliver(br, skb);
2334
2335 out:
2336 + rcu_read_unlock();
2337 return NETDEV_TX_OK;
2338 }
2339
2340 diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
2341 index 26c0cbe..5c96374 100644
2342 --- a/net/bridge/br_fdb.c
2343 +++ b/net/bridge/br_fdb.c
2344 @@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
2345 spin_unlock_bh(&br->hash_lock);
2346 }
2347
2348 -/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
2349 +/* No locking or refcounting, assumes caller has rcu_read_lock */
2350 struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
2351 const unsigned char *addr)
2352 {
2353 diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
2354 index a82dde2..9015a20 100644
2355 --- a/net/bridge/br_input.c
2356 +++ b/net/bridge/br_input.c
2357 @@ -35,7 +35,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
2358 netif_receive_skb);
2359 }
2360
2361 -/* note: already called with rcu_read_lock (preempt_disabled) */
2362 +/* note: already called with rcu_read_lock */
2363 int br_handle_frame_finish(struct sk_buff *skb)
2364 {
2365 const unsigned char *dest = eth_hdr(skb)->h_dest;
2366 @@ -106,7 +106,7 @@ drop:
2367 goto out;
2368 }
2369
2370 -/* note: already called with rcu_read_lock (preempt_disabled) */
2371 +/* note: already called with rcu_read_lock */
2372 static int br_handle_local_finish(struct sk_buff *skb)
2373 {
2374 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
2375 @@ -131,7 +131,7 @@ static inline int is_link_local(const unsigned char *dest)
2376 /*
2377 * Called via br_handle_frame_hook.
2378 * Return NULL if skb is handled
2379 - * note: already called with rcu_read_lock (preempt_disabled)
2380 + * note: already called with rcu_read_lock
2381 */
2382 struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
2383 {
2384 diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
2385 index d66cce1..edc7111 100644
2386 --- a/net/bridge/br_stp_bpdu.c
2387 +++ b/net/bridge/br_stp_bpdu.c
2388 @@ -131,7 +131,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
2389 /*
2390 * Called from llc.
2391 *
2392 - * NO locks, but rcu_read_lock (preempt_disabled)
2393 + * NO locks, but rcu_read_lock
2394 */
2395 void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
2396 struct net_device *dev)
2397 diff --git a/net/can/bcm.c b/net/can/bcm.c
2398 index 907dc87..e10ee05 100644
2399 --- a/net/can/bcm.c
2400 +++ b/net/can/bcm.c
2401 @@ -60,6 +60,13 @@
2402 #include <net/sock.h>
2403 #include <net/net_namespace.h>
2404
2405 +/*
2406 + * To send multiple CAN frame content within TX_SETUP or to filter
2407 + * CAN messages with multiplex index within RX_SETUP, the number of
2408 + * different filters is limited to 256 due to the one byte index value.
2409 + */
2410 +#define MAX_NFRAMES 256
2411 +
2412 /* use of last_frames[index].can_dlc */
2413 #define RX_RECV 0x40 /* received data for this element */
2414 #define RX_THR 0x80 /* element not been sent due to throttle feature */
2415 @@ -89,16 +96,16 @@ struct bcm_op {
2416 struct list_head list;
2417 int ifindex;
2418 canid_t can_id;
2419 - int flags;
2420 + u32 flags;
2421 unsigned long frames_abs, frames_filtered;
2422 struct timeval ival1, ival2;
2423 struct hrtimer timer, thrtimer;
2424 struct tasklet_struct tsklet, thrtsklet;
2425 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
2426 int rx_ifindex;
2427 - int count;
2428 - int nframes;
2429 - int currframe;
2430 + u32 count;
2431 + u32 nframes;
2432 + u32 currframe;
2433 struct can_frame *frames;
2434 struct can_frame *last_frames;
2435 struct can_frame sframe;
2436 @@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
2437
2438 seq_printf(m, "rx_op: %03X %-5s ",
2439 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
2440 - seq_printf(m, "[%d]%c ", op->nframes,
2441 + seq_printf(m, "[%u]%c ", op->nframes,
2442 (op->flags & RX_CHECK_DLC)?'d':' ');
2443 if (op->kt_ival1.tv64)
2444 seq_printf(m, "timeo=%lld ",
2445 @@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
2446
2447 list_for_each_entry(op, &bo->tx_ops, list) {
2448
2449 - seq_printf(m, "tx_op: %03X %s [%d] ",
2450 + seq_printf(m, "tx_op: %03X %s [%u] ",
2451 op->can_id,
2452 bcm_proc_getifname(ifname, op->ifindex),
2453 op->nframes);
2454 @@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
2455 struct can_frame *firstframe;
2456 struct sockaddr_can *addr;
2457 struct sock *sk = op->sk;
2458 - int datalen = head->nframes * CFSIZ;
2459 + unsigned int datalen = head->nframes * CFSIZ;
2460 int err;
2461
2462 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
2463 @@ -468,7 +475,7 @@ rx_changed_settime:
2464 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
2465 * received data stored in op->last_frames[]
2466 */
2467 -static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
2468 +static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
2469 const struct can_frame *rxdata)
2470 {
2471 /*
2472 @@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
2473 /*
2474 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
2475 */
2476 -static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
2477 +static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
2478 + unsigned int index)
2479 {
2480 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
2481 if (update)
2482 @@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
2483 int updated = 0;
2484
2485 if (op->nframes > 1) {
2486 - int i;
2487 + unsigned int i;
2488
2489 /* for MUX filter we start at index 1 */
2490 for (i = 1; i < op->nframes; i++)
2491 @@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
2492 {
2493 struct bcm_op *op = (struct bcm_op *)data;
2494 const struct can_frame *rxframe = (struct can_frame *)skb->data;
2495 - int i;
2496 + unsigned int i;
2497
2498 /* disable timeout */
2499 hrtimer_cancel(&op->timer);
2500 @@ -824,14 +832,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
2501 {
2502 struct bcm_sock *bo = bcm_sk(sk);
2503 struct bcm_op *op;
2504 - int i, err;
2505 + unsigned int i;
2506 + int err;
2507
2508 /* we need a real device to send frames */
2509 if (!ifindex)
2510 return -ENODEV;
2511
2512 - /* we need at least one can_frame */
2513 - if (msg_head->nframes < 1)
2514 + /* check nframes boundaries - we need at least one can_frame */
2515 + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
2516 return -EINVAL;
2517
2518 /* check the given can_id */
2519 @@ -995,6 +1004,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
2520 msg_head->nframes = 0;
2521 }
2522
2523 + /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
2524 + if (msg_head->nframes > MAX_NFRAMES + 1)
2525 + return -EINVAL;
2526 +
2527 if ((msg_head->flags & RX_RTR_FRAME) &&
2528 ((msg_head->nframes != 1) ||
2529 (!(msg_head->can_id & CAN_RTR_FLAG))))
2530 diff --git a/net/core/dev.c b/net/core/dev.c
2531 index ece33fd..740e71f 100644
2532 --- a/net/core/dev.c
2533 +++ b/net/core/dev.c
2534 @@ -1464,6 +1464,7 @@ static inline void net_timestamp(struct sk_buff *skb)
2535 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2536 {
2537 skb_orphan(skb);
2538 + nf_reset(skb);
2539
2540 if (!(dev->flags & IFF_UP) ||
2541 (skb->len > (dev->mtu + dev->hard_header_len))) {
2542 @@ -2665,7 +2666,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2543 int mac_len;
2544 enum gro_result ret;
2545
2546 - if (!(skb->dev->features & NETIF_F_GRO))
2547 + if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
2548 goto normal;
2549
2550 if (skb_is_gso(skb) || skb_has_frags(skb))
2551 @@ -2734,7 +2735,7 @@ pull:
2552 put_page(skb_shinfo(skb)->frags[0].page);
2553 memmove(skb_shinfo(skb)->frags,
2554 skb_shinfo(skb)->frags + 1,
2555 - --skb_shinfo(skb)->nr_frags);
2556 + --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2557 }
2558 }
2559
2560 @@ -2752,9 +2753,6 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2561 {
2562 struct sk_buff *p;
2563
2564 - if (netpoll_rx_on(skb))
2565 - return GRO_NORMAL;
2566 -
2567 for (p = napi->gro_list; p; p = p->next) {
2568 NAPI_GRO_CB(p)->same_flow =
2569 (p->dev == skb->dev) &&
2570 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2571 index 296150b..4fbf481 100644
2572 --- a/net/ipv4/tcp.c
2573 +++ b/net/ipv4/tcp.c
2574 @@ -2175,6 +2175,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2575 GFP_KERNEL);
2576 if (cvp == NULL)
2577 return -ENOMEM;
2578 +
2579 + kref_init(&cvp->kref);
2580 }
2581 lock_sock(sk);
2582 tp->rx_opt.cookie_in_always =
2583 @@ -2189,12 +2191,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2584 */
2585 kref_put(&tp->cookie_values->kref,
2586 tcp_cookie_values_release);
2587 - kref_init(&cvp->kref);
2588 - tp->cookie_values = cvp;
2589 } else {
2590 cvp = tp->cookie_values;
2591 }
2592 }
2593 +
2594 if (cvp != NULL) {
2595 cvp->cookie_desired = ctd.tcpct_cookie_desired;
2596
2597 @@ -2208,6 +2209,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2598 cvp->s_data_desired = ctd.tcpct_s_data_desired;
2599 cvp->s_data_constant = 0; /* false */
2600 }
2601 +
2602 + tp->cookie_values = cvp;
2603 }
2604 release_sock(sk);
2605 return err;
2606 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2607 index 7954243..fe63131 100644
2608 --- a/net/netlink/af_netlink.c
2609 +++ b/net/netlink/af_netlink.c
2610 @@ -1383,7 +1383,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2611 struct netlink_sock *nlk = nlk_sk(sk);
2612 int noblock = flags&MSG_DONTWAIT;
2613 size_t copied;
2614 - struct sk_buff *skb, *frag __maybe_unused = NULL;
2615 + struct sk_buff *skb, *data_skb;
2616 int err;
2617
2618 if (flags&MSG_OOB)
2619 @@ -1395,45 +1395,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2620 if (skb == NULL)
2621 goto out;
2622
2623 + data_skb = skb;
2624 +
2625 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2626 if (unlikely(skb_shinfo(skb)->frag_list)) {
2627 - bool need_compat = !!(flags & MSG_CMSG_COMPAT);
2628 -
2629 /*
2630 - * If this skb has a frag_list, then here that means that
2631 - * we will have to use the frag_list skb for compat tasks
2632 - * and the regular skb for non-compat tasks.
2633 + * If this skb has a frag_list, then here that means that we
2634 + * will have to use the frag_list skb's data for compat tasks
2635 + * and the regular skb's data for normal (non-compat) tasks.
2636 *
2637 - * The skb might (and likely will) be cloned, so we can't
2638 - * just reset frag_list and go on with things -- we need to
2639 - * keep that. For the compat case that's easy -- simply get
2640 - * a reference to the compat skb and free the regular one
2641 - * including the frag. For the non-compat case, we need to
2642 - * avoid sending the frag to the user -- so assign NULL but
2643 - * restore it below before freeing the skb.
2644 + * If we need to send the compat skb, assign it to the
2645 + * 'data_skb' variable so that it will be used below for data
2646 + * copying. We keep 'skb' for everything else, including
2647 + * freeing both later.
2648 */
2649 - if (need_compat) {
2650 - struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
2651 - skb_get(compskb);
2652 - kfree_skb(skb);
2653 - skb = compskb;
2654 - } else {
2655 - frag = skb_shinfo(skb)->frag_list;
2656 - skb_shinfo(skb)->frag_list = NULL;
2657 - }
2658 + if (flags & MSG_CMSG_COMPAT)
2659 + data_skb = skb_shinfo(skb)->frag_list;
2660 }
2661 #endif
2662
2663 msg->msg_namelen = 0;
2664
2665 - copied = skb->len;
2666 + copied = data_skb->len;
2667 if (len < copied) {
2668 msg->msg_flags |= MSG_TRUNC;
2669 copied = len;
2670 }
2671
2672 - skb_reset_transport_header(skb);
2673 - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2674 + skb_reset_transport_header(data_skb);
2675 + err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
2676
2677 if (msg->msg_name) {
2678 struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
2679 @@ -1453,11 +1443,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2680 }
2681 siocb->scm->creds = *NETLINK_CREDS(skb);
2682 if (flags & MSG_TRUNC)
2683 - copied = skb->len;
2684 -
2685 -#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2686 - skb_shinfo(skb)->frag_list = frag;
2687 -#endif
2688 + copied = data_skb->len;
2689
2690 skb_free_datagram(sk, skb);
2691
2692 diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
2693 index d885ba3..37e198c 100644
2694 --- a/net/sched/act_nat.c
2695 +++ b/net/sched/act_nat.c
2696 @@ -240,7 +240,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
2697 iph->saddr = new_addr;
2698
2699 inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
2700 - 1);
2701 + 0);
2702 break;
2703 }
2704 default:
2705 diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2706 index c5a9ac5..d716d9b 100644
2707 --- a/net/sched/sch_sfq.c
2708 +++ b/net/sched/sch_sfq.c
2709 @@ -497,11 +497,22 @@ nla_put_failure:
2710 return -1;
2711 }
2712
2713 +static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
2714 +{
2715 + return NULL;
2716 +}
2717 +
2718 static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
2719 {
2720 return 0;
2721 }
2722
2723 +static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
2724 + u32 classid)
2725 +{
2726 + return 0;
2727 +}
2728 +
2729 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
2730 {
2731 struct sfq_sched_data *q = qdisc_priv(sch);
2732 @@ -554,8 +565,10 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2733 }
2734
2735 static const struct Qdisc_class_ops sfq_class_ops = {
2736 + .leaf = sfq_leaf,
2737 .get = sfq_get,
2738 .tcf_chain = sfq_find_tcf,
2739 + .bind_tcf = sfq_bind,
2740 .dump = sfq_dump_class,
2741 .dump_stats = sfq_dump_class_stats,
2742 .walk = sfq_walk,
2743 diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
2744 index 04c6dfd..007133d 100644
2745 --- a/net/wireless/mlme.c
2746 +++ b/net/wireless/mlme.c
2747 @@ -825,12 +825,18 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
2748 return -EINVAL;
2749 if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
2750 /* Verify that we are associated with the destination AP */
2751 + wdev_lock(wdev);
2752 +
2753 if (!wdev->current_bss ||
2754 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
2755 ETH_ALEN) != 0 ||
2756 memcmp(wdev->current_bss->pub.bssid, mgmt->da,
2757 - ETH_ALEN) != 0)
2758 + ETH_ALEN) != 0) {
2759 + wdev_unlock(wdev);
2760 return -ENOTCONN;
2761 + }
2762 + wdev_unlock(wdev);
2763 +
2764 }
2765
2766 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
2767 diff --git a/scripts/mkmakefile b/scripts/mkmakefile
2768 index 67d59c7..5325423 100644
2769 --- a/scripts/mkmakefile
2770 +++ b/scripts/mkmakefile
2771 @@ -44,7 +44,9 @@ all:
2772
2773 Makefile:;
2774
2775 -\$(all) %/: all
2776 +\$(all): all
2777 @:
2778
2779 +%/: all
2780 + @:
2781 EOF
2782 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
2783 index d124e95..9e1b5f5 100644
2784 --- a/sound/core/pcm_native.c
2785 +++ b/sound/core/pcm_native.c
2786 @@ -979,6 +979,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
2787 {
2788 if (substream->runtime->trigger_master != substream)
2789 return 0;
2790 + /* some drivers might use hw_ptr to recover from the pause -
2791 + update the hw_ptr now */
2792 + if (push)
2793 + snd_pcm_update_hw_ptr(substream);
2794 /* The jiffies check in snd_pcm_update_hw_ptr*() is done by
2795 * a delta betwen the current jiffies, this gives a large enough
2796 * delta, effectively to skip the check once.
2797 diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
2798 index 4203782..aff8387 100644
2799 --- a/sound/pci/emu10k1/emu10k1.c
2800 +++ b/sound/pci/emu10k1/emu10k1.c
2801 @@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
2802 static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
2803 static int enable_ir[SNDRV_CARDS];
2804 static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */
2805 +static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
2806
2807 module_param_array(index, int, NULL, 0444);
2808 MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
2809 @@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444);
2810 MODULE_PARM_DESC(enable_ir, "Enable IR.");
2811 module_param_array(subsystem, uint, NULL, 0444);
2812 MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
2813 +module_param_array(delay_pcm_irq, uint, NULL, 0444);
2814 +MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0).");
2815 /*
2816 * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400
2817 */
2818 @@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci,
2819 &emu)) < 0)
2820 goto error;
2821 card->private_data = emu;
2822 + emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;
2823 if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0)
2824 goto error;
2825 if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0)
2826 diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
2827 index 55b83ef..622bace 100644
2828 --- a/sound/pci/emu10k1/emupcm.c
2829 +++ b/sound/pci/emu10k1/emupcm.c
2830 @@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
2831 evoice->epcm->ccca_start_addr = start_addr + ccis;
2832 if (extra) {
2833 start_addr += ccis;
2834 - end_addr += ccis;
2835 + end_addr += ccis + emu->delay_pcm_irq;
2836 }
2837 if (stereo && !extra) {
2838 snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK);
2839 @@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
2840 /* Assumption that PT is already 0 so no harm overwriting */
2841 snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]);
2842 snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24));
2843 - snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24));
2844 + snd_emu10k1_ptr_write(emu, PSST, voice,
2845 + (start_addr + (extra ? emu->delay_pcm_irq : 0)) |
2846 + (send_amount[2] << 24));
2847 if (emu->card_capabilities->emu_model)
2848 pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */
2849 else
2850 @@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_
2851 snd_emu10k1_ptr_write(emu, IP, voice, 0);
2852 }
2853
2854 +static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu,
2855 + struct snd_emu10k1_pcm *epcm,
2856 + struct snd_pcm_substream *substream,
2857 + struct snd_pcm_runtime *runtime)
2858 +{
2859 + unsigned int ptr, period_pos;
2860 +
2861 + /* try to sychronize the current position for the interrupt
2862 + source voice */
2863 + period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt;
2864 + period_pos %= runtime->period_size;
2865 + ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number);
2866 + ptr &= ~0x00ffffff;
2867 + ptr |= epcm->ccca_start_addr + period_pos;
2868 + snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr);
2869 +}
2870 +
2871 static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
2872 int cmd)
2873 {
2874 @@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream,
2875 /* follow thru */
2876 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
2877 case SNDRV_PCM_TRIGGER_RESUME:
2878 + if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE)
2879 + snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime);
2880 mix = &emu->pcm_mixer[substream->number];
2881 snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix);
2882 snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix);
2883 @@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream *
2884 #endif
2885 /*
2886 printk(KERN_DEBUG
2887 - "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n",
2888 - ptr, runtime->buffer_size, runtime->period_size);
2889 + "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n",
2890 + (long)ptr, (long)runtime->buffer_size,
2891 + (long)runtime->period_size);
2892 */
2893 return ptr;
2894 }
2895 diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
2896 index ffb1ddb..957a311 100644
2897 --- a/sound/pci/emu10k1/memory.c
2898 +++ b/sound/pci/emu10k1/memory.c
2899 @@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
2900 if (snd_BUG_ON(!hdr))
2901 return NULL;
2902
2903 + idx = runtime->period_size >= runtime->buffer_size ?
2904 + (emu->delay_pcm_irq * 2) : 0;
2905 mutex_lock(&hdr->block_mutex);
2906 - blk = search_empty(emu, runtime->dma_bytes);
2907 + blk = search_empty(emu, runtime->dma_bytes + idx);
2908 if (blk == NULL) {
2909 mutex_unlock(&hdr->block_mutex);
2910 return NULL;
2911 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2912 index feabb44..405492d 100644
2913 --- a/sound/pci/hda/patch_conexant.c
2914 +++ b/sound/pci/hda/patch_conexant.c
2915 @@ -2843,6 +2843,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2916 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
2917 CXT5066_DELL_LAPTOP),
2918 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
2919 + SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
2920 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
2921 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
2922 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
2923 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2924 index c260b1b..aa93b0b 100644
2925 --- a/sound/pci/hda/patch_realtek.c
2926 +++ b/sound/pci/hda/patch_realtek.c
2927 @@ -6757,6 +6757,7 @@ static int patch_alc260(struct hda_codec *codec)
2928
2929 spec->stream_analog_playback = &alc260_pcm_analog_playback;
2930 spec->stream_analog_capture = &alc260_pcm_analog_capture;
2931 + spec->stream_analog_alt_capture = &alc260_pcm_analog_capture;
2932
2933 spec->stream_digital_playback = &alc260_pcm_digital_playback;
2934 spec->stream_digital_capture = &alc260_pcm_digital_capture;
2935 diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
2936 index 6433e65..4677492 100644
2937 --- a/sound/pci/intel8x0.c
2938 +++ b/sound/pci/intel8x0.c
2939 @@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
2940 },
2941 {
2942 .subvendor = 0x1014,
2943 + .subdevice = 0x0534,
2944 + .name = "ThinkPad X31",
2945 + .type = AC97_TUNE_INV_EAPD
2946 + },
2947 + {
2948 + .subvendor = 0x1014,
2949 .subdevice = 0x1f00,
2950 .name = "MS-9128",
2951 .type = AC97_TUNE_ALC_JACK
2952 diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
2953 index ad44626..c737287 100644
2954 --- a/sound/pci/riptide/riptide.c
2955 +++ b/sound/pci/riptide/riptide.c
2956 @@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
2957 firmware.firmware.ASIC, firmware.firmware.CODEC,
2958 firmware.firmware.AUXDSP, firmware.firmware.PROG);
2959
2960 + if (!chip)
2961 + return 1;
2962 +
2963 for (i = 0; i < FIRMWARE_VERSIONS; i++) {
2964 if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
2965 - break;
2966 - }
2967 - if (i >= FIRMWARE_VERSIONS)
2968 - return 0; /* no match */
2969 + return 1; /* OK */
2970
2971 - if (!chip)
2972 - return 1; /* OK */
2973 + }
2974
2975 snd_printdd("Writing Firmware\n");
2976 if (!chip->fw_entry) {
2977 diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
2978 index 8cc9042..c907c76 100644
2979 --- a/sound/soc/codecs/wm8580.c
2980 +++ b/sound/soc/codecs/wm8580.c
2981 @@ -269,9 +269,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4, 2, 3, 1, 0),
2982 SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0),
2983
2984 SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
2985 -SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0),
2986 -SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0),
2987 -SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0),
2988 +SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
2989 +SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
2990 +SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
2991
2992 SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0),
2993 SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
2994 diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
2995 index e02ef53..4eb77f5 100644
2996 --- a/sound/soc/codecs/wm8776.c
2997 +++ b/sound/soc/codecs/wm8776.c
2998 @@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
2999 case SND_SOC_DAIFMT_LEFT_J:
3000 iface |= 0x0001;
3001 break;
3002 - /* FIXME: CHECK A/B */
3003 - case SND_SOC_DAIFMT_DSP_A:
3004 - iface |= 0x0003;
3005 - break;
3006 - case SND_SOC_DAIFMT_DSP_B:
3007 - iface |= 0x0007;
3008 - break;
3009 default:
3010 return -EINVAL;
3011 }
3012 diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
3013 index 5869dc3..a0b45be 100644
3014 --- a/sound/soc/soc-cache.c
3015 +++ b/sound/soc/soc-cache.c
3016 @@ -296,7 +296,7 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
3017 static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
3018 unsigned int reg)
3019 {
3020 - u16 *cache = codec->reg_cache;
3021 + u8 *cache = codec->reg_cache;
3022
3023 reg &= 0xff;
3024 if (reg >= codec->reg_cache_size)
3025 @@ -307,7 +307,7 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
3026 static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
3027 unsigned int value)
3028 {
3029 - u16 *cache = codec->reg_cache;
3030 + u8 *cache = codec->reg_cache;
3031 u8 data[3];
3032 int ret;
3033

  ViewVC Help
Powered by ViewVC 1.1.20