/[linux-patches]/genpatches-2.6/tags/2.6.34-10/1001_linux-2.6.34.2.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.34-10/1001_linux-2.6.34.2.patch

Parent Directory | Revision Log


Revision 1771 - (show annotations) (download)
Tue Aug 31 14:13:10 2010 UTC (4 years, 3 months ago) by mpagano
File size: 301108 byte(s)
2.6.34-10 release
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index 839b21b..c3d1cea 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -2013,7 +2013,9 @@ and is between 256 and 4096 characters. It is defined in the file
6 WARNING: Forcing ASPM on may cause system lockups.
7
8 pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
9 - off Do not use native PCIe PME signaling.
10 + Format: {auto|force}[,nomsi]
11 + auto Use native PCIe PME signaling if the BIOS allows the
12 + kernel to control PCIe config registers of root ports.
13 force Use native PCIe PME signaling even if the BIOS refuses
14 to allow the kernel to control the relevant PCIe config
15 registers.
16 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
17 index e8ddec2..df97c20 100644
18 --- a/arch/arm/include/asm/atomic.h
19 +++ b/arch/arm/include/asm/atomic.h
20 @@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v)
21 int result;
22
23 __asm__ __volatile__("@ atomic_add\n"
24 -"1: ldrex %0, [%2]\n"
25 -" add %0, %0, %3\n"
26 -" strex %1, %0, [%2]\n"
27 +"1: ldrex %0, [%3]\n"
28 +" add %0, %0, %4\n"
29 +" strex %1, %0, [%3]\n"
30 " teq %1, #0\n"
31 " bne 1b"
32 - : "=&r" (result), "=&r" (tmp)
33 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
34 : "r" (&v->counter), "Ir" (i)
35 : "cc");
36 }
37 @@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
38 smp_mb();
39
40 __asm__ __volatile__("@ atomic_add_return\n"
41 -"1: ldrex %0, [%2]\n"
42 -" add %0, %0, %3\n"
43 -" strex %1, %0, [%2]\n"
44 +"1: ldrex %0, [%3]\n"
45 +" add %0, %0, %4\n"
46 +" strex %1, %0, [%3]\n"
47 " teq %1, #0\n"
48 " bne 1b"
49 - : "=&r" (result), "=&r" (tmp)
50 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
51 : "r" (&v->counter), "Ir" (i)
52 : "cc");
53
54 @@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v)
55 int result;
56
57 __asm__ __volatile__("@ atomic_sub\n"
58 -"1: ldrex %0, [%2]\n"
59 -" sub %0, %0, %3\n"
60 -" strex %1, %0, [%2]\n"
61 +"1: ldrex %0, [%3]\n"
62 +" sub %0, %0, %4\n"
63 +" strex %1, %0, [%3]\n"
64 " teq %1, #0\n"
65 " bne 1b"
66 - : "=&r" (result), "=&r" (tmp)
67 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
68 : "r" (&v->counter), "Ir" (i)
69 : "cc");
70 }
71 @@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
72 smp_mb();
73
74 __asm__ __volatile__("@ atomic_sub_return\n"
75 -"1: ldrex %0, [%2]\n"
76 -" sub %0, %0, %3\n"
77 -" strex %1, %0, [%2]\n"
78 +"1: ldrex %0, [%3]\n"
79 +" sub %0, %0, %4\n"
80 +" strex %1, %0, [%3]\n"
81 " teq %1, #0\n"
82 " bne 1b"
83 - : "=&r" (result), "=&r" (tmp)
84 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
85 : "r" (&v->counter), "Ir" (i)
86 : "cc");
87
88 @@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
89
90 do {
91 __asm__ __volatile__("@ atomic_cmpxchg\n"
92 - "ldrex %1, [%2]\n"
93 + "ldrex %1, [%3]\n"
94 "mov %0, #0\n"
95 - "teq %1, %3\n"
96 - "strexeq %0, %4, [%2]\n"
97 - : "=&r" (res), "=&r" (oldval)
98 + "teq %1, %4\n"
99 + "strexeq %0, %5, [%3]\n"
100 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
101 : "r" (&ptr->counter), "Ir" (old), "r" (new)
102 : "cc");
103 } while (res);
104 @@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
105 unsigned long tmp, tmp2;
106
107 __asm__ __volatile__("@ atomic_clear_mask\n"
108 -"1: ldrex %0, [%2]\n"
109 -" bic %0, %0, %3\n"
110 -" strex %1, %0, [%2]\n"
111 +"1: ldrex %0, [%3]\n"
112 +" bic %0, %0, %4\n"
113 +" strex %1, %0, [%3]\n"
114 " teq %1, #0\n"
115 " bne 1b"
116 - : "=&r" (tmp), "=&r" (tmp2)
117 + : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
118 : "r" (addr), "Ir" (mask)
119 : "cc");
120 }
121 @@ -249,7 +249,7 @@ static inline u64 atomic64_read(atomic64_t *v)
122 __asm__ __volatile__("@ atomic64_read\n"
123 " ldrexd %0, %H0, [%1]"
124 : "=&r" (result)
125 - : "r" (&v->counter)
126 + : "r" (&v->counter), "Qo" (v->counter)
127 );
128
129 return result;
130 @@ -260,11 +260,11 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
131 u64 tmp;
132
133 __asm__ __volatile__("@ atomic64_set\n"
134 -"1: ldrexd %0, %H0, [%1]\n"
135 -" strexd %0, %2, %H2, [%1]\n"
136 +"1: ldrexd %0, %H0, [%2]\n"
137 +" strexd %0, %3, %H3, [%2]\n"
138 " teq %0, #0\n"
139 " bne 1b"
140 - : "=&r" (tmp)
141 + : "=&r" (tmp), "=Qo" (v->counter)
142 : "r" (&v->counter), "r" (i)
143 : "cc");
144 }
145 @@ -275,13 +275,13 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
146 unsigned long tmp;
147
148 __asm__ __volatile__("@ atomic64_add\n"
149 -"1: ldrexd %0, %H0, [%2]\n"
150 -" adds %0, %0, %3\n"
151 -" adc %H0, %H0, %H3\n"
152 -" strexd %1, %0, %H0, [%2]\n"
153 +"1: ldrexd %0, %H0, [%3]\n"
154 +" adds %0, %0, %4\n"
155 +" adc %H0, %H0, %H4\n"
156 +" strexd %1, %0, %H0, [%3]\n"
157 " teq %1, #0\n"
158 " bne 1b"
159 - : "=&r" (result), "=&r" (tmp)
160 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
161 : "r" (&v->counter), "r" (i)
162 : "cc");
163 }
164 @@ -294,13 +294,13 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
165 smp_mb();
166
167 __asm__ __volatile__("@ atomic64_add_return\n"
168 -"1: ldrexd %0, %H0, [%2]\n"
169 -" adds %0, %0, %3\n"
170 -" adc %H0, %H0, %H3\n"
171 -" strexd %1, %0, %H0, [%2]\n"
172 +"1: ldrexd %0, %H0, [%3]\n"
173 +" adds %0, %0, %4\n"
174 +" adc %H0, %H0, %H4\n"
175 +" strexd %1, %0, %H0, [%3]\n"
176 " teq %1, #0\n"
177 " bne 1b"
178 - : "=&r" (result), "=&r" (tmp)
179 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
180 : "r" (&v->counter), "r" (i)
181 : "cc");
182
183 @@ -315,13 +315,13 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
184 unsigned long tmp;
185
186 __asm__ __volatile__("@ atomic64_sub\n"
187 -"1: ldrexd %0, %H0, [%2]\n"
188 -" subs %0, %0, %3\n"
189 -" sbc %H0, %H0, %H3\n"
190 -" strexd %1, %0, %H0, [%2]\n"
191 +"1: ldrexd %0, %H0, [%3]\n"
192 +" subs %0, %0, %4\n"
193 +" sbc %H0, %H0, %H4\n"
194 +" strexd %1, %0, %H0, [%3]\n"
195 " teq %1, #0\n"
196 " bne 1b"
197 - : "=&r" (result), "=&r" (tmp)
198 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
199 : "r" (&v->counter), "r" (i)
200 : "cc");
201 }
202 @@ -334,13 +334,13 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
203 smp_mb();
204
205 __asm__ __volatile__("@ atomic64_sub_return\n"
206 -"1: ldrexd %0, %H0, [%2]\n"
207 -" subs %0, %0, %3\n"
208 -" sbc %H0, %H0, %H3\n"
209 -" strexd %1, %0, %H0, [%2]\n"
210 +"1: ldrexd %0, %H0, [%3]\n"
211 +" subs %0, %0, %4\n"
212 +" sbc %H0, %H0, %H4\n"
213 +" strexd %1, %0, %H0, [%3]\n"
214 " teq %1, #0\n"
215 " bne 1b"
216 - : "=&r" (result), "=&r" (tmp)
217 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
218 : "r" (&v->counter), "r" (i)
219 : "cc");
220
221 @@ -358,12 +358,12 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
222
223 do {
224 __asm__ __volatile__("@ atomic64_cmpxchg\n"
225 - "ldrexd %1, %H1, [%2]\n"
226 + "ldrexd %1, %H1, [%3]\n"
227 "mov %0, #0\n"
228 - "teq %1, %3\n"
229 - "teqeq %H1, %H3\n"
230 - "strexdeq %0, %4, %H4, [%2]"
231 - : "=&r" (res), "=&r" (oldval)
232 + "teq %1, %4\n"
233 + "teqeq %H1, %H4\n"
234 + "strexdeq %0, %5, %H5, [%3]"
235 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
236 : "r" (&ptr->counter), "r" (old), "r" (new)
237 : "cc");
238 } while (res);
239 @@ -381,11 +381,11 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
240 smp_mb();
241
242 __asm__ __volatile__("@ atomic64_xchg\n"
243 -"1: ldrexd %0, %H0, [%2]\n"
244 -" strexd %1, %3, %H3, [%2]\n"
245 +"1: ldrexd %0, %H0, [%3]\n"
246 +" strexd %1, %4, %H4, [%3]\n"
247 " teq %1, #0\n"
248 " bne 1b"
249 - : "=&r" (result), "=&r" (tmp)
250 + : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
251 : "r" (&ptr->counter), "r" (new)
252 : "cc");
253
254 @@ -402,16 +402,16 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
255 smp_mb();
256
257 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
258 -"1: ldrexd %0, %H0, [%2]\n"
259 +"1: ldrexd %0, %H0, [%3]\n"
260 " subs %0, %0, #1\n"
261 " sbc %H0, %H0, #0\n"
262 " teq %H0, #0\n"
263 " bmi 2f\n"
264 -" strexd %1, %0, %H0, [%2]\n"
265 +" strexd %1, %0, %H0, [%3]\n"
266 " teq %1, #0\n"
267 " bne 1b\n"
268 "2:"
269 - : "=&r" (result), "=&r" (tmp)
270 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
271 : "r" (&v->counter)
272 : "cc");
273
274 @@ -429,18 +429,18 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
275 smp_mb();
276
277 __asm__ __volatile__("@ atomic64_add_unless\n"
278 -"1: ldrexd %0, %H0, [%3]\n"
279 -" teq %0, %4\n"
280 -" teqeq %H0, %H4\n"
281 +"1: ldrexd %0, %H0, [%4]\n"
282 +" teq %0, %5\n"
283 +" teqeq %H0, %H5\n"
284 " moveq %1, #0\n"
285 " beq 2f\n"
286 -" adds %0, %0, %5\n"
287 -" adc %H0, %H0, %H5\n"
288 -" strexd %2, %0, %H0, [%3]\n"
289 +" adds %0, %0, %6\n"
290 +" adc %H0, %H0, %H6\n"
291 +" strexd %2, %0, %H0, [%4]\n"
292 " teq %2, #0\n"
293 " bne 1b\n"
294 "2:"
295 - : "=&r" (val), "=&r" (ret), "=&r" (tmp)
296 + : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
297 : "r" (&v->counter), "r" (u), "r" (a)
298 : "cc");
299
300 diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
301 index da1f949..8bccbfa 100644
302 --- a/arch/arm/kernel/kprobes-decode.c
303 +++ b/arch/arm/kernel/kprobes-decode.c
304 @@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
305 {
306 insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
307 kprobe_opcode_t insn = p->opcode;
308 + long ppc = (long)p->addr + 8;
309 union reg_pair fnr;
310 int rd = (insn >> 12) & 0xf;
311 int rn = (insn >> 16) & 0xf;
312 int rm = insn & 0xf;
313 long rdv;
314 - long rnv = regs->uregs[rn];
315 - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
316 + long rnv = (rn == 15) ? ppc : regs->uregs[rn];
317 + long rmv = (rm == 15) ? ppc : regs->uregs[rm];
318 long cpsr = regs->ARM_cpsr;
319
320 fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
321 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
322 index 9e70f20..6e46f02 100644
323 --- a/arch/arm/kernel/perf_event.c
324 +++ b/arch/arm/kernel/perf_event.c
325 @@ -165,7 +165,7 @@ armpmu_event_update(struct perf_event *event,
326 {
327 int shift = 64 - 32;
328 s64 prev_raw_count, new_raw_count;
329 - s64 delta;
330 + u64 delta;
331
332 again:
333 prev_raw_count = atomic64_read(&hwc->prev_count);
334 diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
335 index 4377a4c..fecc24c 100644
336 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c
337 +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
338 @@ -147,6 +147,10 @@ static void __init rx51_add_gpio_keys(void)
339 #endif /* CONFIG_KEYBOARD_GPIO || CONFIG_KEYBOARD_GPIO_MODULE */
340
341 static int board_keymap[] = {
342 + /*
343 + * Note that KEY(x, 8, KEY_XXX) entries represent "entire row
344 + * connected to the ground" matrix state.
345 + */
346 KEY(0, 0, KEY_Q),
347 KEY(0, 1, KEY_O),
348 KEY(0, 2, KEY_P),
349 @@ -154,6 +158,7 @@ static int board_keymap[] = {
350 KEY(0, 4, KEY_BACKSPACE),
351 KEY(0, 6, KEY_A),
352 KEY(0, 7, KEY_S),
353 +
354 KEY(1, 0, KEY_W),
355 KEY(1, 1, KEY_D),
356 KEY(1, 2, KEY_F),
357 @@ -162,6 +167,7 @@ static int board_keymap[] = {
358 KEY(1, 5, KEY_J),
359 KEY(1, 6, KEY_K),
360 KEY(1, 7, KEY_L),
361 +
362 KEY(2, 0, KEY_E),
363 KEY(2, 1, KEY_DOT),
364 KEY(2, 2, KEY_UP),
365 @@ -169,6 +175,8 @@ static int board_keymap[] = {
366 KEY(2, 5, KEY_Z),
367 KEY(2, 6, KEY_X),
368 KEY(2, 7, KEY_C),
369 + KEY(2, 8, KEY_F9),
370 +
371 KEY(3, 0, KEY_R),
372 KEY(3, 1, KEY_V),
373 KEY(3, 2, KEY_B),
374 @@ -177,20 +185,23 @@ static int board_keymap[] = {
375 KEY(3, 5, KEY_SPACE),
376 KEY(3, 6, KEY_SPACE),
377 KEY(3, 7, KEY_LEFT),
378 +
379 KEY(4, 0, KEY_T),
380 KEY(4, 1, KEY_DOWN),
381 KEY(4, 2, KEY_RIGHT),
382 KEY(4, 4, KEY_LEFTCTRL),
383 KEY(4, 5, KEY_RIGHTALT),
384 KEY(4, 6, KEY_LEFTSHIFT),
385 + KEY(4, 8, KEY_F10),
386 +
387 KEY(5, 0, KEY_Y),
388 + KEY(5, 8, KEY_F11),
389 +
390 KEY(6, 0, KEY_U),
391 +
392 KEY(7, 0, KEY_I),
393 KEY(7, 1, KEY_F7),
394 KEY(7, 2, KEY_F8),
395 - KEY(0xff, 2, KEY_F9),
396 - KEY(0xff, 4, KEY_F10),
397 - KEY(0xff, 5, KEY_F11),
398 };
399
400 static struct matrix_keymap_data board_map_data = {
401 diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
402 index ee5e392..b4575ae 100644
403 --- a/arch/arm/mach-realview/Kconfig
404 +++ b/arch/arm/mach-realview/Kconfig
405 @@ -18,6 +18,7 @@ config REALVIEW_EB_ARM11MP
406 bool "Support ARM11MPCore tile"
407 depends on MACH_REALVIEW_EB
408 select CPU_V6
409 + select ARCH_HAS_BARRIERS if SMP
410 help
411 Enable support for the ARM11MPCore tile on the Realview platform.
412
413 @@ -35,6 +36,7 @@ config MACH_REALVIEW_PB11MP
414 select CPU_V6
415 select ARM_GIC
416 select HAVE_PATA_PLATFORM
417 + select ARCH_HAS_BARRIERS if SMP
418 help
419 Include support for the ARM(R) RealView MPCore Platform Baseboard.
420 PB11MPCore is a platform with an on-board ARM11MPCore and has
421 diff --git a/arch/arm/mach-realview/include/mach/barriers.h b/arch/arm/mach-realview/include/mach/barriers.h
422 new file mode 100644
423 index 0000000..0c5d749
424 --- /dev/null
425 +++ b/arch/arm/mach-realview/include/mach/barriers.h
426 @@ -0,0 +1,8 @@
427 +/*
428 + * Barriers redefined for RealView ARM11MPCore platforms with L220 cache
429 + * controller to work around hardware errata causing the outer_sync()
430 + * operation to deadlock the system.
431 + */
432 +#define mb() dsb()
433 +#define rmb() dmb()
434 +#define wmb() mb()
435 diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
436 index 5dfd916..7b3cdc6 100644
437 --- a/arch/ia64/mm/tlb.c
438 +++ b/arch/ia64/mm/tlb.c
439 @@ -121,7 +121,7 @@ static inline void down_spin(struct spinaphore *ss)
440 ia64_invala();
441
442 for (;;) {
443 - asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
444 + asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
445 if (time_before(t, serve))
446 return;
447 cpu_relax();
448 diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
449 index a9f0336..52d883d 100644
450 --- a/arch/mips/alchemy/mtx-1/board_setup.c
451 +++ b/arch/mips/alchemy/mtx-1/board_setup.c
452 @@ -67,8 +67,6 @@ static void mtx1_power_off(void)
453
454 void __init board_setup(void)
455 {
456 - alchemy_gpio2_enable();
457 -
458 #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
459 /* Enable USB power switch */
460 alchemy_gpio_direction_output(204, 0);
461 @@ -117,11 +115,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert)
462
463 if (assert && devsel != 0)
464 /* Suppress signal to Cardbus */
465 - gpio_set_value(1, 0); /* set EXT_IO3 OFF */
466 + alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
467 else
468 - gpio_set_value(1, 1); /* set EXT_IO3 ON */
469 + alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */
470
471 - au_sync_udelay(1);
472 + udelay(1);
473 return 1;
474 }
475
476 diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
477 index 0835eb9..e50323f 100644
478 --- a/arch/powerpc/include/asm/cpm.h
479 +++ b/arch/powerpc/include/asm/cpm.h
480 @@ -7,6 +7,30 @@
481 #include <linux/of.h>
482
483 /*
484 + * SPI Parameter RAM common to QE and CPM.
485 + */
486 +struct spi_pram {
487 + __be16 rbase; /* Rx Buffer descriptor base address */
488 + __be16 tbase; /* Tx Buffer descriptor base address */
489 + u8 rfcr; /* Rx function code */
490 + u8 tfcr; /* Tx function code */
491 + __be16 mrblr; /* Max receive buffer length */
492 + __be32 rstate; /* Internal */
493 + __be32 rdp; /* Internal */
494 + __be16 rbptr; /* Internal */
495 + __be16 rbc; /* Internal */
496 + __be32 rxtmp; /* Internal */
497 + __be32 tstate; /* Internal */
498 + __be32 tdp; /* Internal */
499 + __be16 tbptr; /* Internal */
500 + __be16 tbc; /* Internal */
501 + __be32 txtmp; /* Internal */
502 + __be32 res; /* Tx temp. */
503 + __be16 rpbase; /* Relocation pointer (CPM1 only) */
504 + __be16 res1; /* Reserved */
505 +};
506 +
507 +/*
508 * USB Controller pram common to QE and CPM.
509 */
510 struct usb_ctlr {
511 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
512 index 066bd31..127e443 100644
513 --- a/arch/powerpc/kernel/irq.c
514 +++ b/arch/powerpc/kernel/irq.c
515 @@ -294,7 +294,10 @@ void fixup_irqs(cpumask_t map)
516 cpumask_t mask;
517
518 desc = irq_to_desc(irq);
519 - if (desc && desc->status & IRQ_PER_CPU)
520 + if (!desc)
521 + continue;
522 +
523 + if (desc->status & IRQ_PER_CPU)
524 continue;
525
526 cpumask_and(&mask, desc->affinity, &map);
527 diff --git a/arch/powerpc/sysdev/micropatch.c b/arch/powerpc/sysdev/micropatch.c
528 index d8d6028..6c56ae9 100644
529 --- a/arch/powerpc/sysdev/micropatch.c
530 +++ b/arch/powerpc/sysdev/micropatch.c
531 @@ -16,6 +16,7 @@
532 #include <asm/page.h>
533 #include <asm/pgtable.h>
534 #include <asm/8xx_immap.h>
535 +#include <asm/cpm.h>
536 #include <asm/cpm1.h>
537
538 /*
539 @@ -625,9 +626,14 @@ cpm_load_patch(cpm8xx_t *cp)
540 {
541 volatile uint *dp; /* Dual-ported RAM. */
542 volatile cpm8xx_t *commproc;
543 +#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \
544 + defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
545 volatile iic_t *iip;
546 - volatile spi_t *spp;
547 + volatile struct spi_pram *spp;
548 +#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
549 volatile smc_uart_t *smp;
550 +#endif
551 +#endif
552 int i;
553
554 commproc = cp;
555 @@ -668,8 +674,8 @@ cpm_load_patch(cpm8xx_t *cp)
556 /* Put SPI above the IIC, also 32-byte aligned.
557 */
558 i = (RPBASE + sizeof(iic_t) + 31) & ~31;
559 - spp = (spi_t *)&commproc->cp_dparam[PROFF_SPI];
560 - spp->spi_rpbase = i;
561 + spp = (struct spi_pram *)&commproc->cp_dparam[PROFF_SPI];
562 + spp->rpbase = i;
563
564 # if defined(CONFIG_I2C_SPI_UCODE_PATCH)
565 commproc->cp_cpmcr1 = 0x802a;
566 diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
567 index 93a11d7..e696144 100644
568 --- a/arch/um/os-Linux/mem.c
569 +++ b/arch/um/os-Linux/mem.c
570 @@ -10,6 +10,7 @@
571 #include <errno.h>
572 #include <fcntl.h>
573 #include <string.h>
574 +#include <sys/stat.h>
575 #include <sys/mman.h>
576 #include <sys/param.h>
577 #include "init.h"
578 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
579 index d86da72..5928fc0 100644
580 --- a/arch/x86/include/asm/msr-index.h
581 +++ b/arch/x86/include/asm/msr-index.h
582 @@ -107,6 +107,7 @@
583 #define MSR_AMD64_PATCH_LOADER 0xc0010020
584 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
585 #define MSR_AMD64_OSVW_STATUS 0xc0010141
586 +#define MSR_AMD64_DC_CFG 0xc0011022
587 #define MSR_AMD64_IBSFETCHCTL 0xc0011030
588 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
589 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
590 diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
591 index 48dcfa6..fd921c3 100644
592 --- a/arch/x86/include/asm/suspend_32.h
593 +++ b/arch/x86/include/asm/suspend_32.h
594 @@ -15,6 +15,8 @@ static inline int arch_prepare_suspend(void) { return 0; }
595 struct saved_context {
596 u16 es, fs, gs, ss;
597 unsigned long cr0, cr2, cr3, cr4;
598 + u64 misc_enable;
599 + bool misc_enable_saved;
600 struct desc_ptr gdt;
601 struct desc_ptr idt;
602 u16 ldt;
603 diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
604 index 06284f4..8d942af 100644
605 --- a/arch/x86/include/asm/suspend_64.h
606 +++ b/arch/x86/include/asm/suspend_64.h
607 @@ -27,6 +27,8 @@ struct saved_context {
608 u16 ds, es, fs, gs, ss;
609 unsigned long gs_base, gs_kernel_base, fs_base;
610 unsigned long cr0, cr2, cr3, cr4, cr8;
611 + u64 misc_enable;
612 + bool misc_enable_saved;
613 unsigned long efer;
614 u16 gdt_pad;
615 u16 gdt_limit;
616 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
617 index b8fe48e..e7f4d33 100644
618 --- a/arch/x86/include/asm/system.h
619 +++ b/arch/x86/include/asm/system.h
620 @@ -451,7 +451,7 @@ void stop_this_cpu(void *dummy);
621 *
622 * (Could use an alternative three way for this if there was one.)
623 */
624 -static inline void rdtsc_barrier(void)
625 +static __always_inline void rdtsc_barrier(void)
626 {
627 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
628 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
629 diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
630 index 2e837f5..fb7a5f0 100644
631 --- a/arch/x86/kernel/acpi/cstate.c
632 +++ b/arch/x86/kernel/acpi/cstate.c
633 @@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
634 percpu_entry->states[cx->index].eax = cx->address;
635 percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
636 }
637 +
638 + /*
639 + * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
640 + * then we should skip checking BM_STS for this C-state.
641 + * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
642 + */
643 + if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
644 + cx->bm_sts_skip = 1;
645 +
646 return retval;
647 }
648 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
649 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
650 index f996103..82e5086 100644
651 --- a/arch/x86/kernel/acpi/sleep.c
652 +++ b/arch/x86/kernel/acpi/sleep.c
653 @@ -162,8 +162,6 @@ static int __init acpi_sleep_setup(char *str)
654 #endif
655 if (strncmp(str, "old_ordering", 12) == 0)
656 acpi_old_suspend_ordering();
657 - if (strncmp(str, "sci_force_enable", 16) == 0)
658 - acpi_set_sci_en_on_resume();
659 str = strchr(str, ',');
660 if (str != NULL)
661 str += strspn(str, ", \t");
662 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
663 index e5a4a1e..93e2a13 100644
664 --- a/arch/x86/kernel/apic/apic.c
665 +++ b/arch/x86/kernel/apic/apic.c
666 @@ -920,7 +920,7 @@ void disable_local_APIC(void)
667 unsigned int value;
668
669 /* APIC hasn't been mapped yet */
670 - if (!apic_phys)
671 + if (!x2apic_mode && !apic_phys)
672 return;
673
674 clear_local_APIC();
675 diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
676 index db6f7d4..44addd3 100644
677 --- a/arch/x86/kernel/cpu/perf_event_amd.c
678 +++ b/arch/x86/kernel/cpu/perf_event_amd.c
679 @@ -102,8 +102,8 @@ static const u64 amd_perfmon_event_map[] =
680 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
681 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
682 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
683 - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
684 - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
685 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
686 + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
687 };
688
689 static u64 amd_pmu_event_map(int hw_event)
690 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
691 index 9c794ac..b1ab4a0 100644
692 --- a/arch/x86/kernel/cpu/perf_event_intel.c
693 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
694 @@ -72,6 +72,7 @@ static struct event_constraint intel_westmere_event_constraints[] =
695 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
696 INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
697 INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
698 + INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
699 EVENT_CONSTRAINT_END
700 };
701
702 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
703 index 23b4ecd..5294b58 100644
704 --- a/arch/x86/kernel/hpet.c
705 +++ b/arch/x86/kernel/hpet.c
706 @@ -959,7 +959,7 @@ fs_initcall(hpet_late_init);
707
708 void hpet_disable(void)
709 {
710 - if (is_hpet_capable()) {
711 + if (is_hpet_capable() && hpet_virt_address) {
712 unsigned int cfg = hpet_readl(HPET_CFG);
713
714 if (hpet_legacy_int_enabled) {
715 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
716 index 7c9f02c..cafa7c8 100644
717 --- a/arch/x86/kernel/i8259.c
718 +++ b/arch/x86/kernel/i8259.c
719 @@ -276,16 +276,6 @@ static struct sys_device device_i8259A = {
720 .cls = &i8259_sysdev_class,
721 };
722
723 -static int __init i8259A_init_sysfs(void)
724 -{
725 - int error = sysdev_class_register(&i8259_sysdev_class);
726 - if (!error)
727 - error = sysdev_register(&device_i8259A);
728 - return error;
729 -}
730 -
731 -device_initcall(i8259A_init_sysfs);
732 -
733 static void mask_8259A(void)
734 {
735 unsigned long flags;
736 @@ -407,3 +397,18 @@ struct legacy_pic default_legacy_pic = {
737 };
738
739 struct legacy_pic *legacy_pic = &default_legacy_pic;
740 +
741 +static int __init i8259A_init_sysfs(void)
742 +{
743 + int error;
744 +
745 + if (legacy_pic != &default_legacy_pic)
746 + return 0;
747 +
748 + error = sysdev_class_register(&i8259_sysdev_class);
749 + if (!error)
750 + error = sysdev_register(&device_i8259A);
751 + return error;
752 +}
753 +
754 +device_initcall(i8259A_init_sysfs);
755 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
756 index 1658efd..ac4ed92 100644
757 --- a/arch/x86/kernel/kprobes.c
758 +++ b/arch/x86/kernel/kprobes.c
759 @@ -632,8 +632,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
760 /* Skip cs, ip, orig_ax and gs. */ \
761 " subl $16, %esp\n" \
762 " pushl %fs\n" \
763 - " pushl %ds\n" \
764 " pushl %es\n" \
765 + " pushl %ds\n" \
766 " pushl %eax\n" \
767 " pushl %ebp\n" \
768 " pushl %edi\n" \
769 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
770 index fb99f7e..078d4ec 100644
771 --- a/arch/x86/kernel/pci-calgary_64.c
772 +++ b/arch/x86/kernel/pci-calgary_64.c
773 @@ -103,11 +103,16 @@ int use_calgary __read_mostly = 0;
774 #define PMR_SOFTSTOPFAULT 0x40000000
775 #define PMR_HARDSTOP 0x20000000
776
777 -#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
778 -#define MAX_NUM_CHASSIS 8 /* max number of chassis */
779 -/* MAX_PHB_BUS_NUM is the maximal possible dev->bus->number */
780 -#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2)
781 -#define PHBS_PER_CALGARY 4
782 +/*
783 + * The maximum PHB bus number.
784 + * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
785 + * x3950M2: 4 chassis, 48 PHBs per chassis = 192
786 + * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
787 + * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
788 + */
789 +#define MAX_PHB_BUS_NUM 256
790 +
791 +#define PHBS_PER_CALGARY 4
792
793 /* register offsets in Calgary's internal register space */
794 static const unsigned long tar_offsets[] = {
795 @@ -1051,8 +1056,6 @@ static int __init calgary_init_one(struct pci_dev *dev)
796 struct iommu_table *tbl;
797 int ret;
798
799 - BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
800 -
801 bbar = busno_to_bbar(dev->bus->number);
802 ret = calgary_setup_tar(dev, bbar);
803 if (ret)
804 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
805 index 1168e44..4cfa8fd 100644
806 --- a/arch/x86/kernel/traps.c
807 +++ b/arch/x86/kernel/traps.c
808 @@ -529,6 +529,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
809 dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
810 {
811 struct task_struct *tsk = current;
812 + int user_icebp = 0;
813 unsigned long dr6;
814 int si_code;
815
816 @@ -537,6 +538,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
817 /* Filter out all the reserved bits which are preset to 1 */
818 dr6 &= ~DR6_RESERVED;
819
820 + /*
821 + * If dr6 has no reason to give us about the origin of this trap,
822 + * then it's very likely the result of an icebp/int01 trap.
823 + * User wants a sigtrap for that.
824 + */
825 + if (!dr6 && user_mode(regs))
826 + user_icebp = 1;
827 +
828 /* Catch kmemcheck conditions first of all! */
829 if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
830 return;
831 @@ -578,7 +587,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
832 regs->flags &= ~X86_EFLAGS_TF;
833 }
834 si_code = get_si_code(tsk->thread.debugreg6);
835 - if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
836 + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
837 send_sigtrap(tsk, regs, error_code, si_code);
838 preempt_conditional_cli(regs);
839
840 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
841 index 62fd8e6..dd3df44 100644
842 --- a/arch/x86/kvm/mmu.c
843 +++ b/arch/x86/kvm/mmu.c
844 @@ -1837,6 +1837,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
845
846 spte |= PT_WRITABLE_MASK;
847
848 + if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
849 + spte &= ~PT_USER_MASK;
850 +
851 /*
852 * Optimization: for pte sync, if spte was writable the hash
853 * lookup is unnecessary (and expensive). Write protection
854 @@ -1892,6 +1895,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
855
856 child = page_header(pte & PT64_BASE_ADDR_MASK);
857 mmu_page_remove_parent_pte(child, sptep);
858 + __set_spte(sptep, shadow_trap_nonpresent_pte);
859 + kvm_flush_remote_tlbs(vcpu->kvm);
860 } else if (pfn != spte_to_pfn(*sptep)) {
861 pgprintk("hfn old %lx new %lx\n",
862 spte_to_pfn(*sptep), pfn);
863 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
864 index 1185b55..49dabc1 100644
865 --- a/arch/x86/kvm/svm.c
866 +++ b/arch/x86/kvm/svm.c
867 @@ -28,6 +28,7 @@
868 #include <linux/ftrace_event.h>
869 #include <linux/slab.h>
870
871 +#include <asm/tlbflush.h>
872 #include <asm/desc.h>
873
874 #include <asm/virtext.h>
875 @@ -55,6 +56,8 @@ MODULE_LICENSE("GPL");
876
877 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
878
879 +static bool erratum_383_found __read_mostly;
880 +
881 static const u32 host_save_user_msrs[] = {
882 #ifdef CONFIG_X86_64
883 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
884 @@ -298,6 +301,31 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
885 svm_set_interrupt_shadow(vcpu, 0);
886 }
887
888 +static void svm_init_erratum_383(void)
889 +{
890 + u32 low, high;
891 + int err;
892 + u64 val;
893 +
894 + /* Only Fam10h is affected */
895 + if (boot_cpu_data.x86 != 0x10)
896 + return;
897 +
898 + /* Use _safe variants to not break nested virtualization */
899 + val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
900 + if (err)
901 + return;
902 +
903 + val |= (1ULL << 47);
904 +
905 + low = lower_32_bits(val);
906 + high = upper_32_bits(val);
907 +
908 + native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
909 +
910 + erratum_383_found = true;
911 +}
912 +
913 static int has_svm(void)
914 {
915 const char *msg;
916 @@ -353,6 +381,8 @@ static int svm_hardware_enable(void *garbage)
917
918 wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
919
920 + svm_init_erratum_383();
921 +
922 return 0;
923 }
924
925 @@ -1280,8 +1310,59 @@ static int nm_interception(struct vcpu_svm *svm)
926 return 1;
927 }
928
929 -static int mc_interception(struct vcpu_svm *svm)
930 +static bool is_erratum_383(void)
931 {
932 + int err, i;
933 + u64 value;
934 +
935 + if (!erratum_383_found)
936 + return false;
937 +
938 + value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
939 + if (err)
940 + return false;
941 +
942 + /* Bit 62 may or may not be set for this mce */
943 + value &= ~(1ULL << 62);
944 +
945 + if (value != 0xb600000000010015ULL)
946 + return false;
947 +
948 + /* Clear MCi_STATUS registers */
949 + for (i = 0; i < 6; ++i)
950 + native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
951 +
952 + value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
953 + if (!err) {
954 + u32 low, high;
955 +
956 + value &= ~(1ULL << 2);
957 + low = lower_32_bits(value);
958 + high = upper_32_bits(value);
959 +
960 + native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
961 + }
962 +
963 + /* Flush tlb to evict multi-match entries */
964 + __flush_tlb_all();
965 +
966 + return true;
967 +}
968 +
969 +static void svm_handle_mce(struct vcpu_svm *svm)
970 +{
971 + if (is_erratum_383()) {
972 + /*
973 + * Erratum 383 triggered. Guest state is corrupt so kill the
974 + * guest.
975 + */
976 + pr_err("KVM: Guest triggered AMD Erratum 383\n");
977 +
978 + set_bit(KVM_REQ_TRIPLE_FAULT, &svm->vcpu.requests);
979 +
980 + return;
981 + }
982 +
983 /*
984 * On an #MC intercept the MCE handler is not called automatically in
985 * the host. So do it by hand here.
986 @@ -1290,6 +1371,11 @@ static int mc_interception(struct vcpu_svm *svm)
987 "int $0x12\n");
988 /* not sure if we ever come back to this point */
989
990 + return;
991 +}
992 +
993 +static int mc_interception(struct vcpu_svm *svm)
994 +{
995 return 1;
996 }
997
998 @@ -2842,6 +2928,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
999 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
1000 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
1001 }
1002 +
1003 + /*
1004 + * We need to handle MC intercepts here before the vcpu has a chance to
1005 + * change the physical cpu
1006 + */
1007 + if (unlikely(svm->vmcb->control.exit_code ==
1008 + SVM_EXIT_EXCP_BASE + MC_VECTOR))
1009 + svm_handle_mce(svm);
1010 }
1011
1012 #undef R
1013 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
1014 index 1cdc02c..1699ea8 100644
1015 --- a/arch/x86/pci/mrst.c
1016 +++ b/arch/x86/pci/mrst.c
1017 @@ -66,8 +66,9 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
1018 devfn, pos, 4, &pcie_cap))
1019 return 0;
1020
1021 - if (pcie_cap == 0xffffffff)
1022 - return 0;
1023 + if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
1024 + PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
1025 + break;
1026
1027 if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
1028 raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
1029 @@ -76,7 +77,7 @@ static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
1030 return pos;
1031 }
1032
1033 - pos = pcie_cap >> 20;
1034 + pos = PCI_EXT_CAP_NEXT(pcie_cap);
1035 }
1036
1037 return 0;
1038 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
1039 index 0a979f3..1290ba5 100644
1040 --- a/arch/x86/power/cpu.c
1041 +++ b/arch/x86/power/cpu.c
1042 @@ -105,6 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
1043 ctxt->cr4 = read_cr4();
1044 ctxt->cr8 = read_cr8();
1045 #endif
1046 + ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
1047 + &ctxt->misc_enable);
1048 }
1049
1050 /* Needed by apm.c */
1051 @@ -152,6 +154,8 @@ static void fix_processor_context(void)
1052 */
1053 static void __restore_processor_state(struct saved_context *ctxt)
1054 {
1055 + if (ctxt->misc_enable_saved)
1056 + wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
1057 /*
1058 * control registers
1059 */
1060 diff --git a/block/blk-core.c b/block/blk-core.c
1061 index 9fe174d..dd9795d 100644
1062 --- a/block/blk-core.c
1063 +++ b/block/blk-core.c
1064 @@ -1556,7 +1556,7 @@ void submit_bio(int rw, struct bio *bio)
1065 * If it's a regular read/write or a barrier with data attached,
1066 * go through the normal accounting stuff before submission.
1067 */
1068 - if (bio_has_data(bio)) {
1069 + if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
1070 if (rw & WRITE) {
1071 count_vm_events(PGPGOUT, count);
1072 } else {
1073 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1074 index 002c0ce..4fab7c9 100644
1075 --- a/block/cfq-iosched.c
1076 +++ b/block/cfq-iosched.c
1077 @@ -1930,6 +1930,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1078 int process_refs, new_process_refs;
1079 struct cfq_queue *__cfqq;
1080
1081 + /*
1082 + * If there are no process references on the new_cfqq, then it is
1083 + * unsafe to follow the ->new_cfqq chain as other cfqq's in the
1084 + * chain may have dropped their last reference (not just their
1085 + * last process reference).
1086 + */
1087 + if (!cfqq_process_refs(new_cfqq))
1088 + return;
1089 +
1090 /* Avoid a circular list and skip interim queue merges */
1091 while ((__cfqq = new_cfqq->new_cfqq)) {
1092 if (__cfqq == cfqq)
1093 @@ -1938,17 +1947,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1094 }
1095
1096 process_refs = cfqq_process_refs(cfqq);
1097 + new_process_refs = cfqq_process_refs(new_cfqq);
1098 /*
1099 * If the process for the cfqq has gone away, there is no
1100 * sense in merging the queues.
1101 */
1102 - if (process_refs == 0)
1103 + if (process_refs == 0 || new_process_refs == 0)
1104 return;
1105
1106 /*
1107 * Merge in the direction of the lesser amount of work.
1108 */
1109 - new_process_refs = cfqq_process_refs(new_cfqq);
1110 if (new_process_refs >= process_refs) {
1111 cfqq->new_cfqq = new_cfqq;
1112 atomic_add(process_refs, &new_cfqq->ref);
1113 diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
1114 index 3e6ba99..af38dfb 100644
1115 --- a/drivers/acpi/acpica/acevents.h
1116 +++ b/drivers/acpi/acpica/acevents.h
1117 @@ -78,10 +78,6 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
1118 acpi_status
1119 acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
1120
1121 -acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
1122 -
1123 -acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
1124 -
1125 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
1126 u32 gpe_number);
1127
1128 diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
1129 index 5900f13..3239158 100644
1130 --- a/drivers/acpi/acpica/achware.h
1131 +++ b/drivers/acpi/acpica/achware.h
1132 @@ -90,7 +90,11 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
1133 /*
1134 * hwgpe - GPE support
1135 */
1136 -acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
1137 +u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
1138 + struct acpi_gpe_register_info *gpe_register_info);
1139 +
1140 +acpi_status
1141 +acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
1142
1143 acpi_status
1144 acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info);
1145 diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
1146 index 78c5550..e2dc53d 100644
1147 --- a/drivers/acpi/acpica/evgpe.c
1148 +++ b/drivers/acpi/acpica/evgpe.c
1149 @@ -68,7 +68,7 @@ acpi_status
1150 acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
1151 {
1152 struct acpi_gpe_register_info *gpe_register_info;
1153 - u8 register_bit;
1154 + u32 register_bit;
1155
1156 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
1157
1158 @@ -77,9 +77,8 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
1159 return_ACPI_STATUS(AE_NOT_EXIST);
1160 }
1161
1162 - register_bit = (u8)
1163 - (1 <<
1164 - (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
1165 + register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
1166 + gpe_register_info);
1167
1168 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
1169 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
1170 @@ -95,76 +94,6 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
1171
1172 /*******************************************************************************
1173 *
1174 - * FUNCTION: acpi_ev_enable_gpe
1175 - *
1176 - * PARAMETERS: gpe_event_info - GPE to enable
1177 - *
1178 - * RETURN: Status
1179 - *
1180 - * DESCRIPTION: Enable a GPE based on the GPE type
1181 - *
1182 - ******************************************************************************/
1183 -
1184 -acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1185 -{
1186 - acpi_status status;
1187 -
1188 - ACPI_FUNCTION_TRACE(ev_enable_gpe);
1189 -
1190 - /* Make sure HW enable masks are updated */
1191 -
1192 - status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
1193 - if (ACPI_FAILURE(status))
1194 - return_ACPI_STATUS(status);
1195 -
1196 - /* Clear the GPE (of stale events), then enable it */
1197 - status = acpi_hw_clear_gpe(gpe_event_info);
1198 - if (ACPI_FAILURE(status))
1199 - return_ACPI_STATUS(status);
1200 -
1201 - /* Enable the requested GPE */
1202 - status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
1203 - return_ACPI_STATUS(status);
1204 -}
1205 -
1206 -/*******************************************************************************
1207 - *
1208 - * FUNCTION: acpi_ev_disable_gpe
1209 - *
1210 - * PARAMETERS: gpe_event_info - GPE to disable
1211 - *
1212 - * RETURN: Status
1213 - *
1214 - * DESCRIPTION: Disable a GPE based on the GPE type
1215 - *
1216 - ******************************************************************************/
1217 -
1218 -acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1219 -{
1220 - acpi_status status;
1221 -
1222 - ACPI_FUNCTION_TRACE(ev_disable_gpe);
1223 -
1224 - /* Make sure HW enable masks are updated */
1225 -
1226 - status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
1227 - if (ACPI_FAILURE(status))
1228 - return_ACPI_STATUS(status);
1229 -
1230 - /*
1231 - * Even if we don't know the GPE type, make sure that we always
1232 - * disable it. low_disable_gpe will just clear the enable bit for this
1233 - * GPE and write it. It will not write out the current GPE enable mask,
1234 - * since this may inadvertently enable GPEs too early, if a rogue GPE has
1235 - * come in during ACPICA initialization - possibly as a result of AML or
1236 - * other code that has enabled the GPE.
1237 - */
1238 - status = acpi_hw_low_disable_gpe(gpe_event_info);
1239 - return_ACPI_STATUS(status);
1240 -}
1241 -
1242 -/*******************************************************************************
1243 - *
1244 * FUNCTION: acpi_ev_get_gpe_event_info
1245 *
1246 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
1247 @@ -389,10 +318,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
1248 return_VOID;
1249 }
1250
1251 - /* Set the GPE flags for return to enabled state */
1252 -
1253 - (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
1254 -
1255 /*
1256 * Take a snapshot of the GPE info for this level - we copy the info to
1257 * prevent a race condition with remove_handler/remove_block.
1258 @@ -545,7 +470,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
1259 * Disable the GPE, so it doesn't keep firing before the method has a
1260 * chance to run (it runs asynchronously with interrupts enabled).
1261 */
1262 - status = acpi_ev_disable_gpe(gpe_event_info);
1263 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
1264 if (ACPI_FAILURE(status)) {
1265 ACPI_EXCEPTION((AE_INFO, status,
1266 "Unable to disable GPE[%2X]",
1267 @@ -579,7 +504,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
1268 * Disable the GPE. The GPE will remain disabled until the ACPICA
1269 * Core Subsystem is restarted, or a handler is installed.
1270 */
1271 - status = acpi_ev_disable_gpe(gpe_event_info);
1272 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
1273 if (ACPI_FAILURE(status)) {
1274 ACPI_EXCEPTION((AE_INFO, status,
1275 "Unable to disable GPE[%2X]",
1276 diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
1277 index fef7219..a439850 100644
1278 --- a/drivers/acpi/acpica/evgpeblk.c
1279 +++ b/drivers/acpi/acpica/evgpeblk.c
1280 @@ -1023,6 +1023,19 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
1281 /* Get the info block for this particular GPE */
1282 gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
1283 gpe_event_info = &gpe_block->event_info[gpe_index];
1284 + gpe_number = gpe_index + gpe_block->block_base_number;
1285 +
1286 + /*
1287 + * If the GPE has already been enabled for runtime
1288 + * signaling, make sure it remains enabled, but do not
1289 + * increment its reference counter.
1290 + */
1291 + if (gpe_event_info->runtime_count) {
1292 + acpi_set_gpe(gpe_device, gpe_number,
1293 + ACPI_GPE_ENABLE);
1294 + gpe_enabled_count++;
1295 + continue;
1296 + }
1297
1298 if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
1299 wake_gpe_count++;
1300 @@ -1033,7 +1046,6 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
1301 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
1302 continue;
1303
1304 - gpe_number = gpe_index + gpe_block->block_base_number;
1305 status = acpi_enable_gpe(gpe_device, gpe_number,
1306 ACPI_GPE_TYPE_RUNTIME);
1307 if (ACPI_FAILURE(status))
1308 diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
1309 index 5ff32c7..47c1aac 100644
1310 --- a/drivers/acpi/acpica/evxfevnt.c
1311 +++ b/drivers/acpi/acpica/evxfevnt.c
1312 @@ -201,6 +201,44 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
1313
1314 /*******************************************************************************
1315 *
1316 + * FUNCTION: acpi_clear_and_enable_gpe
1317 + *
1318 + * PARAMETERS: gpe_event_info - GPE to enable
1319 + *
1320 + * RETURN: Status
1321 + *
1322 + * DESCRIPTION: Clear the given GPE from stale events and enable it.
1323 + *
1324 + ******************************************************************************/
1325 +static acpi_status
1326 +acpi_clear_and_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1327 +{
1328 + acpi_status status;
1329 +
1330 + /*
1331 + * We will only allow a GPE to be enabled if it has either an
1332 + * associated method (_Lxx/_Exx) or a handler. Otherwise, the
1333 + * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
1334 + * first time it fires.
1335 + */
1336 + if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
1337 + return_ACPI_STATUS(AE_NO_HANDLER);
1338 + }
1339 +
1340 + /* Clear the GPE (of stale events) */
1341 + status = acpi_hw_clear_gpe(gpe_event_info);
1342 + if (ACPI_FAILURE(status)) {
1343 + return_ACPI_STATUS(status);
1344 + }
1345 +
1346 + /* Enable the requested GPE */
1347 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
1348 +
1349 + return_ACPI_STATUS(status);
1350 +}
1351 +
1352 +/*******************************************************************************
1353 + *
1354 * FUNCTION: acpi_set_gpe
1355 *
1356 * PARAMETERS: gpe_device - Parent GPE Device
1357 @@ -235,11 +273,11 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
1358
1359 switch (action) {
1360 case ACPI_GPE_ENABLE:
1361 - status = acpi_ev_enable_gpe(gpe_event_info);
1362 + status = acpi_clear_and_enable_gpe(gpe_event_info);
1363 break;
1364
1365 case ACPI_GPE_DISABLE:
1366 - status = acpi_ev_disable_gpe(gpe_event_info);
1367 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
1368 break;
1369
1370 default:
1371 @@ -291,9 +329,13 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
1372
1373 if (type & ACPI_GPE_TYPE_RUNTIME) {
1374 if (++gpe_event_info->runtime_count == 1) {
1375 - status = acpi_ev_enable_gpe(gpe_event_info);
1376 - if (ACPI_FAILURE(status))
1377 + status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
1378 + if (ACPI_SUCCESS(status)) {
1379 + status = acpi_clear_and_enable_gpe(gpe_event_info);
1380 + }
1381 + if (ACPI_FAILURE(status)) {
1382 gpe_event_info->runtime_count--;
1383 + }
1384 }
1385 }
1386
1387 @@ -308,7 +350,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
1388 * system into a sleep state.
1389 */
1390 if (++gpe_event_info->wakeup_count == 1)
1391 - acpi_ev_update_gpe_enable_masks(gpe_event_info);
1392 + status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
1393 }
1394
1395 unlock_and_exit:
1396 @@ -351,8 +393,16 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
1397 }
1398
1399 if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
1400 - if (--gpe_event_info->runtime_count == 0)
1401 - status = acpi_ev_disable_gpe(gpe_event_info);
1402 + if (--gpe_event_info->runtime_count == 0) {
1403 + status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
1404 + if (ACPI_SUCCESS(status)) {
1405 + status = acpi_hw_low_set_gpe(gpe_event_info,
1406 + ACPI_GPE_DISABLE);
1407 + }
1408 + if (ACPI_FAILURE(status)) {
1409 + gpe_event_info->runtime_count++;
1410 + }
1411 + }
1412 }
1413
1414 if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
1415 @@ -361,7 +411,7 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
1416 * states, so we don't need to disable them here.
1417 */
1418 if (--gpe_event_info->wakeup_count == 0)
1419 - acpi_ev_update_gpe_enable_masks(gpe_event_info);
1420 + status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
1421 }
1422
1423 unlock_and_exit:
1424 diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
1425 index bd72319..89e6cfd 100644
1426 --- a/drivers/acpi/acpica/hwgpe.c
1427 +++ b/drivers/acpi/acpica/hwgpe.c
1428 @@ -57,21 +57,47 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1429
1430 /******************************************************************************
1431 *
1432 - * FUNCTION: acpi_hw_low_disable_gpe
1433 + * FUNCTION: acpi_hw_gpe_register_bit
1434 + *
1435 + * PARAMETERS: gpe_event_info - Info block for the GPE
1436 + * gpe_register_info - Info block for the GPE register
1437 + *
1438 + * RETURN: Status
1439 + *
1440 + * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given
1441 + * GPE set.
1442 + *
1443 + ******************************************************************************/
1444 +
1445 +u32 acpi_hw_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
1446 + struct acpi_gpe_register_info *gpe_register_info)
1447 +{
1448 + return (u32)1 << (gpe_event_info->gpe_number -
1449 + gpe_register_info->base_gpe_number);
1450 +}
1451 +
1452 +/******************************************************************************
1453 + *
1454 + * FUNCTION: acpi_hw_low_set_gpe
1455 *
1456 * PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
1457 + * action - Enable or disable
1458 *
1459 * RETURN: Status
1460 *
1461 - * DESCRIPTION: Disable a single GPE in the enable register.
1462 + * DESCRIPTION: Enable or disable a single GPE in its enable register.
1463 *
1464 ******************************************************************************/
1465
1466 -acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1467 +acpi_status
1468 +acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
1469 {
1470 struct acpi_gpe_register_info *gpe_register_info;
1471 acpi_status status;
1472 u32 enable_mask;
1473 + u32 register_bit;
1474 +
1475 + ACPI_FUNCTION_ENTRY();
1476
1477 /* Get the info block for the entire GPE register */
1478
1479 @@ -87,11 +113,27 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1480 return (status);
1481 }
1482
1483 - /* Clear just the bit that corresponds to this GPE */
1484 + /* Set or clear just the bit that corresponds to this GPE */
1485
1486 - ACPI_CLEAR_BIT(enable_mask, ((u32)1 <<
1487 - (gpe_event_info->gpe_number -
1488 - gpe_register_info->base_gpe_number)));
1489 + register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
1490 + gpe_register_info);
1491 + switch (action) {
1492 + case ACPI_GPE_COND_ENABLE:
1493 + if (!(register_bit & gpe_register_info->enable_for_run))
1494 + return (AE_BAD_PARAMETER);
1495 +
1496 + case ACPI_GPE_ENABLE:
1497 + ACPI_SET_BIT(enable_mask, register_bit);
1498 + break;
1499 +
1500 + case ACPI_GPE_DISABLE:
1501 + ACPI_CLEAR_BIT(enable_mask, register_bit);
1502 + break;
1503 +
1504 + default:
1505 + ACPI_ERROR((AE_INFO, "Invalid action\n"));
1506 + return (AE_BAD_PARAMETER);
1507 + }
1508
1509 /* Write the updated enable mask */
1510
1511 @@ -116,23 +158,11 @@ acpi_status acpi_hw_low_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
1512 acpi_status
1513 acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
1514 {
1515 - struct acpi_gpe_register_info *gpe_register_info;
1516 acpi_status status;
1517
1518 ACPI_FUNCTION_ENTRY();
1519
1520 - /* Get the info block for the entire GPE register */
1521 -
1522 - gpe_register_info = gpe_event_info->register_info;
1523 - if (!gpe_register_info) {
1524 - return (AE_NOT_EXIST);
1525 - }
1526 -
1527 - /* Write the entire GPE (runtime) enable register */
1528 -
1529 - status = acpi_hw_write(gpe_register_info->enable_for_run,
1530 - &gpe_register_info->enable_address);
1531 -
1532 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
1533 return (status);
1534 }
1535
1536 @@ -150,21 +180,28 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info * gpe_event_info)
1537
1538 acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
1539 {
1540 + struct acpi_gpe_register_info *gpe_register_info;
1541 acpi_status status;
1542 - u8 register_bit;
1543 + u32 register_bit;
1544
1545 ACPI_FUNCTION_ENTRY();
1546
1547 - register_bit = (u8)(1 <<
1548 - (gpe_event_info->gpe_number -
1549 - gpe_event_info->register_info->base_gpe_number));
1550 + /* Get the info block for the entire GPE register */
1551 +
1552 + gpe_register_info = gpe_event_info->register_info;
1553 + if (!gpe_register_info) {
1554 + return (AE_NOT_EXIST);
1555 + }
1556 +
1557 + register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
1558 + gpe_register_info);
1559
1560 /*
1561 * Write a one to the appropriate bit in the status register to
1562 * clear this GPE.
1563 */
1564 status = acpi_hw_write(register_bit,
1565 - &gpe_event_info->register_info->status_address);
1566 + &gpe_register_info->status_address);
1567
1568 return (status);
1569 }
1570 @@ -187,7 +224,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
1571 acpi_event_status * event_status)
1572 {
1573 u32 in_byte;
1574 - u8 register_bit;
1575 + u32 register_bit;
1576 struct acpi_gpe_register_info *gpe_register_info;
1577 acpi_status status;
1578 acpi_event_status local_event_status = 0;
1579 @@ -204,9 +241,8 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
1580
1581 /* Get the register bitmask for this GPE */
1582
1583 - register_bit = (u8)(1 <<
1584 - (gpe_event_info->gpe_number -
1585 - gpe_event_info->register_info->base_gpe_number));
1586 + register_bit = acpi_hw_gpe_register_bit(gpe_event_info,
1587 + gpe_register_info);
1588
1589 /* GPE currently enabled? (enabled for runtime?) */
1590
1591 diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
1592 index fd51c4a..7d857da 100644
1593 --- a/drivers/acpi/button.c
1594 +++ b/drivers/acpi/button.c
1595 @@ -425,7 +425,7 @@ static int acpi_button_add(struct acpi_device *device)
1596 /* Button's GPE is run-wake GPE */
1597 acpi_enable_gpe(device->wakeup.gpe_device,
1598 device->wakeup.gpe_number,
1599 - ACPI_GPE_TYPE_WAKE_RUN);
1600 + ACPI_GPE_TYPE_RUNTIME);
1601 device->wakeup.run_wake_count++;
1602 device->wakeup.state.enabled = 1;
1603 }
1604 @@ -449,7 +449,7 @@ static int acpi_button_remove(struct acpi_device *device, int type)
1605 if (device->wakeup.flags.valid) {
1606 acpi_disable_gpe(device->wakeup.gpe_device,
1607 device->wakeup.gpe_number,
1608 - ACPI_GPE_TYPE_WAKE_RUN);
1609 + ACPI_GPE_TYPE_RUNTIME);
1610 device->wakeup.run_wake_count--;
1611 device->wakeup.state.enabled = 0;
1612 }
1613 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
1614 index 5128435..e9699aa 100644
1615 --- a/drivers/acpi/processor_core.c
1616 +++ b/drivers/acpi/processor_core.c
1617 @@ -223,7 +223,7 @@ static bool processor_physically_present(acpi_handle handle)
1618 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
1619 cpuid = acpi_get_cpuid(handle, type, acpi_id);
1620
1621 - if (cpuid == -1)
1622 + if ((cpuid == -1) && (num_possible_cpus() > 1))
1623 return false;
1624
1625 return true;
1626 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
1627 index 5939e7f..346b758 100644
1628 --- a/drivers/acpi/processor_idle.c
1629 +++ b/drivers/acpi/processor_idle.c
1630 @@ -955,7 +955,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1631 if (acpi_idle_suspend)
1632 return(acpi_idle_enter_c1(dev, state));
1633
1634 - if (acpi_idle_bm_check()) {
1635 + if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
1636 if (dev->safe_state) {
1637 dev->last_state = dev->safe_state;
1638 return dev->safe_state->enter(dev, dev->safe_state);
1639 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
1640 index baa76bb..4ab2275 100644
1641 --- a/drivers/acpi/sleep.c
1642 +++ b/drivers/acpi/sleep.c
1643 @@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
1644
1645 #ifdef CONFIG_ACPI_SLEEP
1646 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
1647 -/*
1648 - * According to the ACPI specification the BIOS should make sure that ACPI is
1649 - * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
1650 - * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
1651 - * on such systems during resume. Unfortunately that doesn't help in
1652 - * particularly pathological cases in which SCI_EN has to be set directly on
1653 - * resume, although the specification states very clearly that this flag is
1654 - * owned by the hardware. The set_sci_en_on_resume variable will be set in such
1655 - * cases.
1656 - */
1657 -static bool set_sci_en_on_resume;
1658 -
1659 -void __init acpi_set_sci_en_on_resume(void)
1660 -{
1661 - set_sci_en_on_resume = true;
1662 -}
1663
1664 /*
1665 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
1666 @@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
1667 break;
1668 }
1669
1670 - /* If ACPI is not enabled by the BIOS, we need to enable it here. */
1671 - if (set_sci_en_on_resume)
1672 - acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
1673 - else
1674 - acpi_enable();
1675 + /* This violates the spec but is required for bug compatibility. */
1676 + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
1677
1678 /* Reprogram control registers and execute _BFS */
1679 acpi_leave_sleep_state_prep(acpi_state);
1680 @@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
1681 return 0;
1682 }
1683
1684 -static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
1685 -{
1686 - set_sci_en_on_resume = true;
1687 - return 0;
1688 -}
1689 -
1690 static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
1691 {
1692 .callback = init_old_suspend_ordering,
1693 @@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
1694 },
1695 },
1696 {
1697 - .callback = init_set_sci_en_on_resume,
1698 - .ident = "Apple MacBook 1,1",
1699 - .matches = {
1700 - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
1701 - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
1702 - },
1703 - },
1704 - {
1705 - .callback = init_set_sci_en_on_resume,
1706 - .ident = "Apple MacMini 1,1",
1707 - .matches = {
1708 - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
1709 - DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
1710 - },
1711 - },
1712 - {
1713 .callback = init_old_suspend_ordering,
1714 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
1715 .matches = {
1716 @@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
1717 },
1718 },
1719 {
1720 - .callback = init_set_sci_en_on_resume,
1721 - .ident = "Toshiba Satellite L300",
1722 - .matches = {
1723 - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1724 - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
1725 - },
1726 - },
1727 - {
1728 - .callback = init_set_sci_en_on_resume,
1729 - .ident = "Hewlett-Packard HP G7000 Notebook PC",
1730 - .matches = {
1731 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1732 - DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
1733 - },
1734 - },
1735 - {
1736 - .callback = init_set_sci_en_on_resume,
1737 - .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
1738 - .matches = {
1739 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1740 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
1741 - },
1742 - },
1743 - {
1744 - .callback = init_set_sci_en_on_resume,
1745 - .ident = "Hewlett-Packard Pavilion dv4",
1746 - .matches = {
1747 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1748 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
1749 - },
1750 - },
1751 - {
1752 - .callback = init_set_sci_en_on_resume,
1753 - .ident = "Hewlett-Packard Pavilion dv7",
1754 - .matches = {
1755 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1756 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
1757 - },
1758 - },
1759 - {
1760 - .callback = init_set_sci_en_on_resume,
1761 - .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
1762 - .matches = {
1763 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1764 - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
1765 - },
1766 - },
1767 - {
1768 - .callback = init_set_sci_en_on_resume,
1769 - .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
1770 - .matches = {
1771 - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1772 - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
1773 - },
1774 - },
1775 - {
1776 - .callback = init_set_sci_en_on_resume,
1777 - .ident = "Lenovo ThinkPad T410",
1778 - .matches = {
1779 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1780 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
1781 - },
1782 - },
1783 - {
1784 - .callback = init_set_sci_en_on_resume,
1785 - .ident = "Lenovo ThinkPad T510",
1786 - .matches = {
1787 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1788 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
1789 - },
1790 - },
1791 - {
1792 - .callback = init_set_sci_en_on_resume,
1793 - .ident = "Lenovo ThinkPad W510",
1794 - .matches = {
1795 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1796 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
1797 - },
1798 - },
1799 - {
1800 - .callback = init_set_sci_en_on_resume,
1801 - .ident = "Lenovo ThinkPad X201[s]",
1802 - .matches = {
1803 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1804 - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
1805 - },
1806 - },
1807 - {
1808 .callback = init_old_suspend_ordering,
1809 .ident = "Panasonic CF51-2L",
1810 .matches = {
1811 @@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
1812 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
1813 },
1814 },
1815 - {
1816 - .callback = init_set_sci_en_on_resume,
1817 - .ident = "Dell Studio 1558",
1818 - .matches = {
1819 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1820 - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
1821 - },
1822 - },
1823 - {
1824 - .callback = init_set_sci_en_on_resume,
1825 - .ident = "Dell Studio 1557",
1826 - .matches = {
1827 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1828 - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
1829 - },
1830 - },
1831 - {
1832 - .callback = init_set_sci_en_on_resume,
1833 - .ident = "Dell Studio 1555",
1834 - .matches = {
1835 - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1836 - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
1837 - },
1838 - },
1839 {},
1840 };
1841 #endif /* CONFIG_SUSPEND */
1842 diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
1843 index 4aaf249..4a89ae4 100644
1844 --- a/drivers/acpi/system.c
1845 +++ b/drivers/acpi/system.c
1846 @@ -389,10 +389,12 @@ static ssize_t counter_set(struct kobject *kobj,
1847 if (index < num_gpes) {
1848 if (!strcmp(buf, "disable\n") &&
1849 (status & ACPI_EVENT_FLAG_ENABLED))
1850 - result = acpi_set_gpe(handle, index, ACPI_GPE_DISABLE);
1851 + result = acpi_disable_gpe(handle, index,
1852 + ACPI_GPE_TYPE_RUNTIME);
1853 else if (!strcmp(buf, "enable\n") &&
1854 !(status & ACPI_EVENT_FLAG_ENABLED))
1855 - result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
1856 + result = acpi_enable_gpe(handle, index,
1857 + ACPI_GPE_TYPE_RUNTIME);
1858 else if (!strcmp(buf, "clear\n") &&
1859 (status & ACPI_EVENT_FLAG_SET))
1860 result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
1861 diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
1862 index 4b9d339..388747a 100644
1863 --- a/drivers/acpi/wakeup.c
1864 +++ b/drivers/acpi/wakeup.c
1865 @@ -64,16 +64,13 @@ void acpi_enable_wakeup_device(u8 sleep_state)
1866 struct acpi_device *dev =
1867 container_of(node, struct acpi_device, wakeup_list);
1868
1869 - if (!dev->wakeup.flags.valid)
1870 - continue;
1871 -
1872 - if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
1873 + if (!dev->wakeup.flags.valid || !dev->wakeup.state.enabled
1874 || sleep_state > (u32) dev->wakeup.sleep_state)
1875 continue;
1876
1877 /* The wake-up power should have been enabled already. */
1878 - acpi_set_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
1879 - ACPI_GPE_ENABLE);
1880 + acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
1881 + ACPI_GPE_TYPE_WAKE);
1882 }
1883 }
1884
1885 @@ -96,6 +93,8 @@ void acpi_disable_wakeup_device(u8 sleep_state)
1886 || (sleep_state > (u32) dev->wakeup.sleep_state))
1887 continue;
1888
1889 + acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
1890 + ACPI_GPE_TYPE_WAKE);
1891 acpi_disable_wakeup_device_power(dev);
1892 }
1893 }
1894 @@ -109,13 +108,8 @@ int __init acpi_wakeup_device_init(void)
1895 struct acpi_device *dev = container_of(node,
1896 struct acpi_device,
1897 wakeup_list);
1898 - /* In case user doesn't load button driver */
1899 - if (!dev->wakeup.flags.always_enabled ||
1900 - dev->wakeup.state.enabled)
1901 - continue;
1902 - acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
1903 - ACPI_GPE_TYPE_WAKE);
1904 - dev->wakeup.state.enabled = 1;
1905 + if (dev->wakeup.flags.always_enabled)
1906 + dev->wakeup.state.enabled = 1;
1907 }
1908 mutex_unlock(&acpi_device_lock);
1909 return 0;
1910 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
1911 index 5326af2..c12aa05 100644
1912 --- a/drivers/ata/ahci.c
1913 +++ b/drivers/ata/ahci.c
1914 @@ -3249,6 +3249,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1915 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
1916 return -ENODEV;
1917
1918 + /*
1919 + * For some reason, MCP89 on MacBook 7,1 doesn't work with
1920 + * ahci, use ata_generic instead.
1921 + */
1922 + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
1923 + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
1924 + pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1925 + pdev->subsystem_device == 0xcb89)
1926 + return -ENODEV;
1927 +
1928 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
1929 * At the moment, we can only use the AHCI mode. Let the users know
1930 * that for SAS drives they're out of luck.
1931 diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
1932 index 33fb614..ae65cf8 100644
1933 --- a/drivers/ata/ata_generic.c
1934 +++ b/drivers/ata/ata_generic.c
1935 @@ -32,6 +32,11 @@
1936 * A generic parallel ATA driver using libata
1937 */
1938
1939 +enum {
1940 + ATA_GEN_CLASS_MATCH = (1 << 0),
1941 + ATA_GEN_FORCE_DMA = (1 << 1),
1942 +};
1943 +
1944 /**
1945 * generic_set_mode - mode setting
1946 * @link: link to set up
1947 @@ -46,13 +51,17 @@
1948 static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
1949 {
1950 struct ata_port *ap = link->ap;
1951 + const struct pci_device_id *id = ap->host->private_data;
1952 int dma_enabled = 0;
1953 struct ata_device *dev;
1954 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1955
1956 - /* Bits 5 and 6 indicate if DMA is active on master/slave */
1957 - if (ap->ioaddr.bmdma_addr)
1958 + if (id->driver_data & ATA_GEN_FORCE_DMA) {
1959 + dma_enabled = 0xff;
1960 + } else if (ap->ioaddr.bmdma_addr) {
1961 + /* Bits 5 and 6 indicate if DMA is active on master/slave */
1962 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1963 + }
1964
1965 if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
1966 dma_enabled = 0xFF;
1967 @@ -126,7 +135,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
1968 const struct ata_port_info *ppi[] = { &info, NULL };
1969
1970 /* Don't use the generic entry unless instructed to do so */
1971 - if (id->driver_data == 1 && all_generic_ide == 0)
1972 + if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
1973 return -ENODEV;
1974
1975 /* Devices that need care */
1976 @@ -155,7 +164,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
1977 return rc;
1978 pcim_pin_device(dev);
1979 }
1980 - return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0);
1981 + return ata_pci_sff_init_one(dev, ppi, &generic_sht, (void *)id, 0);
1982 }
1983
1984 static struct pci_device_id ata_generic[] = {
1985 @@ -167,7 +176,15 @@ static struct pci_device_id ata_generic[] = {
1986 { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
1987 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
1988 { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
1989 - { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
1990 + { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
1991 + .driver_data = ATA_GEN_FORCE_DMA },
1992 + /*
1993 + * For some reason, MCP89 on MacBook 7,1 doesn't work with
1994 + * ahci, use ata_generic instead.
1995 + */
1996 + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
1997 + PCI_VENDOR_ID_APPLE, 0xcb89,
1998 + .driver_data = ATA_GEN_FORCE_DMA },
1999 #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
2000 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
2001 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
2002 @@ -175,7 +192,8 @@ static struct pci_device_id ata_generic[] = {
2003 { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
2004 #endif
2005 /* Must come last. If you add entries adjust this table appropriately */
2006 - { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
2007 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
2008 + .driver_data = ATA_GEN_CLASS_MATCH },
2009 { 0, },
2010 };
2011
2012 diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
2013 index 985da11..4c70b91 100644
2014 --- a/drivers/base/firmware_class.c
2015 +++ b/drivers/base/firmware_class.c
2016 @@ -130,6 +130,17 @@ static ssize_t firmware_loading_show(struct device *dev,
2017 return sprintf(buf, "%d\n", loading);
2018 }
2019
2020 +static void firmware_free_data(const struct firmware *fw)
2021 +{
2022 + int i;
2023 + vunmap(fw->data);
2024 + if (fw->pages) {
2025 + for (i = 0; i < PFN_UP(fw->size); i++)
2026 + __free_page(fw->pages[i]);
2027 + kfree(fw->pages);
2028 + }
2029 +}
2030 +
2031 /* Some architectures don't have PAGE_KERNEL_RO */
2032 #ifndef PAGE_KERNEL_RO
2033 #define PAGE_KERNEL_RO PAGE_KERNEL
2034 @@ -162,21 +173,21 @@ static ssize_t firmware_loading_store(struct device *dev,
2035 mutex_unlock(&fw_lock);
2036 break;
2037 }
2038 - vfree(fw_priv->fw->data);
2039 - fw_priv->fw->data = NULL;
2040 + firmware_free_data(fw_priv->fw);
2041 + memset(fw_priv->fw, 0, sizeof(struct firmware));
2042 + /* If the pages are not owned by 'struct firmware' */
2043 for (i = 0; i < fw_priv->nr_pages; i++)
2044 __free_page(fw_priv->pages[i]);
2045 kfree(fw_priv->pages);
2046 fw_priv->pages = NULL;
2047 fw_priv->page_array_size = 0;
2048 fw_priv->nr_pages = 0;
2049 - fw_priv->fw->size = 0;
2050 set_bit(FW_STATUS_LOADING, &fw_priv->status);
2051 mutex_unlock(&fw_lock);
2052 break;
2053 case 0:
2054 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
2055 - vfree(fw_priv->fw->data);
2056 + vunmap(fw_priv->fw->data);
2057 fw_priv->fw->data = vmap(fw_priv->pages,
2058 fw_priv->nr_pages,
2059 0, PAGE_KERNEL_RO);
2060 @@ -184,7 +195,10 @@ static ssize_t firmware_loading_store(struct device *dev,
2061 dev_err(dev, "%s: vmap() failed\n", __func__);
2062 goto err;
2063 }
2064 - /* Pages will be freed by vfree() */
2065 + /* Pages are now owned by 'struct firmware' */
2066 + fw_priv->fw->pages = fw_priv->pages;
2067 + fw_priv->pages = NULL;
2068 +
2069 fw_priv->page_array_size = 0;
2070 fw_priv->nr_pages = 0;
2071 complete(&fw_priv->completion);
2072 @@ -578,7 +592,7 @@ release_firmware(const struct firmware *fw)
2073 if (fw->data == builtin->data)
2074 goto free_fw;
2075 }
2076 - vfree(fw->data);
2077 + firmware_free_data(fw);
2078 free_fw:
2079 kfree(fw);
2080 }
2081 diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
2082 index fd50ead..93f56d6 100644
2083 --- a/drivers/char/agp/amd64-agp.c
2084 +++ b/drivers/char/agp/amd64-agp.c
2085 @@ -499,6 +499,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
2086 u8 cap_ptr;
2087 int err;
2088
2089 + /* The Highlander principle */
2090 + if (agp_bridges_found)
2091 + return -ENODEV;
2092 +
2093 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
2094 if (!cap_ptr)
2095 return -ENODEV;
2096 @@ -562,6 +566,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
2097 amd64_aperture_sizes[bridge->aperture_size_idx].size);
2098 agp_remove_bridge(bridge);
2099 agp_put_bridge(bridge);
2100 +
2101 + agp_bridges_found--;
2102 }
2103
2104 #ifdef CONFIG_PM
2105 @@ -709,6 +715,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
2106
2107 MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
2108
2109 +static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
2110 + { PCI_DEVICE_CLASS(0, 0) },
2111 + { }
2112 +};
2113 +
2114 static struct pci_driver agp_amd64_pci_driver = {
2115 .name = "agpgart-amd64",
2116 .id_table = agp_amd64_pci_table,
2117 @@ -734,7 +745,6 @@ int __init agp_amd64_init(void)
2118 return err;
2119
2120 if (agp_bridges_found == 0) {
2121 - struct pci_dev *dev;
2122 if (!agp_try_unsupported && !agp_try_unsupported_boot) {
2123 printk(KERN_INFO PFX "No supported AGP bridge found.\n");
2124 #ifdef MODULE
2125 @@ -750,17 +760,10 @@ int __init agp_amd64_init(void)
2126 return -ENODEV;
2127
2128 /* Look for any AGP bridge */
2129 - dev = NULL;
2130 - err = -ENODEV;
2131 - for_each_pci_dev(dev) {
2132 - if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
2133 - continue;
2134 - /* Only one bridge supported right now */
2135 - if (agp_amd64_probe(dev, NULL) == 0) {
2136 - err = 0;
2137 - break;
2138 - }
2139 - }
2140 + agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
2141 + err = driver_attach(&agp_amd64_pci_driver.driver);
2142 + if (err == 0 && agp_bridges_found == 0)
2143 + err = -ENODEV;
2144 }
2145 return err;
2146 }
2147 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
2148 index 9ead05d..7198491 100644
2149 --- a/drivers/char/ipmi/ipmi_si_intf.c
2150 +++ b/drivers/char/ipmi/ipmi_si_intf.c
2151 @@ -1003,7 +1003,7 @@ static int ipmi_thread(void *data)
2152 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
2153 schedule();
2154 else
2155 - schedule_timeout_interruptible(0);
2156 + schedule_timeout_interruptible(1);
2157 }
2158 return 0;
2159 }
2160 diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
2161 index 8e00b4d..792868d 100644
2162 --- a/drivers/char/tpm/tpm.h
2163 +++ b/drivers/char/tpm/tpm.h
2164 @@ -224,6 +224,7 @@ struct tpm_readpubek_params_out {
2165 u8 algorithm[4];
2166 u8 encscheme[2];
2167 u8 sigscheme[2];
2168 + __be32 paramsize;
2169 u8 parameters[12]; /*assuming RSA*/
2170 __be32 keysize;
2171 u8 modulus[256];
2172 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
2173 index 9434599..2c53754 100644
2174 --- a/drivers/char/tpm/tpm_tis.c
2175 +++ b/drivers/char/tpm/tpm_tis.c
2176 @@ -623,7 +623,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
2177
2178 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
2179 {
2180 - return tpm_pm_resume(&dev->dev);
2181 + struct tpm_chip *chip = pnp_get_drvdata(dev);
2182 + int ret;
2183 +
2184 + ret = tpm_pm_resume(&dev->dev);
2185 + if (!ret)
2186 + tpm_continue_selftest(chip);
2187 +
2188 + return ret;
2189 }
2190
2191 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
2192 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2193 index 75d293e..634757f 100644
2194 --- a/drivers/cpufreq/cpufreq.c
2195 +++ b/drivers/cpufreq/cpufreq.c
2196 @@ -1774,17 +1774,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
2197 dprintk("governor switch\n");
2198
2199 /* end old governor */
2200 - if (data->governor) {
2201 - /*
2202 - * Need to release the rwsem around governor
2203 - * stop due to lock dependency between
2204 - * cancel_delayed_work_sync and the read lock
2205 - * taken in the delayed work handler.
2206 - */
2207 - unlock_policy_rwsem_write(data->cpu);
2208 + if (data->governor)
2209 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
2210 - lock_policy_rwsem_write(data->cpu);
2211 - }
2212
2213 /* start new governor */
2214 data->governor = policy->governor;
2215 diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
2216 index 55c9c59..d3b21a5 100644
2217 --- a/drivers/edac/Kconfig
2218 +++ b/drivers/edac/Kconfig
2219 @@ -196,7 +196,7 @@ config EDAC_I5100
2220
2221 config EDAC_MPC85XX
2222 tristate "Freescale MPC83xx / MPC85xx"
2223 - depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || MPC85xx)
2224 + depends on EDAC_MM_EDAC && FSL_SOC && (PPC_83xx || PPC_85xx)
2225 help
2226 Support for error detection and correction on the Freescale
2227 MPC8349, MPC8560, MPC8540, MPC8548
2228 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
2229 index cf17dbb..ac9f798 100644
2230 --- a/drivers/edac/amd64_edac.c
2231 +++ b/drivers/edac/amd64_edac.c
2232 @@ -1958,20 +1958,20 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2233 u32 value = 0;
2234 int err_sym = 0;
2235
2236 - amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
2237 + if (boot_cpu_data.x86 == 0x10) {
2238
2239 - /* F3x180[EccSymbolSize]=1, x8 symbols */
2240 - if (boot_cpu_data.x86 == 0x10 &&
2241 - boot_cpu_data.x86_model > 7 &&
2242 - value & BIT(25)) {
2243 - err_sym = decode_syndrome(syndrome, x8_vectors,
2244 - ARRAY_SIZE(x8_vectors), 8);
2245 - return map_err_sym_to_channel(err_sym, 8);
2246 - } else {
2247 - err_sym = decode_syndrome(syndrome, x4_vectors,
2248 - ARRAY_SIZE(x4_vectors), 4);
2249 - return map_err_sym_to_channel(err_sym, 4);
2250 + amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
2251 +
2252 + /* F3x180[EccSymbolSize]=1 => x8 symbols */
2253 + if (boot_cpu_data.x86_model > 7 &&
2254 + value & BIT(25)) {
2255 + err_sym = decode_syndrome(syndrome, x8_vectors,
2256 + ARRAY_SIZE(x8_vectors), 8);
2257 + return map_err_sym_to_channel(err_sym, 8);
2258 + }
2259 }
2260 + err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
2261 + return map_err_sym_to_channel(err_sym, 4);
2262 }
2263
2264 /*
2265 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
2266 index a0b8447..6e7e47d 100644
2267 --- a/drivers/gpu/drm/i915/i915_debugfs.c
2268 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
2269 @@ -618,7 +618,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
2270 drm_i915_private_t *dev_priv = dev->dev_private;
2271 bool sr_enabled = false;
2272
2273 - if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
2274 + if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
2275 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
2276 else if (IS_I915GM(dev))
2277 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
2278 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
2279 index c3cfafc..9550230 100644
2280 --- a/drivers/gpu/drm/i915/i915_dma.c
2281 +++ b/drivers/gpu/drm/i915/i915_dma.c
2282 @@ -1488,6 +1488,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
2283 if (ret)
2284 goto destroy_ringbuffer;
2285
2286 + /* IIR "flip pending" bit means done if this bit is set */
2287 + if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
2288 + dev_priv->flip_pending_is_done = true;
2289 +
2290 intel_modeset_init(dev);
2291
2292 ret = drm_irq_install(dev);
2293 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
2294 index cc03537..f5fee1b 100644
2295 --- a/drivers/gpu/drm/i915/i915_drv.c
2296 +++ b/drivers/gpu/drm/i915/i915_drv.c
2297 @@ -341,6 +341,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
2298 }
2299 } else {
2300 DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
2301 + mutex_unlock(&dev->struct_mutex);
2302 return -ENODEV;
2303 }
2304
2305 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2306 index 6e47900..aa7a5a8 100644
2307 --- a/drivers/gpu/drm/i915/i915_drv.h
2308 +++ b/drivers/gpu/drm/i915/i915_drv.h
2309 @@ -611,6 +611,7 @@ typedef struct drm_i915_private {
2310 struct drm_crtc *plane_to_crtc_mapping[2];
2311 struct drm_crtc *pipe_to_crtc_mapping[2];
2312 wait_queue_head_t pending_flip_queue;
2313 + bool flip_pending_is_done;
2314
2315 /* Reclocking support */
2316 bool render_reclock_avail;
2317 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2318 index 691701a..37a9ea3 100644
2319 --- a/drivers/gpu/drm/i915/i915_gem.c
2320 +++ b/drivers/gpu/drm/i915/i915_gem.c
2321 @@ -2312,8 +2312,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2322 mapping = inode->i_mapping;
2323 for (i = 0; i < page_count; i++) {
2324 page = read_cache_page_gfp(mapping, i,
2325 - mapping_gfp_mask (mapping) |
2326 + GFP_HIGHUSER |
2327 __GFP_COLD |
2328 + __GFP_RECLAIMABLE |
2329 gfpmask);
2330 if (IS_ERR(page))
2331 goto err_pages;
2332 @@ -4984,6 +4985,16 @@ i915_gem_load(struct drm_device *dev)
2333 list_add(&dev_priv->mm.shrink_list, &shrink_list);
2334 spin_unlock(&shrink_list_lock);
2335
2336 + /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
2337 + if (IS_GEN3(dev)) {
2338 + u32 tmp = I915_READ(MI_ARB_STATE);
2339 + if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
2340 + /* arb state is a masked write, so set bit + bit in mask */
2341 + tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
2342 + I915_WRITE(MI_ARB_STATE, tmp);
2343 + }
2344 + }
2345 +
2346 /* Old X drivers will take 0-2 for front, back, depth buffers */
2347 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2348 dev_priv->fence_reg_start = 3;
2349 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
2350 index df6a9cd..2976ce9 100644
2351 --- a/drivers/gpu/drm/i915/i915_irq.c
2352 +++ b/drivers/gpu/drm/i915/i915_irq.c
2353 @@ -932,22 +932,30 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
2354 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
2355 }
2356
2357 - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2358 + if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2359 intel_prepare_page_flip(dev, 0);
2360 + if (dev_priv->flip_pending_is_done)
2361 + intel_finish_page_flip_plane(dev, 0);
2362 + }
2363
2364 - if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2365 + if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2366 + if (dev_priv->flip_pending_is_done)
2367 + intel_finish_page_flip_plane(dev, 1);
2368 intel_prepare_page_flip(dev, 1);
2369 + }
2370
2371 if (pipea_stats & vblank_status) {
2372 vblank++;
2373 drm_handle_vblank(dev, 0);
2374 - intel_finish_page_flip(dev, 0);
2375 + if (!dev_priv->flip_pending_is_done)
2376 + intel_finish_page_flip(dev, 0);
2377 }
2378
2379 if (pipeb_stats & vblank_status) {
2380 vblank++;
2381 drm_handle_vblank(dev, 1);
2382 - intel_finish_page_flip(dev, 1);
2383 + if (!dev_priv->flip_pending_is_done)
2384 + intel_finish_page_flip(dev, 1);
2385 }
2386
2387 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
2388 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2389 index 4cbc521..eb01ca6 100644
2390 --- a/drivers/gpu/drm/i915/i915_reg.h
2391 +++ b/drivers/gpu/drm/i915/i915_reg.h
2392 @@ -178,6 +178,7 @@
2393 #define MI_OVERLAY_OFF (0x2<<21)
2394 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
2395 #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
2396 +#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
2397 #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
2398 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
2399 #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
2400 @@ -357,6 +358,70 @@
2401 #define LM_BURST_LENGTH 0x00000700
2402 #define LM_FIFO_WATERMARK 0x0000001F
2403 #define MI_ARB_STATE 0x020e4 /* 915+ only */
2404 +#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
2405 +
2406 +/* Make render/texture TLB fetches lower priorty than associated data
2407 + * fetches. This is not turned on by default
2408 + */
2409 +#define MI_ARB_RENDER_TLB_LOW_PRIORITY (1 << 15)
2410 +
2411 +/* Isoch request wait on GTT enable (Display A/B/C streams).
2412 + * Make isoch requests stall on the TLB update. May cause
2413 + * display underruns (test mode only)
2414 + */
2415 +#define MI_ARB_ISOCH_WAIT_GTT (1 << 14)
2416 +
2417 +/* Block grant count for isoch requests when block count is
2418 + * set to a finite value.
2419 + */
2420 +#define MI_ARB_BLOCK_GRANT_MASK (3 << 12)
2421 +#define MI_ARB_BLOCK_GRANT_8 (0 << 12) /* for 3 display planes */
2422 +#define MI_ARB_BLOCK_GRANT_4 (1 << 12) /* for 2 display planes */
2423 +#define MI_ARB_BLOCK_GRANT_2 (2 << 12) /* for 1 display plane */
2424 +#define MI_ARB_BLOCK_GRANT_0 (3 << 12) /* don't use */
2425 +
2426 +/* Enable render writes to complete in C2/C3/C4 power states.
2427 + * If this isn't enabled, render writes are prevented in low
2428 + * power states. That seems bad to me.
2429 + */
2430 +#define MI_ARB_C3_LP_WRITE_ENABLE (1 << 11)
2431 +
2432 +/* This acknowledges an async flip immediately instead
2433 + * of waiting for 2TLB fetches.
2434 + */
2435 +#define MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE (1 << 10)
2436 +
2437 +/* Enables non-sequential data reads through arbiter
2438 + */
2439 +#define MI_ARB_DUAL_DATA_PHASE_DISABLE (1 << 9)
2440 +
2441 +/* Disable FSB snooping of cacheable write cycles from binner/render
2442 + * command stream
2443 + */
2444 +#define MI_ARB_CACHE_SNOOP_DISABLE (1 << 8)
2445 +
2446 +/* Arbiter time slice for non-isoch streams */
2447 +#define MI_ARB_TIME_SLICE_MASK (7 << 5)
2448 +#define MI_ARB_TIME_SLICE_1 (0 << 5)
2449 +#define MI_ARB_TIME_SLICE_2 (1 << 5)
2450 +#define MI_ARB_TIME_SLICE_4 (2 << 5)
2451 +#define MI_ARB_TIME_SLICE_6 (3 << 5)
2452 +#define MI_ARB_TIME_SLICE_8 (4 << 5)
2453 +#define MI_ARB_TIME_SLICE_10 (5 << 5)
2454 +#define MI_ARB_TIME_SLICE_14 (6 << 5)
2455 +#define MI_ARB_TIME_SLICE_16 (7 << 5)
2456 +
2457 +/* Low priority grace period page size */
2458 +#define MI_ARB_LOW_PRIORITY_GRACE_4KB (0 << 4) /* default */
2459 +#define MI_ARB_LOW_PRIORITY_GRACE_8KB (1 << 4)
2460 +
2461 +/* Disable display A/B trickle feed */
2462 +#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2)
2463 +
2464 +/* Set display plane priority */
2465 +#define MI_ARB_DISPLAY_PRIORITY_A_B (0 << 0) /* display A > display B */
2466 +#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
2467 +
2468 #define CACHE_MODE_0 0x02120 /* 915+ only */
2469 #define CM0_MASK_SHIFT 16
2470 #define CM0_IZ_OPT_DISABLE (1<<6)
2471 @@ -367,6 +432,9 @@
2472 #define CM0_RC_OP_FLUSH_DISABLE (1<<0)
2473 #define BB_ADDR 0x02140 /* 8 bytes */
2474 #define GFX_FLSH_CNTL 0x02170 /* 915+ only */
2475 +#define ECOSKPD 0x021d0
2476 +#define ECO_GATING_CX_ONLY (1<<3)
2477 +#define ECO_FLIP_DONE (1<<0)
2478
2479
2480 /*
2481 @@ -2619,6 +2687,7 @@
2482
2483 #define PCH_PP_STATUS 0xc7200
2484 #define PCH_PP_CONTROL 0xc7204
2485 +#define PANEL_UNLOCK_REGS (0xabcd << 16)
2486 #define EDP_FORCE_VDD (1 << 3)
2487 #define EDP_BLC_ENABLE (1 << 2)
2488 #define PANEL_POWER_RESET (1 << 1)
2489 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2490 index 70765cf..44c07f8 100644
2491 --- a/drivers/gpu/drm/i915/intel_display.c
2492 +++ b/drivers/gpu/drm/i915/intel_display.c
2493 @@ -880,8 +880,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
2494 intel_clock_t clock;
2495 int max_n;
2496 bool found;
2497 - /* approximately equals target * 0.00488 */
2498 - int err_most = (target >> 8) + (target >> 10);
2499 + /* approximately equals target * 0.00585 */
2500 + int err_most = (target >> 8) + (target >> 9);
2501 found = false;
2502
2503 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2504 @@ -2691,11 +2691,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2505 if (srwm < 0)
2506 srwm = 1;
2507 srwm &= 0x3f;
2508 - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2509 + if (IS_I965GM(dev))
2510 + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2511 } else {
2512 /* Turn off self refresh if both pipes are enabled */
2513 - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2514 - & ~FW_BLC_SELF_EN);
2515 + if (IS_I965GM(dev))
2516 + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2517 + & ~FW_BLC_SELF_EN);
2518 }
2519
2520 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2521 @@ -3948,7 +3950,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
2522 DRM_DEBUG_DRIVER("upclocking LVDS\n");
2523
2524 /* Unlock panel regs */
2525 - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
2526 + I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
2527 + PANEL_UNLOCK_REGS);
2528
2529 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
2530 I915_WRITE(dpll_reg, dpll);
2531 @@ -3991,7 +3994,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
2532 DRM_DEBUG_DRIVER("downclocking LVDS\n");
2533
2534 /* Unlock panel regs */
2535 - I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
2536 + I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
2537 + PANEL_UNLOCK_REGS);
2538
2539 dpll |= DISPLAY_RATE_SELECT_FPA1;
2540 I915_WRITE(dpll_reg, dpll);
2541 @@ -4137,10 +4141,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
2542 kfree(work);
2543 }
2544
2545 -void intel_finish_page_flip(struct drm_device *dev, int pipe)
2546 +static void do_intel_finish_page_flip(struct drm_device *dev,
2547 + struct drm_crtc *crtc)
2548 {
2549 drm_i915_private_t *dev_priv = dev->dev_private;
2550 - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2551 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2552 struct intel_unpin_work *work;
2553 struct drm_i915_gem_object *obj_priv;
2554 @@ -4184,6 +4188,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
2555 schedule_work(&work->work);
2556 }
2557
2558 +void intel_finish_page_flip(struct drm_device *dev, int pipe)
2559 +{
2560 + drm_i915_private_t *dev_priv = dev->dev_private;
2561 + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2562 +
2563 + do_intel_finish_page_flip(dev, crtc);
2564 +}
2565 +
2566 +void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
2567 +{
2568 + drm_i915_private_t *dev_priv = dev->dev_private;
2569 + struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
2570 +
2571 + do_intel_finish_page_flip(dev, crtc);
2572 +}
2573 +
2574 void intel_prepare_page_flip(struct drm_device *dev, int plane)
2575 {
2576 drm_i915_private_t *dev_priv = dev->dev_private;
2577 @@ -4211,17 +4231,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
2578 struct drm_gem_object *obj;
2579 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2580 struct intel_unpin_work *work;
2581 - unsigned long flags;
2582 + unsigned long flags, offset;
2583 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
2584 int ret, pipesrc;
2585 + u32 flip_mask;
2586 RING_LOCALS;
2587
2588 work = kzalloc(sizeof *work, GFP_KERNEL);
2589 if (work == NULL)
2590 return -ENOMEM;
2591
2592 - mutex_lock(&dev->struct_mutex);
2593 -
2594 work->event = event;
2595 work->dev = crtc->dev;
2596 intel_fb = to_intel_framebuffer(crtc->fb);
2597 @@ -4231,10 +4250,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
2598 /* We borrow the event spin lock for protecting unpin_work */
2599 spin_lock_irqsave(&dev->event_lock, flags);
2600 if (intel_crtc->unpin_work) {
2601 - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
2602 spin_unlock_irqrestore(&dev->event_lock, flags);
2603 kfree(work);
2604 - mutex_unlock(&dev->struct_mutex);
2605 +
2606 + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
2607 return -EBUSY;
2608 }
2609 intel_crtc->unpin_work = work;
2610 @@ -4243,13 +4262,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
2611 intel_fb = to_intel_framebuffer(fb);
2612 obj = intel_fb->obj;
2613
2614 + mutex_lock(&dev->struct_mutex);
2615 ret = intel_pin_and_fence_fb_obj(dev, obj);
2616 if (ret != 0) {
2617 - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
2618 - to_intel_bo(obj));
2619 - kfree(work);
2620 - intel_crtc->unpin_work = NULL;
2621 mutex_unlock(&dev->struct_mutex);
2622 +
2623 + spin_lock_irqsave(&dev->event_lock, flags);
2624 + intel_crtc->unpin_work = NULL;
2625 + spin_unlock_irqrestore(&dev->event_lock, flags);
2626 +
2627 + kfree(work);
2628 +
2629 + DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
2630 + to_intel_bo(obj));
2631 return ret;
2632 }
2633
2634 @@ -4264,16 +4289,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
2635 atomic_inc(&obj_priv->pending_flip);
2636 work->pending_flip_obj = obj;
2637
2638 + if (intel_crtc->plane)
2639 + flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2640 + else
2641 + flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2642 +
2643 + /* Wait for any previous flip to finish */
2644 + if (IS_GEN3(dev))
2645 + while (I915_READ(ISR) & flip_mask)
2646 + ;
2647 +
2648 + /* Offset into the new buffer for cases of shared fbs between CRTCs */
2649 + offset = obj_priv->gtt_offset;
2650 + offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
2651 +
2652 BEGIN_LP_RING(4);
2653 - OUT_RING(MI_DISPLAY_FLIP |
2654 - MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
2655 - OUT_RING(fb->pitch);
2656 if (IS_I965G(dev)) {
2657 - OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
2658 + OUT_RING(MI_DISPLAY_FLIP |
2659 + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
2660 + OUT_RING(fb->pitch);
2661 + OUT_RING(offset | obj_priv->tiling_mode);
2662 pipesrc = I915_READ(pipesrc_reg);
2663 OUT_RING(pipesrc & 0x0fff0fff);
2664 } else {
2665 - OUT_RING(obj_priv->gtt_offset);
2666 + OUT_RING(MI_DISPLAY_FLIP_I915 |
2667 + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
2668 + OUT_RING(fb->pitch);
2669 + OUT_RING(offset);
2670 OUT_RING(MI_NOOP);
2671 }
2672 ADVANCE_LP_RING();
2673 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2674 index 7c28ff1..a9ec41d 100644
2675 --- a/drivers/gpu/drm/i915/intel_dp.c
2676 +++ b/drivers/gpu/drm/i915/intel_dp.c
2677 @@ -677,6 +677,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
2678 }
2679 }
2680
2681 +static void ironlake_edp_panel_on (struct drm_device *dev)
2682 +{
2683 + struct drm_i915_private *dev_priv = dev->dev_private;
2684 + unsigned long timeout = jiffies + msecs_to_jiffies(5000);
2685 + u32 pp, pp_status;
2686 +
2687 + pp_status = I915_READ(PCH_PP_STATUS);
2688 + if (pp_status & PP_ON)
2689 + return;
2690 +
2691 + pp = I915_READ(PCH_PP_CONTROL);
2692 + pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
2693 + I915_WRITE(PCH_PP_CONTROL, pp);
2694 + do {
2695 + pp_status = I915_READ(PCH_PP_STATUS);
2696 + } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
2697 +
2698 + if (time_after(jiffies, timeout))
2699 + DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
2700 +
2701 + pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
2702 + I915_WRITE(PCH_PP_CONTROL, pp);
2703 +}
2704 +
2705 +static void ironlake_edp_panel_off (struct drm_device *dev)
2706 +{
2707 + struct drm_i915_private *dev_priv = dev->dev_private;
2708 + unsigned long timeout = jiffies + msecs_to_jiffies(5000);
2709 + u32 pp, pp_status;
2710 +
2711 + pp = I915_READ(PCH_PP_CONTROL);
2712 + pp &= ~POWER_TARGET_ON;
2713 + I915_WRITE(PCH_PP_CONTROL, pp);
2714 + do {
2715 + pp_status = I915_READ(PCH_PP_STATUS);
2716 + } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
2717 +
2718 + if (time_after(jiffies, timeout))
2719 + DRM_DEBUG_KMS("panel off wait timed out\n");
2720 +
2721 + /* Make sure VDD is enabled so DP AUX will work */
2722 + pp |= EDP_FORCE_VDD;
2723 + I915_WRITE(PCH_PP_CONTROL, pp);
2724 +}
2725 +
2726 static void ironlake_edp_backlight_on (struct drm_device *dev)
2727 {
2728 struct drm_i915_private *dev_priv = dev->dev_private;
2729 @@ -711,14 +756,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
2730 if (mode != DRM_MODE_DPMS_ON) {
2731 if (dp_reg & DP_PORT_EN) {
2732 intel_dp_link_down(intel_encoder, dp_priv->DP);
2733 - if (IS_eDP(intel_encoder))
2734 + if (IS_eDP(intel_encoder)) {
2735 ironlake_edp_backlight_off(dev);
2736 + ironlake_edp_panel_off(dev);
2737 + }
2738 }
2739 } else {
2740 if (!(dp_reg & DP_PORT_EN)) {
2741 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
2742 - if (IS_eDP(intel_encoder))
2743 + if (IS_eDP(intel_encoder)) {
2744 + ironlake_edp_panel_on(dev);
2745 ironlake_edp_backlight_on(dev);
2746 + }
2747 }
2748 }
2749 dp_priv->dpms_mode = mode;
2750 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
2751 index e302537..7a1ad65 100644
2752 --- a/drivers/gpu/drm/i915/intel_drv.h
2753 +++ b/drivers/gpu/drm/i915/intel_drv.h
2754 @@ -219,6 +219,7 @@ extern int intel_framebuffer_create(struct drm_device *dev,
2755
2756 extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
2757 extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
2758 +extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
2759
2760 extern void intel_setup_overlay(struct drm_device *dev);
2761 extern void intel_cleanup_overlay(struct drm_device *dev);
2762 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
2763 index cf60c0b..f6546ad 100644
2764 --- a/drivers/gpu/drm/radeon/r100.c
2765 +++ b/drivers/gpu/drm/radeon/r100.c
2766 @@ -1392,6 +1392,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
2767 case RADEON_TXFORMAT_RGB332:
2768 case RADEON_TXFORMAT_Y8:
2769 track->textures[i].cpp = 1;
2770 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2771 break;
2772 case RADEON_TXFORMAT_AI88:
2773 case RADEON_TXFORMAT_ARGB1555:
2774 @@ -1403,12 +1404,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
2775 case RADEON_TXFORMAT_LDUDV655:
2776 case RADEON_TXFORMAT_DUDV88:
2777 track->textures[i].cpp = 2;
2778 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2779 break;
2780 case RADEON_TXFORMAT_ARGB8888:
2781 case RADEON_TXFORMAT_RGBA8888:
2782 case RADEON_TXFORMAT_SHADOW32:
2783 case RADEON_TXFORMAT_LDUDUV8888:
2784 track->textures[i].cpp = 4;
2785 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2786 break;
2787 case RADEON_TXFORMAT_DXT1:
2788 track->textures[i].cpp = 1;
2789 @@ -2829,33 +2832,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2790 DRM_ERROR("compress format %d\n", t->compress_format);
2791 }
2792
2793 -static int r100_cs_track_cube(struct radeon_device *rdev,
2794 - struct r100_cs_track *track, unsigned idx)
2795 -{
2796 - unsigned face, w, h;
2797 - struct radeon_bo *cube_robj;
2798 - unsigned long size;
2799 -
2800 - for (face = 0; face < 5; face++) {
2801 - cube_robj = track->textures[idx].cube_info[face].robj;
2802 - w = track->textures[idx].cube_info[face].width;
2803 - h = track->textures[idx].cube_info[face].height;
2804 -
2805 - size = w * h;
2806 - size *= track->textures[idx].cpp;
2807 -
2808 - size += track->textures[idx].cube_info[face].offset;
2809 -
2810 - if (size > radeon_bo_size(cube_robj)) {
2811 - DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2812 - size, radeon_bo_size(cube_robj));
2813 - r100_cs_track_texture_print(&track->textures[idx]);
2814 - return -1;
2815 - }
2816 - }
2817 - return 0;
2818 -}
2819 -
2820 static int r100_track_compress_size(int compress_format, int w, int h)
2821 {
2822 int block_width, block_height, block_bytes;
2823 @@ -2886,6 +2862,37 @@ static int r100_track_compress_size(int compress_format, int w, int h)
2824 return sz;
2825 }
2826
2827 +static int r100_cs_track_cube(struct radeon_device *rdev,
2828 + struct r100_cs_track *track, unsigned idx)
2829 +{
2830 + unsigned face, w, h;
2831 + struct radeon_bo *cube_robj;
2832 + unsigned long size;
2833 + unsigned compress_format = track->textures[idx].compress_format;
2834 +
2835 + for (face = 0; face < 5; face++) {
2836 + cube_robj = track->textures[idx].cube_info[face].robj;
2837 + w = track->textures[idx].cube_info[face].width;
2838 + h = track->textures[idx].cube_info[face].height;
2839 +
2840 + if (compress_format) {
2841 + size = r100_track_compress_size(compress_format, w, h);
2842 + } else
2843 + size = w * h;
2844 + size *= track->textures[idx].cpp;
2845 +
2846 + size += track->textures[idx].cube_info[face].offset;
2847 +
2848 + if (size > radeon_bo_size(cube_robj)) {
2849 + DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2850 + size, radeon_bo_size(cube_robj));
2851 + r100_cs_track_texture_print(&track->textures[idx]);
2852 + return -1;
2853 + }
2854 + }
2855 + return 0;
2856 +}
2857 +
2858 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2859 struct r100_cs_track *track)
2860 {
2861 diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
2862 index 85617c3..0266d72 100644
2863 --- a/drivers/gpu/drm/radeon/r200.c
2864 +++ b/drivers/gpu/drm/radeon/r200.c
2865 @@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
2866 /* 2D, 3D, CUBE */
2867 switch (tmp) {
2868 case 0:
2869 + case 3:
2870 + case 4:
2871 case 5:
2872 case 6:
2873 case 7:
2874 @@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
2875 case R200_TXFORMAT_RGB332:
2876 case R200_TXFORMAT_Y8:
2877 track->textures[i].cpp = 1;
2878 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2879 break;
2880 case R200_TXFORMAT_AI88:
2881 case R200_TXFORMAT_ARGB1555:
2882 @@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
2883 case R200_TXFORMAT_DVDU88:
2884 case R200_TXFORMAT_AVYU4444:
2885 track->textures[i].cpp = 2;
2886 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2887 break;
2888 case R200_TXFORMAT_ARGB8888:
2889 case R200_TXFORMAT_RGBA8888:
2890 @@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
2891 case R200_TXFORMAT_BGR111110:
2892 case R200_TXFORMAT_LDVDU8888:
2893 track->textures[i].cpp = 4;
2894 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2895 break;
2896 case R200_TXFORMAT_DXT1:
2897 track->textures[i].cpp = 1;
2898 diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
2899 index a5ff807..48c537c1 100644
2900 --- a/drivers/gpu/drm/radeon/r300.c
2901 +++ b/drivers/gpu/drm/radeon/r300.c
2902 @@ -881,6 +881,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
2903 case R300_TX_FORMAT_Y4X4:
2904 case R300_TX_FORMAT_Z3Y3X2:
2905 track->textures[i].cpp = 1;
2906 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2907 break;
2908 case R300_TX_FORMAT_X16:
2909 case R300_TX_FORMAT_Y8X8:
2910 @@ -892,6 +893,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
2911 case R300_TX_FORMAT_B8G8_B8G8:
2912 case R300_TX_FORMAT_G8R8_G8B8:
2913 track->textures[i].cpp = 2;
2914 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2915 break;
2916 case R300_TX_FORMAT_Y16X16:
2917 case R300_TX_FORMAT_Z11Y11X10:
2918 @@ -902,14 +904,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
2919 case R300_TX_FORMAT_FL_I32:
2920 case 0x1e:
2921 track->textures[i].cpp = 4;
2922 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2923 break;
2924 case R300_TX_FORMAT_W16Z16Y16X16:
2925 case R300_TX_FORMAT_FL_R16G16B16A16:
2926 case R300_TX_FORMAT_FL_I32A32:
2927 track->textures[i].cpp = 8;
2928 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2929 break;
2930 case R300_TX_FORMAT_FL_R32G32B32A32:
2931 track->textures[i].cpp = 16;
2932 + track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2933 break;
2934 case R300_TX_FORMAT_DXT1:
2935 track->textures[i].cpp = 1;
2936 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
2937 index 1a4fa9b..1bac9ee 100644
2938 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
2939 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
2940 @@ -280,6 +280,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
2941 }
2942 }
2943
2944 + /* ASUS HD 3600 board lists the DVI port as HDMI */
2945 + if ((dev->pdev->device == 0x9598) &&
2946 + (dev->pdev->subsystem_vendor == 0x1043) &&
2947 + (dev->pdev->subsystem_device == 0x01e4)) {
2948 + if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
2949 + *connector_type = DRM_MODE_CONNECTOR_DVII;
2950 + }
2951 + }
2952 +
2953 /* ASUS HD 3450 board lists the DVI port as HDMI */
2954 if ((dev->pdev->device == 0x95C5) &&
2955 (dev->pdev->subsystem_vendor == 0x1043) &&
2956 @@ -1021,8 +1030,15 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
2957 data_offset);
2958 switch (crev) {
2959 case 1:
2960 - if (igp_info->info.ucMemoryType & 0xf0)
2961 - return true;
2962 + /* AMD IGPS */
2963 + if ((rdev->family == CHIP_RS690) ||
2964 + (rdev->family == CHIP_RS740)) {
2965 + if (igp_info->info.ulBootUpMemoryClock)
2966 + return true;
2967 + } else {
2968 + if (igp_info->info.ucMemoryType & 0xf0)
2969 + return true;
2970 + }
2971 break;
2972 case 2:
2973 if (igp_info->info_2.ucMemoryType & 0x0f)
2974 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2975 index 4559a53..5d9b4e1 100644
2976 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2977 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2978 @@ -771,30 +771,27 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
2979 } else
2980 ret = connector_status_connected;
2981
2982 - /* multiple connectors on the same encoder with the same ddc line
2983 - * This tends to be HDMI and DVI on the same encoder with the
2984 - * same ddc line. If the edid says HDMI, consider the HDMI port
2985 - * connected and the DVI port disconnected. If the edid doesn't
2986 - * say HDMI, vice versa.
2987 + /* This gets complicated. We have boards with VGA + HDMI with a
2988 + * shared DDC line and we have boards with DVI-D + HDMI with a shared
2989 + * DDC line. The latter is more complex because with DVI<->HDMI adapters
2990 + * you don't really know what's connected to which port as both are digital.
2991 */
2992 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
2993 struct drm_device *dev = connector->dev;
2994 + struct radeon_device *rdev = dev->dev_private;
2995 struct drm_connector *list_connector;
2996 struct radeon_connector *list_radeon_connector;
2997 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
2998 if (connector == list_connector)
2999 continue;
3000 list_radeon_connector = to_radeon_connector(list_connector);
3001 - if (radeon_connector->devices == list_radeon_connector->devices) {
3002 - if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
3003 - if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
3004 - kfree(radeon_connector->edid);
3005 - radeon_connector->edid = NULL;
3006 - ret = connector_status_disconnected;
3007 - }
3008 - } else {
3009 - if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
3010 - (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
3011 + if (list_radeon_connector->shared_ddc &&
3012 + (list_radeon_connector->ddc_bus->rec.i2c_id ==
3013 + radeon_connector->ddc_bus->rec.i2c_id)) {
3014 + /* cases where both connectors are digital */
3015 + if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
3016 + /* hpd is our only option in this case */
3017 + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
3018 kfree(radeon_connector->edid);
3019 radeon_connector->edid = NULL;
3020 ret = connector_status_disconnected;
3021 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
3022 index c5ddaf5..de052de 100644
3023 --- a/drivers/gpu/drm/radeon/radeon_encoders.c
3024 +++ b/drivers/gpu/drm/radeon/radeon_encoders.c
3025 @@ -1075,6 +1075,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
3026 if (is_dig) {
3027 switch (mode) {
3028 case DRM_MODE_DPMS_ON:
3029 + if (!ASIC_IS_DCE4(rdev))
3030 + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
3031 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
3032 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
3033
3034 @@ -1082,8 +1084,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
3035 if (ASIC_IS_DCE4(rdev))
3036 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
3037 }
3038 - if (!ASIC_IS_DCE4(rdev))
3039 - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
3040 break;
3041 case DRM_MODE_DPMS_STANDBY:
3042 case DRM_MODE_DPMS_SUSPEND:
3043 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
3044 index 0274abe..d844392 100644
3045 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
3046 +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
3047 @@ -108,6 +108,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
3048 udelay(panel_pwr_delay * 1000);
3049 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
3050 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
3051 + udelay(panel_pwr_delay * 1000);
3052 break;
3053 }
3054
3055 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
3056 index f2ed27c..0320403 100644
3057 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
3058 +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
3059 @@ -642,8 +642,8 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
3060 }
3061 flicker_removal = (tmp + 500) / 1000;
3062
3063 - if (flicker_removal < 2)
3064 - flicker_removal = 2;
3065 + if (flicker_removal < 3)
3066 + flicker_removal = 3;
3067 for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
3068 if (flicker_removal == SLOPE_limit[i])
3069 break;
3070 diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
3071 index 7b85b69..5242684 100644
3072 --- a/drivers/hid/usbhid/hid-core.c
3073 +++ b/drivers/hid/usbhid/hid-core.c
3074 @@ -1019,12 +1019,15 @@ static int usbhid_start(struct hid_device *hid)
3075 /* Some keyboards don't work until their LEDs have been set.
3076 * Since BIOSes do set the LEDs, it must be safe for any device
3077 * that supports the keyboard boot protocol.
3078 + * In addition, enable remote wakeup by default for all keyboard
3079 + * devices supporting the boot protocol.
3080 */
3081 if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
3082 interface->desc.bInterfaceProtocol ==
3083 - USB_INTERFACE_PROTOCOL_KEYBOARD)
3084 + USB_INTERFACE_PROTOCOL_KEYBOARD) {
3085 usbhid_set_leds(hid);
3086 -
3087 + device_set_wakeup_enable(&dev->dev, 1);
3088 + }
3089 return 0;
3090
3091 fail:
3092 diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
3093 index f843443..b2fd0b0 100644
3094 --- a/drivers/hid/usbhid/usbkbd.c
3095 +++ b/drivers/hid/usbhid/usbkbd.c
3096 @@ -313,6 +313,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
3097 goto fail2;
3098
3099 usb_set_intfdata(iface, kbd);
3100 + device_set_wakeup_enable(&dev->dev, 1);
3101 return 0;
3102
3103 fail2:
3104 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
3105 index e9b7fbc..48f4b7f 100644
3106 --- a/drivers/hwmon/coretemp.c
3107 +++ b/drivers/hwmon/coretemp.c
3108 @@ -53,6 +53,7 @@ struct coretemp_data {
3109 struct mutex update_lock;
3110 const char *name;
3111 u32 id;
3112 + u16 core_id;
3113 char valid; /* zero until following fields are valid */
3114 unsigned long last_updated; /* in jiffies */
3115 int temp;
3116 @@ -75,7 +76,7 @@ static ssize_t show_name(struct device *dev, struct device_attribute
3117 if (attr->index == SHOW_NAME)
3118 ret = sprintf(buf, "%s\n", data->name);
3119 else /* show label */
3120 - ret = sprintf(buf, "Core %d\n", data->id);
3121 + ret = sprintf(buf, "Core %d\n", data->core_id);
3122 return ret;
3123 }
3124
3125 @@ -255,6 +256,9 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
3126 }
3127
3128 data->id = pdev->id;
3129 +#ifdef CONFIG_SMP
3130 + data->core_id = c->cpu_core_id;
3131 +#endif
3132 data->name = "coretemp";
3133 mutex_init(&data->update_lock);
3134
3135 @@ -352,6 +356,10 @@ struct pdev_entry {
3136 struct list_head list;
3137 struct platform_device *pdev;
3138 unsigned int cpu;
3139 +#ifdef CONFIG_SMP
3140 + u16 phys_proc_id;
3141 + u16 cpu_core_id;
3142 +#endif
3143 };
3144
3145 static LIST_HEAD(pdev_list);
3146 @@ -362,6 +370,22 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
3147 int err;
3148 struct platform_device *pdev;
3149 struct pdev_entry *pdev_entry;
3150 +#ifdef CONFIG_SMP
3151 + struct cpuinfo_x86 *c = &cpu_data(cpu);
3152 +#endif
3153 +
3154 + mutex_lock(&pdev_list_mutex);
3155 +
3156 +#ifdef CONFIG_SMP
3157 + /* Skip second HT entry of each core */
3158 + list_for_each_entry(pdev_entry, &pdev_list, list) {
3159 + if (c->phys_proc_id == pdev_entry->phys_proc_id &&
3160 + c->cpu_core_id == pdev_entry->cpu_core_id) {
3161 + err = 0; /* Not an error */
3162 + goto exit;
3163 + }
3164 + }
3165 +#endif
3166
3167 pdev = platform_device_alloc(DRVNAME, cpu);
3168 if (!pdev) {
3169 @@ -385,7 +409,10 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
3170
3171 pdev_entry->pdev = pdev;
3172 pdev_entry->cpu = cpu;
3173 - mutex_lock(&pdev_list_mutex);
3174 +#ifdef CONFIG_SMP
3175 + pdev_entry->phys_proc_id = c->phys_proc_id;
3176 + pdev_entry->cpu_core_id = c->cpu_core_id;
3177 +#endif
3178 list_add_tail(&pdev_entry->list, &pdev_list);
3179 mutex_unlock(&pdev_list_mutex);
3180
3181 @@ -396,6 +423,7 @@ exit_device_free:
3182 exit_device_put:
3183 platform_device_put(pdev);
3184 exit:
3185 + mutex_unlock(&pdev_list_mutex);
3186 return err;
3187 }
3188
3189 diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
3190 index e880e2c..9379834 100644
3191 --- a/drivers/hwmon/i5k_amb.c
3192 +++ b/drivers/hwmon/i5k_amb.c
3193 @@ -289,6 +289,7 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
3194 iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
3195 iattr->s_attr.dev_attr.show = show_label;
3196 iattr->s_attr.index = k;
3197 + sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
3198 res = device_create_file(&pdev->dev,
3199 &iattr->s_attr.dev_attr);
3200 if (res)
3201 @@ -303,6 +304,7 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
3202 iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
3203 iattr->s_attr.dev_attr.show = show_amb_temp;
3204 iattr->s_attr.index = k;
3205 + sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
3206 res = device_create_file(&pdev->dev,
3207 &iattr->s_attr.dev_attr);
3208 if (res)
3209 @@ -318,6 +320,7 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
3210 iattr->s_attr.dev_attr.show = show_amb_min;
3211 iattr->s_attr.dev_attr.store = store_amb_min;
3212 iattr->s_attr.index = k;
3213 + sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
3214 res = device_create_file(&pdev->dev,
3215 &iattr->s_attr.dev_attr);
3216 if (res)
3217 @@ -333,6 +336,7 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
3218 iattr->s_attr.dev_attr.show = show_amb_mid;
3219 iattr->s_attr.dev_attr.store = store_amb_mid;
3220 iattr->s_attr.index = k;
3221 + sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
3222 res = device_create_file(&pdev->dev,
3223 &iattr->s_attr.dev_attr);
3224 if (res)
3225 @@ -348,6 +352,7 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
3226 iattr->s_attr.dev_attr.show = show_amb_max;
3227 iattr->s_attr.dev_attr.store = store_amb_max;
3228 iattr->s_attr.index = k;
3229 + sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
3230 res = device_create_file(&pdev->dev,
3231 &iattr->s_attr.dev_attr);
3232 if (res)
3233 @@ -362,6 +367,7 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
3234 iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
3235 iattr->s_attr.dev_attr.show = show_amb_alarm;
3236 iattr->s_attr.index = k;
3237 + sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
3238 res = device_create_file(&pdev->dev,
3239 &iattr->s_attr.dev_attr);
3240 if (res)
3241 diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
3242 index 5be09c0..25763d2 100644
3243 --- a/drivers/hwmon/it87.c
3244 +++ b/drivers/hwmon/it87.c
3245 @@ -80,6 +80,13 @@ superio_inb(int reg)
3246 return inb(VAL);
3247 }
3248
3249 +static inline void
3250 +superio_outb(int reg, int val)
3251 +{
3252 + outb(reg, REG);
3253 + outb(val, VAL);
3254 +}
3255 +
3256 static int superio_inw(int reg)
3257 {
3258 int val;
3259 @@ -1517,6 +1524,21 @@ static int __init it87_find(unsigned short *address,
3260 sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
3261
3262 reg = superio_inb(IT87_SIO_PINX2_REG);
3263 + /*
3264 + * The IT8720F has no VIN7 pin, so VCCH should always be
3265 + * routed internally to VIN7 with an internal divider.
3266 + * Curiously, there still is a configuration bit to control
3267 + * this, which means it can be set incorrectly. And even
3268 + * more curiously, many boards out there are improperly
3269 + * configured, even though the IT8720F datasheet claims
3270 + * that the internal routing of VCCH to VIN7 is the default
3271 + * setting. So we force the internal routing in this case.
3272 + */
3273 + if (sio_data->type == it8720 && !(reg & (1 << 1))) {
3274 + reg |= (1 << 1);
3275 + superio_outb(IT87_SIO_PINX2_REG, reg);
3276 + pr_notice("it87: Routing internal VCCH to in7\n");
3277 + }
3278 if (reg & (1 << 0))
3279 pr_info("it87: in3 is VCC (+5V)\n");
3280 if (reg & (1 << 1))
3281 diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
3282 index 099a213..da5a240 100644
3283 --- a/drivers/hwmon/k10temp.c
3284 +++ b/drivers/hwmon/k10temp.c
3285 @@ -112,11 +112,21 @@ static bool __devinit has_erratum_319(struct pci_dev *pdev)
3286 if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
3287 return false;
3288
3289 - /* Differentiate between AM2+ (bad) and AM3 (good) */
3290 + /* DDR3 memory implies socket AM3, which is good */
3291 pci_bus_read_config_dword(pdev->bus,
3292 PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
3293 REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
3294 - return !(reg_dram_cfg & DDR3_MODE);
3295 + if (reg_dram_cfg & DDR3_MODE)
3296 + return false;
3297 +
3298 + /*
3299 + * Unfortunately it is possible to run a socket AM3 CPU with DDR2
3300 + * memory. We blacklist all the cores which do exist in socket AM2+
3301 + * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
3302 + * and AM3 formats, but that's the best we can do.
3303 + */
3304 + return boot_cpu_data.x86_model < 4 ||
3305 + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
3306 }
3307
3308 static int __devinit k10temp_probe(struct pci_dev *pdev,
3309 diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
3310 index 0ceb6d6..8bdf80d 100644
3311 --- a/drivers/hwmon/k8temp.c
3312 +++ b/drivers/hwmon/k8temp.c
3313 @@ -120,7 +120,7 @@ static ssize_t show_temp(struct device *dev,
3314 int temp;
3315 struct k8temp_data *data = k8temp_update_device(dev);
3316
3317 - if (data->swap_core_select)
3318 + if (data->swap_core_select && (data->sensorsp & SEL_CORE))
3319 core = core ? 0 : 1;
3320
3321 temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
3322 @@ -180,11 +180,13 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
3323 }
3324
3325 if ((model >= 0x69) &&
3326 - !(model == 0xc1 || model == 0x6c || model == 0x7c)) {
3327 + !(model == 0xc1 || model == 0x6c || model == 0x7c ||
3328 + model == 0x6b || model == 0x6f || model == 0x7f)) {
3329 /*
3330 - * RevG desktop CPUs (i.e. no socket S1G1 parts)
3331 - * need additional offset, otherwise reported
3332 - * temperature is below ambient temperature
3333 + * RevG desktop CPUs (i.e. no socket S1G1 or
3334 + * ASB1 parts) need additional offset,
3335 + * otherwise reported temperature is below
3336 + * ambient temperature
3337 */
3338 data->temp_offset = 21000;
3339 }
3340 diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
3341 index d2b8b27..cb10201 100644
3342 --- a/drivers/ide/cmd640.c
3343 +++ b/drivers/ide/cmd640.c
3344 @@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive)
3345
3346 static int cmd640_test_irq(ide_hwif_t *hwif)
3347 {
3348 - struct pci_dev *dev = to_pci_dev(hwif->dev);
3349 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
3350 - u8 irq_stat, irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
3351 + u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR :
3352 CFR_IDE01INTR;
3353 -
3354 - pci_read_config_byte(dev, irq_reg, &irq_stat);
3355 + u8 irq_stat = get_cmd640_reg(irq_reg);
3356
3357 return (irq_stat & irq_mask) ? 1 : 0;
3358 }
3359 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
3360 index df3eb8c..b4b2257 100644
3361 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
3362 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
3363 @@ -1163,7 +1163,7 @@ static ssize_t create_child(struct device *dev,
3364
3365 return ret ? ret : count;
3366 }
3367 -static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
3368 +static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
3369
3370 static ssize_t delete_child(struct device *dev,
3371 struct device_attribute *attr,
3372 @@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct device *dev,
3373 return ret ? ret : count;
3374
3375 }
3376 -static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
3377 +static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
3378
3379 int ipoib_add_pkey_attr(struct net_device *dev)
3380 {
3381 diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
3382 index fbd62ab..0ffaf2c 100644
3383 --- a/drivers/input/joystick/gamecon.c
3384 +++ b/drivers/input/joystick/gamecon.c
3385 @@ -89,7 +89,6 @@ struct gc_pad {
3386 struct gc {
3387 struct pardevice *pd;
3388 struct gc_pad pads[GC_MAX_DEVICES];
3389 - struct input_dev *dev[GC_MAX_DEVICES];
3390 struct timer_list timer;
3391 int pad_count[GC_MAX];
3392 int used;
3393 @@ -387,7 +386,7 @@ static void gc_nes_process_packet(struct gc *gc)
3394 for (i = 0; i < GC_MAX_DEVICES; i++) {
3395
3396 pad = &gc->pads[i];
3397 - dev = gc->dev[i];
3398 + dev = pad->dev;
3399 s = gc_status_bit[i];
3400
3401 switch (pad->type) {
3402 @@ -579,7 +578,7 @@ static void gc_psx_command(struct gc *gc, int b, unsigned char *data)
3403 read = parport_read_status(port) ^ 0x80;
3404
3405 for (j = 0; j < GC_MAX_DEVICES; j++) {
3406 - struct gc_pad *pad = &gc->pads[i];
3407 + struct gc_pad *pad = &gc->pads[j];
3408
3409 if (pad->type == GC_PSX || pad->type == GC_DDR)
3410 data[j] |= (read & gc_status_bit[j]) ? (1 << i) : 0;
3411 diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
3412 index 7aa59e0..fb16b5e 100644
3413 --- a/drivers/input/keyboard/twl4030_keypad.c
3414 +++ b/drivers/input/keyboard/twl4030_keypad.c
3415 @@ -51,8 +51,12 @@
3416 */
3417 #define TWL4030_MAX_ROWS 8 /* TWL4030 hard limit */
3418 #define TWL4030_MAX_COLS 8
3419 -#define TWL4030_ROW_SHIFT 3
3420 -#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS * TWL4030_MAX_COLS)
3421 +/*
3422 + * Note that we add space for an extra column so that we can handle
3423 + * row lines connected to the gnd (see twl4030_col_xlate()).
3424 + */
3425 +#define TWL4030_ROW_SHIFT 4
3426 +#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
3427
3428 struct twl4030_keypad {
3429 unsigned short keymap[TWL4030_KEYMAP_SIZE];
3430 @@ -182,7 +186,7 @@ static int twl4030_read_kp_matrix_state(struct twl4030_keypad *kp, u16 *state)
3431 return ret;
3432 }
3433
3434 -static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
3435 +static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
3436 {
3437 int i;
3438 u16 check = 0;
3439 @@ -191,12 +195,12 @@ static int twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
3440 u16 col = key_state[i];
3441
3442 if ((col & check) && hweight16(col) > 1)
3443 - return 1;
3444 + return true;
3445
3446 check |= col;
3447 }
3448
3449 - return 0;
3450 + return false;
3451 }
3452
3453 static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
3454 @@ -225,7 +229,8 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
3455 if (!changed)
3456 continue;
3457
3458 - for (col = 0; col < kp->n_cols; col++) {
3459 + /* Extra column handles "all gnd" rows */
3460 + for (col = 0; col < kp->n_cols + 1; col++) {
3461 int code;
3462
3463 if (!(changed & (1 << col)))
3464 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3465 index ead0494..c452504 100644
3466 --- a/drivers/input/serio/i8042-x86ia64io.h
3467 +++ b/drivers/input/serio/i8042-x86ia64io.h
3468 @@ -166,6 +166,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
3469 },
3470 },
3471 {
3472 + /* Gigabyte Spring Peak - defines wrong chassis type */
3473 + .matches = {
3474 + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
3475 + DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
3476 + },
3477 + },
3478 + {
3479 .matches = {
3480 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3481 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
3482 diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
3483 index bd00dce..bde3c88 100644
3484 --- a/drivers/isdn/capi/kcapi.c
3485 +++ b/drivers/isdn/capi/kcapi.c
3486 @@ -1147,6 +1147,12 @@ load_unlock_out:
3487 if (ctr->state == CAPI_CTR_DETECTED)
3488 goto reset_unlock_out;
3489
3490 + if (ctr->reset_ctr == NULL) {
3491 + printk(KERN_DEBUG "kcapi: reset: no reset function\n");
3492 + retval = -ESRCH;
3493 + goto reset_unlock_out;
3494 + }
3495 +
3496 ctr->reset_ctr(ctr);
3497
3498 retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
3499 diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
3500 index c5016bd..c3b1dc3 100644
3501 --- a/drivers/isdn/gigaset/asyncdata.c
3502 +++ b/drivers/isdn/gigaset/asyncdata.c
3503 @@ -126,26 +126,6 @@ static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
3504 return numbytes;
3505 }
3506
3507 -/* set up next receive skb for data mode
3508 - */
3509 -static void new_rcv_skb(struct bc_state *bcs)
3510 -{
3511 - struct cardstate *cs = bcs->cs;
3512 - unsigned short hw_hdr_len = cs->hw_hdr_len;
3513 -
3514 - if (bcs->ignore) {
3515 - bcs->skb = NULL;
3516 - return;
3517 - }
3518 -
3519 - bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
3520 - if (bcs->skb == NULL) {
3521 - dev_warn(cs->dev, "could not allocate new skb\n");
3522 - return;
3523 - }
3524 - skb_reserve(bcs->skb, hw_hdr_len);
3525 -}
3526 -
3527 /* process a block of received bytes in HDLC data mode
3528 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
3529 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
3530 @@ -159,8 +139,8 @@ static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
3531 struct cardstate *cs = inbuf->cs;
3532 struct bc_state *bcs = cs->bcs;
3533 int inputstate = bcs->inputstate;
3534 - __u16 fcs = bcs->fcs;
3535 - struct sk_buff *skb = bcs->skb;
3536 + __u16 fcs = bcs->rx_fcs;
3537 + struct sk_buff *skb = bcs->rx_skb;
3538 unsigned char *src = inbuf->data + inbuf->head;
3539 unsigned procbytes = 0;
3540 unsigned char c;
3541 @@ -245,8 +225,7 @@ byte_stuff:
3542
3543 /* prepare reception of next frame */
3544 inputstate &= ~INS_have_data;
3545 - new_rcv_skb(bcs);
3546 - skb = bcs->skb;
3547 + skb = gigaset_new_rx_skb(bcs);
3548 } else {
3549 /* empty frame (7E 7E) */
3550 #ifdef CONFIG_GIGASET_DEBUG
3551 @@ -255,8 +234,7 @@ byte_stuff:
3552 if (!skb) {
3553 /* skipped (?) */
3554 gigaset_isdn_rcv_err(bcs);
3555 - new_rcv_skb(bcs);
3556 - skb = bcs->skb;
3557 + skb = gigaset_new_rx_skb(bcs);
3558 }
3559 }
3560
3561 @@ -279,11 +257,11 @@ byte_stuff:
3562 #endif
3563 inputstate |= INS_have_data;
3564 if (skb) {
3565 - if (skb->len == SBUFSIZE) {
3566 + if (skb->len >= bcs->rx_bufsize) {
3567 dev_warn(cs->dev, "received packet too long\n");
3568 dev_kfree_skb_any(skb);
3569 /* skip remainder of packet */
3570 - bcs->skb = skb = NULL;
3571 + bcs->rx_skb = skb = NULL;
3572 } else {
3573 *__skb_put(skb, 1) = c;
3574 fcs = crc_ccitt_byte(fcs, c);
3575 @@ -292,7 +270,7 @@ byte_stuff:
3576 }
3577
3578 bcs->inputstate = inputstate;
3579 - bcs->fcs = fcs;
3580 + bcs->rx_fcs = fcs;
3581 return procbytes;
3582 }
3583
3584 @@ -308,18 +286,18 @@ static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
3585 struct cardstate *cs = inbuf->cs;
3586 struct bc_state *bcs = cs->bcs;
3587 int inputstate = bcs->inputstate;
3588 - struct sk_buff *skb = bcs->skb;
3589 + struct sk_buff *skb = bcs->rx_skb;
3590 unsigned char *src = inbuf->data + inbuf->head;
3591 unsigned procbytes = 0;
3592 unsigned char c;
3593
3594 if (!skb) {
3595 /* skip this block */
3596 - new_rcv_skb(bcs);
3597 + gigaset_new_rx_skb(bcs);
3598 return numbytes;
3599 }
3600
3601 - while (procbytes < numbytes && skb->len < SBUFSIZE) {
3602 + while (procbytes < numbytes && skb->len < bcs->rx_bufsize) {
3603 c = *src++;
3604 procbytes++;
3605
3606 @@ -343,7 +321,7 @@ static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
3607 if (inputstate & INS_have_data) {
3608 gigaset_skb_rcvd(bcs, skb);
3609 inputstate &= ~INS_have_data;
3610 - new_rcv_skb(bcs);
3611 + gigaset_new_rx_skb(bcs);
3612 }
3613
3614 bcs->inputstate = inputstate;
3615 diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
3616 index 964a55f..bedc8b0 100644
3617 --- a/drivers/isdn/gigaset/capi.c
3618 +++ b/drivers/isdn/gigaset/capi.c
3619 @@ -70,7 +70,7 @@
3620 #define MAX_NUMBER_DIGITS 20
3621 #define MAX_FMT_IE_LEN 20
3622
3623 -/* values for gigaset_capi_appl.connected */
3624 +/* values for bcs->apconnstate */
3625 #define APCONN_NONE 0 /* inactive/listening */
3626 #define APCONN_SETUP 1 /* connecting */
3627 #define APCONN_ACTIVE 2 /* B channel up */
3628 @@ -80,10 +80,10 @@ struct gigaset_capi_appl {
3629 struct list_head ctrlist;
3630 struct gigaset_capi_appl *bcnext;
3631 u16 id;
3632 + struct capi_register_params rp;
3633 u16 nextMessageNumber;
3634 u32 listenInfoMask;
3635 u32 listenCIPmask;
3636 - int connected;
3637 };
3638
3639 /* CAPI specific controller data structure */
3640 @@ -330,6 +330,39 @@ static const char *format_ie(const char *ie)
3641 return result;
3642 }
3643
3644 +/*
3645 + * emit DATA_B3_CONF message
3646 + */
3647 +static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr,
3648 + u16 appl, u16 msgid, int channel,
3649 + u16 handle, u16 info)
3650 +{
3651 + struct sk_buff *cskb;
3652 + u8 *msg;
3653 +
3654 + cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
3655 + if (!cskb) {
3656 + dev_err(cs->dev, "%s: out of memory\n", __func__);
3657 + return;
3658 + }
3659 + /* frequent message, avoid _cmsg overhead */
3660 + msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN);
3661 + CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN);
3662 + CAPIMSG_SETAPPID(msg, appl);
3663 + CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3);
3664 + CAPIMSG_SETSUBCOMMAND(msg, CAPI_CONF);
3665 + CAPIMSG_SETMSGID(msg, msgid);
3666 + CAPIMSG_SETCONTROLLER(msg, ctr->cnr);
3667 + CAPIMSG_SETPLCI_PART(msg, channel);
3668 + CAPIMSG_SETNCCI_PART(msg, 1);
3669 + CAPIMSG_SETHANDLE_CONF(msg, handle);
3670 + CAPIMSG_SETINFO_CONF(msg, info);
3671 +
3672 + /* emit message */
3673 + dump_rawmsg(DEBUG_MCMD, __func__, msg);
3674 + capi_ctr_handle_message(ctr, appl, cskb);
3675 +}
3676 +
3677
3678 /*
3679 * driver interface functions
3680 @@ -350,7 +383,6 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
3681 struct gigaset_capi_ctr *iif = cs->iif;
3682 struct gigaset_capi_appl *ap = bcs->ap;
3683 unsigned char *req = skb_mac_header(dskb);
3684 - struct sk_buff *cskb;
3685 u16 flags;
3686
3687 /* update statistics */
3688 @@ -362,39 +394,22 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
3689 }
3690
3691 /* don't send further B3 messages if disconnected */
3692 - if (ap->connected < APCONN_ACTIVE) {
3693 + if (bcs->apconnstate < APCONN_ACTIVE) {
3694 gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
3695 return;
3696 }
3697
3698 - /* ToDo: honor unset "delivery confirmation" bit */
3699 + /*
3700 + * send DATA_B3_CONF if "delivery confirmation" bit was set in request;
3701 + * otherwise it has already been sent by do_data_b3_req()
3702 + */
3703 flags = CAPIMSG_FLAGS(req);
3704 -
3705 - /* build DATA_B3_CONF message */
3706 - cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
3707 - if (!cskb) {
3708 - dev_err(cs->dev, "%s: out of memory\n", __func__);
3709 - return;
3710 - }
3711 - /* frequent message, avoid _cmsg overhead */
3712 - CAPIMSG_SETLEN(cskb->data, CAPI_DATA_B3_CONF_LEN);
3713 - CAPIMSG_SETAPPID(cskb->data, ap->id);
3714 - CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
3715 - CAPIMSG_SETSUBCOMMAND(cskb->data, CAPI_CONF);
3716 - CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
3717 - CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
3718 - CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
3719 - CAPIMSG_SETNCCI_PART(cskb->data, 1);
3720 - CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
3721 - if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
3722 - CAPIMSG_SETINFO_CONF(cskb->data,
3723 - CapiFlagsNotSupportedByProtocol);
3724 - else
3725 - CAPIMSG_SETINFO_CONF(cskb->data, CAPI_NOERROR);
3726 -
3727 - /* emit message */
3728 - dump_rawmsg(DEBUG_LLDATA, "DATA_B3_CONF", cskb->data);
3729 - capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
3730 + if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)
3731 + send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req),
3732 + bcs->channel + 1, CAPIMSG_HANDLE_REQ(req),
3733 + (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ?
3734 + CapiFlagsNotSupportedByProtocol :
3735 + CAPI_NOERROR);
3736 }
3737 EXPORT_SYMBOL_GPL(gigaset_skb_sent);
3738
3739 @@ -423,7 +438,7 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
3740 }
3741
3742 /* don't send further B3 messages if disconnected */
3743 - if (ap->connected < APCONN_ACTIVE) {
3744 + if (bcs->apconnstate < APCONN_ACTIVE) {
3745 gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
3746 dev_kfree_skb_any(skb);
3747 return;
3748 @@ -495,6 +510,7 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
3749 u32 actCIPmask;
3750 struct sk_buff *skb;
3751 unsigned int msgsize;
3752 + unsigned long flags;
3753 int i;
3754
3755 /*
3756 @@ -619,7 +635,14 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
3757 format_ie(iif->hcmsg.CalledPartyNumber));
3758
3759 /* scan application list for matching listeners */
3760 - bcs->ap = NULL;
3761 + spin_lock_irqsave(&bcs->aplock, flags);
3762 + if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) {
3763 + dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
3764 + __func__, bcs->ap, bcs->apconnstate);
3765 + bcs->ap = NULL;
3766 + bcs->apconnstate = APCONN_NONE;
3767 + }
3768 + spin_unlock_irqrestore(&bcs->aplock, flags);
3769 actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
3770 list_for_each_entry(ap, &iif->appls, ctrlist)
3771 if (actCIPmask & ap->listenCIPmask) {
3772 @@ -637,10 +660,12 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
3773 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
3774
3775 /* add to listeners on this B channel, update state */
3776 + spin_lock_irqsave(&bcs->aplock, flags);
3777 ap->bcnext = bcs->ap;
3778 bcs->ap = ap;
3779 bcs->chstate |= CHS_NOTIFY_LL;
3780 - ap->connected = APCONN_SETUP;
3781 + bcs->apconnstate = APCONN_SETUP;
3782 + spin_unlock_irqrestore(&bcs->aplock, flags);
3783
3784 /* emit message */
3785 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
3786 @@ -665,7 +690,7 @@ static void send_disconnect_ind(struct bc_state *bcs,
3787 struct gigaset_capi_ctr *iif = cs->iif;
3788 struct sk_buff *skb;
3789
3790 - if (ap->connected == APCONN_NONE)
3791 + if (bcs->apconnstate == APCONN_NONE)
3792 return;
3793
3794 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
3795 @@ -679,7 +704,6 @@ static void send_disconnect_ind(struct bc_state *bcs,
3796 }
3797 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN));
3798 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
3799 - ap->connected = APCONN_NONE;
3800 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
3801 }
3802
3803 @@ -696,9 +720,9 @@ static void send_disconnect_b3_ind(struct bc_state *bcs,
3804 struct sk_buff *skb;
3805
3806 /* nothing to do if no logical connection active */
3807 - if (ap->connected < APCONN_ACTIVE)
3808 + if (bcs->apconnstate < APCONN_ACTIVE)
3809 return;
3810 - ap->connected = APCONN_SETUP;
3811 + bcs->apconnstate = APCONN_SETUP;
3812
3813 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
3814 ap->nextMessageNumber++,
3815 @@ -725,14 +749,25 @@ void gigaset_isdn_connD(struct bc_state *bcs)
3816 {
3817 struct cardstate *cs = bcs->cs;
3818 struct gigaset_capi_ctr *iif = cs->iif;
3819 - struct gigaset_capi_appl *ap = bcs->ap;
3820 + struct gigaset_capi_appl *ap;
3821 struct sk_buff *skb;
3822 unsigned int msgsize;
3823 + unsigned long flags;
3824
3825 + spin_lock_irqsave(&bcs->aplock, flags);
3826 + ap = bcs->ap;
3827 if (!ap) {
3828 + spin_unlock_irqrestore(&bcs->aplock, flags);
3829 dev_err(cs->dev, "%s: no application\n", __func__);
3830 return;
3831 }
3832 + if (bcs->apconnstate == APCONN_NONE) {
3833 + spin_unlock_irqrestore(&bcs->aplock, flags);
3834 + dev_warn(cs->dev, "%s: application %u not connected\n",
3835 + __func__, ap->id);
3836 + return;
3837 + }
3838 + spin_unlock_irqrestore(&bcs->aplock, flags);
3839 while (ap->bcnext) {
3840 /* this should never happen */
3841 dev_warn(cs->dev, "%s: dropping extra application %u\n",
3842 @@ -741,11 +776,6 @@ void gigaset_isdn_connD(struct bc_state *bcs)
3843 CapiCallGivenToOtherApplication);
3844 ap->bcnext = ap->bcnext->bcnext;
3845 }
3846 - if (ap->connected == APCONN_NONE) {
3847 - dev_warn(cs->dev, "%s: application %u not connected\n",
3848 - __func__, ap->id);
3849 - return;
3850 - }
3851
3852 /* prepare CONNECT_ACTIVE_IND message
3853 * Note: LLC not supported by device
3854 @@ -783,17 +813,24 @@ void gigaset_isdn_connD(struct bc_state *bcs)
3855 void gigaset_isdn_hupD(struct bc_state *bcs)
3856 {
3857 struct gigaset_capi_appl *ap;
3858 + unsigned long flags;
3859
3860 /*
3861 * ToDo: pass on reason code reported by device
3862 * (requires ev-layer state machine extension to collect
3863 * ZCAU device reply)
3864 */
3865 - for (ap = bcs->ap; ap != NULL; ap = ap->bcnext) {
3866 + spin_lock_irqsave(&bcs->aplock, flags);
3867 + while (bcs->ap != NULL) {
3868 + ap = bcs->ap;
3869 + bcs->ap = ap->bcnext;
3870 + spin_unlock_irqrestore(&bcs->aplock, flags);
3871 send_disconnect_b3_ind(bcs, ap);
3872 send_disconnect_ind(bcs, ap, 0);
3873 + spin_lock_irqsave(&bcs->aplock, flags);
3874 }
3875 - bcs->ap = NULL;
3876 + bcs->apconnstate = APCONN_NONE;
3877 + spin_unlock_irqrestore(&bcs->aplock, flags);
3878 }
3879
3880 /**
3881 @@ -807,24 +844,21 @@ void gigaset_isdn_connB(struct bc_state *bcs)
3882 {
3883 struct cardstate *cs = bcs->cs;
3884 struct gigaset_capi_ctr *iif = cs->iif;
3885 - struct gigaset_capi_appl *ap = bcs->ap;
3886 + struct gigaset_capi_appl *ap;
3887 struct sk_buff *skb;
3888 + unsigned long flags;
3889 unsigned int msgsize;
3890 u8 command;
3891
3892 + spin_lock_irqsave(&bcs->aplock, flags);
3893 + ap = bcs->ap;
3894 if (!ap) {
3895 + spin_unlock_irqrestore(&bcs->aplock, flags);
3896 dev_err(cs->dev, "%s: no application\n", __func__);
3897 return;
3898 }
3899 - while (ap->bcnext) {
3900 - /* this should never happen */
3901 - dev_warn(cs->dev, "%s: dropping extra application %u\n",
3902 - __func__, ap->bcnext->id);
3903 - send_disconnect_ind(bcs, ap->bcnext,
3904 - CapiCallGivenToOtherApplication);
3905 - ap->bcnext = ap->bcnext->bcnext;
3906 - }
3907 - if (!ap->connected) {
3908 + if (!bcs->apconnstate) {
3909 + spin_unlock_irqrestore(&bcs->aplock, flags);
3910 dev_warn(cs->dev, "%s: application %u not connected\n",
3911 __func__, ap->id);
3912 return;
3913 @@ -836,13 +870,26 @@ void gigaset_isdn_connB(struct bc_state *bcs)
3914 * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
3915 * Parameters in both cases always: NCCI = 1, NCPI empty
3916 */
3917 - if (ap->connected >= APCONN_ACTIVE) {
3918 + if (bcs->apconnstate >= APCONN_ACTIVE) {
3919 command = CAPI_CONNECT_B3_ACTIVE;
3920 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
3921 } else {
3922 command = CAPI_CONNECT_B3;
3923 msgsize = CAPI_CONNECT_B3_IND_BASELEN;
3924 }
3925 + bcs->apconnstate = APCONN_ACTIVE;
3926 +
3927 + spin_unlock_irqrestore(&bcs->aplock, flags);
3928 +
3929 + while (ap->bcnext) {
3930 + /* this should never happen */
3931 + dev_warn(cs->dev, "%s: dropping extra application %u\n",
3932 + __func__, ap->bcnext->id);
3933 + send_disconnect_ind(bcs, ap->bcnext,
3934 + CapiCallGivenToOtherApplication);
3935 + ap->bcnext = ap->bcnext->bcnext;
3936 + }
3937 +
3938 capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
3939 ap->nextMessageNumber++,
3940 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
3941 @@ -853,7 +900,6 @@ void gigaset_isdn_connB(struct bc_state *bcs)
3942 }
3943 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
3944 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
3945 - ap->connected = APCONN_ACTIVE;
3946 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
3947 }
3948
3949 @@ -933,30 +979,6 @@ void gigaset_isdn_stop(struct cardstate *cs)
3950 */
3951
3952 /*
3953 - * load firmware
3954 - */
3955 -static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
3956 -{
3957 - struct cardstate *cs = ctr->driverdata;
3958 -
3959 - /* AVM specific operation, not needed for Gigaset -- ignore */
3960 - dev_notice(cs->dev, "load_firmware ignored\n");
3961 -
3962 - return 0;
3963 -}
3964 -
3965 -/*
3966 - * reset (deactivate) controller
3967 - */
3968 -static void gigaset_reset_ctr(struct capi_ctr *ctr)
3969 -{
3970 - struct cardstate *cs = ctr->driverdata;
3971 -
3972 - /* AVM specific operation, not needed for Gigaset -- ignore */
3973 - dev_notice(cs->dev, "reset_ctr ignored\n");
3974 -}
3975 -
3976 -/*
3977 * register CAPI application
3978 */
3979 static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
3980 @@ -980,8 +1002,64 @@ static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
3981 return;
3982 }
3983 ap->id = appl;
3984 + ap->rp = *rp;
3985
3986 list_add(&ap->ctrlist, &iif->appls);
3987 + dev_info(cs->dev, "application %u registered\n", ap->id);
3988 +}
3989 +
3990 +/*
3991 + * remove CAPI application from channel
3992 + * helper function to keep indentation levels down and stay in 80 columns
3993 + */
3994 +
3995 +static inline void remove_appl_from_channel(struct bc_state *bcs,
3996 + struct gigaset_capi_appl *ap)
3997 +{
3998 + struct cardstate *cs = bcs->cs;
3999 + struct gigaset_capi_appl *bcap;
4000 + unsigned long flags;
4001 + int prevconnstate;
4002 +
4003 + spin_lock_irqsave(&bcs->aplock, flags);
4004 + bcap = bcs->ap;
4005 + if (bcap == NULL) {
4006 + spin_unlock_irqrestore(&bcs->aplock, flags);
4007 + return;
4008 + }
4009 +
4010 + /* check first application on channel */
4011 + if (bcap == ap) {
4012 + bcs->ap = ap->bcnext;
4013 + if (bcs->ap != NULL) {
4014 + spin_unlock_irqrestore(&bcs->aplock, flags);
4015 + return;
4016 + }
4017 +
4018 + /* none left, clear channel state */
4019 + prevconnstate = bcs->apconnstate;
4020 + bcs->apconnstate = APCONN_NONE;
4021 + spin_unlock_irqrestore(&bcs->aplock, flags);
4022 +
4023 + if (prevconnstate == APCONN_ACTIVE) {
4024 + dev_notice(cs->dev, "%s: hanging up channel %u\n",
4025 + __func__, bcs->channel);
4026 + gigaset_add_event(cs, &bcs->at_state,
4027 + EV_HUP, NULL, 0, NULL);
4028 + gigaset_schedule_event(cs);
4029 + }
4030 + return;
4031 + }
4032 +
4033 + /* check remaining list */
4034 + do {
4035 + if (bcap->bcnext == ap) {
4036 + bcap->bcnext = bcap->bcnext->bcnext;
4037 + return;
4038 + }
4039 + bcap = bcap->bcnext;
4040 + } while (bcap != NULL);
4041 + spin_unlock_irqrestore(&bcs->aplock, flags);
4042 }
4043
4044 /*
4045 @@ -993,19 +1071,19 @@ static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
4046 = container_of(ctr, struct gigaset_capi_ctr, ctr);
4047 struct cardstate *cs = iif->ctr.driverdata;
4048 struct gigaset_capi_appl *ap, *tmp;
4049 + unsigned ch;
4050
4051 list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
4052 if (ap->id == appl) {
4053 - if (ap->connected != APCONN_NONE) {
4054 - dev_err(cs->dev,
4055 - "%s: application %u still connected\n",
4056 - __func__, ap->id);
4057 - /* ToDo: clear active connection */
4058 - }
4059 + /* remove from any channels */
4060 + for (ch = 0; ch < cs->channels; ch++)
4061 + remove_appl_from_channel(&cs->bcs[ch], ap);
4062 +
4063 + /* remove from registration list */
4064 list_del(&ap->ctrlist);
4065 kfree(ap);
4066 + dev_info(cs->dev, "application %u released\n", appl);
4067 }
4068 -
4069 }
4070
4071 /*
4072 @@ -1184,7 +1262,8 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
4073 char **commands;
4074 char *s;
4075 u8 *pp;
4076 - int i, l;
4077 + unsigned long flags;
4078 + int i, l, lbc, lhlc;
4079 u16 info;
4080
4081 /* decode message */
4082 @@ -1199,8 +1278,18 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
4083 send_conf(iif, ap, skb, CapiNoPlciAvailable);
4084 return;
4085 }
4086 + spin_lock_irqsave(&bcs->aplock, flags);
4087 + if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE)
4088 + dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n",
4089 + __func__, bcs->ap, bcs->apconnstate);
4090 ap->bcnext = NULL;
4091 bcs->ap = ap;
4092 + bcs->apconnstate = APCONN_SETUP;
4093 + spin_unlock_irqrestore(&bcs->aplock, flags);
4094 +
4095 + bcs->rx_bufsize = ap->rp.datablklen;
4096 + dev_kfree_skb(bcs->rx_skb);
4097 + gigaset_new_rx_skb(bcs);
4098 cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
4099
4100 /* build command table */
4101 @@ -1308,42 +1397,59 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
4102 goto error;
4103 }
4104
4105 - /* check/encode parameter: BC */
4106 - if (cmsg->BC && cmsg->BC[0]) {
4107 - /* explicit BC overrides CIP */
4108 - l = 2*cmsg->BC[0] + 7;
4109 + /*
4110 + * check/encode parameters: BC & HLC
4111 + * must be encoded together as device doesn't accept HLC separately
4112 + * explicit parameters override values derived from CIP
4113 + */
4114 +
4115 + /* determine lengths */
4116 + if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
4117 + lbc = 2*cmsg->BC[0];
4118 + else if (cip2bchlc[cmsg->CIPValue].bc) /* BC derived from CIP */
4119 + lbc = strlen(cip2bchlc[cmsg->CIPValue].bc);
4120 + else /* no BC */
4121 + lbc = 0;
4122 + if (cmsg->HLC && cmsg->HLC[0]) /* HLC specified explicitly */
4123 + lhlc = 2*cmsg->HLC[0];
4124 + else if (cip2bchlc[cmsg->CIPValue].hlc) /* HLC derived from CIP */
4125 + lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc);
4126 + else /* no HLC */
4127 + lhlc = 0;
4128 +
4129 + if (lbc) {
4130 + /* have BC: allocate and assemble command string */
4131 + l = lbc + 7; /* "^SBC=" + value + "\r" + null byte */
4132 + if (lhlc)
4133 + l += lhlc + 7; /* ";^SHLC=" + value */
4134 commands[AT_BC] = kmalloc(l, GFP_KERNEL);
4135 if (!commands[AT_BC])
4136 goto oom;
4137 strcpy(commands[AT_BC], "^SBC=");
4138 - decode_ie(cmsg->BC, commands[AT_BC]+5);
4139 + if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */
4140 + decode_ie(cmsg->BC, commands[AT_BC] + 5);
4141 + else /* BC derived from CIP */
4142 + strcpy(commands[AT_BC] + 5,
4143 + cip2bchlc[cmsg->CIPValue].bc);
4144 + if (lhlc) {
4145 + strcpy(commands[AT_BC] + lbc + 5, ";^SHLC=");
4146 + if (cmsg->HLC && cmsg->HLC[0])
4147 + /* HLC specified explicitly */
4148 + decode_ie(cmsg->HLC,
4149 + commands[AT_BC] + lbc + 12);
4150 + else /* HLC derived from CIP */
4151 + strcpy(commands[AT_BC] + lbc + 12,
4152 + cip2bchlc[cmsg->CIPValue].hlc);
4153 + }
4154 strcpy(commands[AT_BC] + l - 2, "\r");
4155 - } else if (cip2bchlc[cmsg->CIPValue].bc) {
4156 - l = strlen(cip2bchlc[cmsg->CIPValue].bc) + 7;
4157 - commands[AT_BC] = kmalloc(l, GFP_KERNEL);
4158 - if (!commands[AT_BC])
4159 - goto oom;
4160 - snprintf(commands[AT_BC], l, "^SBC=%s\r",
4161 - cip2bchlc[cmsg->CIPValue].bc);
4162 - }
4163 -
4164 - /* check/encode parameter: HLC */
4165 - if (cmsg->HLC && cmsg->HLC[0]) {
4166 - /* explicit HLC overrides CIP */
4167 - l = 2*cmsg->HLC[0] + 7;
4168 - commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
4169 - if (!commands[AT_HLC])
4170 - goto oom;
4171 - strcpy(commands[AT_HLC], "^SHLC=");
4172 - decode_ie(cmsg->HLC, commands[AT_HLC]+5);
4173 - strcpy(commands[AT_HLC] + l - 2, "\r");
4174 - } else if (cip2bchlc[cmsg->CIPValue].hlc) {
4175 - l = strlen(cip2bchlc[cmsg->CIPValue].hlc) + 7;
4176 - commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
4177 - if (!commands[AT_HLC])
4178 - goto oom;
4179 - snprintf(commands[AT_HLC], l, "^SHLC=%s\r",
4180 - cip2bchlc[cmsg->CIPValue].hlc);
4181 + } else {
4182 + /* no BC */
4183 + if (lhlc) {
4184 + dev_notice(cs->dev, "%s: cannot set HLC without BC\n",
4185 + "CONNECT_REQ");
4186 + info = CapiIllMessageParmCoding; /* ? */
4187 + goto error;
4188 + }
4189 }
4190
4191 /* check/encode parameter: B Protocol */
4192 @@ -1357,13 +1463,13 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
4193 bcs->proto2 = L2_HDLC;
4194 break;
4195 case 1:
4196 - bcs->proto2 = L2_BITSYNC;
4197 + bcs->proto2 = L2_VOICE;
4198 break;
4199 default:
4200 dev_warn(cs->dev,
4201 "B1 Protocol %u unsupported, using Transparent\n",
4202 cmsg->B1protocol);
4203 - bcs->proto2 = L2_BITSYNC;
4204 + bcs->proto2 = L2_VOICE;
4205 }
4206 if (cmsg->B2protocol != 1)
4207 dev_warn(cs->dev,
4208 @@ -1417,7 +1523,6 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
4209 goto error;
4210 }
4211 gigaset_schedule_event(cs);
4212 - ap->connected = APCONN_SETUP;
4213 send_conf(iif, ap, skb, CapiSuccess);
4214 return;
4215
4216 @@ -1445,6 +1550,7 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
4217 _cmsg *cmsg = &iif->acmsg;
4218 struct bc_state *bcs;
4219 struct gigaset_capi_appl *oap;
4220 + unsigned long flags;
4221 int channel;
4222
4223 /* decode message */
4224 @@ -1464,12 +1570,24 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
4225 switch (cmsg->Reject) {
4226 case 0: /* Accept */
4227 /* drop all competing applications, keep only this one */
4228 - for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
4229 - if (oap != ap)
4230 + spin_lock_irqsave(&bcs->aplock, flags);
4231 + while (bcs->ap != NULL) {
4232 + oap = bcs->ap;
4233 + bcs->ap = oap->bcnext;
4234 + if (oap != ap) {
4235 + spin_unlock_irqrestore(&bcs->aplock, flags);
4236 send_disconnect_ind(bcs, oap,
4237 CapiCallGivenToOtherApplication);
4238 + spin_lock_irqsave(&bcs->aplock, flags);
4239 + }
4240 + }
4241 ap->bcnext = NULL;
4242 bcs->ap = ap;
4243 + spin_unlock_irqrestore(&bcs->aplock, flags);
4244 +
4245 + bcs->rx_bufsize = ap->rp.datablklen;
4246 + dev_kfree_skb(bcs->rx_skb);
4247 + gigaset_new_rx_skb(bcs);
4248 bcs->chstate |= CHS_NOTIFY_LL;
4249
4250 /* check/encode B channel protocol */
4251 @@ -1483,13 +1601,13 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
4252 bcs->proto2 = L2_HDLC;
4253 break;
4254 case 1:
4255 - bcs->proto2 = L2_BITSYNC;
4256 + bcs->proto2 = L2_VOICE;
4257 break;
4258 default:
4259 dev_warn(cs->dev,
4260 "B1 Protocol %u unsupported, using Transparent\n",
4261 cmsg->B1protocol);
4262 - bcs->proto2 = L2_BITSYNC;
4263 + bcs->proto2 = L2_VOICE;
4264 }
4265 if (cmsg->B2protocol != 1)
4266 dev_warn(cs->dev,
4267 @@ -1537,31 +1655,45 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
4268 send_disconnect_ind(bcs, ap, 0);
4269
4270 /* remove it from the list of listening apps */
4271 + spin_lock_irqsave(&bcs->aplock, flags);
4272 if (bcs->ap == ap) {
4273 bcs->ap = ap->bcnext;
4274 - if (bcs->ap == NULL)
4275 + if (bcs->ap == NULL) {
4276 /* last one: stop ev-layer hupD notifications */
4277 + bcs->apconnstate = APCONN_NONE;
4278 bcs->chstate &= ~CHS_NOTIFY_LL;
4279 + }
4280 + spin_unlock_irqrestore(&bcs->aplock, flags);
4281 return;
4282 }
4283 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
4284 if (oap->bcnext == ap) {
4285 oap->bcnext = oap->bcnext->bcnext;
4286 + spin_unlock_irqrestore(&bcs->aplock, flags);
4287 return;
4288 }
4289 }
4290 + spin_unlock_irqrestore(&bcs->aplock, flags);
4291 dev_err(cs->dev, "%s: application %u not found\n",
4292 __func__, ap->id);
4293 return;
4294
4295 default: /* Reject */
4296 /* drop all competing applications, keep only this one */
4297 - for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
4298 - if (oap != ap)
4299 + spin_lock_irqsave(&bcs->aplock, flags);
4300 + while (bcs->ap != NULL) {
4301 + oap = bcs->ap;
4302 + bcs->ap = oap->bcnext;
4303 + if (oap != ap) {
4304 + spin_unlock_irqrestore(&bcs->aplock, flags);
4305 send_disconnect_ind(bcs, oap,
4306 CapiCallGivenToOtherApplication);
4307 + spin_lock_irqsave(&bcs->aplock, flags);
4308 + }
4309 + }
4310 ap->bcnext = NULL;
4311 bcs->ap = ap;
4312 + spin_unlock_irqrestore(&bcs->aplock, flags);
4313
4314 /* reject call - will trigger DISCONNECT_IND for this app */
4315 dev_info(cs->dev, "%s: Reject=%x\n",
4316 @@ -1584,6 +1716,7 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
4317 {
4318 struct cardstate *cs = iif->ctr.driverdata;
4319 _cmsg *cmsg = &iif->acmsg;
4320 + struct bc_state *bcs;
4321 int channel;
4322
4323 /* decode message */
4324 @@ -1598,9 +1731,10 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
4325 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
4326 return;
4327 }
4328 + bcs = &cs->bcs[channel-1];
4329
4330 /* mark logical connection active */
4331 - ap->connected = APCONN_ACTIVE;
4332 + bcs->apconnstate = APCONN_ACTIVE;
4333
4334 /* build NCCI: always 1 (one B3 connection only) */
4335 cmsg->adr.adrNCCI |= 1 << 16;
4336 @@ -1646,7 +1780,7 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
4337
4338 if (cmsg->Reject) {
4339 /* Reject: clear B3 connect received flag */
4340 - ap->connected = APCONN_SETUP;
4341 + bcs->apconnstate = APCONN_SETUP;
4342
4343 /* trigger hangup, causing eventual DISCONNECT_IND */
4344 if (!gigaset_add_event(cs, &bcs->at_state,
4345 @@ -1718,11 +1852,11 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
4346 }
4347
4348 /* skip if DISCONNECT_IND already sent */
4349 - if (!ap->connected)
4350 + if (!bcs->apconnstate)
4351 return;
4352
4353 /* check for active logical connection */
4354 - if (ap->connected >= APCONN_ACTIVE) {
4355 + if (bcs->apconnstate >= APCONN_ACTIVE) {
4356 /*
4357 * emit DISCONNECT_B3_IND with cause 0x3301
4358 * use separate cmsg structure, as the content of iif->acmsg
4359 @@ -1771,6 +1905,7 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
4360 {
4361 struct cardstate *cs = iif->ctr.driverdata;
4362 _cmsg *cmsg = &iif->acmsg;
4363 + struct bc_state *bcs;
4364 int channel;
4365
4366 /* decode message */
4367 @@ -1786,17 +1921,17 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
4368 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
4369 return;
4370 }
4371 + bcs = &cs->bcs[channel-1];
4372
4373 /* reject if logical connection not active */
4374 - if (ap->connected < APCONN_ACTIVE) {
4375 + if (bcs->apconnstate < APCONN_ACTIVE) {
4376 send_conf(iif, ap, skb,
4377 CapiMessageNotSupportedInCurrentState);
4378 return;
4379 }
4380
4381 /* trigger hangup, causing eventual DISCONNECT_B3_IND */
4382 - if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
4383 - EV_HUP, NULL, 0, NULL)) {
4384 + if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
4385 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
4386 return;
4387 }
4388 @@ -1817,11 +1952,14 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
4389 struct sk_buff *skb)
4390 {
4391 struct cardstate *cs = iif->ctr.driverdata;
4392 + struct bc_state *bcs;
4393 int channel = CAPIMSG_PLCI_PART(skb->data);
4394 u16 ncci = CAPIMSG_NCCI_PART(skb->data);
4395 u16 msglen = CAPIMSG_LEN(skb->data);
4396 u16 datalen = CAPIMSG_DATALEN(skb->data);
4397 u16 flags = CAPIMSG_FLAGS(skb->data);
4398 + u16 msgid = CAPIMSG_MSGID(skb->data);
4399 + u16 handle = CAPIMSG_HANDLE_REQ(skb->data);
4400
4401 /* frequent message, avoid _cmsg overhead */
4402 dump_rawmsg(DEBUG_LLDATA, "DATA_B3_REQ", skb->data);
4403 @@ -1837,6 +1975,7 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
4404 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
4405 return;
4406 }
4407 + bcs = &cs->bcs[channel-1];
4408 if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
4409 dev_notice(cs->dev, "%s: unexpected length %d\n",
4410 "DATA_B3_REQ", msglen);
4411 @@ -1856,7 +1995,7 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
4412 }
4413
4414 /* reject if logical connection not active */
4415 - if (ap->connected < APCONN_ACTIVE) {
4416 + if (bcs->apconnstate < APCONN_ACTIVE) {
4417 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
4418 return;
4419 }
4420 @@ -1867,17 +2006,19 @@ static void do_data_b3_req(struct gigaset_capi_ctr *iif,
4421 skb_pull(skb, msglen);
4422
4423 /* pass to device-specific module */
4424 - if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
4425 + if (cs->ops->send_skb(bcs, skb) < 0) {
4426 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
4427 return;
4428 }
4429
4430 - /* DATA_B3_CONF reply will be sent by gigaset_skb_sent() */
4431 -
4432 /*
4433 - * ToDo: honor unset "delivery confirmation" bit
4434 - * (send DATA_B3_CONF immediately?)
4435 + * DATA_B3_CONF will be sent by gigaset_skb_sent() only if "delivery
4436 + * confirmation" bit is set; otherwise we have to send it now
4437 */
4438 + if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION))
4439 + send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle,
4440 + flags ? CapiFlagsNotSupportedByProtocol
4441 + : CAPI_NOERROR);
4442 }
4443
4444 /*
4445 @@ -2213,8 +2354,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
4446 iif->ctr.driverdata = cs;
4447 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
4448 iif->ctr.driver_name = "gigaset";
4449 - iif->ctr.load_firmware = gigaset_load_firmware;
4450 - iif->ctr.reset_ctr = gigaset_reset_ctr;
4451 + iif->ctr.load_firmware = NULL;
4452 + iif->ctr.reset_ctr = NULL;
4453 iif->ctr.register_appl = gigaset_register_appl;
4454 iif->ctr.release_appl = gigaset_release_appl;
4455 iif->ctr.send_message = gigaset_send_message;
4456 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
4457 index f6f45f2..5d4befb 100644
4458 --- a/drivers/isdn/gigaset/common.c
4459 +++ b/drivers/isdn/gigaset/common.c
4460 @@ -399,8 +399,8 @@ static void gigaset_freebcs(struct bc_state *bcs)
4461 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
4462 clear_at_state(&bcs->at_state);
4463 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
4464 - dev_kfree_skb(bcs->skb);
4465 - bcs->skb = NULL;
4466 + dev_kfree_skb(bcs->rx_skb);
4467 + bcs->rx_skb = NULL;
4468
4469 for (i = 0; i < AT_NUM; ++i) {
4470 kfree(bcs->commands[i]);
4471 @@ -634,19 +634,10 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
4472 bcs->emptycount = 0;
4473 #endif
4474
4475 - gig_dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel);
4476 - bcs->fcs = PPP_INITFCS;
4477 + bcs->rx_bufsize = 0;
4478 + bcs->rx_skb = NULL;
4479 + bcs->rx_fcs = PPP_INITFCS;
4480 bcs->inputstate = 0;
4481 - if (cs->ignoreframes) {
4482 - bcs->skb = NULL;
4483 - } else {
4484 - bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
4485 - if (bcs->skb != NULL)
4486 - skb_reserve(bcs->skb, cs->hw_hdr_len);
4487 - else
4488 - pr_err("out of memory\n");
4489 - }
4490 -
4491 bcs->channel = channel;
4492 bcs->cs = cs;
4493
4494 @@ -658,16 +649,15 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
4495 for (i = 0; i < AT_NUM; ++i)
4496 bcs->commands[i] = NULL;
4497
4498 + spin_lock_init(&bcs->aplock);
4499 + bcs->ap = NULL;
4500 + bcs->apconnstate = 0;
4501 +
4502 gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
4503 if (cs->ops->initbcshw(bcs))
4504 return bcs;
4505
4506 gig_dbg(DEBUG_INIT, " failed");
4507 -
4508 - gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
4509 - dev_kfree_skb(bcs->skb);
4510 - bcs->skb = NULL;
4511 -
4512 return NULL;
4513 }
4514
4515 @@ -839,14 +829,12 @@ void gigaset_bcs_reinit(struct bc_state *bcs)
4516 bcs->emptycount = 0;
4517 #endif
4518
4519 - bcs->fcs = PPP_INITFCS;
4520 + bcs->rx_fcs = PPP_INITFCS;
4521 bcs->chstate = 0;
4522
4523 bcs->ignore = cs->ignoreframes;
4524 - if (bcs->ignore) {
4525 - dev_kfree_skb(bcs->skb);
4526 - bcs->skb = NULL;
4527 - }
4528 + dev_kfree_skb(bcs->rx_skb);
4529 + bcs->rx_skb = NULL;
4530
4531 cs->ops->reinitbcshw(bcs);
4532 }
4533 diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
4534 index 206c380..ceaef9a 100644
4535 --- a/drivers/isdn/gigaset/ev-layer.c
4536 +++ b/drivers/isdn/gigaset/ev-layer.c
4537 @@ -282,9 +282,7 @@ struct reply_t gigaset_tab_cid[] =
4538 /* dial */
4539 {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
4540 {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC} },
4541 -{RSP_OK, 601, 601, -1, 602, 5, {ACT_CMD+AT_HLC} },
4542 -{RSP_NULL, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
4543 -{RSP_OK, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
4544 +{RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD+AT_PROTO} },
4545 {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD+AT_TYPE} },
4546 {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD+AT_MSN} },
4547 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
4548 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
4549 index 05947f9..8738b08 100644
4550 --- a/drivers/isdn/gigaset/gigaset.h
4551 +++ b/drivers/isdn/gigaset/gigaset.h
4552 @@ -45,10 +45,6 @@
4553 #define MAX_EVENTS 64 /* size of event queue */
4554
4555 #define RBUFSIZE 8192
4556 -#define SBUFSIZE 4096 /* sk_buff payload size */
4557 -
4558 -#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
4559 -#define MAX_BUF_SIZE (SBUFSIZE - 2) /* Max. size of a data packet from LL */
4560
4561 /* compile time options */
4562 #define GIG_MAJOR 0
4563 @@ -190,10 +186,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
4564 #define AT_BC 3
4565 #define AT_PROTO 4
4566 #define AT_TYPE 5
4567 -#define AT_HLC 6
4568 -#define AT_CLIP 7
4569 +#define AT_CLIP 6
4570 /* total number */
4571 -#define AT_NUM 8
4572 +#define AT_NUM 7
4573
4574 /* variables in struct at_state_t */
4575 #define VAR_ZSAU 0
4576 @@ -380,8 +375,10 @@ struct bc_state {
4577
4578 struct at_state_t at_state;
4579
4580 - __u16 fcs;
4581 - struct sk_buff *skb;
4582 + /* receive buffer */
4583 + unsigned rx_bufsize; /* max size accepted by application */
4584 + struct sk_buff *rx_skb;
4585 + __u16 rx_fcs;
4586 int inputstate; /* see INS_XXXX */
4587
4588 int channel;
4589 @@ -406,7 +403,9 @@ struct bc_state {
4590 struct bas_bc_state *bas; /* usb hardware driver (base) */
4591 } hw;
4592
4593 - void *ap; /* LL application structure */
4594 + void *ap; /* associated LL application */
4595 + int apconnstate; /* LL application connection state */
4596 + spinlock_t aplock;
4597 };
4598
4599 struct cardstate {
4600 @@ -801,8 +800,23 @@ static inline void gigaset_bchannel_up(struct bc_state *bcs)
4601 gigaset_schedule_event(bcs->cs);
4602 }
4603
4604 -/* handling routines for sk_buff */
4605 -/* ============================= */
4606 +/* set up next receive skb for data mode */
4607 +static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs)
4608 +{
4609 + struct cardstate *cs = bcs->cs;
4610 + unsigned short hw_hdr_len = cs->hw_hdr_len;
4611 +
4612 + if (bcs->ignore) {
4613 + bcs->rx_skb = NULL;
4614 + } else {
4615 + bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len);
4616 + if (bcs->rx_skb == NULL)
4617 + dev_warn(cs->dev, "could not allocate skb\n");
4618 + else
4619 + skb_reserve(bcs->rx_skb, hw_hdr_len);
4620 + }
4621 + return bcs->rx_skb;
4622 +}
4623
4624 /* append received bytes to inbuf */
4625 int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
4626 diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
4627 index c22e5ac..f01c3c2 100644
4628 --- a/drivers/isdn/gigaset/i4l.c
4629 +++ b/drivers/isdn/gigaset/i4l.c
4630 @@ -16,7 +16,10 @@
4631 #include "gigaset.h"
4632 #include <linux/isdnif.h>
4633
4634 +#define SBUFSIZE 4096 /* sk_buff payload size */
4635 +#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
4636 #define HW_HDR_LEN 2 /* Header size used to store ack info */
4637 +#define MAX_BUF_SIZE (SBUFSIZE - HW_HDR_LEN) /* max data packet from LL */
4638
4639 /* == Handling of I4L IO =====================================================*/
4640
4641 @@ -231,6 +234,15 @@ static int command_from_LL(isdn_ctrl *cntrl)
4642 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
4643 return -EBUSY;
4644 }
4645 + switch (bcs->proto2) {
4646 + case L2_HDLC:
4647 + bcs->rx_bufsize = SBUFSIZE;
4648 + break;
4649 + default: /* assume transparent */
4650 + bcs->rx_bufsize = TRANSBUFSIZE;
4651 + }
4652 + dev_kfree_skb(bcs->rx_skb);
4653 + gigaset_new_rx_skb(bcs);
4654
4655 commands = kzalloc(AT_NUM*(sizeof *commands), GFP_ATOMIC);
4656 if (!commands) {
4657 @@ -314,6 +326,15 @@ static int command_from_LL(isdn_ctrl *cntrl)
4658 return -EINVAL;
4659 }
4660 bcs = cs->bcs + ch;
4661 + switch (bcs->proto2) {
4662 + case L2_HDLC:
4663 + bcs->rx_bufsize = SBUFSIZE;
4664 + break;
4665 + default: /* assume transparent */
4666 + bcs->rx_bufsize = TRANSBUFSIZE;
4667 + }
4668 + dev_kfree_skb(bcs->rx_skb);
4669 + gigaset_new_rx_skb(bcs);
4670 if (!gigaset_add_event(cs, &bcs->at_state,
4671 EV_ACCEPT, NULL, 0, NULL))
4672 return -ENOMEM;
4673 diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
4674 index 16fd3bd..2dfd346 100644
4675 --- a/drivers/isdn/gigaset/isocdata.c
4676 +++ b/drivers/isdn/gigaset/isocdata.c
4677 @@ -500,19 +500,18 @@ int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
4678 */
4679 static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
4680 {
4681 - bcs->fcs = crc_ccitt_byte(bcs->fcs, c);
4682 - if (unlikely(bcs->skb == NULL)) {
4683 + bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c);
4684 + if (bcs->rx_skb == NULL)
4685 /* skipping */
4686 return;
4687 - }
4688 - if (unlikely(bcs->skb->len == SBUFSIZE)) {
4689 + if (bcs->rx_skb->len >= bcs->rx_bufsize) {
4690 dev_warn(bcs->cs->dev, "received oversized packet discarded\n");
4691 bcs->hw.bas->giants++;
4692 - dev_kfree_skb_any(bcs->skb);
4693 - bcs->skb = NULL;
4694 + dev_kfree_skb_any(bcs->rx_skb);
4695 + bcs->rx_skb = NULL;
4696 return;
4697 }
4698 - *__skb_put(bcs->skb, 1) = c;
4699 + *__skb_put(bcs->rx_skb, 1) = c;
4700 }
4701
4702 /* hdlc_flush
4703 @@ -521,18 +520,13 @@ static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
4704 static inline void hdlc_flush(struct bc_state *bcs)
4705 {
4706 /* clear skb or allocate new if not skipping */
4707 - if (likely(bcs->skb != NULL))
4708 - skb_trim(bcs->skb, 0);
4709 - else if (!bcs->ignore) {
4710 - bcs->skb = dev_alloc_skb(SBUFSIZE + bcs->cs->hw_hdr_len);
4711 - if (bcs->skb)
4712 - skb_reserve(bcs->skb, bcs->cs->hw_hdr_len);
4713 - else
4714 - dev_err(bcs->cs->dev, "could not allocate skb\n");
4715 - }
4716 + if (bcs->rx_skb != NULL)
4717 + skb_trim(bcs->rx_skb, 0);
4718 + else
4719 + gigaset_new_rx_skb(bcs);
4720
4721 /* reset packet state */
4722 - bcs->fcs = PPP_INITFCS;
4723 + bcs->rx_fcs = PPP_INITFCS;
4724 }
4725
4726 /* hdlc_done
4727 @@ -549,7 +543,7 @@ static inline void hdlc_done(struct bc_state *bcs)
4728 hdlc_flush(bcs);
4729 return;
4730 }
4731 - procskb = bcs->skb;
4732 + procskb = bcs->rx_skb;
4733 if (procskb == NULL) {
4734 /* previous error */
4735 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
4736 @@ -560,8 +554,8 @@ static inline void hdlc_done(struct bc_state *bcs)
4737 bcs->hw.bas->runts++;
4738 dev_kfree_skb_any(procskb);
4739 gigaset_isdn_rcv_err(bcs);
4740 - } else if (bcs->fcs != PPP_GOODFCS) {
4741 - dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs);
4742 + } else if (bcs->rx_fcs != PPP_GOODFCS) {
4743 + dev_notice(cs->dev, "frame check error\n");
4744 bcs->hw.bas->fcserrs++;
4745 dev_kfree_skb_any(procskb);
4746 gigaset_isdn_rcv_err(bcs);
4747 @@ -574,13 +568,8 @@ static inline void hdlc_done(struct bc_state *bcs)
4748 bcs->hw.bas->goodbytes += len;
4749 gigaset_skb_rcvd(bcs, procskb);
4750 }
4751 -
4752 - bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
4753 - if (bcs->skb)
4754 - skb_reserve(bcs->skb, cs->hw_hdr_len);
4755 - else
4756 - dev_err(cs->dev, "could not allocate skb\n");
4757 - bcs->fcs = PPP_INITFCS;
4758 + gigaset_new_rx_skb(bcs);
4759 + bcs->rx_fcs = PPP_INITFCS;
4760 }
4761
4762 /* hdlc_frag
4763 @@ -597,8 +586,8 @@ static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
4764 dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
4765 bcs->hw.bas->alignerrs++;
4766 gigaset_isdn_rcv_err(bcs);
4767 - __skb_trim(bcs->skb, 0);
4768 - bcs->fcs = PPP_INITFCS;
4769 + __skb_trim(bcs->rx_skb, 0);
4770 + bcs->rx_fcs = PPP_INITFCS;
4771 }
4772
4773 /* bit counts lookup table for HDLC bit unstuffing
4774 @@ -847,7 +836,6 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
4775 static inline void trans_receive(unsigned char *src, unsigned count,
4776 struct bc_state *bcs)
4777 {
4778 - struct cardstate *cs = bcs->cs;
4779 struct sk_buff *skb;
4780 int dobytes;
4781 unsigned char *dst;
4782 @@ -857,17 +845,11 @@ static inline void trans_receive(unsigned char *src, unsigned count,
4783 hdlc_flush(bcs);
4784 return;
4785 }
4786 - skb = bcs->skb;
4787 - if (unlikely(skb == NULL)) {
4788 - bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
4789 - if (!skb) {
4790 - dev_err(cs->dev, "could not allocate skb\n");
4791 - return;
4792 - }
4793 - skb_reserve(skb, cs->hw_hdr_len);
4794 - }
4795 + skb = bcs->rx_skb;
4796 + if (skb == NULL)
4797 + skb = gigaset_new_rx_skb(bcs);
4798 bcs->hw.bas->goodbytes += skb->len;
4799 - dobytes = TRANSBUFSIZE - skb->len;
4800 + dobytes = bcs->rx_bufsize - skb->len;
4801 while (count > 0) {
4802 dst = skb_put(skb, count < dobytes ? count : dobytes);
4803 while (count > 0 && dobytes > 0) {
4804 @@ -879,14 +861,10 @@ static inline void trans_receive(unsigned char *src, unsigned count,
4805 dump_bytes(DEBUG_STREAM_DUMP,
4806 "rcv data", skb->data, skb->len);
4807 gigaset_skb_rcvd(bcs, skb);
4808 - bcs->skb = skb =
4809 - dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
4810 - if (!skb) {
4811 - dev_err(cs->dev, "could not allocate skb\n");
4812 + skb = gigaset_new_rx_skb(bcs);
4813 + if (skb == NULL)
4814 return;
4815 - }
4816 - skb_reserve(skb, cs->hw_hdr_len);
4817 - dobytes = TRANSBUFSIZE;
4818 + dobytes = bcs->rx_bufsize;
4819 }
4820 }
4821 }
4822 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
4823 index ad945cc..c819165 100644
4824 --- a/drivers/md/raid10.c
4825 +++ b/drivers/md/raid10.c
4826 @@ -1487,14 +1487,14 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
4827 int sectors = r10_bio->sectors;
4828 mdk_rdev_t*rdev;
4829 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
4830 + int d = r10_bio->devs[r10_bio->read_slot].devnum;
4831
4832 rcu_read_lock();
4833 - {
4834 - int d = r10_bio->devs[r10_bio->read_slot].devnum;
4835 + rdev = rcu_dereference(conf->mirrors[d].rdev);
4836 + if (rdev) { /* If rdev is not NULL */
4837 char b[BDEVNAME_SIZE];
4838 int cur_read_error_count = 0;
4839
4840 - rdev = rcu_dereference(conf->mirrors[d].rdev);
4841 bdevname(rdev->bdev, b);
4842
4843 if (test_bit(Faulty, &rdev->flags)) {
4844 @@ -1534,7 +1534,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
4845
4846 rcu_read_lock();
4847 do {
4848 - int d = r10_bio->devs[sl].devnum;
4849 + d = r10_bio->devs[sl].devnum;
4850 rdev = rcu_dereference(conf->mirrors[d].rdev);
4851 if (rdev &&
4852 test_bit(In_sync, &rdev->flags)) {
4853 @@ -1568,7 +1568,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
4854 rcu_read_lock();
4855 while (sl != r10_bio->read_slot) {
4856 char b[BDEVNAME_SIZE];
4857 - int d;
4858 +
4859 if (sl==0)
4860 sl = conf->copies;
4861 sl--;
4862 @@ -1604,7 +1604,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
4863 }
4864 sl = start;
4865 while (sl != r10_bio->read_slot) {
4866 - int d;
4867 +
4868 if (sl==0)
4869 sl = conf->copies;
4870 sl--;
4871 diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
4872 index 441c064..1e93097 100644
4873 --- a/drivers/media/dvb/dvb-core/dvb_net.c
4874 +++ b/drivers/media/dvb/dvb-core/dvb_net.c
4875 @@ -350,6 +350,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
4876 const u8 *ts, *ts_end, *from_where = NULL;
4877 u8 ts_remain = 0, how_much = 0, new_ts = 1;
4878 struct ethhdr *ethh = NULL;
4879 + bool error = false;
4880
4881 #ifdef ULE_DEBUG
4882 /* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
4883 @@ -459,10 +460,16 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
4884
4885 /* Drop partly decoded SNDU, reset state, resync on PUSI. */
4886 if (priv->ule_skb) {
4887 - dev_kfree_skb( priv->ule_skb );
4888 + error = true;
4889 + dev_kfree_skb(priv->ule_skb);
4890 + }
4891 +
4892 + if (error || priv->ule_sndu_remain) {
4893 dev->stats.rx_errors++;
4894 dev->stats.rx_frame_errors++;
4895 + error = false;
4896 }
4897 +
4898 reset_ule(priv);
4899 priv->need_pusi = 1;
4900 continue;
4901 @@ -534,6 +541,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
4902 from_where += 2;
4903 }
4904
4905 + priv->ule_sndu_remain = priv->ule_sndu_len + 2;
4906 /*
4907 * State of current TS:
4908 * ts_remain (remaining bytes in the current TS cell)
4909 @@ -543,6 +551,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
4910 */
4911 switch (ts_remain) {
4912 case 1:
4913 + priv->ule_sndu_remain--;
4914 priv->ule_sndu_type = from_where[0] << 8;
4915 priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
4916 ts_remain -= 1; from_where += 1;
4917 @@ -556,6 +565,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
4918 default: /* complete ULE header is present in current TS. */
4919 /* Extract ULE type field. */
4920 if (priv->ule_sndu_type_1) {
4921 + priv->ule_sndu_type_1 = 0;
4922 priv->ule_sndu_type |= from_where[0];
4923 from_where += 1; /* points to payload start. */
4924 ts_remain -= 1;
4925 diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
4926 index d8d4214..32a7ec6 100644
4927 --- a/drivers/media/dvb/ttpci/Kconfig
4928 +++ b/drivers/media/dvb/ttpci/Kconfig
4929 @@ -68,13 +68,14 @@ config DVB_BUDGET
4930 select DVB_VES1820 if !DVB_FE_CUSTOMISE
4931 select DVB_L64781 if !DVB_FE_CUSTOMISE
4932 select DVB_TDA8083 if !DVB_FE_CUSTOMISE
4933 - select DVB_TDA10021 if !DVB_FE_CUSTOMISE
4934 - select DVB_TDA10023 if !DVB_FE_CUSTOMISE
4935 select DVB_S5H1420 if !DVB_FE_CUSTOMISE
4936 select DVB_TDA10086 if !DVB_FE_CUSTOMISE
4937 select DVB_TDA826X if !DVB_FE_CUSTOMISE
4938 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
4939 select DVB_TDA1004X if !DVB_FE_CUSTOMISE
4940 + select DVB_ISL6423 if !DVB_FE_CUSTOMISE
4941 + select DVB_STV090x if !DVB_FE_CUSTOMISE
4942 + select DVB_STV6110x if !DVB_FE_CUSTOMISE
4943 help
4944 Support for simple SAA7146 based DVB cards (so called Budget-
4945 or Nova-PCI cards) without onboard MPEG2 decoder, and without
4946 diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
4947 index 4172cb3..d4746e0 100644
4948 --- a/drivers/media/video/cx23885/cx23885-i2c.c
4949 +++ b/drivers/media/video/cx23885/cx23885-i2c.c
4950 @@ -365,7 +365,17 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
4951
4952 memset(&info, 0, sizeof(struct i2c_board_info));
4953 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
4954 - i2c_new_probed_device(&bus->i2c_adap, &info, addr_list);
4955 + /*
4956 + * We can't call i2c_new_probed_device() because it uses
4957 + * quick writes for probing and the IR receiver device only
4958 + * replies to reads.
4959 + */
4960 + if (i2c_smbus_xfer(&bus->i2c_adap, addr_list[0], 0,
4961 + I2C_SMBUS_READ, 0, I2C_SMBUS_QUICK,
4962 + NULL) >= 0) {
4963 + info.addr = addr_list[0];
4964 + i2c_new_device(&bus->i2c_adap, &info);
4965 + }
4966 }
4967
4968 return bus->i2c_rc;
4969 diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
4970 index ee1ca39..fb39f11 100644
4971 --- a/drivers/media/video/cx88/cx88-i2c.c
4972 +++ b/drivers/media/video/cx88/cx88-i2c.c
4973 @@ -188,10 +188,24 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
4974 0x18, 0x6b, 0x71,
4975 I2C_CLIENT_END
4976 };
4977 + const unsigned short *addrp;
4978
4979 memset(&info, 0, sizeof(struct i2c_board_info));
4980 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
4981 - i2c_new_probed_device(&core->i2c_adap, &info, addr_list);
4982 + /*
4983 + * We can't call i2c_new_probed_device() because it uses
4984 + * quick writes for probing and at least some R receiver
4985 + * devices only reply to reads.
4986 + */
4987 + for (addrp = addr_list; *addrp != I2C_CLIENT_END; addrp++) {
4988 + if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
4989 + I2C_SMBUS_READ, 0,
4990 + I2C_SMBUS_QUICK, NULL) >= 0) {
4991 + info.addr = *addrp;
4992 + i2c_new_device(&core->i2c_adap, &info);
4993 + break;
4994 + }
4995 + }
4996 }
4997 return core->i2c_rc;
4998 }
4999 diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
5000 index 86ff8c1..838b56f 100644
5001 --- a/drivers/media/video/uvc/uvc_driver.c
5002 +++ b/drivers/media/video/uvc/uvc_driver.c
5003 @@ -91,11 +91,16 @@ static struct uvc_format_desc uvc_fmts[] = {
5004 .fcc = V4L2_PIX_FMT_UYVY,
5005 },
5006 {
5007 - .name = "Greyscale",
5008 + .name = "Greyscale (8-bit)",
5009 .guid = UVC_GUID_FORMAT_Y800,
5010 .fcc = V4L2_PIX_FMT_GREY,
5011 },
5012 {
5013 + .name = "Greyscale (16-bit)",
5014 + .guid = UVC_GUID_FORMAT_Y16,
5015 + .fcc = V4L2_PIX_FMT_Y16,
5016 + },
5017 + {
5018 .name = "RGB Bayer",
5019 .guid = UVC_GUID_FORMAT_BY8,
5020 .fcc = V4L2_PIX_FMT_SBGGR8,
5021 @@ -2105,6 +2110,15 @@ static struct usb_device_id uvc_ids[] = {
5022 .bInterfaceSubClass = 1,
5023 .bInterfaceProtocol = 0,
5024 .driver_info = UVC_QUIRK_STREAM_NO_FID },
5025 + /* Syntek (Packard Bell EasyNote MX52 */
5026 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
5027 + | USB_DEVICE_ID_MATCH_INT_INFO,
5028 + .idVendor = 0x174f,
5029 + .idProduct = 0x8a12,
5030 + .bInterfaceClass = USB_CLASS_VIDEO,
5031 + .bInterfaceSubClass = 1,
5032 + .bInterfaceProtocol = 0,
5033 + .driver_info = UVC_QUIRK_STREAM_NO_FID },
5034 /* Syntek (Asus F9SG) */
5035 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
5036 | USB_DEVICE_ID_MATCH_INT_INFO,
5037 @@ -2169,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
5038 .bInterfaceSubClass = 1,
5039 .bInterfaceProtocol = 0,
5040 .driver_info = UVC_QUIRK_PROBE_MINMAX },
5041 + /* Arkmicro unbranded */
5042 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
5043 + | USB_DEVICE_ID_MATCH_INT_INFO,
5044 + .idVendor = 0x18ec,
5045 + .idProduct = 0x3290,
5046 + .bInterfaceClass = USB_CLASS_VIDEO,
5047 + .bInterfaceSubClass = 1,
5048 + .bInterfaceProtocol = 0,
5049 + .driver_info = UVC_QUIRK_PROBE_DEF },
5050 /* Bodelin ProScopeHR */
5051 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
5052 | USB_DEVICE_ID_MATCH_DEV_HI
5053 diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
5054 index 2bba059..d1f8840 100644
5055 --- a/drivers/media/video/uvc/uvcvideo.h
5056 +++ b/drivers/media/video/uvc/uvcvideo.h
5057 @@ -131,11 +131,13 @@ struct uvc_xu_control {
5058 #define UVC_GUID_FORMAT_Y800 \
5059 { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
5060 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
5061 +#define UVC_GUID_FORMAT_Y16 \
5062 + { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
5063 + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
5064 #define UVC_GUID_FORMAT_BY8 \
5065 { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
5066 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
5067
5068 -
5069 /* ------------------------------------------------------------------------
5070 * Driver specific constants.
5071 */
5072 diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
5073 index 4a6e718..e55f3d2 100644
5074 --- a/drivers/mfd/88pm860x-i2c.c
5075 +++ b/drivers/mfd/88pm860x-i2c.c
5076 @@ -200,7 +200,6 @@ static int __devexit pm860x_remove(struct i2c_client *client)
5077
5078 pm860x_device_exit(chip);
5079 i2c_unregister_device(chip->companion);
5080 - i2c_set_clientdata(chip->companion, NULL);
5081 i2c_set_clientdata(chip->client, NULL);
5082 kfree(chip);
5083 return 0;
5084 diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
5085 index d9fd878..e73f3f5 100644
5086 --- a/drivers/mfd/max8925-i2c.c
5087 +++ b/drivers/mfd/max8925-i2c.c
5088 @@ -173,8 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client)
5089 max8925_device_exit(chip);
5090 i2c_unregister_device(chip->adc);
5091 i2c_unregister_device(chip->rtc);
5092 - i2c_set_clientdata(chip->adc, NULL);
5093 - i2c_set_clientdata(chip->rtc, NULL);
5094 i2c_set_clientdata(chip->i2c, NULL);
5095 kfree(chip);
5096 return 0;
5097 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
5098 index 2136794..9bf1ddf 100644
5099 --- a/drivers/mmc/host/sdhci-s3c.c
5100 +++ b/drivers/mmc/host/sdhci-s3c.c
5101 @@ -373,6 +373,26 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
5102
5103 static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
5104 {
5105 + struct sdhci_host *host = platform_get_drvdata(pdev);
5106 + struct sdhci_s3c *sc = sdhci_priv(host);
5107 + int ptr;
5108 +
5109 + sdhci_remove_host(host, 1);
5110 +
5111 + for (ptr = 0; ptr < 3; ptr++) {
5112 + clk_disable(sc->clk_bus[ptr]);
5113 + clk_put(sc->clk_bus[ptr]);
5114 + }
5115 + clk_disable(sc->clk_io);
5116 + clk_put(sc->clk_io);
5117 +
5118 + iounmap(host->ioaddr);
5119 + release_resource(sc->ioarea);
5120 + kfree(sc->ioarea);
5121 +
5122 + sdhci_free_host(host);
5123 + platform_set_drvdata(pdev, NULL);
5124 +
5125 return 0;
5126 }
5127
5128 diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
5129 index 66e0323..b74a0ea 100644
5130 --- a/drivers/net/3c503.c
5131 +++ b/drivers/net/3c503.c
5132 @@ -380,6 +380,12 @@ out:
5133 return retval;
5134 }
5135
5136 +static irqreturn_t el2_probe_interrupt(int irq, void *seen)
5137 +{
5138 + *(bool *)seen = true;
5139 + return IRQ_HANDLED;
5140 +}
5141 +
5142 static int
5143 el2_open(struct net_device *dev)
5144 {
5145 @@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
5146
5147 outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
5148 do {
5149 - retval = request_irq(*irqp, NULL, 0, "bogus", dev);
5150 - if (retval >= 0) {
5151 + bool seen;
5152 +
5153 + retval = request_irq(*irqp, el2_probe_interrupt, 0,
5154 + dev->name, &seen);
5155 + if (retval == -EBUSY)
5156 + continue;
5157 + if (retval < 0)
5158 + goto err_disable;
5159 +
5160 /* Twinkle the interrupt, and check if it's seen. */
5161 - unsigned long cookie = probe_irq_on();
5162 + seen = false;
5163 + smp_wmb();
5164 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
5165 outb_p(0x00, E33G_IDCFR);
5166 - if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */
5167 - ((retval = request_irq(dev->irq = *irqp,
5168 - eip_interrupt, 0,
5169 - dev->name, dev)) == 0))
5170 - break;
5171 - } else {
5172 - if (retval != -EBUSY)
5173 - return retval;
5174 - }
5175 + msleep(1);
5176 + free_irq(*irqp, el2_probe_interrupt);
5177 + if (!seen)
5178 + continue;
5179 +
5180 + retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
5181 + dev->name, dev);
5182 + if (retval == -EBUSY)
5183 + continue;
5184 + if (retval < 0)
5185 + goto err_disable;
5186 } while (*++irqp);
5187 +
5188 if (*irqp == 0) {
5189 + err_disable:
5190 outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
5191 return -EAGAIN;
5192 }
5193 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
5194 index ac90a38..4872a80 100644
5195 --- a/drivers/net/bnx2.c
5196 +++ b/drivers/net/bnx2.c
5197 @@ -247,6 +247,7 @@ static const struct flash_spec flash_5709 = {
5198 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
5199
5200 static void bnx2_init_napi(struct bnx2 *bp);
5201 +static void bnx2_del_napi(struct bnx2 *bp);
5202
5203 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
5204 {
5205 @@ -6265,6 +6266,7 @@ open_err:
5206 bnx2_free_skbs(bp);
5207 bnx2_free_irq(bp);
5208 bnx2_free_mem(bp);
5209 + bnx2_del_napi(bp);
5210 return rc;
5211 }
5212
5213 @@ -6523,6 +6525,7 @@ bnx2_close(struct net_device *dev)
5214 bnx2_free_irq(bp);
5215 bnx2_free_skbs(bp);
5216 bnx2_free_mem(bp);
5217 + bnx2_del_napi(bp);
5218 bp->link_up = 0;
5219 netif_carrier_off(bp->dev);
5220 bnx2_set_power_state(bp, PCI_D3hot);
5221 @@ -8213,7 +8216,16 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
5222 return str;
5223 }
5224
5225 -static void __devinit
5226 +static void
5227 +bnx2_del_napi(struct bnx2 *bp)
5228 +{
5229 + int i;
5230 +
5231 + for (i = 0; i < bp->irq_nvecs; i++)
5232 + netif_napi_del(&bp->bnx2_napi[i].napi);
5233 +}
5234 +
5235 +static void
5236 bnx2_init_napi(struct bnx2 *bp)
5237 {
5238 int i;
5239 diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
5240 index 60777fd..e456886 100644
5241 --- a/drivers/net/cpmac.c
5242 +++ b/drivers/net/cpmac.c
5243 @@ -1182,7 +1182,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
5244 if (netif_msg_drv(priv))
5245 printk(KERN_ERR "%s: Could not attach to PHY\n",
5246 dev->name);
5247 - return PTR_ERR(priv->phy);
5248 + rc = PTR_ERR(priv->phy);
5249 + goto fail;
5250 }
5251
5252 if ((rc = register_netdev(dev))) {
5253 diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
5254 index 7f9960f..3556b2c 100644
5255 --- a/drivers/net/dm9000.c
5256 +++ b/drivers/net/dm9000.c
5257 @@ -476,17 +476,13 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
5258 return dm->rx_csum;
5259 }
5260
5261 -static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
5262 +static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
5263 {
5264 board_info_t *dm = to_dm9000_board(dev);
5265 - unsigned long flags;
5266
5267 if (dm->can_csum) {
5268 dm->rx_csum = data;
5269 -
5270 - spin_lock_irqsave(&dm->lock, flags);
5271 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
5272 - spin_unlock_irqrestore(&dm->lock, flags);
5273
5274 return 0;
5275 }
5276 @@ -494,6 +490,19 @@ static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
5277 return -EOPNOTSUPP;
5278 }
5279
5280 +static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
5281 +{
5282 + board_info_t *dm = to_dm9000_board(dev);
5283 + unsigned long flags;
5284 + int ret;
5285 +
5286 + spin_lock_irqsave(&dm->lock, flags);
5287 + ret = dm9000_set_rx_csum_unlocked(dev, data);
5288 + spin_unlock_irqrestore(&dm->lock, flags);
5289 +
5290 + return ret;
5291 +}
5292 +
5293 static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
5294 {
5295 board_info_t *dm = to_dm9000_board(dev);
5296 @@ -722,7 +731,7 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
5297 * Set DM9000 multicast address
5298 */
5299 static void
5300 -dm9000_hash_table(struct net_device *dev)
5301 +dm9000_hash_table_unlocked(struct net_device *dev)
5302 {
5303 board_info_t *db = netdev_priv(dev);
5304 struct dev_mc_list *mcptr;
5305 @@ -730,12 +739,9 @@ dm9000_hash_table(struct net_device *dev)
5306 u32 hash_val;
5307 u16 hash_table[4];
5308 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
5309 - unsigned long flags;
5310
5311 dm9000_dbg(db, 1, "entering %s\n", __func__);
5312
5313 - spin_lock_irqsave(&db->lock, flags);
5314 -
5315 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
5316 iow(db, oft, dev->dev_addr[i]);
5317
5318 @@ -765,6 +771,16 @@ dm9000_hash_table(struct net_device *dev)
5319 }
5320
5321 iow(db, DM9000_RCR, rcr);
5322 +}
5323 +
5324 +static void
5325 +dm9000_hash_table(struct net_device *dev)
5326 +{
5327 + board_info_t *db = netdev_priv(dev);
5328 + unsigned long flags;
5329 +
5330 + spin_lock_irqsave(&db->lock, flags);
5331 + dm9000_hash_table_unlocked(dev);
5332 spin_unlock_irqrestore(&db->lock, flags);
5333 }
5334
5335 @@ -784,7 +800,7 @@ dm9000_init_dm9000(struct net_device *dev)
5336 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
5337
5338 /* Checksum mode */
5339 - dm9000_set_rx_csum(dev, db->rx_csum);
5340 + dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
5341
5342 /* GPIO0 on pre-activate PHY */
5343 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
5344 @@ -811,7 +827,7 @@ dm9000_init_dm9000(struct net_device *dev)
5345 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
5346
5347 /* Set address filter table */
5348 - dm9000_hash_table(dev);
5349 + dm9000_hash_table_unlocked(dev);
5350
5351 imr = IMR_PAR | IMR_PTM | IMR_PRM;
5352 if (db->type != TYPE_DM9000E)
5353 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
5354 index dd8106f..17382c3 100644
5355 --- a/drivers/net/r8169.c
5356 +++ b/drivers/net/r8169.c
5357 @@ -557,6 +557,11 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
5358 break;
5359 udelay(25);
5360 }
5361 + /*
5362 + * According to hardware specs a 20us delay is required after write
5363 + * complete indication, but before sending next command.
5364 + */
5365 + udelay(20);
5366 }
5367
5368 static int mdio_read(void __iomem *ioaddr, int reg_addr)
5369 @@ -576,6 +581,12 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
5370 }
5371 udelay(25);
5372 }
5373 + /*
5374 + * According to hardware specs a 20us delay is required after read
5375 + * complete indication, but before sending next command.
5376 + */
5377 + udelay(20);
5378 +
5379 return value;
5380 }
5381
5382 diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
5383 index 088c797..e89875b 100644
5384 --- a/drivers/net/sky2.c
5385 +++ b/drivers/net/sky2.c
5386 @@ -720,11 +720,24 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
5387 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
5388 }
5389
5390 +/* Enable Rx/Tx */
5391 +static void sky2_enable_rx_tx(struct sky2_port *sky2)
5392 +{
5393 + struct sky2_hw *hw = sky2->hw;
5394 + unsigned port = sky2->port;
5395 + u16 reg;
5396 +
5397 + reg = gma_read16(hw, port, GM_GP_CTRL);
5398 + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
5399 + gma_write16(hw, port, GM_GP_CTRL, reg);
5400 +}
5401 +
5402 /* Force a renegotiation */
5403 static void sky2_phy_reinit(struct sky2_port *sky2)
5404 {
5405 spin_lock_bh(&sky2->phy_lock);
5406 sky2_phy_init(sky2->hw, sky2->port);
5407 + sky2_enable_rx_tx(sky2);
5408 spin_unlock_bh(&sky2->phy_lock);
5409 }
5410
5411 @@ -2002,7 +2015,6 @@ static void sky2_link_up(struct sky2_port *sky2)
5412 {
5413 struct sky2_hw *hw = sky2->hw;
5414 unsigned port = sky2->port;
5415 - u16 reg;
5416 static const char *fc_name[] = {
5417 [FC_NONE] = "none",
5418 [FC_TX] = "tx",
5419 @@ -2010,10 +2022,7 @@ static void sky2_link_up(struct sky2_port *sky2)
5420 [FC_BOTH] = "both",
5421 };
5422
5423 - /* enable Rx/Tx */
5424 - reg = gma_read16(hw, port, GM_GP_CTRL);
5425 - reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
5426 - gma_write16(hw, port, GM_GP_CTRL, reg);
5427 + sky2_enable_rx_tx(sky2);
5428
5429 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
5430
5431 @@ -3293,6 +3302,7 @@ static void sky2_restart(struct work_struct *work)
5432 continue;
5433
5434 sky2_hw_up(sky2);
5435 + sky2_set_multicast(dev);
5436 netif_wake_queue(dev);
5437 }
5438
5439 diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
5440 index b90d876..29f5211 100644
5441 --- a/drivers/net/usb/pegasus.h
5442 +++ b/drivers/net/usb/pegasus.h
5443 @@ -256,7 +256,7 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
5444 DEFAULT_GPIO_RESET )
5445 PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
5446 DEFAULT_GPIO_RESET | PEGASUS_II )
5447 -PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
5448 +PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
5449 DEFAULT_GPIO_RESET | PEGASUS_II )
5450 PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
5451 DEFAULT_GPIO_RESET)
5452 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
5453 index 7177abc..e491064 100644
5454 --- a/drivers/net/usb/usbnet.c
5455 +++ b/drivers/net/usb/usbnet.c
5456 @@ -1290,6 +1290,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
5457 goto out;
5458 }
5459
5460 + /* netdev_printk() needs this so do it as early as possible */
5461 + SET_NETDEV_DEV(net, &udev->dev);
5462 +
5463 dev = netdev_priv(net);
5464 dev->udev = xdev;
5465 dev->intf = udev;
5466 @@ -1374,8 +1377,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
5467 dev->rx_urb_size = dev->hard_mtu;
5468 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
5469
5470 - SET_NETDEV_DEV(net, &udev->dev);
5471 -
5472 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
5473 SET_NETDEV_DEVTYPE(net, &wlan_type);
5474 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
5475 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
5476 index b0577dd..f5b5d74 100644
5477 --- a/drivers/net/virtio_net.c
5478 +++ b/drivers/net/virtio_net.c
5479 @@ -417,7 +417,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
5480 static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
5481 {
5482 int err;
5483 - bool oom = false;
5484 + bool oom;
5485
5486 do {
5487 if (vi->mergeable_rx_bufs)
5488 @@ -427,10 +427,9 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
5489 else
5490 err = add_recvbuf_small(vi, gfp);
5491
5492 - if (err < 0) {
5493 - oom = true;
5494 + oom = err == -ENOMEM;
5495 + if (err < 0)
5496 break;
5497 - }
5498 ++vi->num;
5499 } while (err > 0);
5500 if (unlikely(vi->num > vi->max))
5501 @@ -567,7 +566,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
5502 struct virtnet_info *vi = netdev_priv(dev);
5503 int capacity;
5504
5505 -again:
5506 /* Free up any pending old buffers before queueing new ones. */
5507 free_old_xmit_skbs(vi);
5508
5509 @@ -576,14 +574,20 @@ again:
5510
5511 /* This can happen with OOM and indirect buffers. */
5512 if (unlikely(capacity < 0)) {
5513 - netif_stop_queue(dev);
5514 - dev_warn(&dev->dev, "Unexpected full queue\n");
5515 - if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
5516 - vi->svq->vq_ops->disable_cb(vi->svq);
5517 - netif_start_queue(dev);
5518 - goto again;
5519 + if (net_ratelimit()) {
5520 + if (likely(capacity == -ENOMEM)) {
5521 + dev_warn(&dev->dev,
5522 + "TX queue failure: out of memory\n");
5523 + } else {
5524 + dev->stats.tx_fifo_errors++;
5525 + dev_warn(&dev->dev,
5526 + "Unexpected TX queue failure: %d\n",
5527 + capacity);
5528 + }
5529 }
5530 - return NETDEV_TX_BUSY;
5531 + dev->stats.tx_dropped++;
5532 + kfree_skb(skb);
5533 + return NETDEV_TX_OK;
5534 }
5535 vi->svq->vq_ops->kick(vi->svq);
5536
5537 diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
5538 index dc0786c..6a6f37e 100644
5539 --- a/drivers/net/wireless/ath/ath5k/attach.c
5540 +++ b/drivers/net/wireless/ath/ath5k/attach.c
5541 @@ -124,6 +124,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
5542 ah->ah_cw_min = AR5K_TUNE_CWMIN;
5543 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
5544 ah->ah_software_retry = false;
5545 + ah->ah_current_channel = &sc->channels[0];
5546
5547 /*
5548 * Find the mac version
5549 diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
5550 index 8a3bf3a..7203f00 100644
5551 --- a/drivers/net/wireless/ath/ath9k/initvals.h
5552 +++ b/drivers/net/wireless/ath/ath9k/initvals.h
5553 @@ -246,7 +246,7 @@ static const u32 ar5416Common[][2] = {
5554 { 0x00008258, 0x00000000 },
5555 { 0x0000825c, 0x400000ff },
5556 { 0x00008260, 0x00080922 },
5557 - { 0x00008264, 0xa8000010 },
5558 + { 0x00008264, 0x88000010 },
5559 { 0x00008270, 0x00000000 },
5560 { 0x00008274, 0x40000000 },
5561 { 0x00008278, 0x003e4180 },
5562 @@ -2766,7 +2766,7 @@ static const u32 ar9280Common_9280_2[][2] = {
5563 { 0x00008258, 0x00000000 },
5564 { 0x0000825c, 0x400000ff },
5565 { 0x00008260, 0x00080922 },
5566 - { 0x00008264, 0xa8a00010 },
5567 + { 0x00008264, 0x88a00010 },
5568 { 0x00008270, 0x00000000 },
5569 { 0x00008274, 0x40000000 },
5570 { 0x00008278, 0x003e4180 },
5571 @@ -3936,7 +3936,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
5572 { 0x00008258, 0x00000000 },
5573 { 0x0000825c, 0x400000ff },
5574 { 0x00008260, 0x00080922 },
5575 - { 0x00008264, 0xa8a00010 },
5576 + { 0x00008264, 0x88a00010 },
5577 { 0x00008270, 0x00000000 },
5578 { 0x00008274, 0x40000000 },
5579 { 0x00008278, 0x003e4180 },
5580 @@ -5073,7 +5073,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
5581 { 0x00008258, 0x00000000 },
5582 { 0x0000825c, 0x400000ff },
5583 { 0x00008260, 0x00080922 },
5584 - { 0x00008264, 0xa8a00010 },
5585 + { 0x00008264, 0x88a00010 },
5586 { 0x00008270, 0x00000000 },
5587 { 0x00008274, 0x40000000 },
5588 { 0x00008278, 0x003e4180 },
5589 diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
5590 index a36501d..f33e18e 100644
5591 --- a/drivers/net/wireless/hostap/hostap_cs.c
5592 +++ b/drivers/net/wireless/hostap/hostap_cs.c
5593 @@ -603,6 +603,7 @@ static int prism2_config(struct pcmcia_device *link)
5594 local_info_t *local;
5595 int ret = 1;
5596 struct hostap_cs_priv *hw_priv;
5597 + unsigned long flags;
5598
5599 PDEBUG(DEBUG_FLOW, "prism2_config()\n");
5600
5601 @@ -637,6 +638,12 @@ static int prism2_config(struct pcmcia_device *link)
5602 link->dev_node = &hw_priv->node;
5603
5604 /*
5605 + * Make sure the IRQ handler cannot proceed until at least
5606 + * dev->base_addr is initialized.
5607 + */
5608 + spin_lock_irqsave(&local->irq_init_lock, flags);
5609 +
5610 + /*
5611 * Allocate an interrupt line. Note that this does not assign a
5612 * handler to the interrupt, unless the 'Handler' member of the
5613 * irq structure is initialized.
5614 @@ -646,7 +653,7 @@ static int prism2_config(struct pcmcia_device *link)
5615 link->irq.Handler = prism2_interrupt;
5616 ret = pcmcia_request_irq(link, &link->irq);
5617 if (ret)
5618 - goto failed;
5619 + goto failed_unlock;
5620 }
5621
5622 /*
5623 @@ -656,11 +663,13 @@ static int prism2_config(struct pcmcia_device *link)
5624 */
5625 ret = pcmcia_request_configuration(link, &link->conf);
5626 if (ret)
5627 - goto failed;
5628 + goto failed_unlock;
5629
5630 dev->irq = link->irq.AssignedIRQ;
5631 dev->base_addr = link->io.BasePort1;
5632
5633 + spin_unlock_irqrestore(&local->irq_init_lock, flags);
5634 +
5635 /* Finally, report what we've done */
5636 printk(KERN_INFO "%s: index 0x%02x: ",
5637 dev_info, link->conf.ConfigIndex);
5638 @@ -689,6 +698,8 @@ static int prism2_config(struct pcmcia_device *link)
5639 }
5640 return ret;
5641
5642 + failed_unlock:
5643 + spin_unlock_irqrestore(&local->irq_init_lock, flags);
5644 failed:
5645 kfree(hw_priv);
5646 prism2_release((u_long)link);
5647 diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
5648 index d707328..9cad06a 100644
5649 --- a/drivers/net/wireless/hostap/hostap_hw.c
5650 +++ b/drivers/net/wireless/hostap/hostap_hw.c
5651 @@ -2630,6 +2630,18 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
5652 iface = netdev_priv(dev);
5653 local = iface->local;
5654
5655 + /* Detect early interrupt before driver is fully configued */
5656 + spin_lock(&local->irq_init_lock);
5657 + if (!dev->base_addr) {
5658 + if (net_ratelimit()) {
5659 + printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
5660 + dev->name);
5661 + }
5662 + spin_unlock(&local->irq_init_lock);
5663 + return IRQ_HANDLED;
5664 + }
5665 + spin_unlock(&local->irq_init_lock);
5666 +
5667 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
5668
5669 if (local->func->card_present && !local->func->card_present(local)) {
5670 @@ -3147,6 +3159,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
5671 spin_lock_init(&local->cmdlock);
5672 spin_lock_init(&local->baplock);
5673 spin_lock_init(&local->lock);
5674 + spin_lock_init(&local->irq_init_lock);
5675 mutex_init(&local->rid_bap_mtx);
5676
5677 if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
5678 diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
5679 index 3d23891..1ba33be 100644
5680 --- a/drivers/net/wireless/hostap/hostap_wlan.h
5681 +++ b/drivers/net/wireless/hostap/hostap_wlan.h
5682 @@ -654,7 +654,7 @@ struct local_info {
5683 rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
5684 * when removing entries from the list.
5685 * TX and RX paths can use read lock. */
5686 - spinlock_t cmdlock, baplock, lock;
5687 + spinlock_t cmdlock, baplock, lock, irq_init_lock;
5688 struct mutex rid_bap_mtx;
5689 u16 infofid; /* MAC buffer id for info frame */
5690 /* txfid, intransmitfid, next_txtid, and next_alloc are protected by
5691 diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
5692 index 3bf2e6e..89dc401 100644
5693 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c
5694 +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
5695 @@ -211,6 +211,7 @@ static struct iwl_lib_ops iwl1000_lib = {
5696 .set_ct_kill = iwl1000_set_ct_threshold,
5697 },
5698 .add_bcast_station = iwl_add_bcast_station,
5699 + .recover_from_tx_stall = iwl_bg_monitor_recover,
5700 };
5701
5702 static const struct iwl_ops iwl1000_ops = {
5703 @@ -248,6 +249,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
5704 .support_ct_kill_exit = true,
5705 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
5706 .chain_noise_scale = 1000,
5707 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5708 };
5709
5710 struct iwl_cfg iwl1000_bg_cfg = {
5711 @@ -276,6 +278,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
5712 .support_ct_kill_exit = true,
5713 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
5714 .chain_noise_scale = 1000,
5715 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5716 };
5717
5718 MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
5719 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
5720 index 0728054..c92fbe4 100644
5721 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
5722 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
5723 @@ -2792,6 +2792,7 @@ static struct iwl_lib_ops iwl3945_lib = {
5724 .isr = iwl_isr_legacy,
5725 .config_ap = iwl3945_config_ap,
5726 .add_bcast_station = iwl3945_add_bcast_station,
5727 + .recover_from_tx_stall = iwl_bg_monitor_recover,
5728 };
5729
5730 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
5731 @@ -2827,6 +2828,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
5732 .led_compensation = 64,
5733 .broken_powersave = true,
5734 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5735 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5736 };
5737
5738 static struct iwl_cfg iwl3945_abg_cfg = {
5739 @@ -2845,6 +2847,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
5740 .led_compensation = 64,
5741 .broken_powersave = true,
5742 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5743 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5744 };
5745
5746 DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
5747 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
5748 index 8972166..aa49a6e 100644
5749 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
5750 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
5751 @@ -2251,6 +2251,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
5752 .led_compensation = 61,
5753 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
5754 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5755 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5756 };
5757
5758 /* Module firmware */
5759 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
5760 index e476acb..d05fad4 100644
5761 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
5762 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
5763 @@ -1500,6 +1500,7 @@ struct iwl_lib_ops iwl5000_lib = {
5764 .set_ct_kill = iwl5000_set_ct_threshold,
5765 },
5766 .add_bcast_station = iwl_add_bcast_station,
5767 + .recover_from_tx_stall = iwl_bg_monitor_recover,
5768 };
5769
5770 static struct iwl_lib_ops iwl5150_lib = {
5771 @@ -1554,6 +1555,7 @@ static struct iwl_lib_ops iwl5150_lib = {
5772 .set_ct_kill = iwl5150_set_ct_threshold,
5773 },
5774 .add_bcast_station = iwl_add_bcast_station,
5775 + .recover_from_tx_stall = iwl_bg_monitor_recover,
5776 };
5777
5778 static const struct iwl_ops iwl5000_ops = {
5779 @@ -1603,6 +1605,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
5780 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5781 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5782 .chain_noise_scale = 1000,
5783 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5784 };
5785
5786 struct iwl_cfg iwl5100_bgn_cfg = {
5787 @@ -1629,6 +1632,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
5788 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5789 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5790 .chain_noise_scale = 1000,
5791 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5792 };
5793
5794 struct iwl_cfg iwl5100_abg_cfg = {
5795 @@ -1653,6 +1657,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
5796 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5797 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5798 .chain_noise_scale = 1000,
5799 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5800 };
5801
5802 struct iwl_cfg iwl5100_agn_cfg = {
5803 @@ -1679,6 +1684,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
5804 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5805 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5806 .chain_noise_scale = 1000,
5807 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5808 };
5809
5810 struct iwl_cfg iwl5350_agn_cfg = {
5811 @@ -1705,6 +1711,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
5812 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5813 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5814 .chain_noise_scale = 1000,
5815 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5816 };
5817
5818 struct iwl_cfg iwl5150_agn_cfg = {
5819 @@ -1731,6 +1738,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
5820 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5821 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5822 .chain_noise_scale = 1000,
5823 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5824 };
5825
5826 struct iwl_cfg iwl5150_abg_cfg = {
5827 @@ -1755,6 +1763,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
5828 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
5829 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
5830 .chain_noise_scale = 1000,
5831 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5832 };
5833
5834 MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
5835 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
5836 index 92b3e64..0c965cd 100644
5837 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
5838 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
5839 @@ -277,6 +277,7 @@ static struct iwl_lib_ops iwl6000_lib = {
5840 .set_ct_kill = iwl6000_set_ct_threshold,
5841 },
5842 .add_bcast_station = iwl_add_bcast_station,
5843 + .recover_from_tx_stall = iwl_bg_monitor_recover,
5844 };
5845
5846 static const struct iwl_ops iwl6000_ops = {
5847 @@ -342,6 +343,7 @@ static struct iwl_lib_ops iwl6050_lib = {
5848 .set_calib_version = iwl6050_set_calib_version,
5849 },
5850 .add_bcast_station = iwl_add_bcast_station,
5851 + .recover_from_tx_stall = iwl_bg_monitor_recover,
5852 };
5853
5854 static const struct iwl_ops iwl6050_ops = {
5855 @@ -385,6 +387,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
5856 .support_ct_kill_exit = true,
5857 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5858 .chain_noise_scale = 1000,
5859 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5860 };
5861
5862 struct iwl_cfg iwl6000i_2abg_cfg = {
5863 @@ -416,6 +419,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
5864 .support_ct_kill_exit = true,
5865 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5866 .chain_noise_scale = 1000,
5867 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5868 };
5869
5870 struct iwl_cfg iwl6000i_2bg_cfg = {
5871 @@ -447,6 +451,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
5872 .support_ct_kill_exit = true,
5873 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5874 .chain_noise_scale = 1000,
5875 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5876 };
5877
5878 struct iwl_cfg iwl6050_2agn_cfg = {
5879 @@ -479,6 +484,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
5880 .support_ct_kill_exit = true,
5881 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5882 .chain_noise_scale = 1500,
5883 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5884 };
5885
5886 struct iwl_cfg iwl6050_2abg_cfg = {
5887 @@ -510,6 +516,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
5888 .support_ct_kill_exit = true,
5889 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5890 .chain_noise_scale = 1500,
5891 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5892 };
5893
5894 struct iwl_cfg iwl6000_3agn_cfg = {
5895 @@ -542,6 +549,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
5896 .support_ct_kill_exit = true,
5897 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
5898 .chain_noise_scale = 1000,
5899 + .monitor_recover_period = IWL_MONITORING_PERIOD,
5900 };
5901
5902 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
5903 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
5904 index bdff565..07a9a02 100644
5905 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
5906 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
5907 @@ -2106,6 +2106,13 @@ static void iwl_alive_start(struct iwl_priv *priv)
5908 /* After the ALIVE response, we can send host commands to the uCode */
5909 set_bit(STATUS_ALIVE, &priv->status);
5910
5911 + if (priv->cfg->ops->lib->recover_from_tx_stall) {
5912 + /* Enable timer to monitor the driver queues */
5913 + mod_timer(&priv->monitor_recover,
5914 + jiffies +
5915 + msecs_to_jiffies(priv->cfg->monitor_recover_period));
5916 + }
5917 +
5918 if (iwl_is_rfkill(priv))
5919 return;
5920
5921 @@ -3316,6 +3323,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
5922 priv->ucode_trace.data = (unsigned long)priv;
5923 priv->ucode_trace.function = iwl_bg_ucode_trace;
5924
5925 + if (priv->cfg->ops->lib->recover_from_tx_stall) {
5926 + init_timer(&priv->monitor_recover);
5927 + priv->monitor_recover.data = (unsigned long)priv;
5928 + priv->monitor_recover.function =
5929 + priv->cfg->ops->lib->recover_from_tx_stall;
5930 + }
5931 +
5932 if (!priv->cfg->use_isr_legacy)
5933 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
5934 iwl_irq_tasklet, (unsigned long)priv);
5935 @@ -3336,6 +3350,8 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
5936 cancel_work_sync(&priv->beacon_update);
5937 del_timer_sync(&priv->statistics_periodic);
5938 del_timer_sync(&priv->ucode_trace);
5939 + if (priv->cfg->ops->lib->recover_from_tx_stall)
5940 + del_timer_sync(&priv->monitor_recover);
5941 }
5942
5943 static void iwl_init_hw_rates(struct iwl_priv *priv,
5944 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
5945 index 049b652..a5a2de6 100644
5946 --- a/drivers/net/wireless/iwlwifi/iwl-core.c
5947 +++ b/drivers/net/wireless/iwlwifi/iwl-core.c
5948 @@ -3403,6 +3403,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
5949 }
5950 return 0;
5951 }
5952 +EXPORT_SYMBOL(iwl_force_reset);
5953 +
5954 +/**
5955 + * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
5956 + *
5957 + * During normal condition (no queue is stuck), the timer is continually set to
5958 + * execute every monitor_recover_period milliseconds after the last timer
5959 + * expired. When the queue read_ptr is at the same place, the timer is
5960 + * shorten to 100mSecs. This is
5961 + * 1) to reduce the chance that the read_ptr may wrap around (not stuck)
5962 + * 2) to detect the stuck queues quicker before the station and AP can
5963 + * disassociate each other.
5964 + *
5965 + * This function monitors all the tx queues and recover from it if any
5966 + * of the queues are stuck.
5967 + * 1. It first check the cmd queue for stuck conditions. If it is stuck,
5968 + * it will recover by resetting the firmware and return.
5969 + * 2. Then, it checks for station association. If it associates it will check
5970 + * other queues. If any queue is stuck, it will recover by resetting
5971 + * the firmware.
5972 + * Note: It the number of times the queue read_ptr to be at the same place to
5973 + * be MAX_REPEAT+1 in order to consider to be stuck.
5974 + */
5975 +/*
5976 + * The maximum number of times the read pointer of the tx queue at the
5977 + * same place without considering to be stuck.
5978 + */
5979 +#define MAX_REPEAT (2)
5980 +static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
5981 +{
5982 + struct iwl_tx_queue *txq;
5983 + struct iwl_queue *q;
5984 +
5985 + txq = &priv->txq[cnt];
5986 + q = &txq->q;
5987 + /* queue is empty, skip */
5988 + if (q->read_ptr != q->write_ptr) {
5989 + if (q->read_ptr == q->last_read_ptr) {
5990 + /* a queue has not been read from last time */
5991 + if (q->repeat_same_read_ptr > MAX_REPEAT) {
5992 + IWL_ERR(priv,
5993 + "queue %d stuck %d time. Fw reload.\n",
5994 + q->id, q->repeat_same_read_ptr);
5995 + q->repeat_same_read_ptr = 0;
5996 + iwl_force_reset(priv, IWL_FW_RESET);
5997 + } else {
5998 + q->repeat_same_read_ptr++;
5999 + IWL_DEBUG_RADIO(priv,
6000 + "queue %d, not read %d time\n",
6001 + q->id,
6002 + q->repeat_same_read_ptr);
6003 + mod_timer(&priv->monitor_recover, jiffies +
6004 + msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
6005 + }
6006 + return 1;
6007 + } else {
6008 + q->last_read_ptr = q->read_ptr;
6009 + q->repeat_same_read_ptr = 0;
6010 + }
6011 + }
6012 + return 0;
6013 +}
6014 +
6015 +void iwl_bg_monitor_recover(unsigned long data)
6016 +{
6017 + struct iwl_priv *priv = (struct iwl_priv *)data;
6018 + int cnt;
6019 +
6020 + if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6021 + return;
6022 +
6023 + /* monitor and check for stuck cmd queue */
6024 + if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
6025 + return;
6026 +
6027 + /* monitor and check for other stuck queues */
6028 + if (iwl_is_associated(priv)) {
6029 + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
6030 + /* skip as we already checked the command queue */
6031 + if (cnt == IWL_CMD_QUEUE_NUM)
6032 + continue;
6033 + if (iwl_check_stuck_queue(priv, cnt))
6034 + return;
6035 + }
6036 + }
6037 + /*
6038 + * Reschedule the timer to occur in
6039 + * priv->cfg->monitor_recover_period
6040 + */
6041 + mod_timer(&priv->monitor_recover,
6042 + jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
6043 +}
6044 +EXPORT_SYMBOL(iwl_bg_monitor_recover);
6045
6046 #ifdef CONFIG_PM
6047
6048 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
6049 index 36940a9..9076576 100644
6050 --- a/drivers/net/wireless/iwlwifi/iwl-core.h
6051 +++ b/drivers/net/wireless/iwlwifi/iwl-core.h
6052 @@ -191,6 +191,8 @@ struct iwl_lib_ops {
6053 struct iwl_temp_ops temp_ops;
6054 /* station management */
6055 void (*add_bcast_station)(struct iwl_priv *priv);
6056 + /* recover from tx queue stall */
6057 + void (*recover_from_tx_stall)(unsigned long data);
6058 };
6059
6060 struct iwl_led_ops {
6061 @@ -295,6 +297,8 @@ struct iwl_cfg {
6062 const bool support_wimax_coexist;
6063 u8 plcp_delta_threshold;
6064 s32 chain_noise_scale;
6065 + /* timer period for monitor the driver queues */
6066 + u32 monitor_recover_period;
6067 };
6068
6069 /***************************
6070 @@ -577,6 +581,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
6071 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
6072 return pci_lnk_ctl;
6073 }
6074 +
6075 +void iwl_bg_monitor_recover(unsigned long data);
6076 +
6077 #ifdef CONFIG_PM
6078 int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
6079 int iwl_pci_resume(struct pci_dev *pdev);
6080 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
6081 index ef1720a..447e14b 100644
6082 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
6083 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
6084 @@ -183,6 +183,10 @@ struct iwl_queue {
6085 int n_bd; /* number of BDs in this queue */
6086 int write_ptr; /* 1-st empty entry (index) host_w*/
6087 int read_ptr; /* last used entry (index) host_r*/
6088 + /* use for monitoring and recovering the stuck queue */
6089 + int last_read_ptr; /* storing the last read_ptr */
6090 + /* number of time read_ptr and last_read_ptr are the same */
6091 + u8 repeat_same_read_ptr;
6092 dma_addr_t dma_addr; /* physical addr for BD's */
6093 int n_window; /* safe queue window */
6094 u32 id;
6095 @@ -1039,6 +1043,11 @@ struct iwl_event_log {
6096 #define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
6097 #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
6098
6099 +/* timer constants use to monitor and recover stuck tx queues in mSecs */
6100 +#define IWL_MONITORING_PERIOD (1000)
6101 +#define IWL_ONE_HUNDRED_MSECS (100)
6102 +#define IWL_SIXTY_SECS (60000)
6103 +
6104 enum iwl_reset {
6105 IWL_RF_RESET = 0,
6106 IWL_FW_RESET,
6107 @@ -1339,6 +1348,7 @@ struct iwl_priv {
6108 struct work_struct run_time_calib_work;
6109 struct timer_list statistics_periodic;
6110 struct timer_list ucode_trace;
6111 + struct timer_list monitor_recover;
6112 bool hw_ready;
6113 /*For 3945*/
6114 #define IWL_DEFAULT_TX_POWER 0x0F
6115 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
6116 index 661e36b..a8299a3 100644
6117 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
6118 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
6119 @@ -952,6 +952,7 @@ void iwl_bg_abort_scan(struct work_struct *work)
6120
6121 mutex_lock(&priv->mutex);
6122
6123 + cancel_delayed_work_sync(&priv->scan_check);
6124 set_bit(STATUS_SCAN_ABORTING, &priv->status);
6125 iwl_send_scan_abort(priv);
6126
6127 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
6128 index c243df7..e950153 100644
6129 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
6130 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
6131 @@ -310,6 +310,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
6132 q->high_mark = 2;
6133
6134 q->write_ptr = q->read_ptr = 0;
6135 + q->last_read_ptr = 0;
6136 + q->repeat_same_read_ptr = 0;
6137
6138 return 0;
6139 }
6140 @@ -1635,6 +1637,11 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
6141 sta_id = ba_resp->sta_id;
6142 tid = ba_resp->tid;
6143 agg = &priv->stations[sta_id].tid[tid].agg;
6144 + if (unlikely(agg->txq_id != scd_flow)) {
6145 + IWL_ERR(priv, "BA scd_flow %d does not match txq_id %d\n",
6146 + scd_flow, agg->txq_id);
6147 + return;
6148 + }
6149
6150 /* Find index just before block-ack window */
6151 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
6152 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
6153 index b74a56c..84c040e 100644
6154 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
6155 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
6156 @@ -2513,6 +2513,13 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
6157 /* After the ALIVE response, we can send commands to 3945 uCode */
6158 set_bit(STATUS_ALIVE, &priv->status);
6159
6160 + if (priv->cfg->ops->lib->recover_from_tx_stall) {
6161 + /* Enable timer to monitor the driver queues */
6162 + mod_timer(&priv->monitor_recover,
6163 + jiffies +
6164 + msecs_to_jiffies(priv->cfg->monitor_recover_period));
6165 + }
6166 +
6167 if (iwl_is_rfkill(priv))
6168 return;
6169
6170 @@ -3783,6 +3790,13 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
6171
6172 iwl3945_hw_setup_deferred_work(priv);
6173
6174 + if (priv->cfg->ops->lib->recover_from_tx_stall) {
6175 + init_timer(&priv->monitor_recover);
6176 + priv->monitor_recover.data = (unsigned long)priv;
6177 + priv->monitor_recover.function =
6178 + priv->cfg->ops->lib->recover_from_tx_stall;
6179 + }
6180 +
6181 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6182 iwl3945_irq_tasklet, (unsigned long)priv);
6183 }
6184 @@ -3795,6 +3809,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
6185 cancel_delayed_work(&priv->scan_check);
6186 cancel_delayed_work(&priv->alive_start);
6187 cancel_work_sync(&priv->beacon_update);
6188 + if (priv->cfg->ops->lib->recover_from_tx_stall)
6189 + del_timer_sync(&priv->monitor_recover);
6190 }
6191
6192 static struct attribute *iwl3945_sysfs_entries[] = {
6193 diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
6194 index 7d1a3c6..cd464a2 100644
6195 --- a/drivers/net/wireless/libertas/if_sdio.c
6196 +++ b/drivers/net/wireless/libertas/if_sdio.c
6197 @@ -35,6 +35,8 @@
6198 #include <linux/mmc/card.h>
6199 #include <linux/mmc/sdio_func.h>
6200 #include <linux/mmc/sdio_ids.h>
6201 +#include <linux/mmc/sdio.h>
6202 +#include <linux/mmc/host.h>
6203
6204 #include "host.h"
6205 #include "decl.h"
6206 @@ -943,6 +945,7 @@ static int if_sdio_probe(struct sdio_func *func,
6207 int ret, i;
6208 unsigned int model;
6209 struct if_sdio_packet *packet;
6210 + struct mmc_host *host = func->card->host;
6211
6212 lbs_deb_enter(LBS_DEB_SDIO);
6213
6214 @@ -1023,6 +1026,25 @@ static int if_sdio_probe(struct sdio_func *func,
6215 if (ret)
6216 goto disable;
6217
6218 + /* For 1-bit transfers to the 8686 model, we need to enable the
6219 + * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
6220 + * bit to allow access to non-vendor registers. */
6221 + if ((card->model == IF_SDIO_MODEL_8686) &&
6222 + (host->caps & MMC_CAP_SDIO_IRQ) &&
6223 + (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
6224 + u8 reg;
6225 +
6226 + func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
6227 + reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
6228 + if (ret)
6229 + goto release_int;
6230 +
6231 + reg |= SDIO_BUS_ECSI;
6232 + sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
6233 + if (ret)
6234 + goto release_int;
6235 + }
6236 +
6237 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
6238 if (ret)
6239 goto release_int;
6240 diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
6241 index c24067f..e1e3972 100644
6242 --- a/drivers/net/wireless/p54/p54pci.c
6243 +++ b/drivers/net/wireless/p54/p54pci.c
6244 @@ -41,6 +41,8 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
6245 { PCI_DEVICE(0x1260, 0x3877) },
6246 /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
6247 { PCI_DEVICE(0x1260, 0x3886) },
6248 + /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
6249 + { PCI_DEVICE(0x1260, 0xffff) },
6250 { },
6251 };
6252
6253 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
6254 index 3749912..8abe983 100644
6255 --- a/drivers/pci/pci.c
6256 +++ b/drivers/pci/pci.c
6257 @@ -2294,6 +2294,7 @@ void pci_msi_off(struct pci_dev *dev)
6258 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
6259 }
6260 }
6261 +EXPORT_SYMBOL_GPL(pci_msi_off);
6262
6263 #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
6264 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
6265 diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme/pcie_pme.c
6266 index aac285a..d672a0a 100644
6267 --- a/drivers/pci/pcie/pme/pcie_pme.c
6268 +++ b/drivers/pci/pcie/pme/pcie_pme.c
6269 @@ -34,7 +34,7 @@
6270 * being registered. Consequently, the interrupt-based PCIe PME signaling will
6271 * not be used by any PCIe root ports in that case.
6272 */
6273 -static bool pcie_pme_disabled;
6274 +static bool pcie_pme_disabled = true;
6275
6276 /*
6277 * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
6278 @@ -64,12 +64,19 @@ bool pcie_pme_msi_disabled;
6279
6280 static int __init pcie_pme_setup(char *str)
6281 {
6282 - if (!strcmp(str, "off"))
6283 - pcie_pme_disabled = true;
6284 - else if (!strcmp(str, "force"))
6285 + if (!strncmp(str, "auto", 4))
6286 + pcie_pme_disabled = false;
6287 + else if (!strncmp(str, "force", 5))
6288 pcie_pme_force_enable = true;
6289 - else if (!strcmp(str, "nomsi"))
6290 - pcie_pme_msi_disabled = true;
6291 +
6292 + str = strchr(str, ',');
6293 + if (str) {
6294 + str++;
6295 + str += strspn(str, " \t");
6296 + if (*str && !strcmp(str, "nomsi"))
6297 + pcie_pme_msi_disabled = true;
6298 + }
6299 +
6300 return 1;
6301 }
6302 __setup("pcie_pme=", pcie_pme_setup);
6303 diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
6304 index 6df5dff..ff78e26 100644
6305 --- a/drivers/pcmcia/ds.c
6306 +++ b/drivers/pcmcia/ds.c
6307 @@ -1366,6 +1366,7 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
6308 INIT_LIST_HEAD(&socket->devices_list);
6309 memset(&socket->pcmcia_state, 0, sizeof(u8));
6310 socket->device_count = 0;
6311 + atomic_set(&socket->present, 0);
6312
6313 ret = pccard_register_pcmcia(socket, &pcmcia_bus_callback);
6314 if (ret) {
6315 @@ -1374,8 +1375,6 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
6316 return ret;
6317 }
6318
6319 - atomic_set(&socket->present, 0);
6320 -
6321 return 0;
6322 }
6323
6324 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
6325 index c4ec5c1..a01bbe2 100644
6326 --- a/drivers/rtc/rtc-ds1307.c
6327 +++ b/drivers/rtc/rtc-ds1307.c
6328 @@ -775,7 +775,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
6329
6330 read_rtc:
6331 /* read RTC registers */
6332 - tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf);
6333 + tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
6334 if (tmp != 8) {
6335 pr_debug("read error %d\n", tmp);
6336 err = -EIO;
6337 @@ -860,7 +860,7 @@ read_rtc:
6338 if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
6339 tmp += 12;
6340 i2c_smbus_write_byte_data(client,
6341 - DS1307_REG_HOUR,
6342 + ds1307->offset + DS1307_REG_HOUR,
6343 bin2bcd(tmp));
6344 }
6345
6346 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
6347 index 9c0c911..1a5bf57 100644
6348 --- a/drivers/scsi/aacraid/commctrl.c
6349 +++ b/drivers/scsi/aacraid/commctrl.c
6350 @@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
6351 /* Does this really need to be GFP_DMA? */
6352 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
6353 if(!p) {
6354 - kfree (usg);
6355 - dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
6356 + dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
6357 usg->sg[i].count,i,usg->count));
6358 + kfree(usg);
6359 rcode = -ENOMEM;
6360 goto cleanup;
6361 }
6362 diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/