Contents of /genpatches-2.6/tags/2.6.32-15/1011_linux-2.6.32.12.patch

Revision 1735
Committed Wed Aug 4 11:25:09 2010 UTC by mpagano
File size: 315469 bytes
Log message: 2.6.32-15 release
1 diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
2 index 81c0c59..e1bb5b2 100644
3 --- a/Documentation/i2c/busses/i2c-i801
4 +++ b/Documentation/i2c/busses/i2c-i801
5 @@ -15,7 +15,8 @@ Supported adapters:
6 * Intel 82801I (ICH9)
7 * Intel EP80579 (Tolapai)
8 * Intel 82801JI (ICH10)
9 - * Intel PCH
10 + * Intel 3400/5 Series (PCH)
11 + * Intel Cougar Point (PCH)
12 Datasheets: Publicly available at the Intel website
13
14 Authors:
15 diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
16 index aafcaa6..387eb9c 100644
17 --- a/Documentation/laptops/thinkpad-acpi.txt
18 +++ b/Documentation/laptops/thinkpad-acpi.txt
19 @@ -460,6 +460,8 @@ event code Key Notes
20 For Lenovo ThinkPads with a new
21 BIOS, it has to be handled either
22 by the ACPI OSI, or by userspace.
23 + The driver does the right thing,
24 + never mess with this.
25 0x1011 0x10 FN+END Brightness down. See brightness
26 up for details.
27
28 @@ -582,46 +584,15 @@ with hotkey_report_mode.
29
30 Brightness hotkey notes:
31
32 -These are the current sane choices for brightness key mapping in
33 -thinkpad-acpi:
34 +Don't mess with the brightness hotkeys in a Thinkpad. If you want
35 +notifications for OSD, use the sysfs backlight class event support.
36
37 -For IBM and Lenovo models *without* ACPI backlight control (the ones on
38 -which thinkpad-acpi will autoload its backlight interface by default,
39 -and on which ACPI video does not export a backlight interface):
40 -
41 -1. Don't enable or map the brightness hotkeys in thinkpad-acpi, as
42 - these older firmware versions unfortunately won't respect the hotkey
43 - mask for brightness keys anyway, and always reacts to them. This
44 - usually work fine, unless X.org drivers are doing something to block
45 - the BIOS. In that case, use (3) below. This is the default mode of
46 - operation.
47 -
48 -2. Enable the hotkeys, but map them to something else that is NOT
49 - KEY_BRIGHTNESS_UP/DOWN or any other keycode that would cause
50 - userspace to try to change the backlight level, and use that as an
51 - on-screen-display hint.
52 -
53 -3. IF AND ONLY IF X.org drivers find a way to block the firmware from
54 - automatically changing the brightness, enable the hotkeys and map
55 - them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN, and feed that to
56 - something that calls xbacklight. thinkpad-acpi will not be able to
57 - change brightness in that case either, so you should disable its
58 - backlight interface.
59 -
60 -For Lenovo models *with* ACPI backlight control:
61 -
62 -1. Load up ACPI video and use that. ACPI video will report ACPI
63 - events for brightness change keys. Do not mess with thinkpad-acpi
64 - defaults in this case. thinkpad-acpi should not have anything to do
65 - with backlight events in a scenario where ACPI video is loaded:
66 - brightness hotkeys must be disabled, and the backlight interface is
67 - to be kept disabled as well. This is the default mode of operation.
68 -
69 -2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
70 - and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN. Process
71 - these keys on userspace somehow (e.g. by calling xbacklight).
72 - The driver will do this automatically if it detects that ACPI video
73 - has been disabled.
74 +The driver will issue KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN events
75 +automatically for the cases where userspace has to do something to
76 +implement brightness changes. When you override these events, you will
77 +either fail to handle properly the ThinkPads that require explicit
78 +action to change backlight brightness, or the ThinkPads that require
79 +that no action be taken to work properly.
80
81
82 Bluetooth
83 @@ -679,6 +650,10 @@ LCD, CRT or DVI (if available). The following commands are available:
84 echo expand_toggle > /proc/acpi/ibm/video
85 echo video_switch > /proc/acpi/ibm/video
86
87 +NOTE: Access to this feature is restricted to processes owning the
88 +CAP_SYS_ADMIN capability for safety reasons, as it can interact badly
89 +enough with some versions of X.org to crash it.
90 +
91 Each video output device can be enabled or disabled individually.
92 Reading /proc/acpi/ibm/video shows the status of each device.
93
94 @@ -1465,3 +1440,5 @@ Sysfs interface changelog:
95 and it is always able to disable hot keys. Very old
96 thinkpads are properly supported. hotkey_bios_mask
97 is deprecated and marked for removal.
98 +
99 +0x020600: Marker for backlight change event support.
100 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
101 index 3cb8fa3..61b3a33 100644
102 --- a/arch/arm/boot/compressed/head.S
103 +++ b/arch/arm/boot/compressed/head.S
104 @@ -164,7 +164,7 @@ not_angel:
105 adr r0, LC0
106 ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
107 THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
108 - THUMB( ldr sp, [r0, #28] )
109 + THUMB( ldr sp, [r0, #32] )
110 subs r0, r0, r1 @ calculate the delta offset
111
112 @ if delta is zero, we are
113 diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
114 index 0ad09f0..2eb6365 100644
115 --- a/arch/ia64/kvm/kvm-ia64.c
116 +++ b/arch/ia64/kvm/kvm-ia64.c
117 @@ -1797,7 +1797,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
118 {
119 struct kvm_memory_slot *memslot;
120 int r, i;
121 - long n, base;
122 + long base;
123 + unsigned long n;
124 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
125 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
126
127 @@ -1810,7 +1811,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
128 if (!memslot->dirty_bitmap)
129 goto out;
130
131 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
132 + n = kvm_dirty_bitmap_bytes(memslot);
133 base = memslot->base_gfn / BITS_PER_LONG;
134
135 for (i = 0; i < n/sizeof(long); ++i) {
136 @@ -1826,7 +1827,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
137 struct kvm_dirty_log *log)
138 {
139 int r;
140 - int n;
141 + unsigned long n;
142 struct kvm_memory_slot *memslot;
143 int is_dirty = 0;
144
145 @@ -1844,7 +1845,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
146 if (is_dirty) {
147 kvm_flush_remote_tlbs(kvm);
148 memslot = &kvm->memslots[log->slot];
149 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
150 + n = kvm_dirty_bitmap_bytes(memslot);
151 memset(memslot->dirty_bitmap, 0, n);
152 }
153 r = 0;
154 diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
155 index ccb1d93..bf6939c 100644
156 --- a/arch/sh/include/asm/elf.h
157 +++ b/arch/sh/include/asm/elf.h
158 @@ -212,7 +212,9 @@ extern void __kernel_vsyscall;
159
160 #define VSYSCALL_AUX_ENT \
161 if (vdso_enabled) \
162 - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
163 + NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
164 + else \
165 + NEW_AUX_ENT(AT_IGNORE, 0);
166 #else
167 #define VSYSCALL_AUX_ENT
168 #endif /* CONFIG_VSYSCALL */
169 @@ -220,7 +222,7 @@ extern void __kernel_vsyscall;
170 #ifdef CONFIG_SH_FPU
171 #define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
172 #else
173 -#define FPU_AUX_ENT
174 +#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
175 #endif
176
177 extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
178 diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
179 index 160db10..71a9c3c 100644
180 --- a/arch/sh/kernel/smp.c
181 +++ b/arch/sh/kernel/smp.c
182 @@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
183 unsigned int cpu;
184 struct mm_struct *mm = &init_mm;
185
186 + enable_mmu();
187 atomic_inc(&mm->mm_count);
188 atomic_inc(&mm->mm_users);
189 current->active_mm = mm;
190 diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
191 index 2201e9c..c1ea9eb 100644
192 --- a/arch/um/sys-x86_64/Makefile
193 +++ b/arch/um/sys-x86_64/Makefile
194 @@ -8,7 +8,8 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
195 setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \
196 sysrq.o ksyms.o tls.o
197
198 -subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
199 +subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \
200 + lib/rwsem_64.o
201 subarch-obj-$(CONFIG_MODULES) += kernel/module.o
202
203 ldt-y = ../sys-i386/ldt.o
204 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
205 index f2824fb..0e56610 100644
206 --- a/arch/x86/Kconfig.cpu
207 +++ b/arch/x86/Kconfig.cpu
208 @@ -323,7 +323,7 @@ config X86_L1_CACHE_SHIFT
209
210 config X86_XADD
211 def_bool y
212 - depends on X86_32 && !M386
213 + depends on X86_64 || !M386
214
215 config X86_PPRO_FENCE
216 bool "PentiumPro memory ordering errata workaround"
217 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
218 index 9cfc88b..2cbf0a2 100644
219 --- a/arch/x86/include/asm/cpufeature.h
220 +++ b/arch/x86/include/asm/cpufeature.h
221 @@ -153,6 +153,7 @@
222 #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
223 #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
224 #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
225 +#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
226
227 /*
228 * Auxiliary flags: Linux defined - For features scattered in various
229 diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
230 index 7c18e12..5ed59ec 100644
231 --- a/arch/x86/include/asm/kvm_emulate.h
232 +++ b/arch/x86/include/asm/kvm_emulate.h
233 @@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
234 struct x86_emulate_ops {
235 /*
236 * read_std: Read bytes of standard (non-emulated/special) memory.
237 - * Used for instruction fetch, stack operations, and others.
238 + * Used for descriptor reading.
239 * @addr: [IN ] Linear address from which to read.
240 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
241 * @bytes: [IN ] Number of bytes to read from memory.
242 */
243 int (*read_std)(unsigned long addr, void *val,
244 - unsigned int bytes, struct kvm_vcpu *vcpu);
245 + unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
246 +
247 + /*
248 + * fetch: Read bytes of standard (non-emulated/special) memory.
249 + * Used for instruction fetch.
250 + * @addr: [IN ] Linear address from which to read.
251 + * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
252 + * @bytes: [IN ] Number of bytes to read from memory.
253 + */
254 + int (*fetch)(unsigned long addr, void *val,
255 + unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
256
257 /*
258 * read_emulated: Read bytes from emulated/special memory area.
259 @@ -168,6 +178,7 @@ struct x86_emulate_ctxt {
260
261 /* Execution mode, passed to the emulator. */
262 #define X86EMUL_MODE_REAL 0 /* Real mode. */
263 +#define X86EMUL_MODE_VM86 1 /* Virtual 8086 mode. */
264 #define X86EMUL_MODE_PROT16 2 /* 16-bit protected mode. */
265 #define X86EMUL_MODE_PROT32 4 /* 32-bit protected mode. */
266 #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */
267 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
268 index d759a1f..6ead43e 100644
269 --- a/arch/x86/include/asm/kvm_host.h
270 +++ b/arch/x86/include/asm/kvm_host.h
271 @@ -256,7 +256,8 @@ struct kvm_mmu {
272 void (*new_cr3)(struct kvm_vcpu *vcpu);
273 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
274 void (*free)(struct kvm_vcpu *vcpu);
275 - gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
276 + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
277 + u32 *error);
278 void (*prefetch_page)(struct kvm_vcpu *vcpu,
279 struct kvm_mmu_page *page);
280 int (*sync_page)(struct kvm_vcpu *vcpu,
281 @@ -601,8 +602,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
282 unsigned long value);
283
284 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
285 -int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
286 - int type_bits, int seg);
287 +int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
288
289 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
290
291 @@ -645,6 +645,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
292 int kvm_mmu_load(struct kvm_vcpu *vcpu);
293 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
294 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
295 +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
296 +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
297 +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
298 +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
299
300 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
301
302 @@ -658,6 +662,7 @@ void kvm_disable_tdp(void);
303
304 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
305 int complete_pio(struct kvm_vcpu *vcpu);
306 +bool kvm_check_iopl(struct kvm_vcpu *vcpu);
307
308 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
309
310 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
311 index 8cb8489..7825b0c 100644
312 --- a/arch/x86/include/asm/msr-index.h
313 +++ b/arch/x86/include/asm/msr-index.h
314 @@ -125,6 +125,7 @@
315 #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
316 #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
317 #define FAM10H_MMIO_CONF_BASE_SHIFT 20
318 +#define MSR_FAM10H_NODE_ID 0xc001100c
319
320 /* K8 MSRs */
321 #define MSR_K8_TOP_MEM1 0xc001001a
322 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
323 index ca7517d..606ede1 100644
324 --- a/arch/x86/include/asm/rwsem.h
325 +++ b/arch/x86/include/asm/rwsem.h
326 @@ -41,6 +41,7 @@
327 #include <linux/list.h>
328 #include <linux/spinlock.h>
329 #include <linux/lockdep.h>
330 +#include <asm/asm.h>
331
332 struct rwsem_waiter;
333
334 @@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
335
336 /*
337 * the semaphore definition
338 + *
339 + * The bias values and the counter type limits the number of
340 + * potential readers/writers to 32767 for 32 bits and 2147483647
341 + * for 64 bits.
342 */
343
344 -#define RWSEM_UNLOCKED_VALUE 0x00000000
345 -#define RWSEM_ACTIVE_BIAS 0x00000001
346 -#define RWSEM_ACTIVE_MASK 0x0000ffff
347 -#define RWSEM_WAITING_BIAS (-0x00010000)
348 +#ifdef CONFIG_X86_64
349 +# define RWSEM_ACTIVE_MASK 0xffffffffL
350 +#else
351 +# define RWSEM_ACTIVE_MASK 0x0000ffffL
352 +#endif
353 +
354 +#define RWSEM_UNLOCKED_VALUE 0x00000000L
355 +#define RWSEM_ACTIVE_BIAS 0x00000001L
356 +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
357 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
358 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
359
360 +typedef signed long rwsem_count_t;
361 +
362 struct rw_semaphore {
363 - signed long count;
364 + rwsem_count_t count;
365 spinlock_t wait_lock;
366 struct list_head wait_list;
367 #ifdef CONFIG_DEBUG_LOCK_ALLOC
368 @@ -105,7 +117,7 @@ do { \
369 static inline void __down_read(struct rw_semaphore *sem)
370 {
371 asm volatile("# beginning down_read\n\t"
372 - LOCK_PREFIX " incl (%%eax)\n\t"
373 + LOCK_PREFIX _ASM_INC "(%1)\n\t"
374 /* adds 0x00000001, returns the old value */
375 " jns 1f\n"
376 " call call_rwsem_down_read_failed\n"
377 @@ -121,14 +133,14 @@ static inline void __down_read(struct rw_semaphore *sem)
378 */
379 static inline int __down_read_trylock(struct rw_semaphore *sem)
380 {
381 - __s32 result, tmp;
382 + rwsem_count_t result, tmp;
383 asm volatile("# beginning __down_read_trylock\n\t"
384 - " movl %0,%1\n\t"
385 + " mov %0,%1\n\t"
386 "1:\n\t"
387 - " movl %1,%2\n\t"
388 - " addl %3,%2\n\t"
389 + " mov %1,%2\n\t"
390 + " add %3,%2\n\t"
391 " jle 2f\n\t"
392 - LOCK_PREFIX " cmpxchgl %2,%0\n\t"
393 + LOCK_PREFIX " cmpxchg %2,%0\n\t"
394 " jnz 1b\n\t"
395 "2:\n\t"
396 "# ending __down_read_trylock\n\t"
397 @@ -143,13 +155,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
398 */
399 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
400 {
401 - int tmp;
402 + rwsem_count_t tmp;
403
404 tmp = RWSEM_ACTIVE_WRITE_BIAS;
405 asm volatile("# beginning down_write\n\t"
406 - LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
407 + LOCK_PREFIX " xadd %1,(%2)\n\t"
408 /* subtract 0x0000ffff, returns the old value */
409 - " testl %%edx,%%edx\n\t"
410 + " test %1,%1\n\t"
411 /* was the count 0 before? */
412 " jz 1f\n"
413 " call call_rwsem_down_write_failed\n"
414 @@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem)
415 */
416 static inline int __down_write_trylock(struct rw_semaphore *sem)
417 {
418 - signed long ret = cmpxchg(&sem->count,
419 - RWSEM_UNLOCKED_VALUE,
420 - RWSEM_ACTIVE_WRITE_BIAS);
421 + rwsem_count_t ret = cmpxchg(&sem->count,
422 + RWSEM_UNLOCKED_VALUE,
423 + RWSEM_ACTIVE_WRITE_BIAS);
424 if (ret == RWSEM_UNLOCKED_VALUE)
425 return 1;
426 return 0;
427 @@ -183,9 +195,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
428 */
429 static inline void __up_read(struct rw_semaphore *sem)
430 {
431 - __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
432 + rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
433 asm volatile("# beginning __up_read\n\t"
434 - LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
435 + LOCK_PREFIX " xadd %1,(%2)\n\t"
436 /* subtracts 1, returns the old value */
437 " jns 1f\n\t"
438 " call call_rwsem_wake\n"
439 @@ -201,18 +213,18 @@ static inline void __up_read(struct rw_semaphore *sem)
440 */
441 static inline void __up_write(struct rw_semaphore *sem)
442 {
443 + rwsem_count_t tmp;
444 asm volatile("# beginning __up_write\n\t"
445 - " movl %2,%%edx\n\t"
446 - LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
447 + LOCK_PREFIX " xadd %1,(%2)\n\t"
448 /* tries to transition
449 0xffff0001 -> 0x00000000 */
450 " jz 1f\n"
451 " call call_rwsem_wake\n"
452 "1:\n\t"
453 "# ending __up_write\n"
454 - : "+m" (sem->count)
455 - : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
456 - : "memory", "cc", "edx");
457 + : "+m" (sem->count), "=d" (tmp)
458 + : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
459 + : "memory", "cc");
460 }
461
462 /*
463 @@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem)
464 static inline void __downgrade_write(struct rw_semaphore *sem)
465 {
466 asm volatile("# beginning __downgrade_write\n\t"
467 - LOCK_PREFIX " addl %2,(%%eax)\n\t"
468 - /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
469 + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
470 + /*
471 + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
472 + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
473 + */
474 " jns 1f\n\t"
475 " call call_rwsem_downgrade_wake\n"
476 "1:\n\t"
477 "# ending __downgrade_write\n"
478 : "+m" (sem->count)
479 - : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
480 + : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
481 : "memory", "cc");
482 }
483
484 /*
485 * implement atomic add functionality
486 */
487 -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
488 +static inline void rwsem_atomic_add(rwsem_count_t delta,
489 + struct rw_semaphore *sem)
490 {
491 - asm volatile(LOCK_PREFIX "addl %1,%0"
492 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
493 : "+m" (sem->count)
494 - : "ir" (delta));
495 + : "er" (delta));
496 }
497
498 /*
499 * implement exchange and add functionality
500 */
501 -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
502 +static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
503 + struct rw_semaphore *sem)
504 {
505 - int tmp = delta;
506 + rwsem_count_t tmp = delta;
507
508 asm volatile(LOCK_PREFIX "xadd %0,%1"
509 : "+r" (tmp), "+m" (sem->count)
510 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
511 index 1e79678..4cfc908 100644
512 --- a/arch/x86/include/asm/smp.h
513 +++ b/arch/x86/include/asm/smp.h
514 @@ -135,6 +135,8 @@ int native_cpu_disable(void);
515 void native_cpu_die(unsigned int cpu);
516 void native_play_dead(void);
517 void play_dead_common(void);
518 +void wbinvd_on_cpu(int cpu);
519 +int wbinvd_on_all_cpus(void);
520
521 void native_send_call_func_ipi(const struct cpumask *mask);
522 void native_send_call_func_single_ipi(int cpu);
523 @@ -147,6 +149,13 @@ static inline int num_booting_cpus(void)
524 {
525 return cpumask_weight(cpu_callout_mask);
526 }
527 +#else /* !CONFIG_SMP */
528 +#define wbinvd_on_cpu(cpu) wbinvd()
529 +static inline int wbinvd_on_all_cpus(void)
530 +{
531 + wbinvd();
532 + return 0;
533 +}
534 #endif /* CONFIG_SMP */
535
536 extern unsigned disabled_cpus __cpuinitdata;
537 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
538 index 23fc9fe..c0ebc63 100644
539 --- a/arch/x86/kernel/amd_iommu.c
540 +++ b/arch/x86/kernel/amd_iommu.c
541 @@ -2239,9 +2239,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
542
543 free_pagetable(domain);
544
545 - domain_id_free(domain->id);
546 -
547 - kfree(domain);
548 + protection_domain_free(domain);
549
550 dom->priv = NULL;
551 }
552 diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
553 index 362ab88..3925adf 100644
554 --- a/arch/x86/kernel/amd_iommu_init.c
555 +++ b/arch/x86/kernel/amd_iommu_init.c
556 @@ -1284,6 +1284,8 @@ int __init amd_iommu_init(void)
557 if (ret)
558 goto free;
559
560 + enable_iommus();
561 +
562 if (iommu_pass_through)
563 ret = amd_iommu_init_passthrough();
564 else
565 @@ -1294,8 +1296,6 @@ int __init amd_iommu_init(void)
566
567 amd_iommu_init_api();
568
569 - enable_iommus();
570 -
571 if (iommu_pass_through)
572 goto out;
573
574 @@ -1314,6 +1314,8 @@ out:
575 return ret;
576
577 free:
578 + disable_iommus();
579 +
580 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
581 get_order(MAX_DOMAIN_ID/8));
582
583 diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
584 index 128111d..082089e 100644
585 --- a/arch/x86/kernel/aperture_64.c
586 +++ b/arch/x86/kernel/aperture_64.c
587 @@ -389,6 +389,7 @@ void __init gart_iommu_hole_init(void)
588 for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
589 int bus;
590 int dev_base, dev_limit;
591 + u32 ctl;
592
593 bus = bus_dev_ranges[i].bus;
594 dev_base = bus_dev_ranges[i].dev_base;
595 @@ -401,7 +402,19 @@ void __init gart_iommu_hole_init(void)
596 iommu_detected = 1;
597 gart_iommu_aperture = 1;
598
599 - aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
600 + ctl = read_pci_config(bus, slot, 3,
601 + AMD64_GARTAPERTURECTL);
602 +
603 + /*
604 + * Before we do anything else disable the GART. It may
605 + * still be enabled if we boot into a crash-kernel here.
606 + * Reconfiguring the GART while it is enabled could have
607 + * unknown side-effects.
608 + */
609 + ctl &= ~GARTEN;
610 + write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
611 +
612 + aper_order = (ctl >> 1) & 7;
613 aper_size = (32 * 1024 * 1024) << aper_order;
614 aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
615 aper_base <<= 25;
616 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
617 index 0e69e17..168e172 100644
618 --- a/arch/x86/kernel/apic/apic.c
619 +++ b/arch/x86/kernel/apic/apic.c
620 @@ -1664,8 +1664,10 @@ int __init APIC_init_uniprocessor(void)
621 }
622 #endif
623
624 +#ifndef CONFIG_SMP
625 enable_IR_x2apic();
626 default_setup_apic_routing();
627 +#endif
628
629 verify_local_APIC();
630 connect_bsp_APIC();
631 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
632 index c910a71..3940fee 100644
633 --- a/arch/x86/kernel/cpu/amd.c
634 +++ b/arch/x86/kernel/cpu/amd.c
635 @@ -254,59 +254,36 @@ static int __cpuinit nearby_node(int apicid)
636
637 /*
638 * Fixup core topology information for AMD multi-node processors.
639 - * Assumption 1: Number of cores in each internal node is the same.
640 - * Assumption 2: Mixed systems with both single-node and dual-node
641 - * processors are not supported.
642 + * Assumption: Number of cores in each internal node is the same.
643 */
644 #ifdef CONFIG_X86_HT
645 static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
646 {
647 -#ifdef CONFIG_PCI
648 - u32 t, cpn;
649 - u8 n, n_id;
650 + unsigned long long value;
651 + u32 nodes, cores_per_node;
652 int cpu = smp_processor_id();
653
654 + if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
655 + return;
656 +
657 /* fixup topology information only once for a core */
658 if (cpu_has(c, X86_FEATURE_AMD_DCM))
659 return;
660
661 - /* check for multi-node processor on boot cpu */
662 - t = read_pci_config(0, 24, 3, 0xe8);
663 - if (!(t & (1 << 29)))
664 + rdmsrl(MSR_FAM10H_NODE_ID, value);
665 +
666 + nodes = ((value >> 3) & 7) + 1;
667 + if (nodes == 1)
668 return;
669
670 set_cpu_cap(c, X86_FEATURE_AMD_DCM);
671 + cores_per_node = c->x86_max_cores / nodes;
672
673 - /* cores per node: each internal node has half the number of cores */
674 - cpn = c->x86_max_cores >> 1;
675 + /* store NodeID, use llc_shared_map to store sibling info */
676 + per_cpu(cpu_llc_id, cpu) = value & 7;
677
678 - /* even-numbered NB_id of this dual-node processor */
679 - n = c->phys_proc_id << 1;
680 -
681 - /*
682 - * determine internal node id and assign cores fifty-fifty to
683 - * each node of the dual-node processor
684 - */
685 - t = read_pci_config(0, 24 + n, 3, 0xe8);
686 - n = (t>>30) & 0x3;
687 - if (n == 0) {
688 - if (c->cpu_core_id < cpn)
689 - n_id = 0;
690 - else
691 - n_id = 1;
692 - } else {
693 - if (c->cpu_core_id < cpn)
694 - n_id = 1;
695 - else
696 - n_id = 0;
697 - }
698 -
699 - /* compute entire NodeID, use llc_shared_map to store sibling info */
700 - per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
701 -
702 - /* fixup core id to be in range from 0 to cpn */
703 - c->cpu_core_id = c->cpu_core_id % cpn;
704 -#endif
705 + /* fixup core id to be in range from 0 to (cores_per_node - 1) */
706 + c->cpu_core_id = c->cpu_core_id % cores_per_node;
707 }
708 #endif
709
710 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
711 index 8178d03..be2d432 100644
712 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
713 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
714 @@ -18,6 +18,7 @@
715 #include <asm/processor.h>
716 #include <linux/smp.h>
717 #include <asm/k8.h>
718 +#include <asm/smp.h>
719
720 #define LVL_1_INST 1
721 #define LVL_1_DATA 2
722 @@ -150,7 +151,8 @@ struct _cpuid4_info {
723 union _cpuid4_leaf_ebx ebx;
724 union _cpuid4_leaf_ecx ecx;
725 unsigned long size;
726 - unsigned long can_disable;
727 + bool can_disable;
728 + unsigned int l3_indices;
729 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
730 };
731
732 @@ -160,7 +162,8 @@ struct _cpuid4_info_regs {
733 union _cpuid4_leaf_ebx ebx;
734 union _cpuid4_leaf_ecx ecx;
735 unsigned long size;
736 - unsigned long can_disable;
737 + bool can_disable;
738 + unsigned int l3_indices;
739 };
740
741 unsigned short num_cache_leaves;
742 @@ -290,6 +293,36 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
743 (ebx->split.ways_of_associativity + 1) - 1;
744 }
745
746 +struct _cache_attr {
747 + struct attribute attr;
748 + ssize_t (*show)(struct _cpuid4_info *, char *);
749 + ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
750 +};
751 +
752 +#ifdef CONFIG_CPU_SUP_AMD
753 +static unsigned int __cpuinit amd_calc_l3_indices(void)
754 +{
755 + /*
756 + * We're called over smp_call_function_single() and therefore
757 + * are on the correct cpu.
758 + */
759 + int cpu = smp_processor_id();
760 + int node = cpu_to_node(cpu);
761 + struct pci_dev *dev = node_to_k8_nb_misc(node);
762 + unsigned int sc0, sc1, sc2, sc3;
763 + u32 val = 0;
764 +
765 + pci_read_config_dword(dev, 0x1C4, &val);
766 +
767 + /* calculate subcache sizes */
768 + sc0 = !(val & BIT(0));
769 + sc1 = !(val & BIT(4));
770 + sc2 = !(val & BIT(8)) + !(val & BIT(9));
771 + sc3 = !(val & BIT(12)) + !(val & BIT(13));
772 +
773 + return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
774 +}
775 +
776 static void __cpuinit
777 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
778 {
779 @@ -299,12 +332,103 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
780 if (boot_cpu_data.x86 == 0x11)
781 return;
782
783 - /* see erratum #382 */
784 - if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
785 + /* see errata #382 and #388 */
786 + if ((boot_cpu_data.x86 == 0x10) &&
787 + ((boot_cpu_data.x86_model < 0x8) ||
788 + (boot_cpu_data.x86_mask < 0x1)))
789 return;
790
791 - this_leaf->can_disable = 1;
792 + this_leaf->can_disable = true;
793 + this_leaf->l3_indices = amd_calc_l3_indices();
794 +}
795 +
796 +static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
797 + unsigned int index)
798 +{
799 + int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
800 + int node = amd_get_nb_id(cpu);
801 + struct pci_dev *dev = node_to_k8_nb_misc(node);
802 + unsigned int reg = 0;
803 +
804 + if (!this_leaf->can_disable)
805 + return -EINVAL;
806 +
807 + if (!dev)
808 + return -EINVAL;
809 +
810 + pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
811 + return sprintf(buf, "0x%08x\n", reg);
812 +}
813 +
814 +#define SHOW_CACHE_DISABLE(index) \
815 +static ssize_t \
816 +show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
817 +{ \
818 + return show_cache_disable(this_leaf, buf, index); \
819 +}
820 +SHOW_CACHE_DISABLE(0)
821 +SHOW_CACHE_DISABLE(1)
822 +
823 +static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
824 + const char *buf, size_t count, unsigned int index)
825 +{
826 + int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
827 + int node = amd_get_nb_id(cpu);
828 + struct pci_dev *dev = node_to_k8_nb_misc(node);
829 + unsigned long val = 0;
830 +
831 +#define SUBCACHE_MASK (3UL << 20)
832 +#define SUBCACHE_INDEX 0xfff
833 +
834 + if (!this_leaf->can_disable)
835 + return -EINVAL;
836 +
837 + if (!capable(CAP_SYS_ADMIN))
838 + return -EPERM;
839 +
840 + if (!dev)
841 + return -EINVAL;
842 +
843 + if (strict_strtoul(buf, 10, &val) < 0)
844 + return -EINVAL;
845 +
846 + /* do not allow writes outside of allowed bits */
847 + if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
848 + ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
849 + return -EINVAL;
850 +
851 + val |= BIT(30);
852 + pci_write_config_dword(dev, 0x1BC + index * 4, val);
853 + /*
854 + * We need to WBINVD on a core on the node containing the L3 cache which
855 + * indices we disable therefore a simple wbinvd() is not sufficient.
856 + */
857 + wbinvd_on_cpu(cpu);
858 + pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
859 + return count;
860 +}
861 +
862 +#define STORE_CACHE_DISABLE(index) \
863 +static ssize_t \
864 +store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
865 + const char *buf, size_t count) \
866 +{ \
867 + return store_cache_disable(this_leaf, buf, count, index); \
868 }
869 +STORE_CACHE_DISABLE(0)
870 +STORE_CACHE_DISABLE(1)
871 +
872 +static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
873 + show_cache_disable_0, store_cache_disable_0);
874 +static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
875 + show_cache_disable_1, store_cache_disable_1);
876 +
877 +#else /* CONFIG_CPU_SUP_AMD */
878 +static void __cpuinit
879 +amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
880 +{
881 +};
882 +#endif /* CONFIG_CPU_SUP_AMD */
883
884 static int
885 __cpuinit cpuid4_cache_lookup_regs(int index,
886 @@ -726,82 +850,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
887 #define to_object(k) container_of(k, struct _index_kobject, kobj)
888 #define to_attr(a) container_of(a, struct _cache_attr, attr)
889
890 -static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
891 - unsigned int index)
892 -{
893 - int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
894 - int node = cpu_to_node(cpu);
895 - struct pci_dev *dev = node_to_k8_nb_misc(node);
896 - unsigned int reg = 0;
897 -
898 - if (!this_leaf->can_disable)
899 - return -EINVAL;
900 -
901 - if (!dev)
902 - return -EINVAL;
903 -
904 - pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
905 - return sprintf(buf, "%x\n", reg);
906 -}
907 -
908 -#define SHOW_CACHE_DISABLE(index) \
909 -static ssize_t \
910 -show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
911 -{ \
912 - return show_cache_disable(this_leaf, buf, index); \
913 -}
914 -SHOW_CACHE_DISABLE(0)
915 -SHOW_CACHE_DISABLE(1)
916 -
917 -static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
918 - const char *buf, size_t count, unsigned int index)
919 -{
920 - int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
921 - int node = cpu_to_node(cpu);
922 - struct pci_dev *dev = node_to_k8_nb_misc(node);
923 - unsigned long val = 0;
924 - unsigned int scrubber = 0;
925 -
926 - if (!this_leaf->can_disable)
927 - return -EINVAL;
928 -
929 - if (!capable(CAP_SYS_ADMIN))
930 - return -EPERM;
931 -
932 - if (!dev)
933 - return -EINVAL;
934 -
935 - if (strict_strtoul(buf, 10, &val) < 0)
936 - return -EINVAL;
937 -
938 - val |= 0xc0000000;
939 -
940 - pci_read_config_dword(dev, 0x58, &scrubber);
941 - scrubber &= ~0x1f000000;
942 - pci_write_config_dword(dev, 0x58, scrubber);
943 -
944 - pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
945 - wbinvd();
946 - pci_write_config_dword(dev, 0x1BC + index * 4, val);
947 - return count;
948 -}
949 -
950 -#define STORE_CACHE_DISABLE(index) \
951 -static ssize_t \
952 -store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
953 - const char *buf, size_t count) \
954 -{ \
955 - return store_cache_disable(this_leaf, buf, count, index); \
956 -}
957 -STORE_CACHE_DISABLE(0)
958 -STORE_CACHE_DISABLE(1)
959 -
960 -struct _cache_attr {
961 - struct attribute attr;
962 - ssize_t (*show)(struct _cpuid4_info *, char *);
963 - ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
964 -};
965 -
966 #define define_one_ro(_name) \
967 static struct _cache_attr _name = \
968 __ATTR(_name, 0444, show_##_name, NULL)
969 @@ -816,23 +864,28 @@ define_one_ro(size);
970 define_one_ro(shared_cpu_map);
971 define_one_ro(shared_cpu_list);
972
973 -static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
974 - show_cache_disable_0, store_cache_disable_0);
975 -static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
976 - show_cache_disable_1, store_cache_disable_1);
977 +#define DEFAULT_SYSFS_CACHE_ATTRS \
978 + &type.attr, \
979 + &level.attr, \
980 + &coherency_line_size.attr, \
981 + &physical_line_partition.attr, \
982 + &ways_of_associativity.attr, \
983 + &number_of_sets.attr, \
984 + &size.attr, \
985 + &shared_cpu_map.attr, \
986 + &shared_cpu_list.attr
987
988 static struct attribute *default_attrs[] = {
989 - &type.attr,
990 - &level.attr,
991 - &coherency_line_size.attr,
992 - &physical_line_partition.attr,
993 - &ways_of_associativity.attr,
994 - &number_of_sets.attr,
995 - &size.attr,
996 - &shared_cpu_map.attr,
997 - &shared_cpu_list.attr,
998 + DEFAULT_SYSFS_CACHE_ATTRS,
999 + NULL
1000 +};
1001 +
1002 +static struct attribute *default_l3_attrs[] = {
1003 + DEFAULT_SYSFS_CACHE_ATTRS,
1004 +#ifdef CONFIG_CPU_SUP_AMD
1005 &cache_disable_0.attr,
1006 &cache_disable_1.attr,
1007 +#endif
1008 NULL
1009 };
1010
1011 @@ -923,6 +976,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
1012 unsigned int cpu = sys_dev->id;
1013 unsigned long i, j;
1014 struct _index_kobject *this_object;
1015 + struct _cpuid4_info *this_leaf;
1016 int retval;
1017
1018 retval = cpuid4_cache_sysfs_init(cpu);
1019 @@ -941,6 +995,14 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
1020 this_object = INDEX_KOBJECT_PTR(cpu, i);
1021 this_object->cpu = cpu;
1022 this_object->index = i;
1023 +
1024 + this_leaf = CPUID4_INFO_IDX(cpu, i);
1025 +
1026 + if (this_leaf->can_disable)
1027 + ktype_cache.default_attrs = default_l3_attrs;
1028 + else
1029 + ktype_cache.default_attrs = default_attrs;
1030 +
1031 retval = kobject_init_and_add(&(this_object->kobj),
1032 &ktype_cache,
1033 per_cpu(cache_kobject, cpu),
1034 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
1035 index b5801c3..efea222 100644
1036 --- a/arch/x86/kernel/cpu/perf_event.c
1037 +++ b/arch/x86/kernel/cpu/perf_event.c
1038 @@ -190,6 +190,97 @@ static u64 __read_mostly hw_cache_event_ids
1039 [PERF_COUNT_HW_CACHE_OP_MAX]
1040 [PERF_COUNT_HW_CACHE_RESULT_MAX];
1041
1042 +static const u64 westmere_hw_cache_event_ids
1043 + [PERF_COUNT_HW_CACHE_MAX]
1044 + [PERF_COUNT_HW_CACHE_OP_MAX]
1045 + [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1046 +{
1047 + [ C(L1D) ] = {
1048 + [ C(OP_READ) ] = {
1049 + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1050 + [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
1051 + },
1052 + [ C(OP_WRITE) ] = {
1053 + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
1054 + [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
1055 + },
1056 + [ C(OP_PREFETCH) ] = {
1057 + [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
1058 + [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
1059 + },
1060 + },
1061 + [ C(L1I ) ] = {
1062 + [ C(OP_READ) ] = {
1063 + [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
1064 + [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
1065 + },
1066 + [ C(OP_WRITE) ] = {
1067 + [ C(RESULT_ACCESS) ] = -1,
1068 + [ C(RESULT_MISS) ] = -1,
1069 + },
1070 + [ C(OP_PREFETCH) ] = {
1071 + [ C(RESULT_ACCESS) ] = 0x0,
1072 + [ C(RESULT_MISS) ] = 0x0,
1073 + },
1074 + },
1075 + [ C(LL ) ] = {
1076 + [ C(OP_READ) ] = {
1077 + [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
1078 + [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
1079 + },
1080 + [ C(OP_WRITE) ] = {
1081 + [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
1082 + [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
1083 + },
1084 + [ C(OP_PREFETCH) ] = {
1085 + [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
1086 + [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
1087 + },
1088 + },
1089 + [ C(DTLB) ] = {
1090 + [ C(OP_READ) ] = {
1091 + [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
1092 + [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
1093 + },
1094 + [ C(OP_WRITE) ] = {
1095 + [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES */
1096 + [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
1097 + },
1098 + [ C(OP_PREFETCH) ] = {
1099 + [ C(RESULT_ACCESS) ] = 0x0,
1100 + [ C(RESULT_MISS) ] = 0x0,
1101 + },
1102 + },
1103 + [ C(ITLB) ] = {
1104 + [ C(OP_READ) ] = {
1105 + [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
1106 + [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
1107 + },
1108 + [ C(OP_WRITE) ] = {
1109 + [ C(RESULT_ACCESS) ] = -1,
1110 + [ C(RESULT_MISS) ] = -1,
1111 + },
1112 + [ C(OP_PREFETCH) ] = {
1113 + [ C(RESULT_ACCESS) ] = -1,
1114 + [ C(RESULT_MISS) ] = -1,
1115 + },
1116 + },
1117 + [ C(BPU ) ] = {
1118 + [ C(OP_READ) ] = {
1119 + [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1120 + [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
1121 + },
1122 + [ C(OP_WRITE) ] = {
1123 + [ C(RESULT_ACCESS) ] = -1,
1124 + [ C(RESULT_MISS) ] = -1,
1125 + },
1126 + [ C(OP_PREFETCH) ] = {
1127 + [ C(RESULT_ACCESS) ] = -1,
1128 + [ C(RESULT_MISS) ] = -1,
1129 + },
1130 + },
1131 +};
1132 +
1133 static const u64 nehalem_hw_cache_event_ids
1134 [PERF_COUNT_HW_CACHE_MAX]
1135 [PERF_COUNT_HW_CACHE_OP_MAX]
1136 @@ -1999,6 +2090,7 @@ static int intel_pmu_init(void)
1137 * Install the hw-cache-events table:
1138 */
1139 switch (boot_cpu_data.x86_model) {
1140 +
1141 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1142 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1143 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1144 @@ -2009,7 +2101,9 @@ static int intel_pmu_init(void)
1145 pr_cont("Core2 events, ");
1146 break;
1147 default:
1148 - case 26:
1149 + case 26: /* 45 nm nehalem, "Bloomfield" */
1150 + case 30: /* 45 nm nehalem, "Lynnfield" */
1151 + case 46: /* 45 nm nehalem-ex, "Beckton" */
1152 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
1153 sizeof(hw_cache_event_ids));
1154
1155 @@ -2021,6 +2115,14 @@ static int intel_pmu_init(void)
1156
1157 pr_cont("Atom events, ");
1158 break;
1159 +
1160 + case 37: /* 32 nm nehalem, "Clarkdale" */
1161 + case 44: /* 32 nm nehalem, "Gulftown" */
1162 + memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1163 + sizeof(hw_cache_event_ids));
1164 +
1165 + pr_cont("Westmere events, ");
1166 + break;
1167 }
1168 return 0;
1169 }
1170 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
1171 index 5e409dc..ff95824 100644
1172 --- a/arch/x86/kernel/crash.c
1173 +++ b/arch/x86/kernel/crash.c
1174 @@ -27,7 +27,6 @@
1175 #include <asm/cpu.h>
1176 #include <asm/reboot.h>
1177 #include <asm/virtext.h>
1178 -#include <asm/iommu.h>
1179
1180
1181 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
1182 @@ -104,10 +103,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
1183 #ifdef CONFIG_HPET_TIMER
1184 hpet_disable();
1185 #endif
1186 -
1187 -#ifdef CONFIG_X86_64
1188 - pci_iommu_shutdown();
1189 -#endif
1190 -
1191 crash_save_cpu(regs, safe_smp_processor_id());
1192 }
1193 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
1194 index 5877873..74f5a3f 100644
1195 --- a/arch/x86/kernel/hpet.c
1196 +++ b/arch/x86/kernel/hpet.c
1197 @@ -385,11 +385,28 @@ static int hpet_next_event(unsigned long delta,
1198 hpet_writel(cnt, HPET_Tn_CMP(timer));
1199
1200 /*
1201 - * We need to read back the CMP register to make sure that
1202 - * what we wrote hit the chip before we compare it to the
1203 - * counter.
1204 + * We need to read back the CMP register on certain HPET
1205 + * implementations (ATI chipsets) which seem to delay the
1206 + * transfer of the compare register into the internal compare
1207 + * logic. With small deltas this might actually be too late as
1208 + * the counter could already be higher than the compare value
1209 + * at that point and we would wait for the next hpet interrupt
1210 + * forever. We found out that reading the CMP register back
1211 + * forces the transfer so we can rely on the comparison with
1212 + * the counter register below. If the read back from the
1213 + * compare register does not match the value we programmed
1214 + * then we might have a real hardware problem. We can not do
1215 + * much about it here, but at least alert the user/admin with
1216 + * a prominent warning.
1217 + * An erratum on some chipsets (ICH9,..), results in comparator read
1218 + * immediately following a write returning old value. Workaround
1219 + * for this is to read this value second time, when first
1220 + * read returns old value.
1221 */
1222 - WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);
1223 + if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
1224 + WARN_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt,
1225 + KERN_WARNING "hpet: compare register read back failed.\n");
1226 + }
1227
1228 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
1229 }
1230 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
1231 index fcc0b5c..c245b6a 100644
1232 --- a/arch/x86/kernel/pci-gart_64.c
1233 +++ b/arch/x86/kernel/pci-gart_64.c
1234 @@ -553,6 +553,9 @@ static void enable_gart_translations(void)
1235
1236 enable_gart_translation(dev, __pa(agp_gatt_table));
1237 }
1238 +
1239 + /* Flush the GART-TLB to remove stale entries */
1240 + k8_flush_garts();
1241 }
1242
1243 /*
1244 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
1245 index 7371e65..1350e43 100644
1246 --- a/arch/x86/kvm/emulate.c
1247 +++ b/arch/x86/kvm/emulate.c
1248 @@ -339,11 +339,18 @@ static u32 group2_table[] = {
1249 };
1250
1251 /* EFLAGS bit definitions. */
1252 +#define EFLG_ID (1<<21)
1253 +#define EFLG_VIP (1<<20)
1254 +#define EFLG_VIF (1<<19)
1255 +#define EFLG_AC (1<<18)
1256 #define EFLG_VM (1<<17)
1257 #define EFLG_RF (1<<16)
1258 +#define EFLG_IOPL (3<<12)
1259 +#define EFLG_NT (1<<14)
1260 #define EFLG_OF (1<<11)
1261 #define EFLG_DF (1<<10)
1262 #define EFLG_IF (1<<9)
1263 +#define EFLG_TF (1<<8)
1264 #define EFLG_SF (1<<7)
1265 #define EFLG_ZF (1<<6)
1266 #define EFLG_AF (1<<4)
1267 @@ -612,7 +619,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
1268
1269 if (linear < fc->start || linear >= fc->end) {
1270 size = min(15UL, PAGE_SIZE - offset_in_page(linear));
1271 - rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
1272 + rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
1273 if (rc)
1274 return rc;
1275 fc->start = linear;
1276 @@ -667,11 +674,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
1277 op_bytes = 3;
1278 *address = 0;
1279 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
1280 - ctxt->vcpu);
1281 + ctxt->vcpu, NULL);
1282 if (rc)
1283 return rc;
1284 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
1285 - ctxt->vcpu);
1286 + ctxt->vcpu, NULL);
1287 return rc;
1288 }
1289
1290 @@ -895,6 +902,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1291
1292 switch (mode) {
1293 case X86EMUL_MODE_REAL:
1294 + case X86EMUL_MODE_VM86:
1295 case X86EMUL_MODE_PROT16:
1296 def_op_bytes = def_ad_bytes = 2;
1297 break;
1298 @@ -1204,6 +1212,49 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1299 return rc;
1300 }
1301
1302 +static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1303 + struct x86_emulate_ops *ops,
1304 + void *dest, int len)
1305 +{
1306 + int rc;
1307 + unsigned long val, change_mask;
1308 + int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1309 + int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
1310 +
1311 + rc = emulate_pop(ctxt, ops, &val, len);
1312 + if (rc != X86EMUL_CONTINUE)
1313 + return rc;
1314 +
1315 + change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1316 + | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1317 +
1318 + switch(ctxt->mode) {
1319 + case X86EMUL_MODE_PROT64:
1320 + case X86EMUL_MODE_PROT32:
1321 + case X86EMUL_MODE_PROT16:
1322 + if (cpl == 0)
1323 + change_mask |= EFLG_IOPL;
1324 + if (cpl <= iopl)
1325 + change_mask |= EFLG_IF;
1326 + break;
1327 + case X86EMUL_MODE_VM86:
1328 + if (iopl < 3) {
1329 + kvm_inject_gp(ctxt->vcpu, 0);
1330 + return X86EMUL_PROPAGATE_FAULT;
1331 + }
1332 + change_mask |= EFLG_IF;
1333 + break;
1334 + default: /* real mode */
1335 + change_mask |= (EFLG_IOPL | EFLG_IF);
1336 + break;
1337 + }
1338 +
1339 + *(unsigned long *)dest =
1340 + (ctxt->eflags & ~change_mask) | (val & change_mask);
1341 +
1342 + return rc;
1343 +}
1344 +
1345 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1346 struct x86_emulate_ops *ops)
1347 {
1348 @@ -1345,7 +1396,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1349 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1350 if (rc)
1351 return rc;
1352 - rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS);
1353 + rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
1354 return rc;
1355 }
1356
1357 @@ -1453,7 +1504,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
1358
1359 /* syscall is not available in real mode */
1360 if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
1361 - || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
1362 + || ctxt->mode == X86EMUL_MODE_VM86)
1363 return -1;
1364
1365 setup_syscalls_segments(ctxt, &cs, &ss);
1366 @@ -1505,9 +1556,8 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
1367 if (c->lock_prefix)
1368 return -1;
1369
1370 - /* inject #GP if in real mode or paging is disabled */
1371 - if (ctxt->mode == X86EMUL_MODE_REAL ||
1372 - !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1373 + /* inject #GP if in real mode */
1374 + if (ctxt->mode == X86EMUL_MODE_REAL) {
1375 kvm_inject_gp(ctxt->vcpu, 0);
1376 return -1;
1377 }
1378 @@ -1571,9 +1621,9 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1379 if (c->lock_prefix)
1380 return -1;
1381
1382 - /* inject #GP if in real mode or paging is disabled */
1383 - if (ctxt->mode == X86EMUL_MODE_REAL
1384 - || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1385 + /* inject #GP if in real mode or Virtual 8086 mode */
1386 + if (ctxt->mode == X86EMUL_MODE_REAL ||
1387 + ctxt->mode == X86EMUL_MODE_VM86) {
1388 kvm_inject_gp(ctxt->vcpu, 0);
1389 return -1;
1390 }
1391 @@ -1620,6 +1670,57 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1392 return 0;
1393 }
1394
1395 +static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
1396 +{
1397 + int iopl;
1398 + if (ctxt->mode == X86EMUL_MODE_REAL)
1399 + return false;
1400 + if (ctxt->mode == X86EMUL_MODE_VM86)
1401 + return true;
1402 + iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1403 + return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
1404 +}
1405 +
1406 +static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
1407 + struct x86_emulate_ops *ops,
1408 + u16 port, u16 len)
1409 +{
1410 + struct kvm_segment tr_seg;
1411 + int r;
1412 + u16 io_bitmap_ptr;
1413 + u8 perm, bit_idx = port & 0x7;
1414 + unsigned mask = (1 << len) - 1;
1415 +
1416 + kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
1417 + if (tr_seg.unusable)
1418 + return false;
1419 + if (tr_seg.limit < 103)
1420 + return false;
1421 + r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
1422 + NULL);
1423 + if (r != X86EMUL_CONTINUE)
1424 + return false;
1425 + if (io_bitmap_ptr + port/8 > tr_seg.limit)
1426 + return false;
1427 + r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
1428 + ctxt->vcpu, NULL);
1429 + if (r != X86EMUL_CONTINUE)
1430 + return false;
1431 + if ((perm >> bit_idx) & mask)
1432 + return false;
1433 + return true;
1434 +}
1435 +
1436 +static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
1437 + struct x86_emulate_ops *ops,
1438 + u16 port, u16 len)
1439 +{
1440 + if (emulator_bad_iopl(ctxt))
1441 + if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
1442 + return false;
1443 + return true;
1444 +}
1445 +
1446 int
1447 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1448 {
1449 @@ -1779,7 +1880,12 @@ special_insn:
1450 break;
1451 case 0x6c: /* insb */
1452 case 0x6d: /* insw/insd */
1453 - if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
1454 + if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
1455 + (c->d & ByteOp) ? 1 : c->op_bytes)) {
1456 + kvm_inject_gp(ctxt->vcpu, 0);
1457 + goto done;
1458 + }
1459 + if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
1460 1,
1461 (c->d & ByteOp) ? 1 : c->op_bytes,
1462 c->rep_prefix ?
1463 @@ -1795,6 +1901,11 @@ special_insn:
1464 return 0;
1465 case 0x6e: /* outsb */
1466 case 0x6f: /* outsw/outsd */
1467 + if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
1468 + (c->d & ByteOp) ? 1 : c->op_bytes)) {
1469 + kvm_inject_gp(ctxt->vcpu, 0);
1470 + goto done;
1471 + }
1472 if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
1473 0,
1474 (c->d & ByteOp) ? 1 : c->op_bytes,
1475 @@ -1881,12 +1992,11 @@ special_insn:
1476 break;
1477 case 0x8e: { /* mov seg, r/m16 */
1478 uint16_t sel;
1479 - int type_bits;
1480 - int err;
1481
1482 sel = c->src.val;
1483
1484 - if (c->modrm_reg == VCPU_SREG_CS) {
1485 + if (c->modrm_reg == VCPU_SREG_CS ||
1486 + c->modrm_reg > VCPU_SREG_GS) {
1487 kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
1488 goto done;
1489 }
1490 @@ -1894,18 +2004,7 @@ special_insn:
1491 if (c->modrm_reg == VCPU_SREG_SS)
1492 toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
1493
1494 - if (c->modrm_reg <= 5) {
1495 - type_bits = (c->modrm_reg == 1) ? 9 : 1;
1496 - err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
1497 - type_bits, c->modrm_reg);
1498 - } else {
1499 - printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
1500 - c->modrm);
1501 - goto cannot_emulate;
1502 - }
1503 -
1504 - if (err < 0)
1505 - goto cannot_emulate;
1506 + rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
1507
1508 c->dst.type = OP_NONE; /* Disable writeback. */
1509 break;
1510 @@ -1934,7 +2033,10 @@ special_insn:
1511 c->dst.type = OP_REG;
1512 c->dst.ptr = (unsigned long *) &ctxt->eflags;
1513 c->dst.bytes = c->op_bytes;
1514 - goto pop_instruction;
1515 + rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
1516 + if (rc != X86EMUL_CONTINUE)
1517 + goto done;
1518 + break;
1519 case 0xa0 ... 0xa1: /* mov */
1520 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
1521 c->dst.val = c->src.val;
1522 @@ -2072,11 +2174,9 @@ special_insn:
1523 case 0xe9: /* jmp rel */
1524 goto jmp;
1525 case 0xea: /* jmp far */
1526 - if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9,
1527 - VCPU_SREG_CS) < 0) {
1528 - DPRINTF("jmp far: Failed to load CS descriptor\n");
1529 - goto cannot_emulate;
1530 - }
1531 + if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
1532 + VCPU_SREG_CS))
1533 + goto done;
1534
1535 c->eip = c->src.val;
1536 break;
1537 @@ -2094,7 +2194,13 @@ special_insn:
1538 case 0xef: /* out (e/r)ax,dx */
1539 port = c->regs[VCPU_REGS_RDX];
1540 io_dir_in = 0;
1541 - do_io: if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
1542 + do_io:
1543 + if (!emulator_io_permited(ctxt, ops, port,
1544 + (c->d & ByteOp) ? 1 : c->op_bytes)) {
1545 + kvm_inject_gp(ctxt->vcpu, 0);
1546 + goto done;
1547 + }
1548 + if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
1549 (c->d & ByteOp) ? 1 : c->op_bytes,
1550 port) != 0) {
1551 c->eip = saved_eip;
1552 @@ -2119,13 +2225,21 @@ special_insn:
1553 c->dst.type = OP_NONE; /* Disable writeback. */
1554 break;
1555 case 0xfa: /* cli */
1556 - ctxt->eflags &= ~X86_EFLAGS_IF;
1557 - c->dst.type = OP_NONE; /* Disable writeback. */
1558 + if (emulator_bad_iopl(ctxt))
1559 + kvm_inject_gp(ctxt->vcpu, 0);
1560 + else {
1561 + ctxt->eflags &= ~X86_EFLAGS_IF;
1562 + c->dst.type = OP_NONE; /* Disable writeback. */
1563 + }
1564 break;
1565 case 0xfb: /* sti */
1566 - toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
1567 - ctxt->eflags |= X86_EFLAGS_IF;
1568 - c->dst.type = OP_NONE; /* Disable writeback. */
1569 + if (emulator_bad_iopl(ctxt))
1570 + kvm_inject_gp(ctxt->vcpu, 0);
1571 + else {
1572 + toggle_interruptibility(ctxt, X86_SHADOW_INT_STI);
1573 + ctxt->eflags |= X86_EFLAGS_IF;
1574 + c->dst.type = OP_NONE; /* Disable writeback. */
1575 + }
1576 break;
1577 case 0xfc: /* cld */
1578 ctxt->eflags &= ~EFLG_DF;
1579 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1580 index 3a01519..762efc2 100644
1581 --- a/arch/x86/kvm/mmu.c
1582 +++ b/arch/x86/kvm/mmu.c
1583 @@ -136,12 +136,6 @@ module_param(oos_shadow, bool, 0644);
1584 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
1585 | PT64_NX_MASK)
1586
1587 -#define PFERR_PRESENT_MASK (1U << 0)
1588 -#define PFERR_WRITE_MASK (1U << 1)
1589 -#define PFERR_USER_MASK (1U << 2)
1590 -#define PFERR_RSVD_MASK (1U << 3)
1591 -#define PFERR_FETCH_MASK (1U << 4)
1592 -
1593 #define PT_PDPE_LEVEL 3
1594 #define PT_DIRECTORY_LEVEL 2
1595 #define PT_PAGE_TABLE_LEVEL 1
1596 @@ -1502,8 +1496,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
1597 for_each_sp(pages, sp, parents, i) {
1598 kvm_mmu_zap_page(kvm, sp);
1599 mmu_pages_clear_parents(&parents);
1600 + zapped++;
1601 }
1602 - zapped += pages.nr;
1603 kvm_mmu_pages_init(parent, &parents, &pages);
1604 }
1605
1606 @@ -1554,14 +1548,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1607 */
1608
1609 if (used_pages > kvm_nr_mmu_pages) {
1610 - while (used_pages > kvm_nr_mmu_pages) {
1611 + while (used_pages > kvm_nr_mmu_pages &&
1612 + !list_empty(&kvm->arch.active_mmu_pages)) {
1613 struct kvm_mmu_page *page;
1614
1615 page = container_of(kvm->arch.active_mmu_pages.prev,
1616 struct kvm_mmu_page, link);
1617 - kvm_mmu_zap_page(kvm, page);
1618 + used_pages -= kvm_mmu_zap_page(kvm, page);
1619 used_pages--;
1620 }
1621 + kvm_nr_mmu_pages = used_pages;
1622 kvm->arch.n_free_mmu_pages = 0;
1623 }
1624 else
1625 @@ -1608,7 +1604,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1626 && !sp->role.invalid) {
1627 pgprintk("%s: zap %lx %x\n",
1628 __func__, gfn, sp->role.word);
1629 - kvm_mmu_zap_page(kvm, sp);
1630 + if (kvm_mmu_zap_page(kvm, sp))
1631 + nn = bucket->first;
1632 }
1633 }
1634 }
1635 @@ -1639,7 +1636,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1636 {
1637 struct page *page;
1638
1639 - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1640 + gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
1641
1642 if (gpa == UNMAPPED_GVA)
1643 return NULL;
1644 @@ -2162,8 +2159,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
1645 spin_unlock(&vcpu->kvm->mmu_lock);
1646 }
1647
1648 -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1649 +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
1650 + u32 access, u32 *error)
1651 {
1652 + if (error)
1653 + *error = 0;
1654 return vaddr;
1655 }
1656
1657 @@ -2747,7 +2747,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1658 if (tdp_enabled)
1659 return 0;
1660
1661 - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1662 + gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
1663
1664 spin_lock(&vcpu->kvm->mmu_lock);
1665 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1666 @@ -3245,7 +3245,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1667 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
1668 audit_mappings_page(vcpu, ent, va, level - 1);
1669 else {
1670 - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
1671 + gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
1672 gfn_t gfn = gpa >> PAGE_SHIFT;
1673 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
1674 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
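
Note on the kvm_mmu_change_mmu_pages() hunk above: kvm_mmu_zap_page() can
free unsynced children along with the page it was asked to zap (the
mmu_zap_unsync_children() hunk now counts them one by one), and it returns
how many of those extras went away. The shrink loop therefore subtracts the
return value for the children and decrements once for the page itself. A
standalone toy model of that bookkeeping, with hypothetical names:

    #include <stdio.h>

    static unsigned int pool = 10;  /* shadow pages actually allocated */

    /* hypothetical stand-in for kvm_mmu_zap_page(): returns how many
     * additional pages (unsynced children) were freed alongside */
    static unsigned int zap_one(void)
    {
        unsigned int extra = (pool >= 3) ? 2 : 0;
        pool -= 1 + extra;
        return extra;
    }

    int main(void)
    {
        unsigned int used = pool, target = 4;

        /* without subtracting the extras, 'used' drifts above the
         * real pool size and the loop over-zaps; the new
         * list_empty() guard stops it walking off an empty list */
        while (used > target && pool > 0) {
            used -= zap_one();  /* children freed as a side effect */
            used--;             /* the page itself */
        }
        printf("used=%u pool=%u\n", used, pool);
        return 0;
    }
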
1675 diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
1676 index 61a1b38..bac7529 100644
1677 --- a/arch/x86/kvm/mmu.h
1678 +++ b/arch/x86/kvm/mmu.h
1679 @@ -37,6 +37,12 @@
1680 #define PT32_ROOT_LEVEL 2
1681 #define PT32E_ROOT_LEVEL 3
1682
1683 +#define PFERR_PRESENT_MASK (1U << 0)
1684 +#define PFERR_WRITE_MASK (1U << 1)
1685 +#define PFERR_USER_MASK (1U << 2)
1686 +#define PFERR_RSVD_MASK (1U << 3)
1687 +#define PFERR_FETCH_MASK (1U << 4)
1688 +
1689 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
1690
1691 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1692 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
1693 index 5fa3325..8faa821 100644
1694 --- a/arch/x86/kvm/paging_tmpl.h
1695 +++ b/arch/x86/kvm/paging_tmpl.h
1696 @@ -491,18 +491,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
1697 spin_unlock(&vcpu->kvm->mmu_lock);
1698 }
1699
1700 -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
1701 +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
1702 + u32 *error)
1703 {
1704 struct guest_walker walker;
1705 gpa_t gpa = UNMAPPED_GVA;
1706 int r;
1707
1708 - r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
1709 + r = FNAME(walk_addr)(&walker, vcpu, vaddr,
1710 + !!(access & PFERR_WRITE_MASK),
1711 + !!(access & PFERR_USER_MASK),
1712 + !!(access & PFERR_FETCH_MASK));
1713
1714 if (r) {
1715 gpa = gfn_to_gpa(walker.gfn);
1716 gpa |= vaddr & ~PAGE_MASK;
1717 - }
1718 + } else if (error)
1719 + *error = walker.error_code;
1720
1721 return gpa;
1722 }
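
Note on the gva_to_gpa() signature change above: callers now pass a
page-fault-style access mask, and the template walker unpacks it into the
write/user/fetch flags walk_addr() already takes. A minimal sketch of that
decomposition (PFERR_* values as added to mmu.h above):

    #define PFERR_WRITE_MASK (1U << 1)
    #define PFERR_USER_MASK  (1U << 2)
    #define PFERR_FETCH_MASK (1U << 4)

    struct walk_flags {
        int write, user, fetch;
    };

    /* the double negation folds any set mask bit down to the
     * plain 0/1 values the walker expects */
    static struct walk_flags decode_access(unsigned int access)
    {
        struct walk_flags f = {
            .write = !!(access & PFERR_WRITE_MASK),
            .user  = !!(access & PFERR_USER_MASK),
            .fetch = !!(access & PFERR_FETCH_MASK),
        };
        return f;
    }
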
1723 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1724 index c17404a..8e65552 100644
1725 --- a/arch/x86/kvm/svm.c
1726 +++ b/arch/x86/kvm/svm.c
1727 @@ -625,11 +625,12 @@ static void init_vmcb(struct vcpu_svm *svm)
1728 save->rip = 0x0000fff0;
1729 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1730
1731 - /*
1732 - * cr0 val on cpu init should be 0x60000010, we enable cpu
1733 - * cache by default. the orderly way is to enable cache in bios.
1734 + /* This is the guest-visible cr0 value.
1735 + * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1736 */
1737 - save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
1738 + svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
1739 + kvm_set_cr0(&svm->vcpu, svm->vcpu.arch.cr0);
1740 +
1741 save->cr4 = X86_CR4_PAE;
1742 /* rdx = ?? */
1743
1744 @@ -693,29 +694,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1745 if (err)
1746 goto free_svm;
1747
1748 + err = -ENOMEM;
1749 page = alloc_page(GFP_KERNEL);
1750 - if (!page) {
1751 - err = -ENOMEM;
1752 + if (!page)
1753 goto uninit;
1754 - }
1755
1756 - err = -ENOMEM;
1757 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1758 if (!msrpm_pages)
1759 - goto uninit;
1760 + goto free_page1;
1761
1762 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1763 if (!nested_msrpm_pages)
1764 - goto uninit;
1765 -
1766 - svm->msrpm = page_address(msrpm_pages);
1767 - svm_vcpu_init_msrpm(svm->msrpm);
1768 + goto free_page2;
1769
1770 hsave_page = alloc_page(GFP_KERNEL);
1771 if (!hsave_page)
1772 - goto uninit;
1773 + goto free_page3;
1774 +
1775 svm->nested.hsave = page_address(hsave_page);
1776
1777 + svm->msrpm = page_address(msrpm_pages);
1778 + svm_vcpu_init_msrpm(svm->msrpm);
1779 +
1780 svm->nested.msrpm = page_address(nested_msrpm_pages);
1781
1782 svm->vmcb = page_address(page);
1783 @@ -732,6 +732,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
1784
1785 return &svm->vcpu;
1786
1787 +free_page3:
1788 + __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1789 +free_page2:
1790 + __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1791 +free_page1:
1792 + __free_page(page);
1793 uninit:
1794 kvm_vcpu_uninit(&svm->vcpu);
1795 free_svm:
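
Note on the svm_create_vcpu() rework above: earlier failures jumped to
uninit without freeing pages that were already allocated; the fix is the
standard kernel unwind idiom, where each allocation gets a label that frees
everything acquired before it, in reverse order. A generic user-space
sketch of the shape (names are illustrative):

    #include <stdlib.h>

    static int setup(void **a, void **b, void **c)
    {
        *a = malloc(32);
        if (!*a)
            goto fail;
        *b = malloc(32);
        if (!*b)
            goto free_a;
        *c = malloc(32);
        if (!*c)
            goto free_b;
        return 0;

    free_b:     /* unwind strictly in reverse allocation order */
        free(*b);
    free_a:
        free(*a);
    fail:
        return -1;  /* the kernel code returns -ENOMEM here */
    }
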
1796 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1797 index ed53b42..fa297d6 100644
1798 --- a/arch/x86/kvm/vmx.c
1799 +++ b/arch/x86/kvm/vmx.c
1800 @@ -61,6 +61,8 @@ module_param_named(unrestricted_guest,
1801 static int __read_mostly emulate_invalid_guest_state = 0;
1802 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
1803
1804 +#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
1805 +
1806 struct vmcs {
1807 u32 revision_id;
1808 u32 abort;
1809 @@ -92,7 +94,7 @@ struct vcpu_vmx {
1810 } host_state;
1811 struct {
1812 int vm86_active;
1813 - u8 save_iopl;
1814 + ulong save_rflags;
1815 struct kvm_save_segment {
1816 u16 selector;
1817 unsigned long base;
1818 @@ -783,18 +785,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1819
1820 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1821 {
1822 - unsigned long rflags;
1823 + unsigned long rflags, save_rflags;
1824
1825 rflags = vmcs_readl(GUEST_RFLAGS);
1826 - if (to_vmx(vcpu)->rmode.vm86_active)
1827 - rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1828 + if (to_vmx(vcpu)->rmode.vm86_active) {
1829 + rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1830 + save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1831 + rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1832 + }
1833 return rflags;
1834 }
1835
1836 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1837 {
1838 - if (to_vmx(vcpu)->rmode.vm86_active)
1839 + if (to_vmx(vcpu)->rmode.vm86_active) {
1840 + to_vmx(vcpu)->rmode.save_rflags = rflags;
1841 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1842 + }
1843 vmcs_writel(GUEST_RFLAGS, rflags);
1844 }
1845
1846 @@ -1431,8 +1438,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1847 vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1848
1849 flags = vmcs_readl(GUEST_RFLAGS);
1850 - flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1851 - flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
1852 + flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1853 + flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1854 vmcs_writel(GUEST_RFLAGS, flags);
1855
1856 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1857 @@ -1501,8 +1508,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1858 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1859
1860 flags = vmcs_readl(GUEST_RFLAGS);
1861 - vmx->rmode.save_iopl
1862 - = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1863 + vmx->rmode.save_rflags = flags;
1864
1865 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1866
1867 @@ -2302,8 +2308,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
1868 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1869 if (vmx->vpid == 0)
1870 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
1871 - if (!enable_ept)
1872 + if (!enable_ept) {
1873 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
1874 + enable_unrestricted_guest = 0;
1875 + }
1876 if (!enable_unrestricted_guest)
1877 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
1878 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
1879 @@ -2510,7 +2518,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
1880 if (vmx->vpid != 0)
1881 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
1882
1883 - vmx->vcpu.arch.cr0 = 0x60000010;
1884 + vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
1885 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
1886 vmx_set_cr4(&vmx->vcpu, 0);
1887 vmx_set_efer(&vmx->vcpu, 0);
1888 @@ -2674,6 +2682,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1889 kvm_queue_exception(vcpu, vec);
1890 return 1;
1891 case BP_VECTOR:
1892 + /*
1893 + * Update instruction length as we may reinject the exception
1894 + * from user space while in guest debugging mode.
1895 + */
1896 + to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
1897 + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1898 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1899 return 0;
1900 /* fall through */
1901 @@ -2790,6 +2804,13 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1902 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
1903 /* fall through */
1904 case BP_VECTOR:
1905 + /*
1906 + * Update instruction length as we may reinject #BP from
1907 + * user space while in guest debugging mode. Reading it for
1908 + * #DB as well causes no harm; it is not used in that case.
1909 + */
1910 + vmx->vcpu.arch.event_exit_inst_len =
1911 + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1912 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1913 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
1914 kvm_run->debug.arch.exception = ex_no;
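
Note on the save_rflags change above: while a real-mode guest runs under
vm86 emulation, the hardware RFLAGS must carry IOPL=3 and VM=1, so the
guest's own values of exactly those bits are parked in rmode.save_rflags
(the old code saved only the IOPL field and lost VM). vmx_get_rflags() and
enter_pmode() then recombine the two sources; a sketch of that merge, using
the constants from this hunk:

    #define X86_EFLAGS_IOPL 0x00003000ul
    #define X86_EFLAGS_VM   0x00020000ul
    #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

    /* guest-owned bits come from the live hardware value; the
     * host-owned IOPL/VM bits come from the saved copy */
    static unsigned long merge_rflags(unsigned long hw, unsigned long saved)
    {
        return (hw & RMODE_GUEST_OWNED_EFLAGS_BITS) |
               (saved & ~RMODE_GUEST_OWNED_EFLAGS_BITS);
    }
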
1915 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1916 index e78d990..389fc55 100644
1917 --- a/arch/x86/kvm/x86.c
1918 +++ b/arch/x86/kvm/x86.c
1919 @@ -297,21 +297,16 @@ out:
1920 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1921 {
1922 if (cr0 & CR0_RESERVED_BITS) {
1923 - printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
1924 - cr0, vcpu->arch.cr0);
1925 kvm_inject_gp(vcpu, 0);
1926 return;
1927 }
1928
1929 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
1930 - printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
1931 kvm_inject_gp(vcpu, 0);
1932 return;
1933 }
1934
1935 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
1936 - printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
1937 - "and a clear PE flag\n");
1938 kvm_inject_gp(vcpu, 0);
1939 return;
1940 }
1941 @@ -322,15 +317,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1942 int cs_db, cs_l;
1943
1944 if (!is_pae(vcpu)) {
1945 - printk(KERN_DEBUG "set_cr0: #GP, start paging "
1946 - "in long mode while PAE is disabled\n");
1947 kvm_inject_gp(vcpu, 0);
1948 return;
1949 }
1950 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1951 if (cs_l) {
1952 - printk(KERN_DEBUG "set_cr0: #GP, start paging "
1953 - "in long mode while CS.L == 1\n");
1954 kvm_inject_gp(vcpu, 0);
1955 return;
1956
1957 @@ -338,8 +329,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1958 } else
1959 #endif
1960 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
1961 - printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
1962 - "reserved bits\n");
1963 kvm_inject_gp(vcpu, 0);
1964 return;
1965 }
1966 @@ -366,28 +355,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1967 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
1968
1969 if (cr4 & CR4_RESERVED_BITS) {
1970 - printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
1971 kvm_inject_gp(vcpu, 0);
1972 return;
1973 }
1974
1975 if (is_long_mode(vcpu)) {
1976 if (!(cr4 & X86_CR4_PAE)) {
1977 - printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
1978 - "in long mode\n");
1979 kvm_inject_gp(vcpu, 0);
1980 return;
1981 }
1982 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1983 && ((cr4 ^ old_cr4) & pdptr_bits)
1984 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
1985 - printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
1986 kvm_inject_gp(vcpu, 0);
1987 return;
1988 }
1989
1990 if (cr4 & X86_CR4_VMXE) {
1991 - printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
1992 kvm_inject_gp(vcpu, 0);
1993 return;
1994 }
1995 @@ -408,21 +392,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1996
1997 if (is_long_mode(vcpu)) {
1998 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
1999 - printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
2000 kvm_inject_gp(vcpu, 0);
2001 return;
2002 }
2003 } else {
2004 if (is_pae(vcpu)) {
2005 if (cr3 & CR3_PAE_RESERVED_BITS) {
2006 - printk(KERN_DEBUG
2007 - "set_cr3: #GP, reserved bits\n");
2008 kvm_inject_gp(vcpu, 0);
2009 return;
2010 }
2011 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
2012 - printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
2013 - "reserved bits\n");
2014 kvm_inject_gp(vcpu, 0);
2015 return;
2016 }
2017 @@ -454,7 +433,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
2018 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
2019 {
2020 if (cr8 & CR8_RESERVED_BITS) {
2021 - printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
2022 kvm_inject_gp(vcpu, 0);
2023 return;
2024 }
2025 @@ -508,15 +486,12 @@ static u32 emulated_msrs[] = {
2026 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
2027 {
2028 if (efer & efer_reserved_bits) {
2029 - printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
2030 - efer);
2031 kvm_inject_gp(vcpu, 0);
2032 return;
2033 }
2034
2035 if (is_paging(vcpu)
2036 && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
2037 - printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
2038 kvm_inject_gp(vcpu, 0);
2039 return;
2040 }
2041 @@ -526,7 +501,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
2042
2043 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
2044 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
2045 - printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
2046 kvm_inject_gp(vcpu, 0);
2047 return;
2048 }
2049 @@ -537,7 +511,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
2050
2051 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
2052 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
2053 - printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
2054 kvm_inject_gp(vcpu, 0);
2055 return;
2056 }
2057 @@ -826,9 +799,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2058 if (msr >= MSR_IA32_MC0_CTL &&
2059 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
2060 u32 offset = msr - MSR_IA32_MC0_CTL;
2061 - /* only 0 or all 1s can be written to IA32_MCi_CTL */
2062 + /* only 0 or all 1s can be written to IA32_MCi_CTL
2063 + * some Linux kernels, though, clear bit 10 in bank 4 to
2064 + * work around a BIOS/GART TBL issue on AMD K8s; ignore
2065 + * this to avoid an uncaught #GP in the guest
2066 + */
2067 if ((offset & 0x3) == 0 &&
2068 - data != 0 && data != ~(u64)0)
2069 + data != 0 && (data | (1 << 10)) != ~(u64)0)
2070 return -1;
2071 vcpu->arch.mce_banks[offset] = data;
2072 break;
2073 @@ -1242,8 +1219,8 @@ int kvm_dev_ioctl_check_extension(long ext)
2074 case KVM_CAP_NR_MEMSLOTS:
2075 r = KVM_MEMORY_SLOTS;
2076 break;
2077 - case KVM_CAP_PV_MMU:
2078 - r = !tdp_enabled;
2079 + case KVM_CAP_PV_MMU: /* obsolete */
2080 + r = 0;
2081 break;
2082 case KVM_CAP_IOMMU:
2083 r = iommu_found();
2084 @@ -2156,7 +2133,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2085 struct kvm_dirty_log *log)
2086 {
2087 int r;
2088 - int n;
2089 + unsigned long n;
2090 struct kvm_memory_slot *memslot;
2091 int is_dirty = 0;
2092
2093 @@ -2172,7 +2149,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2094 kvm_mmu_slot_remove_write_access(kvm, log->slot);
2095 spin_unlock(&kvm->mmu_lock);
2096 memslot = &kvm->memslots[log->slot];
2097 - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
2098 + n = kvm_dirty_bitmap_bytes(memslot);
2099 memset(memslot->dirty_bitmap, 0, n);
2100 }
2101 r = 0;
2102 @@ -2505,14 +2482,41 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2103 return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
2104 }
2105
2106 -static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2107 - struct kvm_vcpu *vcpu)
2108 +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
2109 +{
2110 + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
2111 + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
2112 +}
2113 +
2114 +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
2115 +{
2116 + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
2117 + access |= PFERR_FETCH_MASK;
2118 + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
2119 +}
2120 +
2121 +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
2122 +{
2123 + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
2124 + access |= PFERR_WRITE_MASK;
2125 + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
2126 +}
2127 +
2128 +/* used to access any guest's mapped memory without checking CPL */
2129 +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
2130 +{
2131 + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
2132 +}
2133 +
2134 +static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
2135 + struct kvm_vcpu *vcpu, u32 access,
2136 + u32 *error)
2137 {
2138 void *data = val;
2139 int r = X86EMUL_CONTINUE;
2140
2141 while (bytes) {
2142 - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2143 + gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
2144 unsigned offset = addr & (PAGE_SIZE-1);
2145 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
2146 int ret;
2147 @@ -2535,14 +2539,37 @@ out:
2148 return r;
2149 }
2150
2151 +/* used for instruction fetching */
2152 +static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
2153 + struct kvm_vcpu *vcpu, u32 *error)
2154 +{
2155 + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
2156 + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
2157 + access | PFERR_FETCH_MASK, error);
2158 +}
2159 +
2160 +static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
2161 + struct kvm_vcpu *vcpu, u32 *error)
2162 +{
2163 + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
2164 + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
2165 + error);
2166 +}
2167 +
2168 +static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
2169 + struct kvm_vcpu *vcpu, u32 *error)
2170 +{
2171 + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
2172 +}
2173 +
2174 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
2175 - struct kvm_vcpu *vcpu)
2176 + struct kvm_vcpu *vcpu, u32 *error)
2177 {
2178 void *data = val;
2179 int r = X86EMUL_CONTINUE;
2180
2181 while (bytes) {
2182 - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2183 + gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
2184 unsigned offset = addr & (PAGE_SIZE-1);
2185 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
2186 int ret;
2187 @@ -2572,6 +2599,7 @@ static int emulator_read_emulated(unsigned long addr,
2188 struct kvm_vcpu *vcpu)
2189 {
2190 gpa_t gpa;
2191 + u32 error_code;
2192
2193 if (vcpu->mmio_read_completed) {
2194 memcpy(val, vcpu->mmio_data, bytes);
2195 @@ -2581,17 +2609,20 @@ static int emulator_read_emulated(unsigned long addr,
2196 return X86EMUL_CONTINUE;
2197 }
2198
2199 - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2200 + gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
2201 +
2202 + if (gpa == UNMAPPED_GVA) {
2203 + kvm_inject_page_fault(vcpu, addr, error_code);
2204 + return X86EMUL_PROPAGATE_FAULT;
2205 + }
2206
2207 /* For APIC access vmexit */
2208 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2209 goto mmio;
2210
2211 - if (kvm_read_guest_virt(addr, val, bytes, vcpu)
2212 + if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
2213 == X86EMUL_CONTINUE)
2214 return X86EMUL_CONTINUE;
2215 - if (gpa == UNMAPPED_GVA)
2216 - return X86EMUL_PROPAGATE_FAULT;
2217
2218 mmio:
2219 /*
2220 @@ -2630,11 +2661,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
2221 struct kvm_vcpu *vcpu)
2222 {
2223 gpa_t gpa;
2224 + u32 error_code;
2225
2226 - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2227 + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
2228
2229 if (gpa == UNMAPPED_GVA) {
2230 - kvm_inject_page_fault(vcpu, addr, 2);
2231 + kvm_inject_page_fault(vcpu, addr, error_code);
2232 return X86EMUL_PROPAGATE_FAULT;
2233 }
2234
2235 @@ -2698,7 +2730,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
2236 char *kaddr;
2237 u64 val;
2238
2239 - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
2240 + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
2241
2242 if (gpa == UNMAPPED_GVA ||
2243 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
2244 @@ -2777,7 +2809,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2245
2246 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2247
2248 - kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
2249 + kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
2250
2251 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2252 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2253 @@ -2785,7 +2817,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2254 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2255
2256 static struct x86_emulate_ops emulate_ops = {
2257 - .read_std = kvm_read_guest_virt,
2258 + .read_std = kvm_read_guest_virt_system,
2259 + .fetch = kvm_fetch_guest_virt,
2260 .read_emulated = emulator_read_emulated,
2261 .write_emulated = emulator_write_emulated,
2262 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2263 @@ -2828,8 +2861,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
2264 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2265 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2266 vcpu->arch.emulate_ctxt.mode =
2267 + (!(vcpu->arch.cr0 & X86_CR0_PE)) ? X86EMUL_MODE_REAL :
2268 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2269 - ? X86EMUL_MODE_REAL : cs_l
2270 + ? X86EMUL_MODE_VM86 : cs_l
2271 ? X86EMUL_MODE_PROT64 : cs_db
2272 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2273
2274 @@ -2921,12 +2955,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
2275 gva_t q = vcpu->arch.pio.guest_gva;
2276 unsigned bytes;
2277 int ret;
2278 + u32 error_code;
2279
2280 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2281 if (vcpu->arch.pio.in)
2282 - ret = kvm_write_guest_virt(q, p, bytes, vcpu);
2283 + ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
2284 else
2285 - ret = kvm_read_guest_virt(q, p, bytes, vcpu);
2286 + ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
2287 +
2288 + if (ret == X86EMUL_PROPAGATE_FAULT)
2289 + kvm_inject_page_fault(vcpu, q, error_code);
2290 +
2291 return ret;
2292 }
2293
2294 @@ -2947,7 +2986,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
2295 if (io->in) {
2296 r = pio_copy_data(vcpu);
2297 if (r)
2298 - return r;
2299 + goto out;
2300 }
2301
2302 delta = 1;
2303 @@ -2974,7 +3013,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
2304 kvm_register_write(vcpu, VCPU_REGS_RSI, val);
2305 }
2306 }
2307 -
2308 +out:
2309 io->count -= io->cur_count;
2310 io->cur_count = 0;
2311
2312 @@ -3017,6 +3056,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2313 {
2314 unsigned long val;
2315
2316 + trace_kvm_pio(!in, port, size, 1);
2317 +
2318 vcpu->run->exit_reason = KVM_EXIT_IO;
2319 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2320 vcpu->run->io.size = vcpu->arch.pio.size = size;
2321 @@ -3028,9 +3069,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2322 vcpu->arch.pio.down = 0;
2323 vcpu->arch.pio.rep = 0;
2324
2325 - trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
2326 - size, 1);
2327 -
2328 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2329 memcpy(vcpu->arch.pio_data, &val, 4);
2330
2331 @@ -3049,6 +3087,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2332 unsigned now, in_page;
2333 int ret = 0;
2334
2335 + trace_kvm_pio(!in, port, size, count);
2336 +
2337 vcpu->run->exit_reason = KVM_EXIT_IO;
2338 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
2339 vcpu->run->io.size = vcpu->arch.pio.size = size;
2340 @@ -3060,9 +3100,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2341 vcpu->arch.pio.down = down;
2342 vcpu->arch.pio.rep = rep;
2343
2344 - trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
2345 - size, count);
2346 -
2347 if (!count) {
2348 kvm_x86_ops->skip_emulated_instruction(vcpu);
2349 return 1;
2350 @@ -3094,10 +3131,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2351 if (!vcpu->arch.pio.in) {
2352 /* string PIO write */
2353 ret = pio_copy_data(vcpu);
2354 - if (ret == X86EMUL_PROPAGATE_FAULT) {
2355 - kvm_inject_gp(vcpu, 0);
2356 + if (ret == X86EMUL_PROPAGATE_FAULT)
2357 return 1;
2358 - }
2359 if (ret == 0 && !pio_string_write(vcpu)) {
2360 complete_pio(vcpu);
2361 if (vcpu->arch.pio.count == 0)
2362 @@ -4077,7 +4112,9 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
2363 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
2364 return 1;
2365 }
2366 - return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
2367 + return kvm_read_guest_virt_system(dtable.base + index*8,
2368 + seg_desc, sizeof(*seg_desc),
2369 + vcpu, NULL);
2370 }
2371
2372 /* allowed just for 8 bytes segments */
2373 @@ -4091,15 +4128,23 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
2374
2375 if (dtable.limit < index * 8 + 7)
2376 return 1;
2377 - return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
2378 + return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
2379 }
2380
2381 -static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
2382 +static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
2383 + struct desc_struct *seg_desc)
2384 +{
2385 + u32 base_addr = get_desc_base(seg_desc);
2386 +
2387 + return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
2388 +}
2389 +
2390 +static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
2391 struct desc_struct *seg_desc)
2392 {
2393 u32 base_addr = get_desc_base(seg_desc);
2394
2395 - return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
2396 + return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
2397 }
2398
2399 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
2400 @@ -4139,7 +4184,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
2401 .unusable = 0,
2402 };
2403 kvm_x86_ops->set_segment(vcpu, &segvar, seg);
2404 - return 0;
2405 + return X86EMUL_CONTINUE;
2406 }
2407
2408 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
2409 @@ -4149,24 +4194,113 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
2410 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
2411 }
2412
2413 -int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
2414 - int type_bits, int seg)
2415 +int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
2416 {
2417 struct kvm_segment kvm_seg;
2418 + struct desc_struct seg_desc;
2419 + u8 dpl, rpl, cpl;
2420 + unsigned err_vec = GP_VECTOR;
2421 + u32 err_code = 0;
2422 + bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
2423 + int ret;
2424
2425 if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
2426 return kvm_load_realmode_segment(vcpu, selector, seg);
2427 - if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
2428 - return 1;
2429 - kvm_seg.type |= type_bits;
2430
2431 - if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
2432 - seg != VCPU_SREG_LDTR)
2433 - if (!kvm_seg.s)
2434 - kvm_seg.unusable = 1;
2435
2436 + /* NULL selector is not valid for TR, CS and SS */
2437 + if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
2438 + && null_selector)
2439 + goto exception;
2440 +
2441 + /* TR should be in GDT only */
2442 + if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
2443 + goto exception;
2444 +
2445 + ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
2446 + if (ret)
2447 + return ret;
2448 +
2449 + seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
2450 +
2451 + if (null_selector) { /* for NULL selector skip all following checks */
2452 + kvm_seg.unusable = 1;
2453 + goto load;
2454 + }
2455 +
2456 + err_code = selector & 0xfffc;
2457 + err_vec = GP_VECTOR;
2458 +
2459 + /* can't load system descriptor into segment selector */
2460 + if (seg <= VCPU_SREG_GS && !kvm_seg.s)
2461 + goto exception;
2462 +
2463 + if (!kvm_seg.present) {
2464 + err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
2465 + goto exception;
2466 + }
2467 +
2468 + rpl = selector & 3;
2469 + dpl = kvm_seg.dpl;
2470 + cpl = kvm_x86_ops->get_cpl(vcpu);
2471 +
2472 + switch (seg) {
2473 + case VCPU_SREG_SS:
2474 + /*
2475 + * segment is not a writable data segment, or segment
2476 + * selector's RPL != CPL, or descriptor's DPL != CPL
2477 + */
2478 + if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
2479 + goto exception;
2480 + break;
2481 + case VCPU_SREG_CS:
2482 + if (!(kvm_seg.type & 8))
2483 + goto exception;
2484 +
2485 + if (kvm_seg.type & 4) {
2486 + /* conforming */
2487 + if (dpl > cpl)
2488 + goto exception;
2489 + } else {
2490 + /* nonconforming */
2491 + if (rpl > cpl || dpl != cpl)
2492 + goto exception;
2493 + }
2494 + /* CS(RPL) <- CPL */
2495 + selector = (selector & 0xfffc) | cpl;
2496 + break;
2497 + case VCPU_SREG_TR:
2498 + if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
2499 + goto exception;
2500 + break;
2501 + case VCPU_SREG_LDTR:
2502 + if (kvm_seg.s || kvm_seg.type != 2)
2503 + goto exception;
2504 + break;
2505 + default: /* DS, ES, FS, or GS */
2506 + /*
2507 + * segment is not a data or readable code segment or
2508 + * ((segment is a data or nonconforming code segment)
2509 + * and (both RPL and CPL > DPL))
2510 + */
2511 + if ((kvm_seg.type & 0xa) == 0x8 ||
2512 + (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
2513 + goto exception;
2514 + break;
2515 + }
2516 +
2517 + if (!kvm_seg.unusable && kvm_seg.s) {
2518 + /* mark segment as accessed */
2519 + kvm_seg.type |= 1;
2520 + seg_desc.type |= 1;
2521 + save_guest_segment_descriptor(vcpu, selector, &seg_desc);
2522 + }
2523 +load:
2524 kvm_set_segment(vcpu, &kvm_seg, seg);
2525 - return 0;
2526 + return X86EMUL_CONTINUE;
2527 +exception:
2528 + kvm_queue_exception_e(vcpu, err_vec, err_code);
2529 + return X86EMUL_PROPAGATE_FAULT;
2530 }
2531
2532 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
2533 @@ -4192,6 +4326,14 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
2534 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
2535 }
2536
2537 +static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
2538 +{
2539 + struct kvm_segment kvm_seg;
2540 + kvm_get_segment(vcpu, &kvm_seg, seg);
2541 + kvm_seg.selector = sel;
2542 + kvm_set_segment(vcpu, &kvm_seg, seg);
2543 +}
2544 +
2545 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
2546 struct tss_segment_32 *tss)
2547 {
2548 @@ -4209,25 +4351,41 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
2549 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
2550 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
2551
2552 - if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
2553 + /*
2554 + * SDM says that segment selectors are loaded before segment
2555 + * descriptors
2556 + */
2557 + kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
2558 + kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
2559 + kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
2560 + kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
2561 + kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
2562 + kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
2563 + kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
2564 +
2565 + /*
2566 + * Now load segment descriptors. If a fault happens at this
2567 + * stage, it is handled in the context of the new task
2568 + */
2569 + if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
2570 return 1;
2571
2572 - if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
2573 + if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
2574 return 1;
2575
2576 - if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
2577 + if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
2578 return 1;
2579
2580 - if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
2581 + if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
2582 return 1;
2583
2584 - if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
2585 + if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
2586 return 1;
2587
2588 - if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
2589 + if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
2590 return 1;
2591
2592 - if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
2593 + if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
2594 return 1;
2595 return 0;
2596 }
2597 @@ -4268,19 +4426,33 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
2598 kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
2599 kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
2600
2601 - if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
2602 + /*
2603 + * SDM says that segment selectors are loaded before segment
2604 + * descriptors
2605 + */
2606 + kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
2607 + kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
2608 + kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
2609 + kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
2610 + kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
2611 +
2612 + /*
2613 + * Now load segment descriptors. If a fault happens at this
2614 + * stage, it is handled in the context of the new task
2615 + */
2616 + if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
2617 return 1;
2618
2619 - if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
2620 + if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
2621 return 1;
2622
2623 - if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
2624 + if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
2625 return 1;
2626
2627 - if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
2628 + if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
2629 return 1;
2630
2631 - if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
2632 + if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
2633 return 1;
2634 return 0;
2635 }
2636 @@ -4302,7 +4474,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
2637 sizeof tss_segment_16))
2638 goto out;
2639
2640 - if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
2641 + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
2642 &tss_segment_16, sizeof tss_segment_16))
2643 goto out;
2644
2645 @@ -4310,7 +4482,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
2646 tss_segment_16.prev_task_link = old_tss_sel;
2647
2648 if (kvm_write_guest(vcpu->kvm,
2649 - get_tss_base_addr(vcpu, nseg_desc),
2650 + get_tss_base_addr_write(vcpu, nseg_desc),
2651 &tss_segment_16.prev_task_link,
2652 sizeof tss_segment_16.prev_task_link))
2653 goto out;
2654 @@ -4341,7 +4513,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
2655 sizeof tss_segment_32))
2656 goto out;
2657
2658 - if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
2659 + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
2660 &tss_segment_32, sizeof tss_segment_32))
2661 goto out;
2662
2663 @@ -4349,7 +4521,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
2664 tss_segment_32.prev_task_link = old_tss_sel;
2665
2666 if (kvm_write_guest(vcpu->kvm,
2667 - get_tss_base_addr(vcpu, nseg_desc),
2668 + get_tss_base_addr_write(vcpu, nseg_desc),
2669 &tss_segment_32.prev_task_link,
2670 sizeof tss_segment_32.prev_task_link))
2671 goto out;
2672 @@ -4371,8 +4543,9 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
2673 int ret = 0;
2674 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
2675 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
2676 + u32 desc_limit;
2677
2678 - old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
2679 + old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
2680
2681 /* FIXME: Handle errors. Failure to read either TSS or their
2682 * descriptors should generate a pagefault.
2683 @@ -4393,7 +4566,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
2684 }
2685 }
2686
2687 - if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
2688 + desc_limit = get_desc_limit(&nseg_desc);
2689 + if (!nseg_desc.p ||
2690 + ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
2691 + desc_limit < 0x2b)) {
2692 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
2693 return 1;
2694 }
2695 @@ -4581,7 +4757,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2696
2697 vcpu_load(vcpu);
2698 down_read(&vcpu->kvm->slots_lock);
2699 - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
2700 + gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
2701 up_read(&vcpu->kvm->slots_lock);
2702 tr->physical_address = gpa;
2703 tr->valid = gpa != UNMAPPED_GVA;
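
Note on the IA32_MCi_CTL hunk in x86.c above: a write is valid if it is 0
or all 1s, and now also if it is all 1s with bit 10 cleared, since Linux
guests clear that bit in MC4_CTL to work around the AMD K8 GART erratum.
OR-ing bit 10 back in folds the last two cases together; restated as a
standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool mci_ctl_write_ok(uint64_t data)
    {
        /* 0, ~0, or ~0 with bit 10 cleared are all accepted */
        return data == 0 || (data | (1ull << 10)) == ~(uint64_t)0;
    }
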
2704 diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
2705 index c2b6f39..ac2d426 100644
2706 --- a/arch/x86/lib/Makefile
2707 +++ b/arch/x86/lib/Makefile
2708 @@ -2,7 +2,7 @@
2709 # Makefile for x86 specific library files.
2710 #
2711
2712 -obj-$(CONFIG_SMP) += msr-smp.o
2713 +obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
2714
2715 lib-y := delay.o
2716 lib-y += thunk_$(BITS).o
2717 @@ -26,4 +26,5 @@ else
2718 lib-y += thunk_64.o clear_page_64.o copy_page_64.o
2719 lib-y += memmove_64.o memset_64.o
2720 lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
2721 + lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
2722 endif
2723 diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
2724 new file mode 100644
2725 index 0000000..a3c6688
2726 --- /dev/null
2727 +++ b/arch/x86/lib/cache-smp.c
2728 @@ -0,0 +1,19 @@
2729 +#include <linux/smp.h>
2730 +#include <linux/module.h>
2731 +
2732 +static void __wbinvd(void *dummy)
2733 +{
2734 + wbinvd();
2735 +}
2736 +
2737 +void wbinvd_on_cpu(int cpu)
2738 +{
2739 + smp_call_function_single(cpu, __wbinvd, NULL, 1);
2740 +}
2741 +EXPORT_SYMBOL(wbinvd_on_cpu);
2742 +
2743 +int wbinvd_on_all_cpus(void)
2744 +{
2745 + return on_each_cpu(__wbinvd, NULL, 1);
2746 +}
2747 +EXPORT_SYMBOL(wbinvd_on_all_cpus);
2748 diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
2749 new file mode 100644
2750 index 0000000..15acecf
2751 --- /dev/null
2752 +++ b/arch/x86/lib/rwsem_64.S
2753 @@ -0,0 +1,81 @@
2754 +/*
2755 + * x86-64 rwsem wrappers
2756 + *
2757 + * This interfaces the inline asm code to the slow-path
2758 + * C routines. We need to save the call-clobbered regs
2759 + * that the asm does not mark as clobbered, and move the
2760 + * argument from %rax to %rdi.
2761 + *
2762 + * NOTE! We don't need to save %rax, because the functions
2763 + * will always return the semaphore pointer in %rax (which
2764 + * is also the input argument to these helpers)
2765 + *
2766 + * The following can clobber %rdx because the asm clobbers it:
2767 + * call_rwsem_down_write_failed
2768 + * call_rwsem_wake
2769 + * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
2770 + */
2771 +
2772 +#include <linux/linkage.h>
2773 +#include <asm/rwlock.h>
2774 +#include <asm/alternative-asm.h>
2775 +#include <asm/frame.h>
2776 +#include <asm/dwarf2.h>
2777 +
2778 +#define save_common_regs \
2779 + pushq %rdi; \
2780 + pushq %rsi; \
2781 + pushq %rcx; \
2782 + pushq %r8; \
2783 + pushq %r9; \
2784 + pushq %r10; \
2785 + pushq %r11
2786 +
2787 +#define restore_common_regs \
2788 + popq %r11; \
2789 + popq %r10; \
2790 + popq %r9; \
2791 + popq %r8; \
2792 + popq %rcx; \
2793 + popq %rsi; \
2794 + popq %rdi
2795 +
2796 +/* Fix up special calling conventions */
2797 +ENTRY(call_rwsem_down_read_failed)
2798 + save_common_regs
2799 + pushq %rdx
2800 + movq %rax,%rdi
2801 + call rwsem_down_read_failed
2802 + popq %rdx
2803 + restore_common_regs
2804 + ret
2805 + ENDPROC(call_rwsem_down_read_failed)
2806 +
2807 +ENTRY(call_rwsem_down_write_failed)
2808 + save_common_regs
2809 + movq %rax,%rdi
2810 + call rwsem_down_write_failed
2811 + restore_common_regs
2812 + ret
2813 + ENDPROC(call_rwsem_down_write_failed)
2814 +
2815 +ENTRY(call_rwsem_wake)
2816 + decw %dx /* do nothing if still outstanding active readers */
2817 + jnz 1f
2818 + save_common_regs
2819 + movq %rax,%rdi
2820 + call rwsem_wake
2821 + restore_common_regs
2822 +1: ret
2823 + ENDPROC(call_rwsem_wake)
2824 +
2825 +/* Fix up special calling conventions */
2826 +ENTRY(call_rwsem_downgrade_wake)
2827 + save_common_regs
2828 + pushq %rdx
2829 + movq %rax,%rdi
2830 + call rwsem_downgrade_wake
2831 + popq %rdx
2832 + restore_common_regs
2833 + ret
2834 + ENDPROC(call_rwsem_downgrade_wake)
2835 diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
2836 index 0696d50..b02f6d8 100644
2837 --- a/arch/x86/pci/irq.c
2838 +++ b/arch/x86/pci/irq.c
2839 @@ -590,6 +590,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
2840 case PCI_DEVICE_ID_INTEL_ICH10_1:
2841 case PCI_DEVICE_ID_INTEL_ICH10_2:
2842 case PCI_DEVICE_ID_INTEL_ICH10_3:
2843 + case PCI_DEVICE_ID_INTEL_CPT_LPC1:
2844 + case PCI_DEVICE_ID_INTEL_CPT_LPC2:
2845 r->name = "PIIX/ICH";
2846 r->get = pirq_piix_get;
2847 r->set = pirq_piix_set;
2848 diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
2849 index b641388..ad47dae 100644
2850 --- a/arch/x86/power/hibernate_asm_32.S
2851 +++ b/arch/x86/power/hibernate_asm_32.S
2852 @@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend)
2853 ret
2854
2855 ENTRY(restore_image)
2856 + movl mmu_cr4_features, %ecx
2857 movl resume_pg_dir, %eax
2858 subl $__PAGE_OFFSET, %eax
2859 movl %eax, %cr3
2860
2861 + jecxz 1f # cr4 Pentium and higher, skip if zero
2862 + andl $~(X86_CR4_PGE), %ecx
2863 + movl %ecx, %cr4; # turn off PGE
2864 + movl %cr3, %eax; # flush TLB
2865 + movl %eax, %cr3
2866 +1:
2867 movl restore_pblist, %edx
2868 .p2align 4,,7
2869
2870 @@ -54,16 +61,8 @@ done:
2871 movl $swapper_pg_dir, %eax
2872 subl $__PAGE_OFFSET, %eax
2873 movl %eax, %cr3
2874 - /* Flush TLB, including "global" things (vmalloc) */
2875 movl mmu_cr4_features, %ecx
2876 jecxz 1f # cr4 Pentium and higher, skip if zero
2877 - movl %ecx, %edx
2878 - andl $~(X86_CR4_PGE), %edx
2879 - movl %edx, %cr4; # turn off PGE
2880 -1:
2881 - movl %cr3, %eax; # flush TLB
2882 - movl %eax, %cr3
2883 - jecxz 1f # cr4 Pentium and higher, skip if zero
2884 movl %ecx, %cr4; # turn PGE back on
2885 1:
2886
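
Note on the hibernate_asm_32.S change above: with CR4.PGE set, reloading
CR3 leaves global TLB entries intact, so a full flush needs PGE off first;
the fix performs that sequence before the image pages are copied and turns
PGE back on only afterwards (the second hunk). A conceptual ring-0 C
rendering of the sequence; the real code stays in asm because it runs on a
temporary page directory:

    #define X86_CR4_PGE (1ul << 7)

    static inline void write_cr4(unsigned long v)
    {
        asm volatile("mov %0, %%cr4" : : "r" (v));
    }

    static inline void flush_tlb_including_globals(unsigned long cr4_features)
    {
        unsigned long cr3;

        /* mmu_cr4_features == 0 means the CPU predates CR4 */
        if (cr4_features)
            write_cr4(cr4_features & ~X86_CR4_PGE);
        asm volatile("mov %%cr3, %0" : "=r" (cr3));
        asm volatile("mov %0, %%cr3" : : "r" (cr3)); /* reload = flush */
        /* PGE is switched back on once the restore is complete */
    }
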
2887 diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
2888 index 52fec07..83b6252 100644
2889 --- a/drivers/acpi/acpica/exprep.c
2890 +++ b/drivers/acpi/acpica/exprep.c
2891 @@ -468,6 +468,23 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
2892
2893 acpi_ut_add_reference(obj_desc->field.region_obj);
2894
2895 + /* allow full data read from EC address space */
2896 + if (obj_desc->field.region_obj->region.space_id ==
2897 + ACPI_ADR_SPACE_EC) {
2898 + if (obj_desc->common_field.bit_length > 8) {
2899 + unsigned width =
2900 + ACPI_ROUND_BITS_UP_TO_BYTES(
2901 + obj_desc->common_field.bit_length);
2902 + /* access_bit_width is u8, don't overflow it */
2903 + if (width > 8)
2904 + width = 8;
2905 + obj_desc->common_field.access_byte_width =
2906 + width;
2907 + obj_desc->common_field.access_bit_width =
2908 + 8 * width;
2909 + }
2910 + }
2911 +
2912 ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
2913 "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
2914 obj_desc->field.start_field_bit_offset,
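
Note on the exprep.c hunk above: ACPI_ROUND_BITS_UP_TO_BYTES is plain
ceiling division, and the result is capped at 8 bytes so that
access_bit_width (a u8) and the 64-bit value buffer used by the EC space
handler below are never overflowed. A worked example with hypothetical
field sizes:

    /* what ACPI_ROUND_BITS_UP_TO_BYTES expands to */
    static unsigned int bits_to_bytes(unsigned int bits)
    {
        return (bits + 7) / 8;
    }

    /* a 16-bit EC field:  bits_to_bytes(16)  == 2, access width 16 bits;
     * a 100-bit EC field: bits_to_bytes(100) == 13, clamped to 8 bytes,
     * i.e. a 64-bit access width */
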
2915 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
2916 index f1670e0..45d2aa9 100644
2917 --- a/drivers/acpi/ec.c
2918 +++ b/drivers/acpi/ec.c
2919 @@ -588,12 +588,12 @@ static u32 acpi_ec_gpe_handler(void *data)
2920
2921 static acpi_status
2922 acpi_ec_space_handler(u32 function, acpi_physical_address address,
2923 - u32 bits, acpi_integer *value,
2924 + u32 bits, acpi_integer *value64,
2925 void *handler_context, void *region_context)
2926 {
2927 struct acpi_ec *ec = handler_context;
2928 - int result = 0, i;
2929 - u8 temp = 0;
2930 + int result = 0, i, bytes = bits / 8;
2931 + u8 *value = (u8 *)value64;
2932
2933 if ((address > 0xFF) || !value || !handler_context)
2934 return AE_BAD_PARAMETER;
2935 @@ -601,32 +601,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
2936 if (function != ACPI_READ && function != ACPI_WRITE)
2937 return AE_BAD_PARAMETER;
2938
2939 - if (bits != 8 && acpi_strict)
2940 - return AE_BAD_PARAMETER;
2941 -
2942 - if (EC_FLAGS_MSI)
2943 + if (EC_FLAGS_MSI || bits > 8)
2944 acpi_ec_burst_enable(ec);
2945
2946 - if (function == ACPI_READ) {
2947 - result = acpi_ec_read(ec, address, &temp);
2948 - *value = temp;
2949 - } else {
2950 - temp = 0xff & (*value);
2951 - result = acpi_ec_write(ec, address, temp);
2952 - }
2953 -
2954 - for (i = 8; unlikely(bits - i > 0); i += 8) {
2955 - ++address;
2956 - if (function == ACPI_READ) {
2957 - result = acpi_ec_read(ec, address, &temp);
2958 - (*value) |= ((acpi_integer)temp) << i;
2959 - } else {
2960 - temp = 0xff & ((*value) >> i);
2961 - result = acpi_ec_write(ec, address, temp);
2962 - }
2963 - }
2964 + for (i = 0; i < bytes; ++i, ++address, ++value)
2965 + result = (function == ACPI_READ) ?
2966 + acpi_ec_read(ec, address, value) :
2967 + acpi_ec_write(ec, address, *value);
2968
2969 - if (EC_FLAGS_MSI)
2970 + if (EC_FLAGS_MSI || bits > 8)
2971 acpi_ec_burst_disable(ec);
2972
2973 switch (result) {
2974 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
2975 index 2c53024..cb05205 100644
2976 --- a/drivers/ata/ahci.c
2977 +++ b/drivers/ata/ahci.c
2978 @@ -570,6 +570,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
2979 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
2980 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
2981 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
2982 + { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
2983 + { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
2984 + { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
2985 + { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
2986 + { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
2987 + { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
2988
2989 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
2990 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
2991 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
2992 index 0c6155f..4f94e22 100644
2993 --- a/drivers/ata/ata_piix.c
2994 +++ b/drivers/ata/ata_piix.c
2995 @@ -291,6 +291,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
2996 { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
2997 /* SATA Controller IDE (PCH) */
2998 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
2999 + /* SATA Controller IDE (CPT) */
3000 + { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
3001 + /* SATA Controller IDE (CPT) */
3002 + { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
3003 + /* SATA Controller IDE (CPT) */
3004 + { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
3005 + /* SATA Controller IDE (CPT) */
3006 + { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
3007 { } /* terminate list */
3008 };
3009
3010 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
3011 index 91fed3c..3b09e83 100644
3012 --- a/drivers/ata/libata-core.c
3013 +++ b/drivers/ata/libata-core.c
3014 @@ -4348,6 +4348,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3015 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3016 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3017
3018 + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3019 + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
3020 +
3021 /* devices which puke on READ_NATIVE_MAX */
3022 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3023 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3024 diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
3025 index 1432dc9..9434114 100644
3026 --- a/drivers/ata/pata_ali.c
3027 +++ b/drivers/ata/pata_ali.c
3028 @@ -453,7 +453,9 @@ static void ali_init_chipset(struct pci_dev *pdev)
3029 /* Clear CD-ROM DMA write bit */
3030 tmp &= 0x7F;
3031 /* Cable and UDMA */
3032 - pci_write_config_byte(pdev, 0x4B, tmp | 0x09);
3033 + if (pdev->revision >= 0xc2)
3034 + tmp |= 0x01;
3035 + pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
3036 /*
3037 * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
3038 * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
3039 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
3040 index 88984b8..1d73b8d 100644
3041 --- a/drivers/ata/pata_via.c
3042 +++ b/drivers/ata/pata_via.c
3043 @@ -661,6 +661,7 @@ static const struct pci_device_id via[] = {
3044 { PCI_VDEVICE(VIA, 0x3164), },
3045 { PCI_VDEVICE(VIA, 0x5324), },
3046 { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
3047 + { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
3048
3049 { },
3050 };
3051 diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
3052 index 9047b27..dc8a6f7 100644
3053 --- a/drivers/char/agp/hp-agp.c
3054 +++ b/drivers/char/agp/hp-agp.c
3055 @@ -488,9 +488,8 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
3056 handle = obj;
3057 do {
3058 status = acpi_get_object_info(handle, &info);
3059 - if (ACPI_SUCCESS(status)) {
3060 + if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
3061 /* TBD check _CID also */
3062 - info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
3063 match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
3064 kfree(info);
3065 if (match) {
3066 diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
3067 index 4dcfef0..b8e0219 100644
3068 --- a/drivers/char/agp/intel-agp.c
3069 +++ b/drivers/char/agp/intel-agp.c
3070 @@ -8,6 +8,7 @@
3071 #include <linux/kernel.h>
3072 #include <linux/pagemap.h>
3073 #include <linux/agp_backend.h>
3074 +#include <asm/smp.h>
3075 #include "agp.h"
3076
3077 /*
3078 @@ -815,12 +816,6 @@ static void intel_i830_setup_flush(void)
3079 intel_i830_fini_flush();
3080 }
3081
3082 -static void
3083 -do_wbinvd(void *null)
3084 -{
3085 - wbinvd();
3086 -}
3087 -
3088 /* The chipset_flush interface needs to get data that has already been
3089 * flushed out of the CPU all the way out to main memory, because the GPU
3090 * doesn't snoop those buffers.
3091 @@ -837,12 +832,10 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
3092
3093 memset(pg, 0, 1024);
3094
3095 - if (cpu_has_clflush) {
3096 + if (cpu_has_clflush)
3097 clflush_cache_range(pg, 1024);
3098 - } else {
3099 - if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
3100 - printk(KERN_ERR "Timed out waiting for cache flush.\n");
3101 - }
3102 + else if (wbinvd_on_all_cpus() != 0)
3103 + printk(KERN_ERR "Timed out waiting for cache flush.\n");
3104 }
3105
3106 /* The intel i830 automatically initializes the agp aperture during POST.
3107 diff --git a/drivers/char/raw.c b/drivers/char/raw.c
3108 index 64acd05..9abc3a1 100644
3109 --- a/drivers/char/raw.c
3110 +++ b/drivers/char/raw.c
3111 @@ -247,6 +247,7 @@ static const struct file_operations raw_fops = {
3112 .aio_read = generic_file_aio_read,
3113 .write = do_sync_write,
3114 .aio_write = blkdev_aio_write,
3115 + .fsync = block_fsync,
3116 .open = raw_open,
3117 .release= raw_release,
3118 .ioctl = raw_ioctl,
3119 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
3120 index 27e8de4..2405f17 100644
3121 --- a/drivers/char/tpm/tpm_tis.c
3122 +++ b/drivers/char/tpm/tpm_tis.c
3123 @@ -637,6 +637,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
3124 {"", 0}, /* User Specified */
3125 {"", 0} /* Terminator */
3126 };
3127 +MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
3128
3129 static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
3130 {
3131 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
3132 index 05cab2c..53ffcfc 100644
3133 --- a/drivers/char/tty_io.c
3134 +++ b/drivers/char/tty_io.c
3135 @@ -1408,6 +1408,8 @@ static void release_one_tty(struct work_struct *work)
3136 list_del_init(&tty->tty_files);
3137 file_list_unlock();
3138
3139 + put_pid(tty->pgrp);
3140 + put_pid(tty->session);
3141 free_tty_struct(tty);
3142 }
3143
3144 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
3145 index afed886..08173fc 100644
3146 --- a/drivers/gpu/drm/drm_crtc_helper.c
3147 +++ b/drivers/gpu/drm/drm_crtc_helper.c
3148 @@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
3149 if (connector->status == connector_status_disconnected) {
3150 DRM_DEBUG_KMS("%s is disconnected\n",
3151 drm_get_connector_name(connector));
3152 + drm_mode_connector_update_edid_property(connector, NULL);
3153 goto prune;
3154 }
3155
3156 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
3157 index d5671c3..8fb1346 100644
3158 --- a/drivers/gpu/drm/drm_edid.c
3159 +++ b/drivers/gpu/drm/drm_edid.c
3160 @@ -85,6 +85,8 @@ static struct edid_quirk {
3161
3162 /* Envision Peripherals, Inc. EN-7100e */
3163 { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
3164 + /* Envision EN2028 */
3165 + { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
3166
3167 /* Funai Electronics PM36B */
3168 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
3169 @@ -653,15 +655,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
3170 mode->vsync_end = mode->vsync_start + vsync_pulse_width;
3171 mode->vtotal = mode->vdisplay + vblank;
3172
3173 - /* perform the basic check for the detailed timing */
3174 - if (mode->hsync_end > mode->htotal ||
3175 - mode->vsync_end > mode->vtotal) {
3176 - drm_mode_destroy(dev, mode);
3177 - DRM_DEBUG_KMS("Incorrect detailed timing. "
3178 - "Sync is beyond the blank.\n");
3179 - return NULL;
3180 - }
3181 -
3182 /* Some EDIDs have bogus h/vtotal values */
3183 if (mode->hsync_end > mode->htotal)
3184 mode->htotal = mode->hsync_end + 1;
3185 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
3186 index 251bc0e..ba14553 100644
3187 --- a/drivers/gpu/drm/drm_fops.c
3188 +++ b/drivers/gpu/drm/drm_fops.c
3189 @@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
3190 spin_unlock(&dev->count_lock);
3191 }
3192 out:
3193 - mutex_lock(&dev->struct_mutex);
3194 - if (minor->type == DRM_MINOR_LEGACY) {
3195 - BUG_ON((dev->dev_mapping != NULL) &&
3196 - (dev->dev_mapping != inode->i_mapping));
3197 - if (dev->dev_mapping == NULL)
3198 - dev->dev_mapping = inode->i_mapping;
3199 + if (!retcode) {
3200 + mutex_lock(&dev->struct_mutex);
3201 + if (minor->type == DRM_MINOR_LEGACY) {
3202 + if (dev->dev_mapping == NULL)
3203 + dev->dev_mapping = inode->i_mapping;
3204 + else if (dev->dev_mapping != inode->i_mapping)
3205 + retcode = -ENODEV;
3206 + }
3207 + mutex_unlock(&dev->struct_mutex);
3208 }
3209 - mutex_unlock(&dev->struct_mutex);
3210
3211 return retcode;
3212 }
3213 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
3214 index a177b57..4a85aa3 100644
3215 --- a/drivers/gpu/drm/i915/intel_lvds.c
3216 +++ b/drivers/gpu/drm/i915/intel_lvds.c
3217 @@ -885,6 +885,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
3218 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
3219 },
3220 },
3221 + {
3222 + .callback = intel_no_lvds_dmi_callback,
3223 + .ident = "Clientron U800",
3224 + .matches = {
3225 + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
3226 + DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
3227 + },
3228 + },
3229
3230 { } /* terminating entry */
3231 };
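
The new entry disables LVDS probing on the Clientron U800 by matching its DMI identity strings. Kernel DMI matching is substring based, so the semantics of an entry look roughly like this sketch, where struct dmi_match and entry_matches are illustrative stand-ins:

#include <stdio.h>
#include <string.h>

struct dmi_match { const char *vendor, *product; };

static int entry_matches(const struct dmi_match *m,
			 const char *vendor, const char *product)
{
	/* DMI_MATCH semantics are substring matches against firmware strings */
	return strstr(vendor, m->vendor) && strstr(product, m->product);
}

int main(void)
{
	static const struct dmi_match u800 = { "Clientron", "U800" };

	printf("%d\n", entry_matches(&u800, "Clientron", "U800"));	/* 1 */
	return 0;
}
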
3232 diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
3233 index 2f43ee8..d8c4f72 100644
3234 --- a/drivers/gpu/drm/radeon/r300.c
3235 +++ b/drivers/gpu/drm/radeon/r300.c
3236 @@ -346,11 +346,12 @@ void r300_gpu_init(struct radeon_device *rdev)
3237
3238 r100_hdp_reset(rdev);
3239 /* FIXME: rv380 one pipes ? */
3240 - if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
3241 + if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
3242 + (rdev->family == CHIP_R350)) {
3243 /* r300,r350 */
3244 rdev->num_gb_pipes = 2;
3245 } else {
3246 - /* rv350,rv370,rv380 */
3247 + /* rv350,rv370,rv380,r300 AD */
3248 rdev->num_gb_pipes = 1;
3249 }
3250 rdev->num_z_pipes = 1;
3251 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
3252 index 29763ce..b1dc1a1 100644
3253 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
3254 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
3255 @@ -140,12 +140,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
3256 {
3257 struct drm_device *dev = connector->dev;
3258 struct drm_connector *conflict;
3259 + struct radeon_connector *radeon_conflict;
3260 int i;
3261
3262 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
3263 if (conflict == connector)
3264 continue;
3265
3266 + radeon_conflict = to_radeon_connector(conflict);
3267 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
3268 if (conflict->encoder_ids[i] == 0)
3269 break;
3270 @@ -155,6 +157,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
3271 if (conflict->status != connector_status_connected)
3272 continue;
3273
3274 + if (radeon_conflict->use_digital)
3275 + continue;
3276 +
3277 if (priority == true) {
3278 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
3279 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
3280 @@ -281,7 +286,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
3281 radeon_encoder = to_radeon_encoder(encoder);
3282 if (!radeon_encoder->enc_priv)
3283 return 0;
3284 - if (rdev->is_atom_bios) {
3285 + if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
3286 struct radeon_encoder_atom_dac *dac_int;
3287 dac_int = radeon_encoder->enc_priv;
3288 dac_int->tv_std = val;
3289 diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
3290 index 4f7afc7..1c46848 100644
3291 --- a/drivers/gpu/drm/radeon/radeon_cp.c
3292 +++ b/drivers/gpu/drm/radeon/radeon_cp.c
3293 @@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
3294 return -EBUSY;
3295 }
3296
3297 -static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
3298 +static void radeon_init_pipes(struct drm_device *dev)
3299 {
3300 + drm_radeon_private_t *dev_priv = dev->dev_private;
3301 uint32_t gb_tile_config, gb_pipe_sel = 0;
3302
3303 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
3304 @@ -436,11 +437,12 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
3305 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
3306 } else {
3307 /* R3xx */
3308 - if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
3309 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
3310 + dev->pdev->device != 0x4144) ||
3311 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
3312 dev_priv->num_gb_pipes = 2;
3313 } else {
3314 - /* R3Vxx */
3315 + /* RV3xx/R300 AD */
3316 dev_priv->num_gb_pipes = 1;
3317 }
3318 }
3319 @@ -736,7 +738,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
3320
3321 /* setup the raster pipes */
3322 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
3323 - radeon_init_pipes(dev_priv);
3324 + radeon_init_pipes(dev);
3325
3326 /* Reset the CP ring */
3327 radeon_do_cp_reset(dev_priv);
3328 diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
3329 index 484f791..20c52da 100644
3330 --- a/drivers/gpu/drm/radeon/radeon_cs.c
3331 +++ b/drivers/gpu/drm/radeon/radeon_cs.c
3332 @@ -247,7 +247,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3333 }
3334 r = radeon_cs_parser_relocs(&parser);
3335 if (r) {
3336 - DRM_ERROR("Failed to parse relocation !\n");
3337 + if (r != -ERESTARTSYS)
3338 + DRM_ERROR("Failed to parse relocation %d!\n", r);
3339 radeon_cs_parser_fini(&parser, r);
3340 mutex_unlock(&rdev->cs_mutex);
3341 return r;
3342 diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
3343 index d42bc51..4478b99 100644
3344 --- a/drivers/gpu/drm/radeon/radeon_encoders.c
3345 +++ b/drivers/gpu/drm/radeon/radeon_encoders.c
3346 @@ -1155,8 +1155,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
3347 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
3348 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3349 atombios_dac_setup(encoder, ATOM_ENABLE);
3350 - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
3351 - atombios_tv_setup(encoder, ATOM_ENABLE);
3352 + if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
3353 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
3354 + atombios_tv_setup(encoder, ATOM_ENABLE);
3355 + else
3356 + atombios_tv_setup(encoder, ATOM_DISABLE);
3357 + }
3358 break;
3359 }
3360 atombios_apply_encoder_quirks(encoder, adjusted_mode);
3361 diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
3362 index 3a12bb0..fc64a20 100644
3363 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
3364 +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
3365 @@ -57,6 +57,10 @@
3366 #define NTSC_TV_PLL_N_14 693
3367 #define NTSC_TV_PLL_P_14 7
3368
3369 +#define PAL_TV_PLL_M_14 19
3370 +#define PAL_TV_PLL_N_14 353
3371 +#define PAL_TV_PLL_P_14 5
3372 +
3373 #define VERT_LEAD_IN_LINES 2
3374 #define FRAC_BITS 0xe
3375 #define FRAC_MASK 0x3fff
3376 @@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
3377 630627, /* defRestart */
3378 347, /* crtcPLL_N */
3379 14, /* crtcPLL_M */
3380 - 8, /* crtcPLL_postDiv */
3381 + 8, /* crtcPLL_postDiv */
3382 1022, /* pixToTV */
3383 },
3384 + { /* PAL timing for 14 MHz ref clk */
3385 + 800, /* horResolution */
3386 + 600, /* verResolution */
3387 + TV_STD_PAL, /* standard */
3388 + 1131, /* horTotal */
3389 + 742, /* verTotal */
3390 + 813, /* horStart */
3391 + 840, /* horSyncStart */
3392 + 633, /* verSyncStart */
3393 + 708369, /* defRestart */
3394 + 211, /* crtcPLL_N */
3395 + 9, /* crtcPLL_M */
3396 + 8, /* crtcPLL_postDiv */
3397 + 759, /* pixToTV */
3398 + },
3399 };
3400
3401 #define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
3402 @@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
3403 if (pll->reference_freq == 2700)
3404 const_ptr = &available_tv_modes[1];
3405 else
3406 - const_ptr = &available_tv_modes[1]; /* FIX ME */
3407 + const_ptr = &available_tv_modes[3];
3408 }
3409 return const_ptr;
3410 }
3411 @@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
3412 n = PAL_TV_PLL_N_27;
3413 p = PAL_TV_PLL_P_27;
3414 } else {
3415 - m = PAL_TV_PLL_M_27;
3416 - n = PAL_TV_PLL_N_27;
3417 - p = PAL_TV_PLL_P_27;
3418 + m = PAL_TV_PLL_M_14;
3419 + n = PAL_TV_PLL_N_14;
3420 + p = PAL_TV_PLL_P_14;
3421 }
3422 }
3423
3424 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
3425 index 4444f48..1700297 100644
3426 --- a/drivers/gpu/drm/radeon/rs600.c
3427 +++ b/drivers/gpu/drm/radeon/rs600.c
3428 @@ -57,7 +57,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
3429 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
3430
3431 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
3432 - tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
3433 + tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
3434 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
3435
3436 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
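
The one-character fix in this hunk is the whole bug: the two S_000100_*(1) values are disjoint single-bit masks, so ANDing them yields zero and the old statement never set either invalidate bit. Demonstrated below with illustrative mask names, not the driver's macros:

#include <assert.h>

#define INVALIDATE_L1	(1u << 0)
#define INVALIDATE_L2	(1u << 1)

int main(void)
{
	unsigned int tmp = 0;

	assert((INVALIDATE_L1 & INVALIDATE_L2) == 0);	/* the old bug: a no-op */
	tmp |= INVALIDATE_L1 | INVALIDATE_L2;		/* the fix: sets both bits */
	assert(tmp == 3u);
	return 0;
}
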
3437 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
3438 index 1ac0c93..aa8688d 100644
3439 --- a/drivers/gpu/vga/vgaarb.c
3440 +++ b/drivers/gpu/vga/vgaarb.c
3441 @@ -954,6 +954,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
3442 }
3443
3444 } else if (strncmp(curr_pos, "target ", 7) == 0) {
3445 + struct pci_bus *pbus;
3446 unsigned int domain, bus, devfn;
3447 struct vga_device *vgadev;
3448
3449 @@ -961,7 +962,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
3450 remaining -= 7;
3451 pr_devel("client 0x%p called 'target'\n", priv);
3452 /* if target is default */
3453 - if (!strncmp(buf, "default", 7))
3454 + if (!strncmp(curr_pos, "default", 7))
3455 pdev = pci_dev_get(vga_default_device());
3456 else {
3457 if (!vga_pci_str_to_vars(curr_pos, remaining,
3458 @@ -969,18 +970,31 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
3459 ret_val = -EPROTO;
3460 goto done;
3461 }
3462 -
3463 - pdev = pci_get_bus_and_slot(bus, devfn);
3464 + pr_devel("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
3465 + domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
3466 +
3467 + pbus = pci_find_bus(domain, bus);
3468 + pr_devel("vgaarb: pbus %p\n", pbus);
3469 + if (pbus == NULL) {
3470 + pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
3471 + domain, bus);
3472 + ret_val = -ENODEV;
3473 + goto done;
3474 + }
3475 + pdev = pci_get_slot(pbus, devfn);
3476 + pr_devel("vgaarb: pdev %p\n", pdev);
3477 if (!pdev) {
3478 - pr_info("vgaarb: invalid PCI address!\n");
3479 + pr_err("vgaarb: invalid PCI address %x:%x\n",
3480 + bus, devfn);
3481 ret_val = -ENODEV;
3482 goto done;
3483 }
3484 }
3485
3486 vgadev = vgadev_find(pdev);
3487 + pr_devel("vgaarb: vgadev %p\n", vgadev);
3488 if (vgadev == NULL) {
3489 - pr_info("vgaarb: this pci device is not a vga device\n");
3490 + pr_err("vgaarb: this pci device is not a vga device\n");
3491 pci_dev_put(pdev);
3492 ret_val = -ENODEV;
3493 goto done;
3494 @@ -998,7 +1012,8 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
3495 }
3496 }
3497 if (i == MAX_USER_CARDS) {
3498 - pr_err("vgaarb: maximum user cards number reached!\n");
3499 + pr_err("vgaarb: maximum number of user cards (%d) reached!\n",
3500 + MAX_USER_CARDS);
3501 pci_dev_put(pdev);
3502 /* XXX: which value to return? */
3503 ret_val = -ENOMEM;
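
Two fixes ride along in this hunk besides the better diagnostics: the device lookup is now domain-aware (pci_find_bus() plus pci_get_slot() instead of pci_get_bus_and_slot(), which assumes domain 0), and the "default" keyword is matched at the parse cursor rather than at the start of the whole command buffer. A sketch of why the old strncmp() test could never match:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "target default";
	const char *curr_pos = buf + 7;		/* cursor, past "target " */

	/* the old test compared the buffer start, which is "target " */
	printf("old: %s\n", strncmp(buf, "default", 7) == 0 ? "match" : "no match");
	printf("new: %s\n", strncmp(curr_pos, "default", 7) == 0 ? "match" : "no match");
	return 0;
}
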
3504 diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
3505 index cab13e8..62416e6 100644
3506 --- a/drivers/hid/hid-gyration.c
3507 +++ b/drivers/hid/hid-gyration.c
3508 @@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
3509 static int gyration_event(struct hid_device *hdev, struct hid_field *field,
3510 struct hid_usage *usage, __s32 value)
3511 {
3512 - struct input_dev *input = field->hidinput->input;
3513 +
3514 + if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
3515 + return 0;
3516
3517 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
3518 (usage->hid & 0xff) == 0x82) {
3519 + struct input_dev *input = field->hidinput->input;
3520 input_event(input, usage->type, usage->code, 1);
3521 input_sync(input);
3522 input_event(input, usage->type, usage->code, 0);
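
The original code dereferenced field->hidinput->input before checking that the input interface was claimed at all; the hunk moves the dereference behind the guard. The shape of the fix, with handle_event as a hypothetical stand-in:

#include <stdio.h>

struct input;				/* opaque, as in the driver */
struct field { struct input *hidinput; };

static int handle_event(int claimed_input, const struct field *f)
{
	/* validate before dereferencing, as the hunk now does */
	if (!claimed_input || !f->hidinput)
		return 0;
	/* only past this point is f->hidinput safe to use */
	return 1;
}

int main(void)
{
	struct field f = { 0 };

	printf("%d\n", handle_event(1, &f));	/* 0: bails out safely */
	return 0;
}
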
3523 diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
3524 index cf5afb9..5d5ed69 100644
3525 --- a/drivers/hwmon/lis3lv02d.c
3526 +++ b/drivers/hwmon/lis3lv02d.c
3527 @@ -127,12 +127,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
3528
3529 /*
3530 * Common configuration
3531 - * BDU: LSB and MSB values are not updated until both have been read.
3532 - * So the value read will always be correct.
3533 + * BDU: (12 bits sensors only) LSB and MSB values are not updated until
3534 + * both have been read. So the value read will always be correct.
3535 */
3536 - lis3->read(lis3, CTRL_REG2, &reg);
3537 - reg |= CTRL2_BDU;
3538 - lis3->write(lis3, CTRL_REG2, reg);
3539 + if (lis3->whoami == LIS_DOUBLE_ID) {
3540 + lis3->read(lis3, CTRL_REG2, &reg);
3541 + reg |= CTRL2_BDU;
3542 + lis3->write(lis3, CTRL_REG2, reg);
3543 + }
3544 }
3545 EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
3546
3547 @@ -361,7 +363,8 @@ static ssize_t lis3lv02d_calibrate_store(struct device *dev,
3548 }
3549
3550 /* conversion btw sampling rate and the register values */
3551 -static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560};
3552 +static int lis3_12_rates[4] = {40, 160, 640, 2560};
3553 +static int lis3_8_rates[2] = {100, 400};
3554 static ssize_t lis3lv02d_rate_show(struct device *dev,
3555 struct device_attribute *attr, char *buf)
3556 {
3557 @@ -369,8 +372,13 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
3558 int val;
3559
3560 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
3561 - val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
3562 - return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
3563 +
3564 + if (lis3_dev.whoami == LIS_DOUBLE_ID)
3565 + val = lis3_12_rates[(ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4];
3566 + else
3567 + val = lis3_8_rates[(ctrl & CTRL1_DR) >> 7];
3568 +
3569 + return sprintf(buf, "%d\n", val);
3570 }
3571
3572 static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
3573 diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
3574 index 3e1ff46..7cdd76f 100644
3575 --- a/drivers/hwmon/lis3lv02d.h
3576 +++ b/drivers/hwmon/lis3lv02d.h
3577 @@ -103,6 +103,7 @@ enum lis3lv02d_ctrl1 {
3578 CTRL1_DF1 = 0x20,
3579 CTRL1_PD0 = 0x40,
3580 CTRL1_PD1 = 0x80,
3581 + CTRL1_DR = 0x80, /* Data rate on 8 bits */
3582 };
3583 enum lis3lv02d_ctrl2 {
3584 CTRL2_DAS = 0x01,
3585 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
3586 index 864a371..fbc997e 100644
3587 --- a/drivers/hwmon/sht15.c
3588 +++ b/drivers/hwmon/sht15.c
3589 @@ -302,13 +302,13 @@ error_ret:
3590 **/
3591 static inline int sht15_calc_temp(struct sht15_data *data)
3592 {
3593 - int d1 = 0;
3594 + int d1 = temppoints[0].d1;
3595 int i;
3596
3597 - for (i = 1; i < ARRAY_SIZE(temppoints); i++)
3598 + for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
3599 /* Find pointer to interpolate */
3600 if (data->supply_uV > temppoints[i - 1].vdd) {
3601 - d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
3602 + d1 = (data->supply_uV - temppoints[i - 1].vdd)
3603 * (temppoints[i].d1 - temppoints[i - 1].d1)
3604 / (temppoints[i].vdd - temppoints[i - 1].vdd)
3605 + temppoints[i - 1].d1;
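
Two fixes here: the interpolation now works in consistent microvolt units (the stray /1000 is gone), and the table is scanned from the top with the lowest calibration point as the fallback instead of d1 = 0. A standalone version under those assumptions; the calibration values below are made up, the real table lives in sht15.c:

#include <stdio.h>

struct point { int vdd; int d1; };	/* vdd in uV; d1 scaled offset */

static const struct point temppoints[] = {	/* made-up sample values */
	{ 2500000, -39400 },
	{ 3500000, -39700 },
	{ 5000000, -40100 },
};

#define N_POINTS (sizeof(temppoints) / sizeof(temppoints[0]))

static int interp_d1(int supply_uV)
{
	int d1 = temppoints[0].d1;	/* fallback: lowest calibration point */
	int i;

	for (i = N_POINTS - 1; i > 0; i--)
		if (supply_uV > temppoints[i - 1].vdd) {
			d1 = (supply_uV - temppoints[i - 1].vdd)
				* (temppoints[i].d1 - temppoints[i - 1].d1)
				/ (temppoints[i].vdd - temppoints[i - 1].vdd)
				+ temppoints[i - 1].d1;
			break;
		}
	return d1;
}

int main(void)
{
	printf("d1 at 3.0 V: %d\n", interp_d1(3000000));	/* -39550 */
	return 0;
}
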
3606 @@ -541,7 +541,12 @@ static int __devinit sht15_probe(struct platform_device *pdev)
3607 /* If a regulator is available, query what the supply voltage actually is!*/
3608 data->reg = regulator_get(data->dev, "vcc");
3609 if (!IS_ERR(data->reg)) {
3610 - data->supply_uV = regulator_get_voltage(data->reg);
3611 + int voltage;
3612 +
3613 + voltage = regulator_get_voltage(data->reg);
3614 + if (voltage)
3615 + data->supply_uV = voltage;
3616 +
3617 regulator_enable(data->reg);
3618 /* setup a notifier block to update this if another device
3619 * causes the voltage to change */
3620 diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
3621 index e8fe7f1..2dd2ce9 100644
3622 --- a/drivers/i2c/busses/Kconfig
3623 +++ b/drivers/i2c/busses/Kconfig
3624 @@ -77,7 +77,7 @@ config I2C_AMD8111
3625 will be called i2c-amd8111.
3626
3627 config I2C_I801
3628 - tristate "Intel 82801 (ICH)"
3629 + tristate "Intel 82801 (ICH/PCH)"
3630 depends on PCI
3631 help
3632 If you say yes to this option, support will be included for the Intel
3633 @@ -97,7 +97,8 @@ config I2C_I801
3634 ICH9
3635 Tolapai
3636 ICH10
3637 - PCH
3638 + 3400/5 Series (PCH)
3639 + Cougar Point (PCH)
3640
3641 This driver can also be built as a module. If so, the module
3642 will be called i2c-i801.
3643 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
3644 index 4d73fcf..806f033 100644
3645 --- a/drivers/i2c/busses/i2c-i801.c
3646 +++ b/drivers/i2c/busses/i2c-i801.c
3647 @@ -41,7 +41,8 @@
3648 Tolapai 0x5032 32 hard yes yes yes
3649 ICH10 0x3a30 32 hard yes yes yes
3650 ICH10 0x3a60 32 hard yes yes yes
3651 - PCH 0x3b30 32 hard yes yes yes
3652 + 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
3653 + Cougar Point (PCH) 0x1c22 32 hard yes yes yes
3654
3655 Features supported by this driver:
3656 Software PEC no
3657 @@ -580,6 +581,7 @@ static struct pci_device_id i801_ids[] = {
3658 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
3659 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
3660 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
3661 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
3662 { 0, }
3663 };
3664
3665 @@ -709,6 +711,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
3666 case PCI_DEVICE_ID_INTEL_ICH10_4:
3667 case PCI_DEVICE_ID_INTEL_ICH10_5:
3668 case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
3669 + case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
3670 i801_features |= FEATURE_I2C_BLOCK_READ;
3671 /* fall through */
3672 case PCI_DEVICE_ID_INTEL_82801DB_3:
3673 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
3674 index 30bdf42..f8302c2 100644
3675 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
3676 +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
3677 @@ -752,6 +752,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
3678 if (++priv->tx_outstanding == ipoib_sendq_size) {
3679 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
3680 tx->qp->qp_num);
3681 + if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
3682 + ipoib_warn(priv, "request notify on send CQ failed\n");
3683 netif_stop_queue(dev);
3684 }
3685 }
3686 diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
3687 index b9453d0..274c883 100644
3688 --- a/drivers/infiniband/ulp/iser/iser_memory.c
3689 +++ b/drivers/infiniband/ulp/iser/iser_memory.c
3690 @@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
3691 mem_copy->copy_buf = NULL;
3692 }
3693
3694 +#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
3695 +
3696 /**
3697 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
3698 * and returns the length of resulting physical address array (may be less than
3699 @@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
3700 * where --few fragments of the same page-- are present in the SG as
3701 * consecutive elements. Also, it handles one entry SG.
3702 */
3703 +
3704 static int iser_sg_to_page_vec(struct iser_data_buf *data,
3705 struct iser_page_vec *page_vec,
3706 struct ib_device *ibdev)
3707 {
3708 - struct scatterlist *sgl = (struct scatterlist *)data->buf;
3709 - struct scatterlist *sg;
3710 - u64 first_addr, last_addr, page;
3711 - int end_aligned;
3712 - unsigned int cur_page = 0;
3713 + struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
3714 + u64 start_addr, end_addr, page, chunk_start = 0;
3715 unsigned long total_sz = 0;
3716 - int i;
3717 + unsigned int dma_len;
3718 + int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
3719
3720 /* compute the offset of first element */
3721 page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
3722
3723 + new_chunk = 1;
3724 + cur_page = 0;
3725 for_each_sg(sgl, sg, data->dma_nents, i) {
3726 - unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
3727 -
3728 + start_addr = ib_sg_dma_address(ibdev, sg);
3729 + if (new_chunk)
3730 + chunk_start = start_addr;
3731 + dma_len = ib_sg_dma_len(ibdev, sg);
3732 + end_addr = start_addr + dma_len;
3733 total_sz += dma_len;
3734
3735 - first_addr = ib_sg_dma_address(ibdev, sg);
3736 - last_addr = first_addr + dma_len;
3737 -
3738 - end_aligned = !(last_addr & ~MASK_4K);
3739 -
3740 - /* continue to collect page fragments till aligned or SG ends */
3741 - while (!end_aligned && (i + 1 < data->dma_nents)) {
3742 - sg = sg_next(sg);
3743 - i++;
3744 - dma_len = ib_sg_dma_len(ibdev, sg);
3745 - total_sz += dma_len;
3746 - last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
3747 - end_aligned = !(last_addr & ~MASK_4K);
3748 + /* collect page fragments until aligned or end of SG list */
3749 + if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
3750 + new_chunk = 0;
3751 + continue;
3752 }
3753 -
3754 - /* handle the 1st page in the 1st DMA element */
3755 - if (cur_page == 0) {
3756 - page = first_addr & MASK_4K;
3757 - page_vec->pages[cur_page] = page;
3758 - cur_page++;
3759 + new_chunk = 1;
3760 +
3761 + /* address of the first page in the contiguous chunk;
3762 + masking relevant for the very first SG entry,
3763 + which might be unaligned */
3764 + page = chunk_start & MASK_4K;
3765 + do {
3766 + page_vec->pages[cur_page++] = page;
3767 page += SIZE_4K;
3768 - } else
3769 - page = first_addr;
3770 -
3771 - for (; page < last_addr; page += SIZE_4K) {
3772 - page_vec->pages[cur_page] = page;
3773 - cur_page++;
3774 - }
3775 -
3776 + } while (page < end_addr);
3777 }
3778 +
3779 page_vec->data_size = total_sz;
3780 iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
3781 return cur_page;
3782 }
3783
3784 -#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
3785
3786 /**
3787 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
3788 @@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
3789 * the number of entries which are aligned correctly. Supports the case where
3790 * consecutive SG elements are actually fragments of the same physical page.
3791 */
3792 -static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
3793 - struct ib_device *ibdev)
3794 +static int iser_data_buf_aligned_len(struct iser_data_buf *data,
3795 + struct ib_device *ibdev)
3796 {
3797 - struct scatterlist *sgl, *sg;
3798 - u64 end_addr, next_addr;
3799 - int i, cnt;
3800 - unsigned int ret_len = 0;
3801 + struct scatterlist *sgl, *sg, *next_sg = NULL;
3802 + u64 start_addr, end_addr;
3803 + int i, ret_len, start_check = 0;
3804 +
3805 + if (data->dma_nents == 1)
3806 + return 1;
3807
3808 sgl = (struct scatterlist *)data->buf;
3809 + start_addr = ib_sg_dma_address(ibdev, sgl);
3810
3811 - cnt = 0;
3812 for_each_sg(sgl, sg, data->dma_nents, i) {
3813 - /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
3814 - "offset: %ld sz: %ld\n", i,
3815 - (unsigned long)sg_phys(sg),
3816 - (unsigned long)sg->offset,
3817 - (unsigned long)sg->length); */
3818 - end_addr = ib_sg_dma_address(ibdev, sg) +
3819 - ib_sg_dma_len(ibdev, sg);
3820 - /* iser_dbg("Checking sg iobuf end address "
3821 - "0x%08lX\n", end_addr); */
3822 - if (i + 1 < data->dma_nents) {
3823 - next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
3824 - /* are i, i+1 fragments of the same page? */
3825 - if (end_addr == next_addr) {
3826 - cnt++;
3827 - continue;
3828 - } else if (!IS_4K_ALIGNED(end_addr)) {
3829 - ret_len = cnt + 1;
3830 - break;
3831 - }
3832 - }
3833 - cnt++;
3834 + if (start_check && !IS_4K_ALIGNED(start_addr))
3835 + break;
3836 +
3837 + next_sg = sg_next(sg);
3838 + if (!next_sg)
3839 + break;
3840 +
3841 + end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
3842 + start_addr = ib_sg_dma_address(ibdev, next_sg);
3843 +
3844 + if (end_addr == start_addr) {
3845 + start_check = 0;
3846 + continue;
3847 + } else
3848 + start_check = 1;
3849 +
3850 + if (!IS_4K_ALIGNED(end_addr))
3851 + break;
3852 }
3853 - if (i == data->dma_nents)
3854 - ret_len = cnt; /* loop ended */
3855 + ret_len = (next_sg) ? i : i+1;
3856 iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
3857 ret_len, data->dma_nents, data);
3858 return ret_len;
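
Both rewritten helpers pivot on IS_4K_ALIGNED(): a run of SG entries can be fed to the HCA as one page vector only if every interior boundary falls on a 4 KiB page edge. The mask arithmetic, checked standalone; this mirrors the macro as used in the hunk, with MASK_4K reconstructed from its apparent definition in the iSER headers:

#include <assert.h>

#define SIZE_4K	(1UL << 12)
#define MASK_4K	(~(SIZE_4K - 1))	/* clears the low 12 bits */
#define IS_4K_ALIGNED(addr)	((((unsigned long)(addr)) & ~MASK_4K) == 0)

int main(void)
{
	assert(IS_4K_ALIGNED(0x10000));		/* on a page edge */
	assert(!IS_4K_ALIGNED(0x10200));	/* 512 bytes into a page */
	assert((0x10200UL & MASK_4K) == 0x10000UL); /* containing page */
	return 0;
}
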
3859 diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
3860 index b5b69cc..69fc4b8 100644
3861 --- a/drivers/input/tablet/wacom_sys.c
3862 +++ b/drivers/input/tablet/wacom_sys.c
3863 @@ -562,11 +562,15 @@ static int wacom_resume(struct usb_interface *intf)
3864 int rv;
3865
3866 mutex_lock(&wacom->lock);
3867 - if (wacom->open) {
3868 +
3869 + /* switch to wacom mode first */
3870 + wacom_query_tablet_data(intf);
3871 +
3872 + if (wacom->open)
3873 rv = usb_submit_urb(wacom->irq, GFP_NOIO);
3874 - wacom_query_tablet_data(intf);
3875 - } else
3876 + else
3877 rv = 0;
3878 +
3879 mutex_unlock(&wacom->lock);
3880
3881 return rv;
3882 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3883 index adb3f8a..d7786e3 100644
3884 --- a/drivers/md/dm.c
3885 +++ b/drivers/md/dm.c
3886 @@ -1487,10 +1487,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
3887 return BLKPREP_OK;
3888 }
3889
3890 -static void map_request(struct dm_target *ti, struct request *rq,
3891 - struct mapped_device *md)
3892 +/*
3893 + * Returns:
3894 + * 0 : the request has been processed (not requeued)
3895 + * !0 : the request has been requeued
3896 + */
3897 +static int map_request(struct dm_target *ti, struct request *rq,
3898 + struct mapped_device *md)
3899 {
3900 - int r;
3901 + int r, requeued = 0;
3902 struct request *clone = rq->special;
3903 struct dm_rq_target_io *tio = clone->end_io_data;
3904
3905 @@ -1516,6 +1521,7 @@ static void map_request(struct dm_target *ti, struct request *rq,
3906 case DM_MAPIO_REQUEUE:
3907 /* The target wants to requeue the I/O */
3908 dm_requeue_unmapped_request(clone);
3909 + requeued = 1;
3910 break;
3911 default:
3912 if (r > 0) {
3913 @@ -1527,6 +1533,8 @@ static void map_request(struct dm_target *ti, struct request *rq,
3914 dm_kill_unmapped_request(clone, r);
3915 break;
3916 }
3917 +
3918 + return requeued;
3919 }
3920
3921 /*
3922 @@ -1568,12 +1576,17 @@ static void dm_request_fn(struct request_queue *q)
3923
3924 blk_start_request(rq);
3925 spin_unlock(q->queue_lock);
3926 - map_request(ti, rq, md);
3927 + if (map_request(ti, rq, md))
3928 + goto requeued;
3929 +
3930 spin_lock_irq(q->queue_lock);
3931 }
3932
3933 goto out;
3934
3935 +requeued:
3936 + spin_lock_irq(q->queue_lock);
3937 +
3938 plug_and_out:
3939 if (!elv_queue_empty(q))
3940 /* Some requests still remain, retry later */
3941 diff --git a/drivers/md/linear.c b/drivers/md/linear.c
3942 index 1ceceb3..dff9d2f 100644
3943 --- a/drivers/md/linear.c
3944 +++ b/drivers/md/linear.c
3945 @@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
3946 disk_stack_limits(mddev->gendisk, rdev->bdev,
3947 rdev->data_offset << 9);
3948 /* as we don't honour merge_bvec_fn, we must never risk
3949 - * violating it, so limit ->max_sector to one PAGE, as
3950 - * a one page request is never in violation.
3951 + * violating it, so limit max_phys_segments to 1 lying within
3952 + * a single page.
3953 */
3954 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
3955 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
3956 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
3957 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
3958 + blk_queue_max_phys_segments(mddev->queue, 1);
3959 + blk_queue_segment_boundary(mddev->queue,
3960 + PAGE_CACHE_SIZE - 1);
3961 + }
3962
3963 conf->array_sectors += rdev->sectors;
3964 cnt++;
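
The same replacement recurs in multipath.c, raid0.c and raid10.c below: instead of capping max_sectors at one page, the queue now advertises at most one physical segment that may not cross a page boundary, a looser limit with the same guarantee that a stacked driver's merge_bvec_fn can never be violated. The invariant as a predicate; request_is_safe is illustrative, not block-layer code:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL

/* one physical segment, confined to a single page */
static int request_is_safe(unsigned long start, unsigned long len,
			   unsigned int nr_phys_segments)
{
	unsigned long mask = ~(PAGE_CACHE_SIZE - 1);

	return nr_phys_segments <= 1 &&
	       (start & mask) == ((start + len - 1) & mask);
}

int main(void)
{
	printf("%d\n", request_is_safe(0x1000, 4096, 1));	/* 1: fits */
	printf("%d\n", request_is_safe(0x1800, 4096, 1));	/* 0: crosses */
	return 0;
}
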
3965 diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
3966 index ee7646f..e4b11f1 100644
3967 --- a/drivers/md/multipath.c
3968 +++ b/drivers/md/multipath.c
3969 @@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
3970 rdev->data_offset << 9);
3971
3972 /* as we don't honour merge_bvec_fn, we must never risk
3973 - * violating it, so limit ->max_sector to one PAGE, as
3974 - * a one page request is never in violation.
3975 + * violating it, so limit ->max_phys_segments to one, lying
3976 + * within a single page.
3977 * (Note: it is very unlikely that a device with
3978 * merge_bvec_fn will be involved in multipath.)
3979 */
3980 - if (q->merge_bvec_fn &&
3981 - queue_max_sectors(q) > (PAGE_SIZE>>9))
3982 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
3983 + if (q->merge_bvec_fn) {
3984 + blk_queue_max_phys_segments(mddev->queue, 1);
3985 + blk_queue_segment_boundary(mddev->queue,
3986 + PAGE_CACHE_SIZE - 1);
3987 + }
3988
3989 conf->working_disks++;
3990 mddev->degraded--;
3991 @@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev)
3992 /* as we don't honour merge_bvec_fn, we must never risk
3993 * violating it, not that we ever expect a device with
3994 * a merge_bvec_fn to be involved in multipath */
3995 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
3996 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
3997 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
3998 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
3999 + blk_queue_max_phys_segments(mddev->queue, 1);
4000 + blk_queue_segment_boundary(mddev->queue,
4001 + PAGE_CACHE_SIZE - 1);
4002 + }
4003
4004 if (!test_bit(Faulty, &rdev->flags))
4005 conf->working_disks++;
4006 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
4007 index d3a4ce0..3db857c 100644
4008 --- a/drivers/md/raid0.c
4009 +++ b/drivers/md/raid0.c
4010 @@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev)
4011 disk_stack_limits(mddev->gendisk, rdev1->bdev,
4012 rdev1->data_offset << 9);
4013 /* as we don't honour merge_bvec_fn, we must never risk
4014 - * violating it, so limit ->max_sector to one PAGE, as
4015 - * a one page request is never in violation.
4016 + * violating it, so limit ->max_phys_segments to 1, lying within
4017 + * a single page.
4018 */
4019
4020 - if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
4021 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
4022 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
4023 -
4024 + if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
4025 + blk_queue_max_phys_segments(mddev->queue, 1);
4026 + blk_queue_segment_boundary(mddev->queue,
4027 + PAGE_CACHE_SIZE - 1);
4028 + }
4029 if (!smallest || (rdev1->sectors < smallest->sectors))
4030 smallest = rdev1;
4031 cnt++;
4032 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
4033 index c2cb7b8..3860ac7 100644
4034 --- a/drivers/md/raid10.c
4035 +++ b/drivers/md/raid10.c
4036 @@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
4037
4038 disk_stack_limits(mddev->gendisk, rdev->bdev,
4039 rdev->data_offset << 9);
4040 - /* as we don't honour merge_bvec_fn, we must never risk
4041 - * violating it, so limit ->max_sector to one PAGE, as
4042 - * a one page request is never in violation.
4043 + /* as we don't honour merge_bvec_fn, we must
4044 + * never risk violating it, so limit
4045 + * ->max_phys_segments to one lying within a single
4046 + * page, as a one page request is never in
4047 + * violation.
4048 */
4049 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
4050 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
4051 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
4052 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
4053 + blk_queue_max_phys_segments(mddev->queue, 1);
4054 + blk_queue_segment_boundary(mddev->queue,
4055 + PAGE_CACHE_SIZE - 1);
4056 + }
4057
4058 p->head_position = 0;
4059 rdev->raid_disk = mirror;
4060 @@ -2155,12 +2159,14 @@ static int run(mddev_t *mddev)
4061 disk_stack_limits(mddev->gendisk, rdev->bdev,
4062 rdev->data_offset << 9);
4063 /* as we don't honour merge_bvec_fn, we must never risk
4064 - * violating it, so limit ->max_sector to one PAGE, as
4065 - * a one page request is never in violation.
4066 + * violating it, so limit max_phys_segments to 1 lying
4067 + * within a single page.
4068 */
4069 - if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
4070 - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
4071 - blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
4072 + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
4073 + blk_queue_max_phys_segments(mddev->queue, 1);
4074 + blk_queue_segment_boundary(mddev->queue,
4075 + PAGE_CACHE_SIZE - 1);
4076 + }
4077
4078 disk->head_position = 0;
4079 }
4080 diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
4081 index 9b2e219..352acd0 100644
4082 --- a/drivers/message/fusion/mptctl.c
4083 +++ b/drivers/message/fusion/mptctl.c
4084 @@ -621,11 +621,8 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4085 */
4086 iocnumX = khdr.iocnum & 0xFF;
4087 if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
4088 - (iocp == NULL)) {
4089 - printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - ioc%d not found!\n",
4090 - __FILE__, __LINE__, iocnumX);
4091 + (iocp == NULL))
4092 return -ENODEV;
4093 - }
4094
4095 if (!iocp->active) {
4096 printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
4097 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
4098 index 6cea718..f622734 100644
4099 --- a/drivers/message/fusion/mptscsih.c
4100 +++ b/drivers/message/fusion/mptscsih.c
4101 @@ -792,11 +792,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
4102 * precedence!
4103 */
4104 sc->result = (DID_OK << 16) | scsi_status;
4105 - if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
4106 - /* Have already saved the status and sense data
4107 + if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
4108 +
4109 + /*
4110 + * Errata workaround for the LSI53C1030:
4111 + * when the length of the requested data
4112 + * differs from the length actually
4113 + * transferred for a READ or VERIFY
4114 + * command, DID_SOFT_ERROR is set.
4115 */
4116 - ;
4117 - } else {
4118 + if (ioc->bus_type == SPI) {
4119 + if (pScsiReq->CDB[0] == READ_6 ||
4120 + pScsiReq->CDB[0] == READ_10 ||
4121 + pScsiReq->CDB[0] == READ_12 ||
4122 + pScsiReq->CDB[0] == READ_16 ||
4123 + pScsiReq->CDB[0] == VERIFY ||
4124 + pScsiReq->CDB[0] == VERIFY_16) {
4125 + if (scsi_bufflen(sc) !=
4126 + xfer_cnt) {
4127 + sc->result =
4128 + DID_SOFT_ERROR << 16;
4129 + printk(KERN_WARNING "Errata "
4130 + "on LSI53C1030 occurred. "
4131 + "sc->req_bufflen=0x%02x, "
4132 + "xfer_cnt=0x%02x\n",
4133 + scsi_bufflen(sc),
4134 + xfer_cnt);
4135 + }
4136 + }
4137 + }
4138 +
4139 if (xfer_cnt < sc->underflow) {
4140 if (scsi_status == SAM_STAT_BUSY)
4141 sc->result = SAM_STAT_BUSY;
4142 @@ -835,7 +860,58 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
4143 sc->result = (DID_OK << 16) | scsi_status;
4144 if (scsi_state == 0) {
4145 ;
4146 - } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
4147 + } else if (scsi_state &
4148 + MPI_SCSI_STATE_AUTOSENSE_VALID) {
4149 +
4150 + /*
4151 + * Workaround for potential trouble on the
4152 + * LSI53C1030 (date:2007.xx.): check whether
4153 + * the length of the requested data equals
4154 + * the length transferred plus the residual;
4155 + * on a mismatch the sense data is rewritten
4156 + * to report MEDIUM_ERROR.
4157 + */
4158 + if ((ioc->bus_type == SPI) &&
4159 + (sc->sense_buffer[2] & 0x20)) {
4160 + u32 difftransfer;
4161 + difftransfer =
4162 + sc->sense_buffer[3] << 24 |
4163 + sc->sense_buffer[4] << 16 |
4164 + sc->sense_buffer[5] << 8 |
4165 + sc->sense_buffer[6];
4166 + if (((sc->sense_buffer[3] & 0x80) ==
4167 + 0x80) && (scsi_bufflen(sc)
4168 + != xfer_cnt)) {
4169 + sc->sense_buffer[2] =
4170 + MEDIUM_ERROR;
4171 + sc->sense_buffer[12] = 0xff;
4172 + sc->sense_buffer[13] = 0xff;
4173 + printk(KERN_WARNING "Errata "
4174 + "on LSI53C1030 occurred. "
4175 + "sc->req_bufflen=0x%02x, "
4176 + "xfer_cnt=0x%02x\n",
4177 + scsi_bufflen(sc),
4178 + xfer_cnt);
4179 + }
4180 + if (((sc->sense_buffer[3] & 0x80)
4181 + != 0x80) &&
4182 + (scsi_bufflen(sc) !=
4183 + xfer_cnt + difftransfer)) {
4184 + sc->sense_buffer[2] =
4185 + MEDIUM_ERROR;
4186 + sc->sense_buffer[12] = 0xff;
4187 + sc->sense_buffer[13] = 0xff;
4188 + printk(KERN_WARNING
4189 + "Errata on LSI53C1030 occurred"
4190 + "sc->req_bufflen=0x%02x,"
4191 + " xfer_cnt=0x%02x,"
4192 + "difftransfer=0x%02x\n",
4193 + scsi_bufflen(sc),
4194 + xfer_cnt,
4195 + difftransfer);
4196 + }
4197 + }
4198 +
4199 /*
4200 * If running against circa 200003dd 909 MPT f/w,
4201 * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
4202 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
4203 index f590bea..433f4dd 100644
4204 --- a/drivers/net/e1000e/netdev.c
4205 +++ b/drivers/net/e1000e/netdev.c
4206 @@ -665,6 +665,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
4207 i = 0;
4208 }
4209
4210 + if (i == tx_ring->next_to_use)
4211 + break;
4212 eop = tx_ring->buffer_info[i].next_to_watch;
4213 eop_desc = E1000_TX_DESC(*tx_ring, eop);
4214 }
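
Without the added check, a bad next_to_watch value could let e1000_clean_tx_irq() chase end-of-packet pointers past the producer index and spin over descriptors the hardware still owns. A stripped-down ring walk showing the new stop condition; RING_SIZE and clean_ring are illustrative, not driver code:

#include <stdio.h>

#define RING_SIZE 8

static int clean_ring(int next_to_clean, int next_to_use)
{
	int i = next_to_clean, cleaned = 0;

	while (i != next_to_use) {	/* the new guard: never pass the producer */
		cleaned++;
		if (++i == RING_SIZE)
			i = 0;		/* wrap around */
	}
	return cleaned;
}

int main(void)
{
	printf("%d\n", clean_ring(6, 2));	/* 4: cleans 6,7,0,1 then stops */
	return 0;
}
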
4215 diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
4216 index f8f5772..c6d97eb 100644
4217 --- a/drivers/net/igb/e1000_82575.c
4218 +++ b/drivers/net/igb/e1000_82575.c
4219 @@ -81,6 +81,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
4220 break;
4221 case E1000_DEV_ID_82576:
4222 case E1000_DEV_ID_82576_NS:
4223 + case E1000_DEV_ID_82576_NS_SERDES:
4224 case E1000_DEV_ID_82576_FIBER:
4225 case E1000_DEV_ID_82576_SERDES:
4226 case E1000_DEV_ID_82576_QUAD_COPPER:
4227 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
4228 index 119869b..1a23aeb 100644
4229 --- a/drivers/net/igb/e1000_hw.h
4230 +++ b/drivers/net/igb/e1000_hw.h
4231 @@ -42,6 +42,7 @@ struct e1000_hw;
4232 #define E1000_DEV_ID_82576_SERDES 0x10E7
4233 #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
4234 #define E1000_DEV_ID_82576_NS 0x150A
4235 +#define E1000_DEV_ID_82576_NS_SERDES 0x1518
4236 #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
4237 #define E1000_DEV_ID_82575EB_COPPER 0x10A7
4238 #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
4239 diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
4240 index 714c3a4..8111776 100644
4241 --- a/drivers/net/igb/igb_main.c
4242 +++ b/drivers/net/igb/igb_main.c
4243 @@ -63,6 +63,7 @@ static const struct e1000_info *igb_info_tbl[] = {
4244 static struct pci_device_id igb_pci_tbl[] = {
4245 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
4246 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
4247 + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
4248 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
4249 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
4250 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
4251 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
4252 index 0fe2fc9..ab75323 100644
4253 --- a/drivers/net/r8169.c
4254 +++ b/drivers/net/r8169.c
4255 @@ -186,7 +186,12 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
4256
4257 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
4258
4259 -static int rx_copybreak = 200;
4260 +/*
4261 + * We set our copybreak very high so that we don't have
4262 + * to allocate 16k frames all the time (see the note in
4263 + * rtl8169_open()).
4264 + */
4265 +static int rx_copybreak = 16383;
4266 static int use_dac;
4267 static struct {
4268 u32 msg_enable;
4269 @@ -3245,9 +3250,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
4270 }
4271
4272 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
4273 - struct net_device *dev)
4274 + unsigned int mtu)
4275 {
4276 - unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
4277 + unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
4278 +
4279 + if (max_frame != 16383)
4280 + printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
4281 + "NIC may lead to frame reception errors!\n");
4282
4283 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
4284 }
4285 @@ -3259,7 +3268,17 @@ static int rtl8169_open(struct net_device *dev)
4286 int retval = -ENOMEM;
4287
4288
4289 - rtl8169_set_rxbufsize(tp, dev);
4290 + /*
4291 + * Note that we use a magic value here; it's weird, I know.
4292 + * It's done because some subset of rtl8169 hardware suffers
4293 + * from a problem in which received frames longer than
4294 + * the size set in the RxMaxSize register return garbage sizes
4295 + * when received. To avoid this we need to turn off filtering,
4296 + * which is done by setting 16383 in the RxMaxSize register
4297 + * and allocating 16k frames to handle the largest possible rx
4298 + * value; that's what the magic math below does.
4299 + */
4300 + rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
4301
4302 /*
4303 * Rx and Tx descriptors need 256-byte alignment.
4304 @@ -3912,7 +3931,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
4305
4306 rtl8169_down(dev);
4307
4308 - rtl8169_set_rxbufsize(tp, dev);
4309 + rtl8169_set_rxbufsize(tp, dev->mtu);
4310
4311 ret = rtl8169_init_ring(dev);
4312 if (ret < 0)
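
The "magic math" in rtl8169_open() is just the inverse of the max_frame computation in rtl8169_set_rxbufsize(): subtract the VLAN Ethernet header and FCS from the hardware's 16383-byte RxMaxSize ceiling, so that adding them back lands exactly on the limit. Checked standalone with the kernel's header sizes:

#include <assert.h>

#define VLAN_ETH_HLEN	18	/* Ethernet header + 802.1Q tag */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	unsigned int mtu = 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN;
	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	assert(max_frame == 16383);	/* matches the RxMaxSize setting */
	return 0;
}
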
4313 diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
4314 index f141a4f..6c70481 100644
4315 --- a/drivers/net/wireless/ath/ar9170/usb.c
4316 +++ b/drivers/net/wireless/ath/ar9170/usb.c
4317 @@ -414,7 +414,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
4318 spin_unlock_irqrestore(&aru->common.cmdlock, flags);
4319
4320 usb_fill_int_urb(urb, aru->udev,
4321 - usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
4322 + usb_sndintpipe(aru->udev, AR9170_EP_CMD),
4323 aru->common.cmdbuf, plen + 4,
4324 ar9170_usb_tx_urb_complete, NULL, 1);
4325
4326 diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
4327 index 54ea61c..9da5373 100644
4328 --- a/drivers/net/wireless/b43/Kconfig
4329 +++ b/drivers/net/wireless/b43/Kconfig
4330 @@ -78,11 +78,11 @@ config B43_SDIO
4331
4332 If unsure, say N.
4333
4334 -# Data transfers to the device via PIO
4335 -# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
4336 +# Data transfers to the device via PIO. We want it as a fallback even
4337 +# if we can do DMA.
4338 config B43_PIO
4339 bool
4340 - depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
4341 + depends on B43
4342 select SSB_BLOCKIO
4343 default y
4344
4345 diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
4346 index 84772a2..5e83b6f 100644
4347 --- a/drivers/net/wireless/b43/Makefile
4348 +++ b/drivers/net/wireless/b43/Makefile
4349 @@ -12,7 +12,7 @@ b43-y += xmit.o
4350 b43-y += lo.o
4351 b43-y += wa.o
4352 b43-y += dma.o
4353 -b43-$(CONFIG_B43_PIO) += pio.o
4354 +b43-y += pio.o
4355 b43-y += rfkill.o
4356 b43-$(CONFIG_B43_LEDS) += leds.o
4357 b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
4358 diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
4359 index 0e6b154..805d28a 100644
4360 --- a/drivers/net/wireless/b43/b43.h
4361 +++ b/drivers/net/wireless/b43/b43.h
4362 @@ -696,6 +696,7 @@ struct b43_wldev {
4363 bool radio_hw_enable; /* saved state of radio hardware enabled state */
4364 bool qos_enabled; /* TRUE, if QoS is used. */
4365 bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
4366 + bool use_pio; /* TRUE if next init should use PIO */
4367
4368 /* PHY/Radio device. */
4369 struct b43_phy phy;
4370 @@ -750,12 +751,6 @@ struct b43_wldev {
4371 #endif
4372 };
4373
4374 -/*
4375 - * Include goes here to avoid a dependency problem.
4376 - * A better fix would be to integrate xmit.h into b43.h.
4377 - */
4378 -#include "xmit.h"
4379 -
4380 /* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
4381 struct b43_wl {
4382 /* Pointer to the active wireless device on this chip */
4383 @@ -830,15 +825,9 @@ struct b43_wl {
4384 /* The device LEDs. */
4385 struct b43_leds leds;
4386
4387 -#ifdef CONFIG_B43_PIO
4388 - /*
4389 - * RX/TX header/tail buffers used by the frame transmit functions.
4390 - */
4391 - struct b43_rxhdr_fw4 rxhdr;
4392 - struct b43_txhdr txhdr;
4393 - u8 rx_tail[4];
4394 - u8 tx_tail[4];
4395 -#endif /* CONFIG_B43_PIO */
4396 + /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
4397 + u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
4398 + u8 pio_tailspace[4] __attribute__((__aligned__(8)));
4399 };
4400
4401 static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
4402 @@ -889,20 +878,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
4403
4404 static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
4405 {
4406 -#ifdef CONFIG_B43_PIO
4407 return dev->__using_pio_transfers;
4408 -#else
4409 - return 0;
4410 -#endif
4411 }
4412
4413 #ifdef CONFIG_B43_FORCE_PIO
4414 -# define B43_FORCE_PIO 1
4415 +# define B43_PIO_DEFAULT 1
4416 #else
4417 -# define B43_FORCE_PIO 0
4418 +# define B43_PIO_DEFAULT 0
4419 #endif
4420
4421 -
4422 /* Message printing */
4423 void b43info(struct b43_wl *wl, const char *fmt, ...)
4424 __attribute__ ((format(printf, 2, 3)));
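
The per-purpose rxhdr/txhdr/rx_tail/tx_tail buffers collapse into two generic aligned scratch areas that each PIO path casts to the type it needs, with compile-time size checks (BUILD_BUG_ON) at every use site in the hunks below. The pattern, minus the wl->mutex serialization; the struct txhdr size here is illustrative:

#include <assert.h>
#include <string.h>

struct txhdr { unsigned char bytes[104]; };	/* size is illustrative */

static unsigned char scratch[110] __attribute__((__aligned__(8)));

int main(void)
{
	struct txhdr *hdr;

	assert(sizeof(scratch) >= sizeof(struct txhdr)); /* BUILD_BUG_ON in b43 */
	hdr = (struct txhdr *)scratch;	/* reused buffer, serialized by the caller */
	memset(hdr, 0, sizeof(*hdr));
	return 0;
}
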
4425 diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
4426 index de4e804..571d475 100644
4427 --- a/drivers/net/wireless/b43/dma.c
4428 +++ b/drivers/net/wireless/b43/dma.c
4429 @@ -1620,7 +1620,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
4430 b43_power_saving_ctl_bits(dev, 0);
4431 }
4432
4433 -#ifdef CONFIG_B43_PIO
4434 static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
4435 u16 mmio_base, bool enable)
4436 {
4437 @@ -1654,4 +1653,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
4438 mmio_base = b43_dmacontroller_base(type, engine_index);
4439 direct_fifo_rx(dev, type, mmio_base, enable);
4440 }
4441 -#endif /* CONFIG_B43_PIO */
4442 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
4443 index a8a00d2..d605634 100644
4444 --- a/drivers/net/wireless/b43/main.c
4445 +++ b/drivers/net/wireless/b43/main.c
4446 @@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
4447 module_param_named(verbose, b43_modparam_verbose, int, 0644);
4448 MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
4449
4450 +int b43_modparam_pio = B43_PIO_DEFAULT;
4451 +module_param_named(pio, b43_modparam_pio, int, 0644);
4452 +MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
4453
4454 static const struct ssb_device_id b43_ssb_tbl[] = {
4455 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
4456 @@ -1788,6 +1791,10 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
4457 dma_reason[0], dma_reason[1],
4458 dma_reason[2], dma_reason[3],
4459 dma_reason[4], dma_reason[5]);
4460 + b43err(dev->wl, "This device does not support DMA "
4461 + "on your system. Please use PIO instead.\n");
4462 + /* Fall back to PIO transfers if we get fatal DMA errors! */
4463 + dev->use_pio = 1;
4464 b43_controller_restart(dev, "DMA error");
4465 return;
4466 }
4467 @@ -4355,7 +4362,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4468
4469 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
4470 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
4471 - B43_FORCE_PIO) {
4472 + dev->use_pio) {
4473 dev->__using_pio_transfers = 1;
4474 err = b43_pio_init(dev);
4475 } else {
4476 @@ -4823,6 +4830,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4477 if (!wldev)
4478 goto out;
4479
4480 + wldev->use_pio = b43_modparam_pio;
4481 wldev->dev = dev;
4482 wldev->wl = wl;
4483 b43_set_status(wldev, B43_STAT_UNINIT);
4484 diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
4485 index 9b90444..c5cd3bc 100644
4486 --- a/drivers/net/wireless/b43/pio.c
4487 +++ b/drivers/net/wireless/b43/pio.c
4488 @@ -342,12 +342,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
4489 q->mmio_base + B43_PIO_TXDATA,
4490 sizeof(u16));
4491 if (data_len & 1) {
4492 + u8 *tail = wl->pio_tailspace;
4493 + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
4494 +
4495 /* Write the last byte. */
4496 ctl &= ~B43_PIO_TXCTL_WRITEHI;
4497 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
4498 - wl->tx_tail[0] = data[data_len - 1];
4499 - wl->tx_tail[1] = 0;
4500 - ssb_block_write(dev->dev, wl->tx_tail, 2,
4501 + tail[0] = data[data_len - 1];
4502 + tail[1] = 0;
4503 + ssb_block_write(dev->dev, tail, 2,
4504 q->mmio_base + B43_PIO_TXDATA,
4505 sizeof(u16));
4506 }
4507 @@ -393,31 +396,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
4508 q->mmio_base + B43_PIO8_TXDATA,
4509 sizeof(u32));
4510 if (data_len & 3) {
4511 - wl->tx_tail[3] = 0;
4512 + u8 *tail = wl->pio_tailspace;
4513 + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
4514 +
4515 + memset(tail, 0, 4);
4516 /* Write the last few bytes. */
4517 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
4518 B43_PIO8_TXCTL_24_31);
4519 switch (data_len & 3) {
4520 case 3:
4521 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
4522 - wl->tx_tail[0] = data[data_len - 3];
4523 - wl->tx_tail[1] = data[data_len - 2];
4524 - wl->tx_tail[2] = data[data_len - 1];
4525 + tail[0] = data[data_len - 3];
4526 + tail[1] = data[data_len - 2];
4527 + tail[2] = data[data_len - 1];
4528 break;
4529 case 2:
4530 ctl |= B43_PIO8_TXCTL_8_15;
4531 - wl->tx_tail[0] = data[data_len - 2];
4532 - wl->tx_tail[1] = data[data_len - 1];
4533 - wl->tx_tail[2] = 0;
4534 + tail[0] = data[data_len - 2];
4535 + tail[1] = data[data_len - 1];
4536 break;
4537 case 1:
4538 - wl->tx_tail[0] = data[data_len - 1];
4539 - wl->tx_tail[1] = 0;
4540 - wl->tx_tail[2] = 0;
4541 + tail[0] = data[data_len - 1];
4542 break;
4543 }
4544 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
4545 - ssb_block_write(dev->dev, wl->tx_tail, 4,
4546 + ssb_block_write(dev->dev, tail, 4,
4547 q->mmio_base + B43_PIO8_TXDATA,
4548 sizeof(u32));
4549 }
4550 @@ -456,6 +459,7 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
4551 int err;
4552 unsigned int hdrlen;
4553 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4554 + struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
4555
4556 B43_WARN_ON(list_empty(&q->packets_list));
4557 pack = list_entry(q->packets_list.next,
4558 @@ -463,7 +467,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
4559
4560 cookie = generate_cookie(q, pack);
4561 hdrlen = b43_txhdr_size(dev);
4562 - err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
4563 + BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
4564 + B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
4565 + err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
4566 info, cookie);
4567 if (err)
4568 return err;
4569 @@ -477,9 +483,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
4570
4571 pack->skb = skb;
4572 if (q->rev >= 8)
4573 - pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
4574 + pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
4575 else
4576 - pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
4577 + pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
4578
4579 /* Remove it from the list of available packet slots.
4580 * It will be put back when we receive the status report. */
4581 @@ -625,8 +631,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
4582 unsigned int i, padding;
4583 struct sk_buff *skb;
4584 const char *err_msg = NULL;
4585 + struct b43_rxhdr_fw4 *rxhdr =
4586 + (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
4587
4588 - memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));
4589 + BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
4590 + memset(rxhdr, 0, sizeof(*rxhdr));
4591
4592 /* Check if we have data and wait for it to get ready. */
4593 if (q->rev >= 8) {
4594 @@ -664,16 +673,16 @@ data_ready:
4595
4596 /* Get the preamble (RX header) */
4597 if (q->rev >= 8) {
4598 - ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
4599 + ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
4600 q->mmio_base + B43_PIO8_RXDATA,
4601 sizeof(u32));
4602 } else {
4603 - ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
4604 + ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
4605 q->mmio_base + B43_PIO_RXDATA,
4606 sizeof(u16));
4607 }
4608 /* Sanity checks. */
4609 - len = le16_to_cpu(wl->rxhdr.frame_len);
4610 + len = le16_to_cpu(rxhdr->frame_len);
4611 if (unlikely(len > 0x700)) {
4612 err_msg = "len > 0x700";
4613 goto rx_error;
4614 @@ -683,7 +692,7 @@ data_ready:
4615 goto rx_error;
4616 }
4617
4618 - macstat = le32_to_cpu(wl->rxhdr.mac_status);
4619 + macstat = le32_to_cpu(rxhdr->mac_status);
4620 if (macstat & B43_RX_MAC_FCSERR) {
4621 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
4622 /* Drop frames with failed FCS. */
4623 @@ -708,22 +717,25 @@ data_ready:
4624 q->mmio_base + B43_PIO8_RXDATA,
4625 sizeof(u32));
4626 if (len & 3) {
4627 + u8 *tail = wl->pio_tailspace;
4628 + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
4629 +
4630 /* Read the last few bytes. */
4631 - ssb_block_read(dev->dev, wl->rx_tail, 4,
4632 + ssb_block_read(dev->dev, tail, 4,
4633 q->mmio_base + B43_PIO8_RXDATA,
4634 sizeof(u32));
4635 switch (len & 3) {
4636 case 3:
4637 - skb->data[len + padding - 3] = wl->rx_tail[0];
4638 - skb->data[len + padding - 2] = wl->rx_tail[1];
4639 - skb->data[len + padding - 1] = wl->rx_tail[2];
4640 + skb->data[len + padding - 3] = tail[0];
4641 + skb->data[len + padding - 2] = tail[1];
4642 + skb->data[len + padding - 1] = tail[2];
4643 break;
4644 case 2:
4645 - skb->data[len + padding - 2] = wl->rx_tail[0];
4646 - skb->data[len + padding - 1] = wl->rx_tail[1];
4647 + skb->data[len + padding - 2] = tail[0];
4648 + skb->data[len + padding - 1] = tail[1];
4649 break;
4650 case 1:
4651 - skb->data[len + padding - 1] = wl->rx_tail[0];
4652 + skb->data[len + padding - 1] = tail[0];
4653 break;
4654 }
4655 }
4656 @@ -732,15 +744,18 @@ data_ready:
4657 q->mmio_base + B43_PIO_RXDATA,
4658 sizeof(u16));
4659 if (len & 1) {
4660 + u8 *tail = wl->pio_tailspace;
4661 + BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
4662 +
4663 /* Read the last byte. */
4664 - ssb_block_read(dev->dev, wl->rx_tail, 2,
4665 + ssb_block_read(dev->dev, tail, 2,
4666 q->mmio_base + B43_PIO_RXDATA,
4667 sizeof(u16));
4668 - skb->data[len + padding - 1] = wl->rx_tail[0];
4669 + skb->data[len + padding - 1] = tail[0];
4670 }
4671 }
4672
4673 - b43_rx(q->dev, skb, &wl->rxhdr);
4674 + b43_rx(q->dev, skb, rxhdr);
4675
4676 return 1;
4677
4678 diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
4679 index 7dd649c..7b3c42f 100644
4680 --- a/drivers/net/wireless/b43/pio.h
4681 +++ b/drivers/net/wireless/b43/pio.h
4682 @@ -55,8 +55,6 @@
4683 #define B43_PIO_MAX_NR_TXPACKETS 32
4684
4685
4686 -#ifdef CONFIG_B43_PIO
4687 -
4688 struct b43_pio_txpacket {
4689 /* Pointer to the TX queue we belong to. */
4690 struct b43_pio_txqueue *queue;
4691 @@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
4692 void b43_pio_tx_suspend(struct b43_wldev *dev);
4693 void b43_pio_tx_resume(struct b43_wldev *dev);
4694
4695 -
4696 -#else /* CONFIG_B43_PIO */
4697 -
4698 -
4699 -static inline int b43_pio_init(struct b43_wldev *dev)
4700 -{
4701 - return 0;
4702 -}
4703 -static inline void b43_pio_free(struct b43_wldev *dev)
4704 -{
4705 -}
4706 -static inline void b43_pio_stop(struct b43_wldev *dev)
4707 -{
4708 -}
4709 -static inline int b43_pio_tx(struct b43_wldev *dev,
4710 - struct sk_buff *skb)
4711 -{
4712 - return 0;
4713 -}
4714 -static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
4715 - const struct b43_txstatus *status)
4716 -{
4717 -}
4718 -static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
4719 - struct ieee80211_tx_queue_stats *stats)
4720 -{
4721 -}
4722 -static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
4723 -{
4724 -}
4725 -static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
4726 -{
4727 -}
4728 -static inline void b43_pio_tx_resume(struct b43_wldev *dev)
4729 -{
4730 -}
4731 -
4732 -#endif /* CONFIG_B43_PIO */
4733 #endif /* B43_PIO_H_ */
4734 diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
4735 index f4e9695..51d6897 100644
4736 --- a/drivers/net/wireless/b43/xmit.c
4737 +++ b/drivers/net/wireless/b43/xmit.c
4738 @@ -27,7 +27,7 @@
4739
4740 */
4741
4742 -#include "b43.h"
4743 +#include "xmit.h"
4744 #include "phy_common.h"
4745 #include "dma.h"
4746 #include "pio.h"
4747 diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
4748 index 852753b..a5ed51a 100644
4749 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c
4750 +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
4751 @@ -715,6 +715,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
4752
4753 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
4754
4755 + /* reset to 0 to enable all the queues first */
4756 + priv->txq_ctx_active_msk = 0;
4757 /* Map each Tx/cmd queue to its corresponding fifo */
4758 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
4759 int ac = default_queue_to_tx_fifo[i];
4760 @@ -2134,7 +2136,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
4761 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
4762 "%d index %d\n", scd_ssn , index);
4763 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
4764 - iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
4765 + if (qc)
4766 + iwl_free_tfds_in_queue(priv, sta_id,
4767 + tid, freed);
4768
4769 if (priv->mac80211_registered &&
4770 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
4771 @@ -2162,13 +2166,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
4772
4773 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
4774 if (qc && likely(sta_id != IWL_INVALID_STATION))
4775 - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
4776 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
4777 + else if (sta_id == IWL_INVALID_STATION)
4778 + IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
4779
4780 if (priv->mac80211_registered &&
4781 (iwl_queue_space(&txq->q) > txq->q.low_mark))
4782 iwl_wake_queue(priv, txq_id);
4783 }
4784 -
4785 if (qc && likely(sta_id != IWL_INVALID_STATION))
4786 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
4787
4788 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
4789 index 2f89b62..2a8eb2f 100644
4790 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
4791 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
4792 @@ -793,6 +793,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
4793
4794 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
4795
4796 + /* reset to 0 to enable all the queues first */
4797 + priv->txq_ctx_active_msk = 0;
4798 /* map qos queues to fifos one-to-one */
4799 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
4800 int ac = iwl5000_default_queue_to_tx_fifo[i];
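Both alive_notify paths above now clear txq_ctx_active_msk before re-activating the queues, so bits left over from a previous firmware load cannot mark a dead queue as active. The shape of the pattern, as a sketch (queue_to_fifo stands in for the per-device table):

	priv->txq_ctx_active_msk = 0;		/* forget stale queue state */
	for (i = 0; i < ARRAY_SIZE(queue_to_fifo); i++)
		iwl_txq_ctx_activate(priv, i);	/* set the bit for queue i */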
4801 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
4802 index 4f3a108..71c0ad4 100644
4803 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
4804 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
4805 @@ -405,21 +405,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
4806
4807 static int iwl_scan_initiate(struct iwl_priv *priv)
4808 {
4809 - if (!iwl_is_ready_rf(priv)) {
4810 - IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
4811 - return -EIO;
4812 - }
4813 -
4814 - if (test_bit(STATUS_SCANNING, &priv->status)) {
4815 - IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
4816 - return -EAGAIN;
4817 - }
4818 -
4819 - if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4820 - IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
4821 - return -EAGAIN;
4822 - }
4823 -
4824 IWL_DEBUG_INFO(priv, "Starting scan...\n");
4825 set_bit(STATUS_SCANNING, &priv->status);
4826 priv->scan_start = jiffies;
4827 @@ -450,6 +435,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
4828 goto out_unlock;
4829 }
4830
4831 + if (test_bit(STATUS_SCANNING, &priv->status)) {
4832 + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
4833 + ret = -EAGAIN;
4834 + goto out_unlock;
4835 + }
4836 +
4837 + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
4838 + IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
4839 + ret = -EAGAIN;
4840 + goto out_unlock;
4841 + }
4842 +
4843 /* We don't schedule scan within next_scan_jiffies period.
4844 * Avoid scanning during possible EAPOL exchange, return
4845 * success immediately.
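The iwl-scan.c hunks move the STATUS_SCANNING and STATUS_SCAN_ABORTING tests out of iwl_scan_initiate() and into iwl_mac_hw_scan(), where priv->mutex is already held, so the test and the eventual set_bit() happen in one critical section. A sketch of the resulting test-and-claim shape (hedged; error paths abbreviated):

	mutex_lock(&priv->mutex);
	if (test_bit(STATUS_SCANNING, &priv->status)) {
		ret = -EAGAIN;		/* a scan is already running */
		goto out_unlock;
	}
	ret = iwl_scan_initiate(priv);	/* sets STATUS_SCANNING under the mutex */
out_unlock:
	mutex_unlock(&priv->mutex);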
4846 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
4847 index e143adc..cc96d13 100644
4848 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
4849 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
4850 @@ -1097,7 +1097,6 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
4851 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
4852
4853 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
4854 - nfreed++;
4855 }
4856 return nfreed;
4857 }
4858 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
4859 index 064d3cd..619590d 100644
4860 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
4861 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
4862 @@ -1904,7 +1904,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
4863 {
4864 int i;
4865
4866 - for (i = 0; i < IWL_RATE_COUNT; i++) {
4867 + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
4868 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
4869 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4870 rates[i].hw_value_short = i;
4871 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4872 index 4493060..bd667d2 100644
4873 --- a/drivers/pci/pci.c
4874 +++ b/drivers/pci/pci.c
4875 @@ -2541,6 +2541,23 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4876 return 0;
4877 }
4878
4879 +/* Some architectures require additional programming to enable VGA */
4880 +static arch_set_vga_state_t arch_set_vga_state;
4881 +
4882 +void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4883 +{
4884 + arch_set_vga_state = func; /* NULL disables */
4885 +}
4886 +
4887 +static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4888 + unsigned int command_bits, bool change_bridge)
4889 +{
4890 + if (arch_set_vga_state)
4891 + return arch_set_vga_state(dev, decode, command_bits,
4892 + change_bridge);
4893 + return 0;
4894 +}
4895 +
4896 /**
4897 * pci_set_vga_state - set VGA decode state on device and parents if requested
4898 * @dev: the PCI device
4899 @@ -2554,9 +2571,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
4900 struct pci_bus *bus;
4901 struct pci_dev *bridge;
4902 u16 cmd;
4903 + int rc;
4904
4905 WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
4906
4907 + /* ARCH specific VGA enables */
4908 + rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
4909 + if (rc)
4910 + return rc;
4911 +
4912 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4913 if (decode == true)
4914 cmd |= command_bits;
4915 @@ -2803,4 +2826,3 @@ EXPORT_SYMBOL(pci_target_state);
4916 EXPORT_SYMBOL(pci_prepare_to_sleep);
4917 EXPORT_SYMBOL(pci_back_from_sleep);
4918 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
4919 -
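pci_register_set_vga_state() installs a single architecture callback that pci_set_vga_state() consults before touching the command registers; passing NULL disables it again. A hypothetical sketch of an architecture wiring in its hook (the myarch_* names are illustrations):

static int myarch_set_vga_state(struct pci_dev *dev, bool decode,
				unsigned int command_bits, bool change_bridge)
{
	/* program chipset-specific VGA routing here; returning 0 lets
	 * the generic PCI code carry on with the usual command bits */
	return 0;
}

static int __init myarch_pci_init(void)
{
	pci_register_set_vga_state(myarch_set_vga_state);
	return 0;
}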
4920 diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
4921 index 0d91a8a..b8fb987 100644
4922 --- a/drivers/pci/pcie/aer/aer_inject.c
4923 +++ b/drivers/pci/pcie/aer/aer_inject.c
4924 @@ -302,7 +302,7 @@ static int aer_inject(struct aer_error_inj *einj)
4925 unsigned long flags;
4926 unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
4927 int pos_cap_err, rp_pos_cap_err;
4928 - u32 sever;
4929 + u32 sever, cor_mask, uncor_mask;
4930 int ret = 0;
4931
4932 dev = pci_get_bus_and_slot(einj->bus, devfn);
4933 @@ -320,6 +320,9 @@ static int aer_inject(struct aer_error_inj *einj)
4934 goto out_put;
4935 }
4936 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
4937 + pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
4938 + pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
4939 + &uncor_mask);
4940
4941 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
4942 if (!rp_pos_cap_err) {
4943 @@ -354,6 +357,21 @@ static int aer_inject(struct aer_error_inj *einj)
4944 err->header_log2 = einj->header_log2;
4945 err->header_log3 = einj->header_log3;
4946
4947 + if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
4948 + ret = -EINVAL;
4949 + printk(KERN_WARNING "The correctable error(s) are masked "
4950 + "by the device\n");
4951 + spin_unlock_irqrestore(&inject_lock, flags);
4952 + goto out_put;
4953 + }
4954 + if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
4955 + ret = -EINVAL;
4956 + printk(KERN_WARNING "The uncorrectable error(s) are masked "
4957 + "by the device\n");
4958 + spin_unlock_irqrestore(&inject_lock, flags);
4959 + goto out_put;
4960 + }
4961 +
4962 rperr = __find_aer_error_by_dev(rpdev);
4963 if (!rperr) {
4964 rperr = rperr_alloc;
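The mask test added above refuses an injection only when every requested error bit is also set in the device's AER mask register, since a fully masked error could never be reported. Worked through on two hypothetical values:

	/* cor_status = 0x01, cor_mask = 0x01: 0x01 & ~0x01 == 0    -> rejected
	 * cor_status = 0x03, cor_mask = 0x01: 0x03 & ~0x01 == 0x02 -> allowed,
	 * because at least one requested error bit is unmasked */
	if (einj->cor_status && !(einj->cor_status & ~cor_mask))
		return -EINVAL;	/* sketch; the real code also drops the lock */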
4965 diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
4966 index 55ca39d..6e2a4ca 100644
4967 --- a/drivers/platform/x86/Kconfig
4968 +++ b/drivers/platform/x86/Kconfig
4969 @@ -291,9 +291,15 @@ config THINKPAD_ACPI_VIDEO
4970 server running, phase of the moon, and the current mood of
4971 Schroedinger's cat. If you can use X.org's RandR to control
4972 your ThinkPad's video output ports instead of this feature,
4973 - don't think twice: do it and say N here to save some memory.
4974 + don't think twice: do it and say N here to save memory and avoid
4975 + bad interactions with X.org.
4976
4977 - If you are not sure, say Y here.
4978 + NOTE: access to this feature is limited to processes with the
4979 + CAP_SYS_ADMIN capability, to avoid local DoS issues on platforms
4980 + where it interacts badly with X.org.
4981 +
4982 + If you are not sure, say Y here but do try to check if you could
4983 + be using X.org RandR instead.
4984
4985 config THINKPAD_ACPI_HOTKEY_POLL
4986 bool "Support NVRAM polling for hot keys"
4987 diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
4988 index 4226e53..329093e 100644
4989 --- a/drivers/platform/x86/eeepc-laptop.c
4990 +++ b/drivers/platform/x86/eeepc-laptop.c
4991 @@ -34,6 +34,7 @@
4992 #include <linux/rfkill.h>
4993 #include <linux/pci.h>
4994 #include <linux/pci_hotplug.h>
4995 +#include <linux/dmi.h>
4996
4997 #define EEEPC_LAPTOP_VERSION "0.1"
4998
4999 @@ -135,6 +136,8 @@ struct eeepc_hotk {
5000 acpi_handle handle; /* the handle of the hotk device */
5001 u32 cm_supported; /* the control methods supported
5002 by this BIOS */
5003 + bool cpufv_disabled;
5004 + bool hotplug_disabled;
5005 uint init_flag; /* Init flags */
5006 u16 event_count[128]; /* count for each event */
5007 struct input_dev *inputdev;
5008 @@ -251,6 +254,14 @@ MODULE_AUTHOR("Corentin Chary, Eric Cooper");
5009 MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
5010 MODULE_LICENSE("GPL");
5011
5012 +static bool hotplug_disabled;
5013 +
5014 +module_param(hotplug_disabled, bool, 0644);
5015 +MODULE_PARM_DESC(hotplug_disabled,
5016 + "Disable hotplug for the wireless device. "
5017 + "If your laptop needs this, please report to "
5018 + "acpi4asus-user@lists.sourceforge.net.");
5019 +
5020 /*
5021 * ACPI Helpers
5022 */
5023 @@ -467,6 +478,8 @@ static ssize_t store_cpufv(struct device *dev,
5024 struct eeepc_cpufv c;
5025 int rv, value;
5026
5027 + if (ehotk->cpufv_disabled)
5028 + return -EPERM;
5029 if (get_cpufv(&c))
5030 return -ENODEV;
5031 rv = parse_arg(buf, count, &value);
5032 @@ -478,6 +491,38 @@ static ssize_t store_cpufv(struct device *dev,
5033 return rv;
5034 }
5035
5036 +static ssize_t show_cpufv_disabled(struct device *dev,
5037 + struct device_attribute *attr,
5038 + char *buf)
5039 +{
5040 + return sprintf(buf, "%d\n", ehotk->cpufv_disabled);
5041 +}
5042 +
5043 +static ssize_t store_cpufv_disabled(struct device *dev,
5044 + struct device_attribute *attr,
5045 + const char *buf, size_t count)
5046 +{
5047 + int rv, value;
5048 +
5049 + rv = parse_arg(buf, count, &value);
5050 + if (rv < 0)
5051 + return rv;
5052 +
5053 + switch (value) {
5054 + case 0:
5055 + if (ehotk->cpufv_disabled)
5056 + pr_warning("cpufv enabled (not officially supported "
5057 + "on this model)\n");
5058 + ehotk->cpufv_disabled = false;
5059 + return rv;
5060 + case 1:
5061 + return -EPERM;
5062 + default:
5063 + return -EINVAL;
5064 + }
5065 +}
5066 +
5067 +
5068 static struct device_attribute dev_attr_cpufv = {
5069 .attr = {
5070 .name = "cpufv",
5071 @@ -493,12 +538,22 @@ static struct device_attribute dev_attr_available_cpufv = {
5072 .show = show_available_cpufv
5073 };
5074
5075 +static struct device_attribute dev_attr_cpufv_disabled = {
5076 + .attr = {
5077 + .name = "cpufv_disabled",
5078 + .mode = 0644 },
5079 + .show = show_cpufv_disabled,
5080 + .store = store_cpufv_disabled
5081 +};
5082 +
5083 +
5084 static struct attribute *platform_attributes[] = {
5085 &dev_attr_camera.attr,
5086 &dev_attr_cardr.attr,
5087 &dev_attr_disp.attr,
5088 &dev_attr_cpufv.attr,
5089 &dev_attr_available_cpufv.attr,
5090 + &dev_attr_cpufv_disabled.attr,
5091 NULL
5092 };
5093
5094 @@ -564,6 +619,54 @@ static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
5095 return -EINVAL;
5096 }
5097
5098 +static void eeepc_dmi_check(void)
5099 +{
5100 + const char *model;
5101 +
5102 + model = dmi_get_system_info(DMI_PRODUCT_NAME);
5103 + if (!model)
5104 + return;
5105 +
5106 + /*
5107 + * Blacklist for setting cpufv (cpu speed).
5108 + *
5109 + * EeePC 4G ("701") implements CFVS, but it is not supported
5110 + * by the pre-installed OS, and the original option to change it
5111 + * in the BIOS setup screen was removed in later versions.
5112 + *
5113 + * Judging by the lack of "Super Hybrid Engine" on Asus product pages,
5114 + * this applies to all "701" models (4G/4G Surf/2G Surf).
5115 + *
5116 + * So Asus made a deliberate decision not to support it on this model.
5117 + * We have several reports that using it can cause the system to hang.
5118 + *
5119 + * The hang has also been reported on a "702" (Model name "8G"?).
5120 + *
5121 + * We avoid dmi_check_system() / dmi_match(), because they use
5122 + * substring matching. We don't want to affect the "701SD"
5123 + * and "701SDX" models, because they do support S.H.E.
5124 + */
5125 + if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
5126 + ehotk->cpufv_disabled = true;
5127 + pr_info("model %s does not officially support setting cpu "
5128 + "speed\n", model);
5129 + pr_info("cpufv disabled to avoid instability\n");
5130 + }
5131 +
5132 + /*
5133 + * Blacklist for wlan hotplug
5134 + *
5135 + * The EeePC 1005HA doesn't work like other models and doesn't need the
5136 + * hotplug code. In fact, the current hotplug code seems to unplug another
5137 + * device...
5138 + */
5139 + if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
5140 + strcmp(model, "1005PE") == 0) {
5141 + ehotk->hotplug_disabled = true;
5142 + pr_info("wlan hotplug disabled\n");
5143 + }
5144 +}
5145 +
5146 static void cmsg_quirk(int cm, const char *name)
5147 {
5148 int dummy;
5149 @@ -1095,6 +1198,9 @@ static int eeepc_rfkill_init(struct device *dev)
5150 if (result && result != -ENODEV)
5151 goto exit;
5152
5153 + if (ehotk->hotplug_disabled)
5154 + return 0;
5155 +
5156 result = eeepc_setup_pci_hotplug();
5157 /*
5158 * If we get -EBUSY then something else is handling the PCI hotplug -
5159 @@ -1208,6 +1314,10 @@ static int __devinit eeepc_hotk_add(struct acpi_device *device)
5160 device->driver_data = ehotk;
5161 ehotk->device = device;
5162
5163 + ehotk->hotplug_disabled = hotplug_disabled;
5164 +
5165 + eeepc_dmi_check();
5166 +
5167 result = eeepc_hotk_check();
5168 if (result)
5169 goto fail_platform_driver;
5170 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
5171 index fa0a0d3..7e51d5b 100644
5172 --- a/drivers/platform/x86/thinkpad_acpi.c
5173 +++ b/drivers/platform/x86/thinkpad_acpi.c
5174 @@ -22,7 +22,7 @@
5175 */
5176
5177 #define TPACPI_VERSION "0.23"
5178 -#define TPACPI_SYSFS_VERSION 0x020500
5179 +#define TPACPI_SYSFS_VERSION 0x020600
5180
5181 /*
5182 * Changelog:
5183 @@ -61,6 +61,7 @@
5184
5185 #include <linux/nvram.h>
5186 #include <linux/proc_fs.h>
5187 +#include <linux/seq_file.h>
5188 #include <linux/sysfs.h>
5189 #include <linux/backlight.h>
5190 #include <linux/fb.h>
5191 @@ -256,7 +257,7 @@ struct tp_acpi_drv_struct {
5192 struct ibm_struct {
5193 char *name;
5194
5195 - int (*read) (char *);
5196 + int (*read) (struct seq_file *);
5197 int (*write) (char *);
5198 void (*exit) (void);
5199 void (*resume) (void);
5200 @@ -280,6 +281,7 @@ struct ibm_init_struct {
5201 char param[32];
5202
5203 int (*init) (struct ibm_init_struct *);
5204 + mode_t base_procfs_mode;
5205 struct ibm_struct *data;
5206 };
5207
5208 @@ -776,36 +778,25 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
5209 ****************************************************************************
5210 ****************************************************************************/
5211
5212 -static int dispatch_procfs_read(char *page, char **start, off_t off,
5213 - int count, int *eof, void *data)
5214 +static int dispatch_proc_show(struct seq_file *m, void *v)
5215 {
5216 - struct ibm_struct *ibm = data;
5217 - int len;
5218 + struct ibm_struct *ibm = m->private;
5219
5220 if (!ibm || !ibm->read)
5221 return -EINVAL;
5222 + return ibm->read(m);
5223 +}
5224
5225 - len = ibm->read(page);
5226 - if (len < 0)
5227 - return len;
5228 -
5229 - if (len <= off + count)
5230 - *eof = 1;
5231 - *start = page + off;
5232 - len -= off;
5233 - if (len > count)
5234 - len = count;
5235 - if (len < 0)
5236 - len = 0;
5237 -
5238 - return len;
5239 +static int dispatch_proc_open(struct inode *inode, struct file *file)
5240 +{
5241 + return single_open(file, dispatch_proc_show, PDE(inode)->data);
5242 }
5243
5244 -static int dispatch_procfs_write(struct file *file,
5245 +static ssize_t dispatch_proc_write(struct file *file,
5246 const char __user *userbuf,
5247 - unsigned long count, void *data)
5248 + size_t count, loff_t *pos)
5249 {
5250 - struct ibm_struct *ibm = data;
5251 + struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
5252 char *kernbuf;
5253 int ret;
5254
5255 @@ -834,6 +825,15 @@ static int dispatch_procfs_write(struct file *file,
5256 return ret;
5257 }
5258
5259 +static const struct file_operations dispatch_proc_fops = {
5260 + .owner = THIS_MODULE,
5261 + .open = dispatch_proc_open,
5262 + .read = seq_read,
5263 + .llseek = seq_lseek,
5264 + .release = single_release,
5265 + .write = dispatch_proc_write,
5266 +};
5267 +
5268 static char *next_cmd(char **cmds)
5269 {
5270 char *start = *cmds;
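The hunks above convert thinkpad-acpi from the legacy read_proc/write_proc callbacks to the seq_file interface. For reference, a minimal standalone version of the single_open() pattern as it looks on a 2.6.32-era kernel (the demo_* names are hypothetical):

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "status:\t\tok\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data carries the pointer given to proc_create_data() */
	return single_open(file, demo_proc_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registered with: proc_create_data("demo", 0444, parent, &demo_proc_fops, data); */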
5271 @@ -1264,6 +1264,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
5272 struct tpacpi_rfk *atp_rfk;
5273 int res;
5274 bool sw_state = false;
5275 + bool hw_state;
5276 int sw_status;
5277
5278 BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
5279 @@ -1298,7 +1299,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
5280 rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
5281 }
5282 }
5283 - rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
5284 + hw_state = tpacpi_rfk_check_hwblock_state();
5285 + rfkill_set_hw_state(atp_rfk->rfkill, hw_state);
5286
5287 res = rfkill_register(atp_rfk->rfkill);
5288 if (res < 0) {
5289 @@ -1311,6 +1313,9 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
5290 }
5291
5292 tpacpi_rfkill_switches[id] = atp_rfk;
5293 +
5294 + printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
5295 + name, (sw_state || hw_state) ? "" : "un");
5296 return 0;
5297 }
5298
5299 @@ -1383,12 +1388,11 @@ static ssize_t tpacpi_rfk_sysfs_enable_store(const enum tpacpi_rfk_id id,
5300 }
5301
5302 /* procfs -------------------------------------------------------------- */
5303 -static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
5304 +static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id,
5305 + struct seq_file *m)
5306 {
5307 - int len = 0;
5308 -
5309 if (id >= TPACPI_RFK_SW_MAX)
5310 - len += sprintf(p + len, "status:\t\tnot supported\n");
5311 + seq_printf(m, "status:\t\tnot supported\n");
5312 else {
5313 int status;
5314
5315 @@ -1402,13 +1406,13 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
5316 return status;
5317 }
5318
5319 - len += sprintf(p + len, "status:\t\t%s\n",
5320 + seq_printf(m, "status:\t\t%s\n",
5321 (status == TPACPI_RFK_RADIO_ON) ?
5322 "enabled" : "disabled");
5323 - len += sprintf(p + len, "commands:\tenable, disable\n");
5324 + seq_printf(m, "commands:\tenable, disable\n");
5325 }
5326
5327 - return len;
5328 + return 0;
5329 }
5330
5331 static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
5332 @@ -1779,7 +1783,7 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
5333
5334 TPV_QL1('7', '9', 'E', '3', '5', '0'), /* T60/p */
5335 TPV_QL1('7', 'C', 'D', '2', '2', '2'), /* R60, R60i */
5336 - TPV_QL0('7', 'E', 'D', '0'), /* R60e, R60i */
5337 + TPV_QL1('7', 'E', 'D', '0', '1', '5'), /* R60e, R60i */
5338
5339 /* BIOS FW BIOS VERS EC FW EC VERS */
5340 TPV_QI2('1', 'W', '9', '0', '1', 'V', '2', '8'), /* R50e (1) */
5341 @@ -1795,8 +1799,8 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
5342 TPV_QI1('7', '4', '6', '4', '2', '7'), /* X41 (0) */
5343 TPV_QI1('7', '5', '6', '0', '2', '0'), /* X41t (0) */
5344
5345 - TPV_QL0('7', 'B', 'D', '7'), /* X60/s */
5346 - TPV_QL0('7', 'J', '3', '0'), /* X60t */
5347 + TPV_QL1('7', 'B', 'D', '7', '4', '0'), /* X60/s */
5348 + TPV_QL1('7', 'J', '3', '0', '1', '3'), /* X60t */
5349
5350 /* (0) - older versions lack DMI EC fw string and functionality */
5351 /* (1) - older versions known to lack functionality */
5352 @@ -1886,14 +1890,11 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
5353 return 0;
5354 }
5355
5356 -static int thinkpad_acpi_driver_read(char *p)
5357 +static int thinkpad_acpi_driver_read(struct seq_file *m)
5358 {
5359 - int len = 0;
5360 -
5361 - len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
5362 - len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
5363 -
5364 - return len;
5365 + seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
5366 + seq_printf(m, "version:\t%s\n", TPACPI_VERSION);
5367 + return 0;
5368 }
5369
5370 static struct ibm_struct thinkpad_acpi_driver_data = {
5371 @@ -2190,7 +2191,8 @@ static int hotkey_mask_set(u32 mask)
5372 fwmask, hotkey_acpi_mask);
5373 }
5374
5375 - hotkey_mask_warn_incomplete_mask();
5376 + if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
5377 + hotkey_mask_warn_incomplete_mask();
5378
5379 return rc;
5380 }
5381 @@ -3187,6 +3189,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5382 int res, i;
5383 int status;
5384 int hkeyv;
5385 + bool radiosw_state = false;
5386 + bool tabletsw_state = false;
5387
5388 unsigned long quirks;
5389
5390 @@ -3292,6 +3296,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5391 #ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
5392 if (dbg_wlswemul) {
5393 tp_features.hotkey_wlsw = 1;
5394 + radiosw_state = !!tpacpi_wlsw_emulstate;
5395 printk(TPACPI_INFO
5396 "radio switch emulation enabled\n");
5397 } else
5398 @@ -3299,6 +3304,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5399 /* Not all thinkpads have a hardware radio switch */
5400 if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
5401 tp_features.hotkey_wlsw = 1;
5402 + radiosw_state = !!status;
5403 printk(TPACPI_INFO
5404 "radio switch found; radios are %s\n",
5405 enabled(status, 0));
5406 @@ -3310,11 +3316,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5407 /* For X41t, X60t, X61t Tablets... */
5408 if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
5409 tp_features.hotkey_tablet = 1;
5410 + tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
5411 printk(TPACPI_INFO
5412 "possible tablet mode switch found; "
5413 "ThinkPad in %s mode\n",
5414 - (status & TP_HOTKEY_TABLET_MASK)?
5415 - "tablet" : "laptop");
5416 + (tabletsw_state) ? "tablet" : "laptop");
5417 res = add_to_attr_set(hotkey_dev_attributes,
5418 &dev_attr_hotkey_tablet_mode.attr);
5419 }
5420 @@ -3349,16 +3355,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5421 TPACPI_HOTKEY_MAP_SIZE);
5422 }
5423
5424 - set_bit(EV_KEY, tpacpi_inputdev->evbit);
5425 - set_bit(EV_MSC, tpacpi_inputdev->evbit);
5426 - set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
5427 + input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
5428 tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
5429 tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
5430 tpacpi_inputdev->keycode = hotkey_keycode_map;
5431 for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
5432 if (hotkey_keycode_map[i] != KEY_RESERVED) {
5433 - set_bit(hotkey_keycode_map[i],
5434 - tpacpi_inputdev->keybit);
5435 + input_set_capability(tpacpi_inputdev, EV_KEY,
5436 + hotkey_keycode_map[i]);
5437 } else {
5438 if (i < sizeof(hotkey_reserved_mask)*8)
5439 hotkey_reserved_mask |= 1 << i;
5440 @@ -3366,12 +3370,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5441 }
5442
5443 if (tp_features.hotkey_wlsw) {
5444 - set_bit(EV_SW, tpacpi_inputdev->evbit);
5445 - set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
5446 + input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
5447 + input_report_switch(tpacpi_inputdev,
5448 + SW_RFKILL_ALL, radiosw_state);
5449 }
5450 if (tp_features.hotkey_tablet) {
5451 - set_bit(EV_SW, tpacpi_inputdev->evbit);
5452 - set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
5453 + input_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);
5454 + input_report_switch(tpacpi_inputdev,
5455 + SW_TABLET_MODE, tabletsw_state);
5456 }
5457
5458 /* Do not issue duplicate brightness change events to
5459 @@ -3438,8 +3444,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
5460 tpacpi_inputdev->close = &hotkey_inputdev_close;
5461
5462 hotkey_poll_setup_safe(true);
5463 - tpacpi_send_radiosw_update();
5464 - tpacpi_input_send_tabletsw();
5465
5466 return 0;
5467
5468 @@ -3547,49 +3551,57 @@ static bool hotkey_notify_usrevent(const u32 hkey,
5469 }
5470 }
5471
5472 +static void thermal_dump_all_sensors(void);
5473 +
5474 static bool hotkey_notify_thermal(const u32 hkey,
5475 bool *send_acpi_ev,
5476 bool *ignore_acpi_ev)
5477 {
5478 + bool known = true;
5479 +
5480 /* 0x6000-0x6FFF: thermal alarms */
5481 *send_acpi_ev = true;
5482 *ignore_acpi_ev = false;
5483
5484 switch (hkey) {
5485 + case TP_HKEY_EV_THM_TABLE_CHANGED:
5486 + printk(TPACPI_INFO
5487 + "EC reports that Thermal Table has changed\n");
5488 + /* recommended action: do nothing, we don't have
5489 + * Lenovo ATM information */
5490 + return true;
5491 case TP_HKEY_EV_ALARM_BAT_HOT:
5492 printk(TPACPI_CRIT
5493 "THERMAL ALARM: battery is too hot!\n");
5494 /* recommended action: warn user through gui */
5495 - return true;
5496 + break;
5497 case TP_HKEY_EV_ALARM_BAT_XHOT:
5498 printk(TPACPI_ALERT
5499 "THERMAL EMERGENCY: battery is extremely hot!\n");
5500 /* recommended action: immediate sleep/hibernate */
5501 - return true;
5502 + break;
5503 case TP_HKEY_EV_ALARM_SENSOR_HOT:
5504 printk(TPACPI_CRIT
5505 "THERMAL ALARM: "
5506 "a sensor reports something is too hot!\n");
5507 /* recommended action: warn user through gui, that */
5508 /* some internal component is too hot */
5509 - return true;
5510 + break;
5511 case TP_HKEY_EV_ALARM_SENSOR_XHOT:
5512 printk(TPACPI_ALERT
5513 "THERMAL EMERGENCY: "
5514 "a sensor reports something is extremely hot!\n");
5515 /* recommended action: immediate sleep/hibernate */
5516 - return true;
5517 - case TP_HKEY_EV_THM_TABLE_CHANGED:
5518 - printk(TPACPI_INFO
5519 - "EC reports that Thermal Table has changed\n");
5520 - /* recommended action: do nothing, we don't have
5521 - * Lenovo ATM information */
5522 - return true;
5523 + break;
5524 default:
5525 printk(TPACPI_ALERT
5526 "THERMAL ALERT: unknown thermal alarm received\n");
5527 - return false;
5528 + known = false;
5529 }
5530 +
5531 + thermal_dump_all_sensors();
5532 +
5533 + return known;
5534 }
5535
5536 static void hotkey_notify(struct ibm_struct *ibm, u32 event)
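The hotkey_notify_thermal() rewrite above turns the per-case "return true" statements into breaks, so every recognized alarm falls out of the switch and reaches one common thermal_dump_all_sensors() call (only the thermal-table-changed event returns early and skips the dump). Schematically:

	bool known = true;

	switch (hkey) {
	case SOME_ALARM:
		/* log the alarm */
		break;			/* recognized: fall through to the dump */
	default:
		known = false;		/* unknown alarms still get a sensor dump */
	}
	thermal_dump_all_sensors();
	return known;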
5537 @@ -3738,14 +3750,13 @@ static void hotkey_resume(void)
5538 }
5539
5540 /* procfs -------------------------------------------------------------- */
5541 -static int hotkey_read(char *p)
5542 +static int hotkey_read(struct seq_file *m)
5543 {
5544 int res, status;
5545 - int len = 0;
5546
5547 if (!tp_features.hotkey) {
5548 - len += sprintf(p + len, "status:\t\tnot supported\n");
5549 - return len;
5550 + seq_printf(m, "status:\t\tnot supported\n");
5551 + return 0;
5552 }
5553
5554 if (mutex_lock_killable(&hotkey_mutex))
5555 @@ -3757,17 +3768,16 @@ static int hotkey_read(char *p)
5556 if (res)
5557 return res;
5558
5559 - len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
5560 + seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
5561 if (hotkey_all_mask) {
5562 - len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
5563 - len += sprintf(p + len,
5564 - "commands:\tenable, disable, reset, <mask>\n");
5565 + seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
5566 + seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
5567 } else {
5568 - len += sprintf(p + len, "mask:\t\tnot supported\n");
5569 - len += sprintf(p + len, "commands:\tenable, disable, reset\n");
5570 + seq_printf(m, "mask:\t\tnot supported\n");
5571 + seq_printf(m, "commands:\tenable, disable, reset\n");
5572 }
5573
5574 - return len;
5575 + return 0;
5576 }
5577
5578 static void hotkey_enabledisable_warn(bool enable)
5579 @@ -4034,9 +4044,9 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
5580 }
5581
5582 /* procfs -------------------------------------------------------------- */
5583 -static int bluetooth_read(char *p)
5584 +static int bluetooth_read(struct seq_file *m)
5585 {
5586 - return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, p);
5587 + return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, m);
5588 }
5589
5590 static int bluetooth_write(char *buf)
5591 @@ -4225,9 +4235,9 @@ static int __init wan_init(struct ibm_init_struct *iibm)
5592 }
5593
5594 /* procfs -------------------------------------------------------------- */
5595 -static int wan_read(char *p)
5596 +static int wan_read(struct seq_file *m)
5597 {
5598 - return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, p);
5599 + return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, m);
5600 }
5601
5602 static int wan_write(char *buf)
5603 @@ -4602,16 +4612,19 @@ static int video_expand_toggle(void)
5604 /* not reached */
5605 }
5606
5607 -static int video_read(char *p)
5608 +static int video_read(struct seq_file *m)
5609 {
5610 int status, autosw;
5611 - int len = 0;
5612
5613 if (video_supported == TPACPI_VIDEO_NONE) {
5614 - len += sprintf(p + len, "status:\t\tnot supported\n");
5615 - return len;
5616 + seq_printf(m, "status:\t\tnot supported\n");
5617 + return 0;
5618 }
5619
5620 + /* Even reads can crash X.org, so... */
5621 + if (!capable(CAP_SYS_ADMIN))
5622 + return -EPERM;
5623 +
5624 status = video_outputsw_get();
5625 if (status < 0)
5626 return status;
5627 @@ -4620,20 +4633,20 @@ static int video_read(char *p)
5628 if (autosw < 0)
5629 return autosw;
5630
5631 - len += sprintf(p + len, "status:\t\tsupported\n");
5632 - len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
5633 - len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
5634 + seq_printf(m, "status:\t\tsupported\n");
5635 + seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
5636 + seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
5637 if (video_supported == TPACPI_VIDEO_NEW)
5638 - len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
5639 - len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
5640 - len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
5641 - len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
5642 + seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
5643 + seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
5644 + seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
5645 + seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
5646 if (video_supported == TPACPI_VIDEO_NEW)
5647 - len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
5648 - len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
5649 - len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
5650 + seq_printf(m, "commands:\tdvi_enable, dvi_disable\n");
5651 + seq_printf(m, "commands:\tauto_enable, auto_disable\n");
5652 + seq_printf(m, "commands:\tvideo_switch, expand_toggle\n");
5653
5654 - return len;
5655 + return 0;
5656 }
5657
5658 static int video_write(char *buf)
5659 @@ -4645,6 +4658,10 @@ static int video_write(char *buf)
5660 if (video_supported == TPACPI_VIDEO_NONE)
5661 return -ENODEV;
5662
5663 + /* Even reads can crash X.org, let alone writes... */
5664 + if (!capable(CAP_SYS_ADMIN))
5665 + return -EPERM;
5666 +
5667 enable = 0;
5668 disable = 0;
5669
5670 @@ -4825,25 +4842,24 @@ static void light_exit(void)
5671 flush_workqueue(tpacpi_wq);
5672 }
5673
5674 -static int light_read(char *p)
5675 +static int light_read(struct seq_file *m)
5676 {
5677 - int len = 0;
5678 int status;
5679
5680 if (!tp_features.light) {
5681 - len += sprintf(p + len, "status:\t\tnot supported\n");
5682 + seq_printf(m, "status:\t\tnot supported\n");
5683 } else if (!tp_features.light_status) {
5684 - len += sprintf(p + len, "status:\t\tunknown\n");
5685 - len += sprintf(p + len, "commands:\ton, off\n");
5686 + seq_printf(m, "status:\t\tunknown\n");
5687 + seq_printf(m, "commands:\ton, off\n");
5688 } else {
5689 status = light_get_status();
5690 if (status < 0)
5691 return status;
5692 - len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
5693 - len += sprintf(p + len, "commands:\ton, off\n");
5694 + seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
5695 + seq_printf(m, "commands:\ton, off\n");
5696 }
5697
5698 - return len;
5699 + return 0;
5700 }
5701
5702 static int light_write(char *buf)
5703 @@ -4921,20 +4937,18 @@ static void cmos_exit(void)
5704 device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
5705 }
5706
5707 -static int cmos_read(char *p)
5708 +static int cmos_read(struct seq_file *m)
5709 {
5710 - int len = 0;
5711 -
5712 /* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
5713 R30, R31, T20-22, X20-21 */
5714 if (!cmos_handle)
5715 - len += sprintf(p + len, "status:\t\tnot supported\n");
5716 + seq_printf(m, "status:\t\tnot supported\n");
5717 else {
5718 - len += sprintf(p + len, "status:\t\tsupported\n");
5719 - len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
5720 + seq_printf(m, "status:\t\tsupported\n");
5721 + seq_printf(m, "commands:\t<cmd> (<cmd> is 0-21)\n");
5722 }
5723
5724 - return len;
5725 + return 0;
5726 }
5727
5728 static int cmos_write(char *buf)
5729 @@ -5309,15 +5323,13 @@ static int __init led_init(struct ibm_init_struct *iibm)
5730 ((s) == TPACPI_LED_OFF ? "off" : \
5731 ((s) == TPACPI_LED_ON ? "on" : "blinking"))
5732
5733 -static int led_read(char *p)
5734 +static int led_read(struct seq_file *m)
5735 {
5736 - int len = 0;
5737 -
5738 if (!led_supported) {
5739 - len += sprintf(p + len, "status:\t\tnot supported\n");
5740 - return len;
5741 + seq_printf(m, "status:\t\tnot supported\n");
5742 + return 0;
5743 }
5744 - len += sprintf(p + len, "status:\t\tsupported\n");
5745 + seq_printf(m, "status:\t\tsupported\n");
5746
5747 if (led_supported == TPACPI_LED_570) {
5748 /* 570 */
5749 @@ -5326,15 +5338,15 @@ static int led_read(char *p)
5750 status = led_get_status(i);
5751 if (status < 0)
5752 return -EIO;
5753 - len += sprintf(p + len, "%d:\t\t%s\n",
5754 + seq_printf(m, "%d:\t\t%s\n",
5755 i, str_led_status(status));
5756 }
5757 }
5758
5759 - len += sprintf(p + len, "commands:\t"
5760 + seq_printf(m, "commands:\t"
5761 "<led> on, <led> off, <led> blink (<led> is 0-15)\n");
5762
5763 - return len;
5764 + return 0;
5765 }
5766
5767 static int led_write(char *buf)
5768 @@ -5407,18 +5419,16 @@ static int __init beep_init(struct ibm_init_struct *iibm)
5769 return (beep_handle)? 0 : 1;
5770 }
5771
5772 -static int beep_read(char *p)
5773 +static int beep_read(struct seq_file *m)
5774 {
5775 - int len = 0;
5776 -
5777 if (!beep_handle)
5778 - len += sprintf(p + len, "status:\t\tnot supported\n");
5779 + seq_printf(m, "status:\t\tnot supported\n");
5780 else {
5781 - len += sprintf(p + len, "status:\t\tsupported\n");
5782 - len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
5783 + seq_printf(m, "status:\t\tsupported\n");
5784 + seq_printf(m, "commands:\t<cmd> (<cmd> is 0-17)\n");
5785 }
5786
5787 - return len;
5788 + return 0;
5789 }
5790
5791 static int beep_write(char *buf)
5792 @@ -5471,8 +5481,11 @@ enum { /* TPACPI_THERMAL_TPEC_* */
5793 TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
5794 TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
5795 TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
5796 +
5797 + TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
5798 };
5799
5800 +
5801 #define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
5802 struct ibm_thermal_sensors_struct {
5803 s32 temp[TPACPI_MAX_THERMAL_SENSORS];
5804 @@ -5562,6 +5575,28 @@ static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
5805 return n;
5806 }
5807
5808 +static void thermal_dump_all_sensors(void)
5809 +{
5810 + int n, i;
5811 + struct ibm_thermal_sensors_struct t;
5812 +
5813 + n = thermal_get_sensors(&t);
5814 + if (n <= 0)
5815 + return;
5816 +
5817 + printk(TPACPI_NOTICE
5818 + "temperatures (Celsius):");
5819 +
5820 + for (i = 0; i < n; i++) {
5821 + if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
5822 + printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
5823 + else
5824 + printk(KERN_CONT " N/A");
5825 + }
5826 +
5827 + printk(KERN_CONT "\n");
5828 +}
5829 +
5830 /* sysfs temp##_input -------------------------------------------------- */
5831
5832 static ssize_t thermal_temp_input_show(struct device *dev,
5833 @@ -5577,7 +5612,7 @@ static ssize_t thermal_temp_input_show(struct device *dev,
5834 res = thermal_get_sensor(idx, &value);
5835 if (res)
5836 return res;
5837 - if (value == TP_EC_THERMAL_TMP_NA * 1000)
5838 + if (value == TPACPI_THERMAL_SENSOR_NA)
5839 return -ENXIO;
5840
5841 return snprintf(buf, PAGE_SIZE, "%d\n", value);
5842 @@ -5754,9 +5789,8 @@ static void thermal_exit(void)
5843 }
5844 }
5845
5846 -static int thermal_read(char *p)
5847 +static int thermal_read(struct seq_file *m)
5848 {
5849 - int len = 0;
5850 int n, i;
5851 struct ibm_thermal_sensors_struct t;
5852
5853 @@ -5764,16 +5798,16 @@ static int thermal_read(char *p)
5854 if (unlikely(n < 0))
5855 return n;
5856
5857 - len += sprintf(p + len, "temperatures:\t");
5858 + seq_printf(m, "temperatures:\t");
5859
5860 if (n > 0) {
5861 for (i = 0; i < (n - 1); i++)
5862 - len += sprintf(p + len, "%d ", t.temp[i] / 1000);
5863 - len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
5864 + seq_printf(m, "%d ", t.temp[i] / 1000);
5865 + seq_printf(m, "%d\n", t.temp[i] / 1000);
5866 } else
5867 - len += sprintf(p + len, "not supported\n");
5868 + seq_printf(m, "not supported\n");
5869
5870 - return len;
5871 + return 0;
5872 }
5873
5874 static struct ibm_struct thermal_driver_data = {
5875 @@ -5788,39 +5822,38 @@ static struct ibm_struct thermal_driver_data = {
5876
5877 static u8 ecdump_regs[256];
5878
5879 -static int ecdump_read(char *p)
5880 +static int ecdump_read(struct seq_file *m)
5881 {
5882 - int len = 0;
5883 int i, j;
5884 u8 v;
5885
5886 - len += sprintf(p + len, "EC "
5887 + seq_printf(m, "EC "
5888 " +00 +01 +02 +03 +04 +05 +06 +07"
5889 " +08 +09 +0a +0b +0c +0d +0e +0f\n");
5890 for (i = 0; i < 256; i += 16) {
5891 - len += sprintf(p + len, "EC 0x%02x:", i);
5892 + seq_printf(m, "EC 0x%02x:", i);
5893 for (j = 0; j < 16; j++) {
5894 if (!acpi_ec_read(i + j, &v))
5895 break;
5896 if (v != ecdump_regs[i + j])
5897 - len += sprintf(p + len, " *%02x", v);
5898 + seq_printf(m, " *%02x", v);
5899 else
5900 - len += sprintf(p + len, " %02x", v);
5901 + seq_printf(m, " %02x", v);
5902 ecdump_regs[i + j] = v;
5903 }
5904 - len += sprintf(p + len, "\n");
5905 + seq_putc(m, '\n');
5906 if (j != 16)
5907 break;
5908 }
5909
5910 /* These are way too dangerous to advertise openly... */
5911 #if 0
5912 - len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
5913 + seq_printf(m, "commands:\t0x<offset> 0x<value>"
5914 " (<offset> is 00-ff, <value> is 00-ff)\n");
5915 - len += sprintf(p + len, "commands:\t0x<offset> <value> "
5916 + seq_printf(m, "commands:\t0x<offset> <value> "
5917 " (<offset> is 00-ff, <value> is 0-255)\n");
5918 #endif
5919 - return len;
5920 + return 0;
5921 }
5922
5923 static int ecdump_write(char *buf)
5924 @@ -6083,6 +6116,12 @@ static int brightness_get(struct backlight_device *bd)
5925 return status & TP_EC_BACKLIGHT_LVLMSK;
5926 }
5927
5928 +static void tpacpi_brightness_notify_change(void)
5929 +{
5930 + backlight_force_update(ibm_backlight_device,
5931 + BACKLIGHT_UPDATE_HOTKEY);
5932 +}
5933 +
5934 static struct backlight_ops ibm_backlight_data = {
5935 .get_brightness = brightness_get,
5936 .update_status = brightness_update_status,
5937 @@ -6237,6 +6276,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
5938 ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
5939 backlight_update_status(ibm_backlight_device);
5940
5941 + vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
5942 + "brightness: registering brightness hotkeys "
5943 + "as change notification\n");
5944 + tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
5945 + | TP_ACPI_HKEY_BRGHTUP_MASK
5946 + | TP_ACPI_HKEY_BRGHTDWN_MASK);
5947 return 0;
5948 }
5949
5950 @@ -6261,23 +6306,22 @@ static void brightness_exit(void)
5951 tpacpi_brightness_checkpoint_nvram();
5952 }
5953
5954 -static int brightness_read(char *p)
5955 +static int brightness_read(struct seq_file *m)
5956 {
5957 - int len = 0;
5958 int level;
5959
5960 level = brightness_get(NULL);
5961 if (level < 0) {
5962 - len += sprintf(p + len, "level:\t\tunreadable\n");
5963 + seq_printf(m, "level:\t\tunreadable\n");
5964 } else {
5965 - len += sprintf(p + len, "level:\t\t%d\n", level);
5966 - len += sprintf(p + len, "commands:\tup, down\n");
5967 - len += sprintf(p + len, "commands:\tlevel <level>"
5968 + seq_printf(m, "level:\t\t%d\n", level);
5969 + seq_printf(m, "commands:\tup, down\n");
5970 + seq_printf(m, "commands:\tlevel <level>"
5971 " (<level> is 0-%d)\n",
5972 (tp_features.bright_16levels) ? 15 : 7);
5973 }
5974
5975 - return len;
5976 + return 0;
5977 }
5978
5979 static int brightness_write(char *buf)
5980 @@ -6313,6 +6357,9 @@ static int brightness_write(char *buf)
5981 * Doing it this way makes the syscall restartable in case of EINTR
5982 */
5983 rc = brightness_set(level);
5984 + if (!rc && ibm_backlight_device)
5985 + backlight_force_update(ibm_backlight_device,
5986 + BACKLIGHT_UPDATE_SYSFS);
5987 return (rc == -EINTR)? -ERESTARTSYS : rc;
5988 }
5989
5990 @@ -6331,22 +6378,21 @@ static struct ibm_struct brightness_driver_data = {
5991
5992 static int volume_offset = 0x30;
5993
5994 -static int volume_read(char *p)
5995 +static int volume_read(struct seq_file *m)
5996 {
5997 - int len = 0;
5998 u8 level;
5999
6000 if (!acpi_ec_read(volume_offset, &level)) {
6001 - len += sprintf(p + len, "level:\t\tunreadable\n");
6002 + seq_printf(m, "level:\t\tunreadable\n");
6003 } else {
6004 - len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
6005 - len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
6006 - len += sprintf(p + len, "commands:\tup, down, mute\n");
6007 - len += sprintf(p + len, "commands:\tlevel <level>"
6008 + seq_printf(m, "level:\t\t%d\n", level & 0xf);
6009 + seq_printf(m, "mute:\t\t%s\n", onoff(level, 6));
6010 + seq_printf(m, "commands:\tup, down, mute\n");
6011 + seq_printf(m, "commands:\tlevel <level>"
6012 " (<level> is 0-15)\n");
6013 }
6014
6015 - return len;
6016 + return 0;
6017 }
6018
6019 static int volume_write(char *buf)
6020 @@ -7498,9 +7544,8 @@ static void fan_resume(void)
6021 }
6022 }
6023
6024 -static int fan_read(char *p)
6025 +static int fan_read(struct seq_file *m)
6026 {
6027 - int len = 0;
6028 int rc;
6029 u8 status;
6030 unsigned int speed = 0;
6031 @@ -7512,7 +7557,7 @@ static int fan_read(char *p)
6032 if (rc < 0)
6033 return rc;
6034
6035 - len += sprintf(p + len, "status:\t\t%s\n"
6036 + seq_printf(m, "status:\t\t%s\n"
6037 "level:\t\t%d\n",
6038 (status != 0) ? "enabled" : "disabled", status);
6039 break;
6040 @@ -7523,54 +7568,54 @@ static int fan_read(char *p)
6041 if (rc < 0)
6042 return rc;
6043
6044 - len += sprintf(p + len, "status:\t\t%s\n",
6045 + seq_printf(m, "status:\t\t%s\n",
6046 (status != 0) ? "enabled" : "disabled");
6047
6048 rc = fan_get_speed(&speed);
6049 if (rc < 0)
6050 return rc;
6051
6052 - len += sprintf(p + len, "speed:\t\t%d\n", speed);
6053 + seq_printf(m, "speed:\t\t%d\n", speed);
6054
6055 if (status & TP_EC_FAN_FULLSPEED)
6056 /* Disengaged mode takes precedence */
6057 - len += sprintf(p + len, "level:\t\tdisengaged\n");
6058 + seq_printf(m, "level:\t\tdisengaged\n");
6059 else if (status & TP_EC_FAN_AUTO)
6060 - len += sprintf(p + len, "level:\t\tauto\n");
6061 + seq_printf(m, "level:\t\tauto\n");
6062 else
6063 - len += sprintf(p + len, "level:\t\t%d\n", status);
6064 + seq_printf(m, "level:\t\t%d\n", status);
6065 break;
6066
6067 case TPACPI_FAN_NONE:
6068 default:
6069 - len += sprintf(p + len, "status:\t\tnot supported\n");
6070 + seq_printf(m, "status:\t\tnot supported\n");
6071 }
6072
6073 if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
6074 - len += sprintf(p + len, "commands:\tlevel <level>");
6075 + seq_printf(m, "commands:\tlevel <level>");
6076
6077 switch (fan_control_access_mode) {
6078 case TPACPI_FAN_WR_ACPI_SFAN:
6079 - len += sprintf(p + len, " (<level> is 0-7)\n");
6080 + seq_printf(m, " (<level> is 0-7)\n");
6081 break;
6082
6083 default:
6084 - len += sprintf(p + len, " (<level> is 0-7, "
6085 + seq_printf(m, " (<level> is 0-7, "
6086 "auto, disengaged, full-speed)\n");
6087 break;
6088 }
6089 }
6090
6091 if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
6092 - len += sprintf(p + len, "commands:\tenable, disable\n"
6093 + seq_printf(m, "commands:\tenable, disable\n"
6094 "commands:\twatchdog <timeout> (<timeout> "
6095 "is 0 (off), 1-120 (seconds))\n");
6096
6097 if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
6098 - len += sprintf(p + len, "commands:\tspeed <speed>"
6099 + seq_printf(m, "commands:\tspeed <speed>"
6100 " (<speed> is 0-65535)\n");
6101
6102 - return len;
6103 + return 0;
6104 }
6105
6106 static int fan_write_cmd_level(const char *cmd, int *rc)
6107 @@ -7712,6 +7757,13 @@ static struct ibm_struct fan_driver_data = {
6108 */
6109 static void tpacpi_driver_event(const unsigned int hkey_event)
6110 {
6111 + if (ibm_backlight_device) {
6112 + switch (hkey_event) {
6113 + case TP_HKEY_EV_BRGHT_UP:
6114 + case TP_HKEY_EV_BRGHT_DOWN:
6115 + tpacpi_brightness_notify_change();
6116 + }
6117 + }
6118 }
6119
6120
6121 @@ -7844,19 +7896,20 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
6122 "%s installed\n", ibm->name);
6123
6124 if (ibm->read) {
6125 - entry = create_proc_entry(ibm->name,
6126 - S_IFREG | S_IRUGO | S_IWUSR,
6127 - proc_dir);
6128 + mode_t mode = iibm->base_procfs_mode;
6129 +
6130 + if (!mode)
6131 + mode = S_IRUGO;
6132 + if (ibm->write)
6133 + mode |= S_IWUSR;
6134 + entry = proc_create_data(ibm->name, mode, proc_dir,
6135 + &dispatch_proc_fops, ibm);
6136 if (!entry) {
6137 printk(TPACPI_ERR "unable to create proc entry %s\n",
6138 ibm->name);
6139 ret = -ENODEV;
6140 goto err_out;
6141 }
6142 - entry->data = ibm;
6143 - entry->read_proc = &dispatch_procfs_read;
6144 - if (ibm->write)
6145 - entry->write_proc = &dispatch_procfs_write;
6146 ibm->flags.proc_created = 1;
6147 }
6148
6149 @@ -8037,6 +8090,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
6150 #ifdef CONFIG_THINKPAD_ACPI_VIDEO
6151 {
6152 .init = video_init,
6153 + .base_procfs_mode = S_IRUSR,
6154 .data = &video_driver_data,
6155 },
6156 #endif
6157 @@ -8103,32 +8157,32 @@ static int __init set_ibm_param(const char *val, struct kernel_param *kp)
6158 return -EINVAL;
6159 }
6160
6161 -module_param(experimental, int, 0);
6162 +module_param(experimental, int, 0444);
6163 MODULE_PARM_DESC(experimental,
6164 "Enables experimental features when non-zero");
6165
6166 module_param_named(debug, dbg_level, uint, 0);
6167 MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
6168
6169 -module_param(force_load, bool, 0);
6170 +module_param(force_load, bool, 0444);
6171 MODULE_PARM_DESC(force_load,
6172 "Attempts to load the driver even on a "
6173 "mis-identified ThinkPad when true");
6174
6175 -module_param_named(fan_control, fan_control_allowed, bool, 0);
6176 +module_param_named(fan_control, fan_control_allowed, bool, 0444);
6177 MODULE_PARM_DESC(fan_control,
6178 "Enables setting fan parameters features when true");
6179
6180 -module_param_named(brightness_mode, brightness_mode, uint, 0);
6181 +module_param_named(brightness_mode, brightness_mode, uint, 0444);
6182 MODULE_PARM_DESC(brightness_mode,
6183 "Selects brightness control strategy: "
6184 "0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
6185
6186 -module_param(brightness_enable, uint, 0);
6187 +module_param(brightness_enable, uint, 0444);
6188 MODULE_PARM_DESC(brightness_enable,
6189 "Enables backlight control when 1, disables when 0");
6190
6191 -module_param(hotkey_report_mode, uint, 0);
6192 +module_param(hotkey_report_mode, uint, 0444);
6193 MODULE_PARM_DESC(hotkey_report_mode,
6194 "used for backwards compatibility with userspace, "
6195 "see documentation");
6196 @@ -8151,25 +8205,25 @@ TPACPI_PARAM(volume);
6197 TPACPI_PARAM(fan);
6198
6199 #ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
6200 -module_param(dbg_wlswemul, uint, 0);
6201 +module_param(dbg_wlswemul, uint, 0444);
6202 MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
6203 module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
6204 MODULE_PARM_DESC(wlsw_state,
6205 "Initial state of the emulated WLSW switch");
6206
6207 -module_param(dbg_bluetoothemul, uint, 0);
6208 +module_param(dbg_bluetoothemul, uint, 0444);
6209 MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
6210 module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
6211 MODULE_PARM_DESC(bluetooth_state,
6212 "Initial state of the emulated bluetooth switch");
6213
6214 -module_param(dbg_wwanemul, uint, 0);
6215 +module_param(dbg_wwanemul, uint, 0444);
6216 MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
6217 module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
6218 MODULE_PARM_DESC(wwan_state,
6219 "Initial state of the emulated WWAN switch");
6220
6221 -module_param(dbg_uwbemul, uint, 0);
6222 +module_param(dbg_uwbemul, uint, 0444);
6223 MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
6224 module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
6225 MODULE_PARM_DESC(uwb_state,
6226 @@ -8362,6 +8416,7 @@ static int __init thinkpad_acpi_module_init(void)
6227 PCI_VENDOR_ID_IBM;
6228 tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
6229 tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
6230 + tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
6231 }
6232 for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
6233 ret = ibm_init(&ibms_init[i]);
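Changing the third module_param() argument from 0 to 0444, as done throughout the block above, exposes the current value read-only through sysfs instead of hiding it. A sketch:

	/* perm 0444: value is visible read-only under
	 * /sys/module/<module>/parameters/<name>; perm 0 creates no sysfs file */
	module_param(experimental, int, 0444);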
6234 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
6235 index f1a4246..c7a6a89 100644
6236 --- a/drivers/scsi/libiscsi.c
6237 +++ b/drivers/scsi/libiscsi.c
6238 @@ -2823,14 +2823,15 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
6239 session->state = ISCSI_STATE_TERMINATE;
6240 else if (conn->stop_stage != STOP_CONN_RECOVER)
6241 session->state = ISCSI_STATE_IN_RECOVERY;
6242 +
6243 + old_stop_stage = conn->stop_stage;
6244 + conn->stop_stage = flag;
6245 spin_unlock_bh(&session->lock);
6246
6247 del_timer_sync(&conn->transport_timer);
6248 iscsi_suspend_tx(conn);
6249
6250 spin_lock_bh(&session->lock);
6251 - old_stop_stage = conn->stop_stage;
6252 - conn->stop_stage = flag;
6253 conn->c_stage = ISCSI_CONN_STOPPED;
6254 spin_unlock_bh(&session->lock);
6255
6256 diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
6257 index b98f763..d9564fb 100644
6258 --- a/drivers/scsi/scsi_ioctl.c
6259 +++ b/drivers/scsi/scsi_ioctl.c
6260 @@ -308,6 +308,9 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
6261 case SG_SCSI_RESET_DEVICE:
6262 val = SCSI_TRY_RESET_DEVICE;
6263 break;
6264 + case SG_SCSI_RESET_TARGET:
6265 + val = SCSI_TRY_RESET_TARGET;
6266 + break;
6267 case SG_SCSI_RESET_BUS:
6268 val = SCSI_TRY_RESET_BUS;
6269 break;
6270 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
6271 index 64084aa..db02e31 100644
6272 --- a/drivers/scsi/scsi_transport_fc.c
6273 +++ b/drivers/scsi/scsi_transport_fc.c
6274 @@ -3796,8 +3796,9 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
6275 return;
6276
6277 while (!blk_queue_plugged(q)) {
6278 - if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
6279 - break;
6280 + if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
6281 + !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
6282 + break;
6283
6284 req = blk_fetch_request(q);
6285 if (!req)
6286 diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
6287 index cca4e86..5c9c1bc 100644
6288 --- a/drivers/staging/comedi/drivers/usbdux.c
6289 +++ b/drivers/staging/comedi/drivers/usbdux.c
6290 @@ -1,4 +1,4 @@
6291 -#define DRIVER_VERSION "v2.2"
6292 +#define DRIVER_VERSION "v2.4"
6293 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
6294 #define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"
6295 /*
6296 @@ -80,6 +80,9 @@ sampling rate. If you sample two channels you get 4kHz and so on.
6297 * 2.0: PWM seems to be stable and is not interfering with the other functions
6298 * 2.1: changed PWM API
6299 * 2.2: added firmware kernel request to fix an udev problem
6300 + * 2.3: corrected a bug in bulk timeouts which were far too short
6301 + * 2.4: fixed a bug which caused the driver to hang when it ran out of data.
6302 + * Thanks to Jan-Matthias Braun and Ian for spotting the bug and fixing it.
6303 *
6304 */
6305
6306 @@ -101,8 +104,8 @@ sampling rate. If you sample two channels you get 4kHz and so on.
6307
6308 #define BOARDNAME "usbdux"
6309
6310 -/* timeout for the USB-transfer */
6311 -#define EZTIMEOUT 30
6312 +/* timeout for the USB transfer in ms */
6313 +#define BULK_TIMEOUT 1000
6314
6315 /* constants for "firmware" upload and download */
6316 #define USBDUXSUB_FIRMWARE 0xA0
6317 @@ -531,6 +534,7 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
6318 }
6319 }
6320 /* tell comedi that data is there */
6321 + s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
6322 comedi_event(this_usbduxsub->comedidev, s);
6323 }
6324
6325 @@ -750,7 +754,7 @@ static int usbduxsub_start(struct usbduxsub *usbduxsub)
6326 /* Length */
6327 1,
6328 /* Timeout */
6329 - EZTIMEOUT);
6330 + BULK_TIMEOUT);
6331 if (errcode < 0) {
6332 dev_err(&usbduxsub->interface->dev,
6333 "comedi_: control msg failed (start)\n");
6334 @@ -780,7 +784,7 @@ static int usbduxsub_stop(struct usbduxsub *usbduxsub)
6335 /* Length */
6336 1,
6337 /* Timeout */
6338 - EZTIMEOUT);
6339 + BULK_TIMEOUT);
6340 if (errcode < 0) {
6341 dev_err(&usbduxsub->interface->dev,
6342 "comedi_: control msg failed (stop)\n");
6343 @@ -810,7 +814,7 @@ static int usbduxsub_upload(struct usbduxsub *usbduxsub,
6344 /* length */
6345 len,
6346 /* timeout */
6347 - EZTIMEOUT);
6348 + BULK_TIMEOUT);
6349 dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
6350 if (errcode < 0) {
6351 dev_err(&usbduxsub->interface->dev, "comedi_: upload failed\n");
6352 @@ -1110,7 +1114,7 @@ static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
6353 usb_sndbulkpipe(this_usbduxsub->usbdev,
6354 COMMAND_OUT_EP),
6355 this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
6356 - &nsent, 10);
6357 + &nsent, BULK_TIMEOUT);
6358 if (result < 0)
6359 dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
6360 "could not transmit dux_command to the usb-device, "
6361 @@ -1130,7 +1134,7 @@ static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
6362 usb_rcvbulkpipe(this_usbduxsub->usbdev,
6363 COMMAND_IN_EP),