/[linux-patches]/genpatches-2.6/tags/2.6.32-15/1007_linux-2.6.32.8.patch
Gentoo

Contents of /genpatches-2.6/tags/2.6.32-15/1007_linux-2.6.32.8.patch

Parent Directory | Revision Log


Revision 1735 - (show annotations) (download)
Wed Aug 4 11:25:09 2010 UTC (4 years, 6 months ago) by mpagano
File size: 146455 byte(s)
2.6.32-15 release
1 diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
2 index 5a4bc8c..db3a706 100644
3 --- a/Documentation/kvm/api.txt
4 +++ b/Documentation/kvm/api.txt
5 @@ -593,6 +593,42 @@ struct kvm_irqchip {
6 } chip;
7 };
8
9 +4.27 KVM_GET_CLOCK
10 +
11 +Capability: KVM_CAP_ADJUST_CLOCK
12 +Architectures: x86
13 +Type: vm ioctl
14 +Parameters: struct kvm_clock_data (out)
15 +Returns: 0 on success, -1 on error
16 +
17 +Gets the current timestamp of kvmclock as seen by the current guest. In
18 +conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
19 +such as migration.
20 +
21 +struct kvm_clock_data {
22 + __u64 clock; /* kvmclock current value */
23 + __u32 flags;
24 + __u32 pad[9];
25 +};
26 +
27 +4.28 KVM_SET_CLOCK
28 +
29 +Capability: KVM_CAP_ADJUST_CLOCK
30 +Architectures: x86
31 +Type: vm ioctl
32 +Parameters: struct kvm_clock_data (in)
33 +Returns: 0 on success, -1 on error
34 +
35 +Sets the current timestamp of kvmclock to the valued specific in its parameter.
36 +In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
37 +such as migration.
38 +
39 +struct kvm_clock_data {
40 + __u64 clock; /* kvmclock current value */
41 + __u32 flags;
42 + __u32 pad[9];
43 +};
44 +
45 5. The kvm_run structure
46
47 Application code obtains a pointer to the kvm_run structure by
48 diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
49 index 944a07c..1d04e40 100644
50 --- a/arch/blackfin/include/asm/page.h
51 +++ b/arch/blackfin/include/asm/page.h
52 @@ -10,4 +10,9 @@
53 #include <asm-generic/page.h>
54 #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
55
56 +#define VM_DATA_DEFAULT_FLAGS \
57 + (VM_READ | VM_WRITE | \
58 + ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
59 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
60 +
61 #endif
62 diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
63 index 25c6a50..8c97068 100644
64 --- a/arch/frv/include/asm/page.h
65 +++ b/arch/frv/include/asm/page.h
66 @@ -63,12 +63,10 @@ extern unsigned long max_pfn;
67 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
68
69
70 -#ifdef CONFIG_MMU
71 #define VM_DATA_DEFAULT_FLAGS \
72 (VM_READ | VM_WRITE | \
73 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
74 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
75 -#endif
76
77 #endif /* __ASSEMBLY__ */
78
79 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
80 index 014a624..5698502 100644
81 --- a/arch/powerpc/include/asm/elf.h
82 +++ b/arch/powerpc/include/asm/elf.h
83 @@ -236,14 +236,10 @@ typedef elf_vrregset_t elf_fpxregset_t;
84 #ifdef __powerpc64__
85 # define SET_PERSONALITY(ex) \
86 do { \
87 - unsigned long new_flags = 0; \
88 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
89 - new_flags = _TIF_32BIT; \
90 - if ((current_thread_info()->flags & _TIF_32BIT) \
91 - != new_flags) \
92 - set_thread_flag(TIF_ABI_PENDING); \
93 + set_thread_flag(TIF_32BIT); \
94 else \
95 - clear_thread_flag(TIF_ABI_PENDING); \
96 + clear_thread_flag(TIF_32BIT); \
97 if (personality(current->personality) != PER_LINUX32) \
98 set_personality(PER_LINUX | \
99 (current->personality & (~PER_MASK))); \
100 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
101 index c8b3292..aa9d383 100644
102 --- a/arch/powerpc/include/asm/thread_info.h
103 +++ b/arch/powerpc/include/asm/thread_info.h
104 @@ -111,7 +111,6 @@ static inline struct thread_info *current_thread_info(void)
105 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
106 #define TIF_FREEZE 14 /* Freezing for suspend */
107 #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
108 -#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
109
110 /* as above, but as bit values */
111 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
112 @@ -129,7 +128,6 @@ static inline struct thread_info *current_thread_info(void)
113 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
114 #define _TIF_FREEZE (1<<TIF_FREEZE)
115 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
116 -#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
117 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
118
119 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
120 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
121 index c930ac3..7b816da 100644
122 --- a/arch/powerpc/kernel/process.c
123 +++ b/arch/powerpc/kernel/process.c
124 @@ -554,18 +554,6 @@ void exit_thread(void)
125
126 void flush_thread(void)
127 {
128 -#ifdef CONFIG_PPC64
129 - struct thread_info *t = current_thread_info();
130 -
131 - if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
132 - clear_ti_thread_flag(t, TIF_ABI_PENDING);
133 - if (test_ti_thread_flag(t, TIF_32BIT))
134 - clear_ti_thread_flag(t, TIF_32BIT);
135 - else
136 - set_ti_thread_flag(t, TIF_32BIT);
137 - }
138 -#endif
139 -
140 discard_lazy_cpu_state();
141
142 if (current->thread.dabr) {
143 diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
144 index 48215d1..e8ef21c 100644
145 --- a/arch/s390/kernel/entry.S
146 +++ b/arch/s390/kernel/entry.S
147 @@ -571,6 +571,7 @@ pgm_svcper:
148 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
149 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
150 TRACE_IRQS_ON
151 + lm %r2,%r6,SP_R2(%r15) # load svc arguments
152 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
153 b BASED(sysc_do_svc)
154
155 diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
156 index 9aff1d4..f33658f 100644
157 --- a/arch/s390/kernel/entry64.S
158 +++ b/arch/s390/kernel/entry64.S
159 @@ -549,6 +549,7 @@ pgm_svcper:
160 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
161 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
162 TRACE_IRQS_ON
163 + lmg %r2,%r6,SP_R2(%r15) # load svc arguments
164 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
165 j sysc_do_svc
166
167 diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
168 index 1192398..44aa119 100644
169 --- a/arch/sh/kernel/process_64.c
170 +++ b/arch/sh/kernel/process_64.c
171 @@ -367,7 +367,7 @@ void exit_thread(void)
172 void flush_thread(void)
173 {
174
175 - /* Called by fs/exec.c (flush_old_exec) to remove traces of a
176 + /* Called by fs/exec.c (setup_new_exec) to remove traces of a
177 * previously running executable. */
178 #ifdef CONFIG_SH_FPU
179 if (last_task_used_math == current) {
180 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
181 index d42e393..9968085 100644
182 --- a/arch/sparc/include/asm/elf_64.h
183 +++ b/arch/sparc/include/asm/elf_64.h
184 @@ -196,17 +196,10 @@ static inline unsigned int sparc64_elf_hwcap(void)
185 #define ELF_PLATFORM (NULL)
186
187 #define SET_PERSONALITY(ex) \
188 -do { unsigned long new_flags = current_thread_info()->flags; \
189 - new_flags &= _TIF_32BIT; \
190 - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
191 - new_flags |= _TIF_32BIT; \
192 +do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
193 + set_thread_flag(TIF_32BIT); \
194 else \
195 - new_flags &= ~_TIF_32BIT; \
196 - if ((current_thread_info()->flags & _TIF_32BIT) \
197 - != new_flags) \
198 - set_thread_flag(TIF_ABI_PENDING); \
199 - else \
200 - clear_thread_flag(TIF_ABI_PENDING); \
201 + clear_thread_flag(TIF_32BIT); \
202 /* flush_thread will update pgd cache */ \
203 if (personality(current->personality) != PER_LINUX32) \
204 set_personality(PER_LINUX | \
205 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
206 index 1b45a7b..f78ad9a 100644
207 --- a/arch/sparc/include/asm/thread_info_64.h
208 +++ b/arch/sparc/include/asm/thread_info_64.h
209 @@ -227,12 +227,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
210 /* flag bit 8 is available */
211 #define TIF_SECCOMP 9 /* secure computing */
212 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
213 -/* flag bit 11 is available */
214 /* NOTE: Thread flags >= 12 should be ones we have no interest
215 * in using in assembly, else we can't use the mask as
216 * an immediate value in instructions such as andcc.
217 */
218 -#define TIF_ABI_PENDING 12
219 +/* flag bit 12 is available */
220 #define TIF_MEMDIE 13
221 #define TIF_POLLING_NRFLAG 14
222 #define TIF_FREEZE 15 /* is freezing for suspend */
223 @@ -246,7 +245,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
224 #define _TIF_32BIT (1<<TIF_32BIT)
225 #define _TIF_SECCOMP (1<<TIF_SECCOMP)
226 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
227 -#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
228 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
229 #define _TIF_FREEZE (1<<TIF_FREEZE)
230
231 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
232 index 18d6785..c3f1cce 100644
233 --- a/arch/sparc/kernel/process_64.c
234 +++ b/arch/sparc/kernel/process_64.c
235 @@ -365,14 +365,6 @@ void flush_thread(void)
236 struct thread_info *t = current_thread_info();
237 struct mm_struct *mm;
238
239 - if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
240 - clear_ti_thread_flag(t, TIF_ABI_PENDING);
241 - if (test_ti_thread_flag(t, TIF_32BIT))
242 - clear_ti_thread_flag(t, TIF_32BIT);
243 - else
244 - set_ti_thread_flag(t, TIF_32BIT);
245 - }
246 -
247 mm = t->task->mm;
248 if (mm)
249 tsb_context_switch(mm);
250 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
251 index 72ace95..4fdb669 100644
252 --- a/arch/x86/Kconfig
253 +++ b/arch/x86/Kconfig
254 @@ -984,12 +984,6 @@ config X86_CPUID
255 with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
256 /dev/cpu/31/cpuid.
257
258 -config X86_CPU_DEBUG
259 - tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
260 - ---help---
261 - If you select this option, this will provide various x86 CPUs
262 - information through debugfs.
263 -
264 choice
265 prompt "High Memory Support"
266 default HIGHMEM4G if !X86_NUMAQ
267 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
268 index 2a4d073..f9f4724 100644
269 --- a/arch/x86/ia32/ia32_aout.c
270 +++ b/arch/x86/ia32/ia32_aout.c
271 @@ -308,14 +308,15 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
272 if (retval)
273 return retval;
274
275 - regs->cs = __USER32_CS;
276 - regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
277 - regs->r13 = regs->r14 = regs->r15 = 0;
278 -
279 /* OK, This is the point of no return */
280 set_personality(PER_LINUX);
281 set_thread_flag(TIF_IA32);
282 - clear_thread_flag(TIF_ABI_PENDING);
283 +
284 + setup_new_exec(bprm);
285 +
286 + regs->cs = __USER32_CS;
287 + regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
288 + regs->r13 = regs->r14 = regs->r15 = 0;
289
290 current->mm->end_code = ex.a_text +
291 (current->mm->start_code = N_TXTADDR(ex));
292 diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
293 deleted file mode 100644
294 index d96c1ee..0000000
295 --- a/arch/x86/include/asm/cpu_debug.h
296 +++ /dev/null
297 @@ -1,127 +0,0 @@
298 -#ifndef _ASM_X86_CPU_DEBUG_H
299 -#define _ASM_X86_CPU_DEBUG_H
300 -
301 -/*
302 - * CPU x86 architecture debug
303 - *
304 - * Copyright(C) 2009 Jaswinder Singh Rajput
305 - */
306 -
307 -/* Register flags */
308 -enum cpu_debug_bit {
309 -/* Model Specific Registers (MSRs) */
310 - CPU_MC_BIT, /* Machine Check */
311 - CPU_MONITOR_BIT, /* Monitor */
312 - CPU_TIME_BIT, /* Time */
313 - CPU_PMC_BIT, /* Performance Monitor */
314 - CPU_PLATFORM_BIT, /* Platform */
315 - CPU_APIC_BIT, /* APIC */
316 - CPU_POWERON_BIT, /* Power-on */
317 - CPU_CONTROL_BIT, /* Control */
318 - CPU_FEATURES_BIT, /* Features control */
319 - CPU_LBRANCH_BIT, /* Last Branch */
320 - CPU_BIOS_BIT, /* BIOS */
321 - CPU_FREQ_BIT, /* Frequency */
322 - CPU_MTTR_BIT, /* MTRR */
323 - CPU_PERF_BIT, /* Performance */
324 - CPU_CACHE_BIT, /* Cache */
325 - CPU_SYSENTER_BIT, /* Sysenter */
326 - CPU_THERM_BIT, /* Thermal */
327 - CPU_MISC_BIT, /* Miscellaneous */
328 - CPU_DEBUG_BIT, /* Debug */
329 - CPU_PAT_BIT, /* PAT */
330 - CPU_VMX_BIT, /* VMX */
331 - CPU_CALL_BIT, /* System Call */
332 - CPU_BASE_BIT, /* BASE Address */
333 - CPU_VER_BIT, /* Version ID */
334 - CPU_CONF_BIT, /* Configuration */
335 - CPU_SMM_BIT, /* System mgmt mode */
336 - CPU_SVM_BIT, /*Secure Virtual Machine*/
337 - CPU_OSVM_BIT, /* OS-Visible Workaround*/
338 -/* Standard Registers */
339 - CPU_TSS_BIT, /* Task Stack Segment */
340 - CPU_CR_BIT, /* Control Registers */
341 - CPU_DT_BIT, /* Descriptor Table */
342 -/* End of Registers flags */
343 - CPU_REG_ALL_BIT, /* Select all Registers */
344 -};
345 -
346 -#define CPU_REG_ALL (~0) /* Select all Registers */
347 -
348 -#define CPU_MC (1 << CPU_MC_BIT)
349 -#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
350 -#define CPU_TIME (1 << CPU_TIME_BIT)
351 -#define CPU_PMC (1 << CPU_PMC_BIT)
352 -#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
353 -#define CPU_APIC (1 << CPU_APIC_BIT)
354 -#define CPU_POWERON (1 << CPU_POWERON_BIT)
355 -#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
356 -#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
357 -#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
358 -#define CPU_BIOS (1 << CPU_BIOS_BIT)
359 -#define CPU_FREQ (1 << CPU_FREQ_BIT)
360 -#define CPU_MTRR (1 << CPU_MTTR_BIT)
361 -#define CPU_PERF (1 << CPU_PERF_BIT)
362 -#define CPU_CACHE (1 << CPU_CACHE_BIT)
363 -#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
364 -#define CPU_THERM (1 << CPU_THERM_BIT)
365 -#define CPU_MISC (1 << CPU_MISC_BIT)
366 -#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
367 -#define CPU_PAT (1 << CPU_PAT_BIT)
368 -#define CPU_VMX (1 << CPU_VMX_BIT)
369 -#define CPU_CALL (1 << CPU_CALL_BIT)
370 -#define CPU_BASE (1 << CPU_BASE_BIT)
371 -#define CPU_VER (1 << CPU_VER_BIT)
372 -#define CPU_CONF (1 << CPU_CONF_BIT)
373 -#define CPU_SMM (1 << CPU_SMM_BIT)
374 -#define CPU_SVM (1 << CPU_SVM_BIT)
375 -#define CPU_OSVM (1 << CPU_OSVM_BIT)
376 -#define CPU_TSS (1 << CPU_TSS_BIT)
377 -#define CPU_CR (1 << CPU_CR_BIT)
378 -#define CPU_DT (1 << CPU_DT_BIT)
379 -
380 -/* Register file flags */
381 -enum cpu_file_bit {
382 - CPU_INDEX_BIT, /* index */
383 - CPU_VALUE_BIT, /* value */
384 -};
385 -
386 -#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)
387 -
388 -#define MAX_CPU_FILES 512
389 -
390 -struct cpu_private {
391 - unsigned cpu;
392 - unsigned type;
393 - unsigned reg;
394 - unsigned file;
395 -};
396 -
397 -struct cpu_debug_base {
398 - char *name; /* Register name */
399 - unsigned flag; /* Register flag */
400 - unsigned write; /* Register write flag */
401 -};
402 -
403 -/*
404 - * Currently it looks similar to cpu_debug_base but once we add more files
405 - * cpu_file_base will go in different direction
406 - */
407 -struct cpu_file_base {
408 - char *name; /* Register file name */
409 - unsigned flag; /* Register file flag */
410 - unsigned write; /* Register write flag */
411 -};
412 -
413 -struct cpu_cpuX_base {
414 - struct dentry *dentry; /* Register dentry */
415 - int init; /* Register index file */
416 -};
417 -
418 -struct cpu_debug_range {
419 - unsigned min; /* Register range min */
420 - unsigned max; /* Register range max */
421 - unsigned flag; /* Supported flags */
422 -};
423 -
424 -#endif /* _ASM_X86_CPU_DEBUG_H */
425 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
426 index 456a304..8ac9d9a 100644
427 --- a/arch/x86/include/asm/elf.h
428 +++ b/arch/x86/include/asm/elf.h
429 @@ -197,14 +197,8 @@ do { \
430 set_fs(USER_DS); \
431 } while (0)
432
433 -#define COMPAT_SET_PERSONALITY(ex) \
434 -do { \
435 - if (test_thread_flag(TIF_IA32)) \
436 - clear_thread_flag(TIF_ABI_PENDING); \
437 - else \
438 - set_thread_flag(TIF_ABI_PENDING); \
439 - current->personality |= force_personality32; \
440 -} while (0)
441 +void set_personality_ia32(void);
442 +#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32()
443
444 #define COMPAT_ELF_PLATFORM ("i686")
445
446 diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
447 index 1c22cb0..3251e23 100644
448 --- a/arch/x86/include/asm/hpet.h
449 +++ b/arch/x86/include/asm/hpet.h
450 @@ -66,6 +66,7 @@
451 extern unsigned long hpet_address;
452 extern unsigned long force_hpet_address;
453 extern int hpet_force_user;
454 +extern u8 hpet_msi_disable;
455 extern int is_hpet_enabled(void);
456 extern int hpet_enable(void);
457 extern void hpet_disable(void);
458 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
459 index d838922..d759a1f 100644
460 --- a/arch/x86/include/asm/kvm_host.h
461 +++ b/arch/x86/include/asm/kvm_host.h
462 @@ -412,6 +412,7 @@ struct kvm_arch{
463 unsigned long irq_sources_bitmap;
464 unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
465 u64 vm_init_tsc;
466 + s64 kvmclock_offset;
467 };
468
469 struct kvm_vm_stat {
470 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
471 index d27d0a2..19c3ce4 100644
472 --- a/arch/x86/include/asm/thread_info.h
473 +++ b/arch/x86/include/asm/thread_info.h
474 @@ -86,7 +86,6 @@ struct thread_info {
475 #define TIF_NOTSC 16 /* TSC is not accessible in userland */
476 #define TIF_IA32 17 /* 32bit process */
477 #define TIF_FORK 18 /* ret_from_fork */
478 -#define TIF_ABI_PENDING 19
479 #define TIF_MEMDIE 20
480 #define TIF_DEBUG 21 /* uses debug registers */
481 #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
482 @@ -110,7 +109,6 @@ struct thread_info {
483 #define _TIF_NOTSC (1 << TIF_NOTSC)
484 #define _TIF_IA32 (1 << TIF_IA32)
485 #define _TIF_FORK (1 << TIF_FORK)
486 -#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING)
487 #define _TIF_DEBUG (1 << TIF_DEBUG)
488 #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
489 #define _TIF_FREEZE (1 << TIF_FREEZE)
490 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
491 index 90b9b55..e3f85fe 100644
492 --- a/arch/x86/kernel/amd_iommu.c
493 +++ b/arch/x86/kernel/amd_iommu.c
494 @@ -540,7 +540,7 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
495 static void flush_devices_by_domain(struct protection_domain *domain)
496 {
497 struct amd_iommu *iommu;
498 - int i;
499 + unsigned long i;
500
501 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
502 if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
503 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
504 index 68537e9..ff502cc 100644
505 --- a/arch/x86/kernel/cpu/Makefile
506 +++ b/arch/x86/kernel/cpu/Makefile
507 @@ -18,8 +18,6 @@ obj-y += vmware.o hypervisor.o sched.o
508 obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
509 obj-$(CONFIG_X86_64) += bugs_64.o
510
511 -obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o
512 -
513 obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
514 obj-$(CONFIG_CPU_SUP_AMD) += amd.o
515 obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
516 diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
517 deleted file mode 100644
518 index dca325c..0000000
519 --- a/arch/x86/kernel/cpu/cpu_debug.c
520 +++ /dev/null
521 @@ -1,688 +0,0 @@
522 -/*
523 - * CPU x86 architecture debug code
524 - *
525 - * Copyright(C) 2009 Jaswinder Singh Rajput
526 - *
527 - * For licencing details see kernel-base/COPYING
528 - */
529 -
530 -#include <linux/interrupt.h>
531 -#include <linux/compiler.h>
532 -#include <linux/seq_file.h>
533 -#include <linux/debugfs.h>
534 -#include <linux/kprobes.h>
535 -#include <linux/uaccess.h>
536 -#include <linux/kernel.h>
537 -#include <linux/module.h>
538 -#include <linux/percpu.h>
539 -#include <linux/signal.h>
540 -#include <linux/errno.h>
541 -#include <linux/sched.h>
542 -#include <linux/types.h>
543 -#include <linux/init.h>
544 -#include <linux/slab.h>
545 -#include <linux/smp.h>
546 -
547 -#include <asm/cpu_debug.h>
548 -#include <asm/paravirt.h>
549 -#include <asm/system.h>
550 -#include <asm/traps.h>
551 -#include <asm/apic.h>
552 -#include <asm/desc.h>
553 -
554 -static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
555 -static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
556 -static DEFINE_PER_CPU(int, cpu_priv_count);
557 -
558 -static DEFINE_MUTEX(cpu_debug_lock);
559 -
560 -static struct dentry *cpu_debugfs_dir;
561 -
562 -static struct cpu_debug_base cpu_base[] = {
563 - { "mc", CPU_MC, 0 },
564 - { "monitor", CPU_MONITOR, 0 },
565 - { "time", CPU_TIME, 0 },
566 - { "pmc", CPU_PMC, 1 },
567 - { "platform", CPU_PLATFORM, 0 },
568 - { "apic", CPU_APIC, 0 },
569 - { "poweron", CPU_POWERON, 0 },
570 - { "control", CPU_CONTROL, 0 },
571 - { "features", CPU_FEATURES, 0 },
572 - { "lastbranch", CPU_LBRANCH, 0 },
573 - { "bios", CPU_BIOS, 0 },
574 - { "freq", CPU_FREQ, 0 },
575 - { "mtrr", CPU_MTRR, 0 },
576 - { "perf", CPU_PERF, 0 },
577 - { "cache", CPU_CACHE, 0 },
578 - { "sysenter", CPU_SYSENTER, 0 },
579 - { "therm", CPU_THERM, 0 },
580 - { "misc", CPU_MISC, 0 },
581 - { "debug", CPU_DEBUG, 0 },
582 - { "pat", CPU_PAT, 0 },
583 - { "vmx", CPU_VMX, 0 },
584 - { "call", CPU_CALL, 0 },
585 - { "base", CPU_BASE, 0 },
586 - { "ver", CPU_VER, 0 },
587 - { "conf", CPU_CONF, 0 },
588 - { "smm", CPU_SMM, 0 },
589 - { "svm", CPU_SVM, 0 },
590 - { "osvm", CPU_OSVM, 0 },
591 - { "tss", CPU_TSS, 0 },
592 - { "cr", CPU_CR, 0 },
593 - { "dt", CPU_DT, 0 },
594 - { "registers", CPU_REG_ALL, 0 },
595 -};
596 -
597 -static struct cpu_file_base cpu_file[] = {
598 - { "index", CPU_REG_ALL, 0 },
599 - { "value", CPU_REG_ALL, 1 },
600 -};
601 -
602 -/* CPU Registers Range */
603 -static struct cpu_debug_range cpu_reg_range[] = {
604 - { 0x00000000, 0x00000001, CPU_MC, },
605 - { 0x00000006, 0x00000007, CPU_MONITOR, },
606 - { 0x00000010, 0x00000010, CPU_TIME, },
607 - { 0x00000011, 0x00000013, CPU_PMC, },
608 - { 0x00000017, 0x00000017, CPU_PLATFORM, },
609 - { 0x0000001B, 0x0000001B, CPU_APIC, },
610 - { 0x0000002A, 0x0000002B, CPU_POWERON, },
611 - { 0x0000002C, 0x0000002C, CPU_FREQ, },
612 - { 0x0000003A, 0x0000003A, CPU_CONTROL, },
613 - { 0x00000040, 0x00000047, CPU_LBRANCH, },
614 - { 0x00000060, 0x00000067, CPU_LBRANCH, },
615 - { 0x00000079, 0x00000079, CPU_BIOS, },
616 - { 0x00000088, 0x0000008A, CPU_CACHE, },
617 - { 0x0000008B, 0x0000008B, CPU_BIOS, },
618 - { 0x0000009B, 0x0000009B, CPU_MONITOR, },
619 - { 0x000000C1, 0x000000C4, CPU_PMC, },
620 - { 0x000000CD, 0x000000CD, CPU_FREQ, },
621 - { 0x000000E7, 0x000000E8, CPU_PERF, },
622 - { 0x000000FE, 0x000000FE, CPU_MTRR, },
623 -
624 - { 0x00000116, 0x0000011E, CPU_CACHE, },
625 - { 0x00000174, 0x00000176, CPU_SYSENTER, },
626 - { 0x00000179, 0x0000017B, CPU_MC, },
627 - { 0x00000186, 0x00000189, CPU_PMC, },
628 - { 0x00000198, 0x00000199, CPU_PERF, },
629 - { 0x0000019A, 0x0000019A, CPU_TIME, },
630 - { 0x0000019B, 0x0000019D, CPU_THERM, },
631 - { 0x000001A0, 0x000001A0, CPU_MISC, },
632 - { 0x000001C9, 0x000001C9, CPU_LBRANCH, },
633 - { 0x000001D7, 0x000001D8, CPU_LBRANCH, },
634 - { 0x000001D9, 0x000001D9, CPU_DEBUG, },
635 - { 0x000001DA, 0x000001E0, CPU_LBRANCH, },
636 -
637 - { 0x00000200, 0x0000020F, CPU_MTRR, },
638 - { 0x00000250, 0x00000250, CPU_MTRR, },
639 - { 0x00000258, 0x00000259, CPU_MTRR, },
640 - { 0x00000268, 0x0000026F, CPU_MTRR, },
641 - { 0x00000277, 0x00000277, CPU_PAT, },
642 - { 0x000002FF, 0x000002FF, CPU_MTRR, },
643 -
644 - { 0x00000300, 0x00000311, CPU_PMC, },
645 - { 0x00000345, 0x00000345, CPU_PMC, },
646 - { 0x00000360, 0x00000371, CPU_PMC, },
647 - { 0x0000038D, 0x00000390, CPU_PMC, },
648 - { 0x000003A0, 0x000003BE, CPU_PMC, },
649 - { 0x000003C0, 0x000003CD, CPU_PMC, },
650 - { 0x000003E0, 0x000003E1, CPU_PMC, },
651 - { 0x000003F0, 0x000003F2, CPU_PMC, },
652 -
653 - { 0x00000400, 0x00000417, CPU_MC, },
654 - { 0x00000480, 0x0000048B, CPU_VMX, },
655 -
656 - { 0x00000600, 0x00000600, CPU_DEBUG, },
657 - { 0x00000680, 0x0000068F, CPU_LBRANCH, },
658 - { 0x000006C0, 0x000006CF, CPU_LBRANCH, },
659 -
660 - { 0x000107CC, 0x000107D3, CPU_PMC, },
661 -
662 - { 0xC0000080, 0xC0000080, CPU_FEATURES, },
663 - { 0xC0000081, 0xC0000084, CPU_CALL, },
664 - { 0xC0000100, 0xC0000102, CPU_BASE, },
665 - { 0xC0000103, 0xC0000103, CPU_TIME, },
666 -
667 - { 0xC0010000, 0xC0010007, CPU_PMC, },
668 - { 0xC0010010, 0xC0010010, CPU_CONF, },
669 - { 0xC0010015, 0xC0010015, CPU_CONF, },
670 - { 0xC0010016, 0xC001001A, CPU_MTRR, },
671 - { 0xC001001D, 0xC001001D, CPU_MTRR, },
672 - { 0xC001001F, 0xC001001F, CPU_CONF, },
673 - { 0xC0010030, 0xC0010035, CPU_BIOS, },
674 - { 0xC0010044, 0xC0010048, CPU_MC, },
675 - { 0xC0010050, 0xC0010056, CPU_SMM, },
676 - { 0xC0010058, 0xC0010058, CPU_CONF, },
677 - { 0xC0010060, 0xC0010060, CPU_CACHE, },
678 - { 0xC0010061, 0xC0010068, CPU_SMM, },
679 - { 0xC0010069, 0xC001006B, CPU_SMM, },
680 - { 0xC0010070, 0xC0010071, CPU_SMM, },
681 - { 0xC0010111, 0xC0010113, CPU_SMM, },
682 - { 0xC0010114, 0xC0010118, CPU_SVM, },
683 - { 0xC0010140, 0xC0010141, CPU_OSVM, },
684 - { 0xC0011022, 0xC0011023, CPU_CONF, },
685 -};
686 -
687 -static int is_typeflag_valid(unsigned cpu, unsigned flag)
688 -{
689 - int i;
690 -
691 - /* Standard Registers should be always valid */
692 - if (flag >= CPU_TSS)
693 - return 1;
694 -
695 - for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
696 - if (cpu_reg_range[i].flag == flag)
697 - return 1;
698 - }
699 -
700 - /* Invalid */
701 - return 0;
702 -}
703 -
704 -static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
705 - int index, unsigned flag)
706 -{
707 - if (cpu_reg_range[index].flag == flag) {
708 - *min = cpu_reg_range[index].min;
709 - *max = cpu_reg_range[index].max;
710 - } else
711 - *max = 0;
712 -
713 - return *max;
714 -}
715 -
716 -/* This function can also be called with seq = NULL for printk */
717 -static void print_cpu_data(struct seq_file *seq, unsigned type,
718 - u32 low, u32 high)
719 -{
720 - struct cpu_private *priv;
721 - u64 val = high;
722 -
723 - if (seq) {
724 - priv = seq->private;
725 - if (priv->file) {
726 - val = (val << 32) | low;
727 - seq_printf(seq, "0x%llx\n", val);
728 - } else
729 - seq_printf(seq, " %08x: %08x_%08x\n",
730 - type, high, low);
731 - } else
732 - printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
733 -}
734 -
735 -/* This function can also be called with seq = NULL for printk */
736 -static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
737 -{
738 - unsigned msr, msr_min, msr_max;
739 - struct cpu_private *priv;
740 - u32 low, high;
741 - int i;
742 -
743 - if (seq) {
744 - priv = seq->private;
745 - if (priv->file) {
746 - if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
747 - &low, &high))
748 - print_cpu_data(seq, priv->reg, low, high);
749 - return;
750 - }
751 - }
752 -
753 - for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
754 - if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
755 - continue;
756 -
757 - for (msr = msr_min; msr <= msr_max; msr++) {
758 - if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
759 - continue;
760 - print_cpu_data(seq, msr, low, high);
761 - }
762 - }
763 -}
764 -
765 -static void print_tss(void *arg)
766 -{
767 - struct pt_regs *regs = task_pt_regs(current);
768 - struct seq_file *seq = arg;
769 - unsigned int seg;
770 -
771 - seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
772 - seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
773 - seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
774 - seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
775 -
776 - seq_printf(seq, " RSI\t: %016lx\n", regs->si);
777 - seq_printf(seq, " RDI\t: %016lx\n", regs->di);
778 - seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
779 - seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
780 -
781 -#ifdef CONFIG_X86_64
782 - seq_printf(seq, " R08\t: %016lx\n", regs->r8);
783 - seq_printf(seq, " R09\t: %016lx\n", regs->r9);
784 - seq_printf(seq, " R10\t: %016lx\n", regs->r10);
785 - seq_printf(seq, " R11\t: %016lx\n", regs->r11);
786 - seq_printf(seq, " R12\t: %016lx\n", regs->r12);
787 - seq_printf(seq, " R13\t: %016lx\n", regs->r13);
788 - seq_printf(seq, " R14\t: %016lx\n", regs->r14);
789 - seq_printf(seq, " R15\t: %016lx\n", regs->r15);
790 -#endif
791 -
792 - asm("movl %%cs,%0" : "=r" (seg));
793 - seq_printf(seq, " CS\t: %04x\n", seg);
794 - asm("movl %%ds,%0" : "=r" (seg));
795 - seq_printf(seq, " DS\t: %04x\n", seg);
796 - seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
797 - asm("movl %%es,%0" : "=r" (seg));
798 - seq_printf(seq, " ES\t: %04x\n", seg);
799 - asm("movl %%fs,%0" : "=r" (seg));
800 - seq_printf(seq, " FS\t: %04x\n", seg);
801 - asm("movl %%gs,%0" : "=r" (seg));
802 - seq_printf(seq, " GS\t: %04x\n", seg);
803 -
804 - seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
805 -
806 - seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
807 -}
808 -
809 -static void print_cr(void *arg)
810 -{
811 - struct seq_file *seq = arg;
812 -
813 - seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
814 - seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
815 - seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
816 - seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
817 -#ifdef CONFIG_X86_64
818 - seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
819 -#endif
820 -}
821 -
822 -static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
823 -{
824 - seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
825 -}
826 -
827 -static void print_dt(void *seq)
828 -{
829 - struct desc_ptr dt;
830 - unsigned long ldt;
831 -
832 - /* IDT */
833 - store_idt((struct desc_ptr *)&dt);
834 - print_desc_ptr("IDT", seq, dt);
835 -
836 - /* GDT */
837 - store_gdt((struct desc_ptr *)&dt);
838 - print_desc_ptr("GDT", seq, dt);
839 -
840 - /* LDT */
841 - store_ldt(ldt);
842 - seq_printf(seq, " LDT\t: %016lx\n", ldt);
843 -
844 - /* TR */
845 - store_tr(ldt);
846 - seq_printf(seq, " TR\t: %016lx\n", ldt);
847 -}
848 -
849 -static void print_dr(void *arg)
850 -{
851 - struct seq_file *seq = arg;
852 - unsigned long dr;
853 - int i;
854 -
855 - for (i = 0; i < 8; i++) {
856 - /* Ignore db4, db5 */
857 - if ((i == 4) || (i == 5))
858 - continue;
859 - get_debugreg(dr, i);
860 - seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
861 - }
862 -
863 - seq_printf(seq, "\n MSR\t:\n");
864 -}
865 -
866 -static void print_apic(void *arg)
867 -{
868 - struct seq_file *seq = arg;
869 -
870 -#ifdef CONFIG_X86_LOCAL_APIC
871 - seq_printf(seq, " LAPIC\t:\n");
872 - seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
873 - seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
874 - seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
875 - seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
876 - seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
877 - seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
878 - seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
879 - seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
880 - seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
881 - seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
882 - seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
883 - seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
884 - seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
885 - seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
886 - seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
887 - seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
888 - seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
889 - seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
890 - seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
891 - seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
892 - seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
893 - if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
894 - unsigned int i, v, maxeilvt;
895 -
896 - v = apic_read(APIC_EFEAT);
897 - maxeilvt = (v >> 16) & 0xff;
898 - seq_printf(seq, " EFEAT\t\t: %08x\n", v);
899 - seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
900 -
901 - for (i = 0; i < maxeilvt; i++) {
902 - v = apic_read(APIC_EILVTn(i));
903 - seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
904 - }
905 - }
906 -#endif /* CONFIG_X86_LOCAL_APIC */
907 - seq_printf(seq, "\n MSR\t:\n");
908 -}
909 -
910 -static int cpu_seq_show(struct seq_file *seq, void *v)
911 -{
912 - struct cpu_private *priv = seq->private;
913 -
914 - if (priv == NULL)
915 - return -EINVAL;
916 -
917 - switch (cpu_base[priv->type].flag) {
918 - case CPU_TSS:
919 - smp_call_function_single(priv->cpu, print_tss, seq, 1);
920 - break;
921 - case CPU_CR:
922 - smp_call_function_single(priv->cpu, print_cr, seq, 1);
923 - break;
924 - case CPU_DT:
925 - smp_call_function_single(priv->cpu, print_dt, seq, 1);
926 - break;
927 - case CPU_DEBUG:
928 - if (priv->file == CPU_INDEX_BIT)
929 - smp_call_function_single(priv->cpu, print_dr, seq, 1);
930 - print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
931 - break;
932 - case CPU_APIC:
933 - if (priv->file == CPU_INDEX_BIT)
934 - smp_call_function_single(priv->cpu, print_apic, seq, 1);
935 - print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
936 - break;
937 -
938 - default:
939 - print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
940 - break;
941 - }
942 - seq_printf(seq, "\n");
943 -
944 - return 0;
945 -}
946 -
947 -static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
948 -{
949 - if (*pos == 0) /* One time is enough ;-) */
950 - return seq;
951 -
952 - return NULL;
953 -}
954 -
955 -static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
956 -{
957 - (*pos)++;
958 -
959 - return cpu_seq_start(seq, pos);
960 -}
961 -
962 -static void cpu_seq_stop(struct seq_file *seq, void *v)
963 -{
964 -}
965 -
966 -static const struct seq_operations cpu_seq_ops = {
967 - .start = cpu_seq_start,
968 - .next = cpu_seq_next,
969 - .stop = cpu_seq_stop,
970 - .show = cpu_seq_show,
971 -};
972 -
973 -static int cpu_seq_open(struct inode *inode, struct file *file)
974 -{
975 - struct cpu_private *priv = inode->i_private;
976 - struct seq_file *seq;
977 - int err;
978 -
979 - err = seq_open(file, &cpu_seq_ops);
980 - if (!err) {
981 - seq = file->private_data;
982 - seq->private = priv;
983 - }
984 -
985 - return err;
986 -}
987 -
988 -static int write_msr(struct cpu_private *priv, u64 val)
989 -{
990 - u32 low, high;
991 -
992 - high = (val >> 32) & 0xffffffff;
993 - low = val & 0xffffffff;
994 -
995 - if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
996 - return 0;
997 -
998 - return -EPERM;
999 -}
1000 -
1001 -static int write_cpu_register(struct cpu_private *priv, const char *buf)
1002 -{
1003 - int ret = -EPERM;
1004 - u64 val;
1005 -
1006 - ret = strict_strtoull(buf, 0, &val);
1007 - if (ret < 0)
1008 - return ret;
1009 -
1010 - /* Supporting only MSRs */
1011 - if (priv->type < CPU_TSS_BIT)
1012 - return write_msr(priv, val);
1013 -
1014 - return ret;
1015 -}
1016 -
1017 -static ssize_t cpu_write(struct file *file, const char __user *ubuf,
1018 - size_t count, loff_t *off)
1019 -{
1020 - struct seq_file *seq = file->private_data;
1021 - struct cpu_private *priv = seq->private;
1022 - char buf[19];
1023 -
1024 - if ((priv == NULL) || (count >= sizeof(buf)))
1025 - return -EINVAL;
1026 -
1027 - if (copy_from_user(&buf, ubuf, count))
1028 - return -EFAULT;
1029 -
1030 - buf[count] = 0;
1031 -
1032 - if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
1033 - if (!write_cpu_register(priv, buf))
1034 - return count;
1035 -
1036 - return -EACCES;
1037 -}
1038 -
1039 -static const struct file_operations cpu_fops = {
1040 - .owner = THIS_MODULE,
1041 - .open = cpu_seq_open,
1042 - .read = seq_read,
1043 - .write = cpu_write,
1044 - .llseek = seq_lseek,
1045 - .release = seq_release,
1046 -};
1047 -
1048 -static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
1049 - unsigned file, struct dentry *dentry)
1050 -{
1051 - struct cpu_private *priv = NULL;
1052 -
1053 - /* Already intialized */
1054 - if (file == CPU_INDEX_BIT)
1055 - if (per_cpu(cpu_arr[type].init, cpu))
1056 - return 0;
1057 -
1058 - priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1059 - if (priv == NULL)
1060 - return -ENOMEM;
1061 -
1062 - priv->cpu = cpu;
1063 - priv->type = type;
1064 - priv->reg = reg;
1065 - priv->file = file;
1066 - mutex_lock(&cpu_debug_lock);
1067 - per_cpu(priv_arr[type], cpu) = priv;
1068 - per_cpu(cpu_priv_count, cpu)++;
1069 - mutex_unlock(&cpu_debug_lock);
1070 -
1071 - if (file)
1072 - debugfs_create_file(cpu_file[file].name, S_IRUGO,
1073 - dentry, (void *)priv, &cpu_fops);
1074 - else {
1075 - debugfs_create_file(cpu_base[type].name, S_IRUGO,
1076 - per_cpu(cpu_arr[type].dentry, cpu),
1077 - (void *)priv, &cpu_fops);
1078 - mutex_lock(&cpu_debug_lock);
1079 - per_cpu(cpu_arr[type].init, cpu) = 1;
1080 - mutex_unlock(&cpu_debug_lock);
1081 - }
1082 -
1083 - return 0;
1084 -}
1085 -
1086 -static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
1087 - struct dentry *dentry)
1088 -{
1089 - unsigned file;
1090 - int err = 0;
1091 -
1092 - for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
1093 - err = cpu_create_file(cpu, type, reg, file, dentry);
1094 - if (err)
1095 - return err;
1096 - }
1097 -
1098 - return err;
1099 -}
1100 -
1101 -static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
1102 -{
1103 - struct dentry *cpu_dentry = NULL;
1104 - unsigned reg, reg_min, reg_max;
1105 - int i, err = 0;
1106 - char reg_dir[12];
1107 - u32 low, high;
1108 -
1109 - for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
1110 - if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
1111 - cpu_base[type].flag))
1112 - continue;
1113 -
1114 - for (reg = reg_min; reg <= reg_max; reg++) {
1115 - if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
1116 - continue;
1117 -
1118 - sprintf(reg_dir, "0x%x", reg);
1119 - cpu_dentry = debugfs_create_dir(reg_dir, dentry);
1120 - err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
1121 - if (err)
1122 - return err;
1123 - }
1124 - }
1125 -
1126 - return err;
1127 -}
1128 -
1129 -static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
1130 -{
1131 - struct dentry *cpu_dentry = NULL;
1132 - unsigned type;
1133 - int err = 0;
1134 -
1135 - for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
1136 - if (!is_typeflag_valid(cpu, cpu_base[type].flag))
1137 - continue;
1138 - cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
1139 - per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
1140 -
1141 - if (type < CPU_TSS_BIT)
1142 - err = cpu_init_msr(cpu, type, cpu_dentry);
1143 - else
1144 - err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
1145 - cpu_dentry);
1146 - if (err)
1147 - return err;
1148 - }
1149 -
1150 - return err;
1151 -}
1152 -
1153 -static int cpu_init_cpu(void)
1154 -{
1155 - struct dentry *cpu_dentry = NULL;
1156 - struct cpuinfo_x86 *cpui;
1157 - char cpu_dir[12];
1158 - unsigned cpu;
1159 - int err = 0;
1160 -
1161 - for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1162 - cpui = &cpu_data(cpu);
1163 - if (!cpu_has(cpui, X86_FEATURE_MSR))
1164 - continue;
1165 -
1166 - sprintf(cpu_dir, "cpu%d", cpu);
1167 - cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
1168 - err = cpu_init_allreg(cpu, cpu_dentry);
1169 -
1170 - pr_info("cpu%d(%d) debug files %d\n",
1171 - cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
1172 - if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
1173 - pr_err("Register files count %d exceeds limit %d\n",
1174 - per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
1175 - per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
1176 - err = -ENFILE;
1177 - }
1178 - if (err)
1179 - return err;
1180 - }
1181 -
1182 - return err;
1183 -}
1184 -
1185 -static int __init cpu_debug_init(void)
1186 -{
1187 - cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
1188 -
1189 - return cpu_init_cpu();
1190 -}
1191 -
1192 -static void __exit cpu_debug_exit(void)
1193 -{
1194 - int i, cpu;
1195 -
1196 - if (cpu_debugfs_dir)
1197 - debugfs_remove_recursive(cpu_debugfs_dir);
1198 -
1199 - for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1200 - for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
1201 - kfree(per_cpu(priv_arr[i], cpu));
1202 -}
1203 -
1204 -module_init(cpu_debug_init);
1205 -module_exit(cpu_debug_exit);
1206 -
1207 -MODULE_AUTHOR("Jaswinder Singh Rajput");
1208 -MODULE_DESCRIPTION("CPU Debug module");
1209 -MODULE_LICENSE("GPL");
1210 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
1211 index dedc2bd..5877873 100644
1212 --- a/arch/x86/kernel/hpet.c
1213 +++ b/arch/x86/kernel/hpet.c
1214 @@ -33,6 +33,8 @@
1215 * HPET address is set in acpi/boot.c, when an ACPI entry exists
1216 */
1217 unsigned long hpet_address;
1218 +u8 hpet_msi_disable;
1219 +
1220 #ifdef CONFIG_PCI_MSI
1221 static unsigned long hpet_num_timers;
1222 #endif
1223 @@ -584,6 +586,9 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
1224 unsigned int num_timers_used = 0;
1225 int i;
1226
1227 + if (hpet_msi_disable)
1228 + return;
1229 +
1230 id = hpet_readl(HPET_ID);
1231
1232 num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
1233 @@ -911,6 +916,9 @@ static __init int hpet_late_init(void)
1234 hpet_reserve_platform_timers(hpet_readl(HPET_ID));
1235 hpet_print_config();
1236
1237 + if (hpet_msi_disable)
1238 + return 0;
1239 +
1240 for_each_online_cpu(cpu) {
1241 hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
1242 }
1243 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
1244 index 5284cd2..f010ab4 100644
1245 --- a/arch/x86/kernel/process.c
1246 +++ b/arch/x86/kernel/process.c
1247 @@ -91,18 +91,6 @@ void flush_thread(void)
1248 {
1249 struct task_struct *tsk = current;
1250
1251 -#ifdef CONFIG_X86_64
1252 - if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
1253 - clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
1254 - if (test_tsk_thread_flag(tsk, TIF_IA32)) {
1255 - clear_tsk_thread_flag(tsk, TIF_IA32);
1256 - } else {
1257 - set_tsk_thread_flag(tsk, TIF_IA32);
1258 - current_thread_info()->status |= TS_COMPAT;
1259 - }
1260 - }
1261 -#endif
1262 -
1263 clear_tsk_thread_flag(tsk, TIF_DEBUG);
1264
1265 tsk->thread.debugreg0 = 0;
1266 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
1267 index eb62cbc..f9ce04f 100644
1268 --- a/arch/x86/kernel/process_64.c
1269 +++ b/arch/x86/kernel/process_64.c
1270 @@ -540,6 +540,17 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
1271 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
1272 }
1273
1274 +void set_personality_ia32(void)
1275 +{
1276 + /* inherit personality from parent */
1277 +
1278 + /* Make sure to be in 32bit mode */
1279 + set_thread_flag(TIF_IA32);
1280 +
1281 + /* Prepare the first "return" to user space */
1282 + current_thread_info()->status |= TS_COMPAT;
1283 +}
1284 +
1285 unsigned long get_wchan(struct task_struct *p)
1286 {
1287 unsigned long stack;
1288 diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
1289 index 6c3b2c6..0040164 100644
1290 --- a/arch/x86/kernel/quirks.c
1291 +++ b/arch/x86/kernel/quirks.c
1292 @@ -491,6 +491,19 @@ void force_hpet_resume(void)
1293 break;
1294 }
1295 }
1296 +
1297 +/*
1298 + * HPET MSI on some boards (ATI SB700/SB800) has side effect on
1299 + * floppy DMA. Disable HPET MSI on such platforms.
1300 + */
1301 +static void force_disable_hpet_msi(struct pci_dev *unused)
1302 +{
1303 + hpet_msi_disable = 1;
1304 +}
1305 +
1306 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
1307 + force_disable_hpet_msi);
1308 +
1309 #endif
1310
1311 #if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
1312 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
1313 index 6caf260..bff34d6 100644
1314 --- a/arch/x86/kernel/reboot.c
1315 +++ b/arch/x86/kernel/reboot.c
1316 @@ -203,6 +203,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
1317 DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
1318 },
1319 },
1320 + { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G*/
1321 + .callback = set_bios_reboot,
1322 + .ident = "Dell OptiPlex 760",
1323 + .matches = {
1324 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1325 + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"),
1326 + DMI_MATCH(DMI_BOARD_NAME, "0G919G"),
1327 + },
1328 + },
1329 { /* Handle problems with rebooting on Dell 2400's */
1330 .callback = set_bios_reboot,
1331 .ident = "Dell PowerEdge 2400",
1332 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
1333 index 51aa5b2..8425f7e 100644
1334 --- a/arch/x86/kernel/setup.c
1335 +++ b/arch/x86/kernel/setup.c
1336 @@ -667,19 +667,27 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
1337 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"),
1338 },
1339 },
1340 - {
1341 /*
1342 - * AMI BIOS with low memory corruption was found on Intel DG45ID board.
1343 - * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
1344 + * AMI BIOS with low memory corruption was found on Intel DG45ID and
1345 + * DG45FC boards.
1346 + * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
1347 * match only DMI_BOARD_NAME and see if there is more bad products
1348 * with this vendor.
1349 */
1350 + {
1351 .callback = dmi_low_memory_corruption,
1352 .ident = "AMI BIOS",
1353 .matches = {
1354 DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
1355 },
1356 },
1357 + {
1358 + .callback = dmi_low_memory_corruption,
1359 + .ident = "AMI BIOS",
1360 + .matches = {
1361 + DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
1362 + },
1363 + },
1364 #endif
1365 {}
1366 };
1367 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1368 index 6378e07..145741c 100644
1369 --- a/arch/x86/kvm/x86.c
1370 +++ b/arch/x86/kvm/x86.c
1371 @@ -680,7 +680,8 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
1372 /* With all the info we got, fill in the values */
1373
1374 vcpu->hv_clock.system_time = ts.tv_nsec +
1375 - (NSEC_PER_SEC * (u64)ts.tv_sec);
1376 + (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
1377 +
1378 /*
1379 * The interface expects us to write an even number signaling that the
1380 * update is finished. Since the guest won't see the intermediate
1381 @@ -1227,6 +1228,7 @@ int kvm_dev_ioctl_check_extension(long ext)
1382 case KVM_CAP_PIT2:
1383 case KVM_CAP_PIT_STATE2:
1384 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1385 + case KVM_CAP_ADJUST_CLOCK:
1386 r = 1;
1387 break;
1388 case KVM_CAP_COALESCED_MMIO:
1389 @@ -2424,6 +2426,44 @@ long kvm_arch_vm_ioctl(struct file *filp,
1390 r = 0;
1391 break;
1392 }
1393 + case KVM_SET_CLOCK: {
1394 + struct timespec now;
1395 + struct kvm_clock_data user_ns;
1396 + u64 now_ns;
1397 + s64 delta;
1398 +
1399 + r = -EFAULT;
1400 + if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
1401 + goto out;
1402 +
1403 + r = -EINVAL;
1404 + if (user_ns.flags)
1405 + goto out;
1406 +
1407 + r = 0;
1408 + ktime_get_ts(&now);
1409 + now_ns = timespec_to_ns(&now);
1410 + delta = user_ns.clock - now_ns;
1411 + kvm->arch.kvmclock_offset = delta;
1412 + break;
1413 + }
1414 + case KVM_GET_CLOCK: {
1415 + struct timespec now;
1416 + struct kvm_clock_data user_ns;
1417 + u64 now_ns;
1418 +
1419 + ktime_get_ts(&now);
1420 + now_ns = timespec_to_ns(&now);
1421 + user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
1422 + user_ns.flags = 0;
1423 +
1424 + r = -EFAULT;
1425 + if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
1426 + goto out;
1427 + r = 0;
1428 + break;
1429 + }
1430 +
1431 default:
1432 ;
1433 }
1434 diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
1435 index dbb5381..3871c60 100644
1436 --- a/arch/x86/mm/srat_64.c
1437 +++ b/arch/x86/mm/srat_64.c
1438 @@ -229,9 +229,11 @@ update_nodes_add(int node, unsigned long start, unsigned long end)
1439 printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
1440 }
1441
1442 - if (changed)
1443 + if (changed) {
1444 + node_set(node, cpu_nodes_parsed);
1445 printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
1446 nd->start, nd->end);
1447 + }
1448 }
1449
1450 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
1451 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
1452 index cb88b1a..3347f69 100644
1453 --- a/arch/x86/oprofile/nmi_int.c
1454 +++ b/arch/x86/oprofile/nmi_int.c
1455 @@ -222,7 +222,7 @@ static void nmi_cpu_switch(void *dummy)
1456
1457 /* move to next set */
1458 si += model->num_counters;
1459 - if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
1460 + if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
1461 per_cpu(switch_index, cpu) = 0;
1462 else
1463 per_cpu(switch_index, cpu) = si;
1464 @@ -598,6 +598,7 @@ static int __init ppro_init(char **cpu_type)
1465 case 15: case 23:
1466 *cpu_type = "i386/core_2";
1467 break;
1468 + case 0x2e:
1469 case 26:
1470 spec = &op_arch_perfmon_spec;
1471 *cpu_type = "i386/core_i7";
1472 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
1473 index 7411915..49f6ede 100644
1474 --- a/drivers/acpi/bus.c
1475 +++ b/drivers/acpi/bus.c
1476 @@ -344,6 +344,167 @@ bool acpi_bus_can_wakeup(acpi_handle handle)
1477
1478 EXPORT_SYMBOL(acpi_bus_can_wakeup);
1479
1480 +static void acpi_print_osc_error(acpi_handle handle,
1481 + struct acpi_osc_context *context, char *error)
1482 +{
1483 + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
1484 + int i;
1485 +
1486 + if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
1487 + printk(KERN_DEBUG "%s\n", error);
1488 + else {
1489 + printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error);
1490 + kfree(buffer.pointer);
1491 + }
1492 + printk(KERN_DEBUG"_OSC request data:");
1493 + for (i = 0; i < context->cap.length; i += sizeof(u32))
1494 + printk("%x ", *((u32 *)(context->cap.pointer + i)));
1495 + printk("\n");
1496 +}
1497 +
1498 +static u8 hex_val(unsigned char c)
1499 +{
1500 + return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
1501 +}
1502 +
1503 +static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
1504 +{
1505 + int i;
1506 + static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
1507 + 24, 26, 28, 30, 32, 34};
1508 +
1509 + if (strlen(str) != 36)
1510 + return AE_BAD_PARAMETER;
1511 + for (i = 0; i < 36; i++) {
1512 + if (i == 8 || i == 13 || i == 18 || i == 23) {
1513 + if (str[i] != '-')
1514 + return AE_BAD_PARAMETER;
1515 + } else if (!isxdigit(str[i]))
1516 + return AE_BAD_PARAMETER;
1517 + }
1518 + for (i = 0; i < 16; i++) {
1519 + uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
1520 + uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
1521 + }
1522 + return AE_OK;
1523 +}
1524 +
1525 +acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
1526 +{
1527 + acpi_status status;
1528 + struct acpi_object_list input;
1529 + union acpi_object in_params[4];
1530 + union acpi_object *out_obj;
1531 + u8 uuid[16];
1532 + u32 errors;
1533 + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
1534 +
1535 + if (!context)
1536 + return AE_ERROR;
1537 + if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
1538 + return AE_ERROR;
1539 + context->ret.length = ACPI_ALLOCATE_BUFFER;
1540 + context->ret.pointer = NULL;
1541 +
1542 + /* Setting up input parameters */
1543 + input.count = 4;
1544 + input.pointer = in_params;
1545 + in_params[0].type = ACPI_TYPE_BUFFER;
1546 + in_params[0].buffer.length = 16;
1547 + in_params[0].buffer.pointer = uuid;
1548 + in_params[1].type = ACPI_TYPE_INTEGER;
1549 + in_params[1].integer.value = context->rev;
1550 + in_params[2].type = ACPI_TYPE_INTEGER;
1551 + in_params[2].integer.value = context->cap.length/sizeof(u32);
1552 + in_params[3].type = ACPI_TYPE_BUFFER;
1553 + in_params[3].buffer.length = context->cap.length;
1554 + in_params[3].buffer.pointer = context->cap.pointer;
1555 +
1556 + status = acpi_evaluate_object(handle, "_OSC", &input, &output);
1557 + if (ACPI_FAILURE(status))
1558 + return status;
1559 +
1560 + if (!output.length)
1561 + return AE_NULL_OBJECT;
1562 +
1563 + out_obj = output.pointer;
1564 + if (out_obj->type != ACPI_TYPE_BUFFER
1565 + || out_obj->buffer.length != context->cap.length) {
1566 + acpi_print_osc_error(handle, context,
1567 + "_OSC evaluation returned wrong type");
1568 + status = AE_TYPE;
1569 + goto out_kfree;
1570 + }
1571 + /* Need to ignore the bit0 in result code */
1572 + errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
1573 + if (errors) {
1574 + if (errors & OSC_REQUEST_ERROR)
1575 + acpi_print_osc_error(handle, context,
1576 + "_OSC request failed");
1577 + if (errors & OSC_INVALID_UUID_ERROR)
1578 + acpi_print_osc_error(handle, context,
1579 + "_OSC invalid UUID");
1580 + if (errors & OSC_INVALID_REVISION_ERROR)
1581 + acpi_print_osc_error(handle, context,
1582 + "_OSC invalid revision");
1583 + if (errors & OSC_CAPABILITIES_MASK_ERROR) {
1584 + if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
1585 + & OSC_QUERY_ENABLE)
1586 + goto out_success;
1587 + status = AE_SUPPORT;
1588 + goto out_kfree;
1589 + }
1590 + status = AE_ERROR;
1591 + goto out_kfree;
1592 + }
1593 +out_success:
1594 + context->ret.length = out_obj->buffer.length;
1595 + context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
1596 + if (!context->ret.pointer) {
1597 + status = AE_NO_MEMORY;
1598 + goto out_kfree;
1599 + }
1600 + memcpy(context->ret.pointer, out_obj->buffer.pointer,
1601 + context->ret.length);
1602 + status = AE_OK;
1603 +
1604 +out_kfree:
1605 + kfree(output.pointer);
1606 + if (status != AE_OK)
1607 + context->ret.pointer = NULL;
1608 + return status;
1609 +}
1610 +EXPORT_SYMBOL(acpi_run_osc);
1611 +
1612 +static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
1613 +static void acpi_bus_osc_support(void)
1614 +{
1615 + u32 capbuf[2];
1616 + struct acpi_osc_context context = {
1617 + .uuid_str = sb_uuid_str,
1618 + .rev = 1,
1619 + .cap.length = 8,
1620 + .cap.pointer = capbuf,
1621 + };
1622 + acpi_handle handle;
1623 +
1624 + capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
1625 + capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
1626 +#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
1627 + defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
1628 + capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
1629 +#endif
1630 +
1631 +#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
1632 + capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
1633 +#endif
1634 + if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
1635 + return;
1636 + if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
1637 + kfree(context.ret.pointer);
1638 + /* do we need to check the returned cap? Sounds no */
1639 +}
1640 +
1641 /* --------------------------------------------------------------------------
1642 Event Management
1643 -------------------------------------------------------------------------- */
1644 @@ -734,6 +895,8 @@ static int __init acpi_bus_init(void)
1645 status = acpi_ec_ecdt_probe();
1646 /* Ignore result. Not having an ECDT is not fatal. */
1647
1648 + acpi_bus_osc_support();
1649 +
1650 status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
1651 if (ACPI_FAILURE(status)) {
1652 printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
1653 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1654 index dc72690..91fed3c 100644
1655 --- a/drivers/ata/libata-core.c
1656 +++ b/drivers/ata/libata-core.c
1657 @@ -3790,21 +3790,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
1658 int sata_link_resume(struct ata_link *link, const unsigned long *params,
1659 unsigned long deadline)
1660 {
1661 + int tries = ATA_LINK_RESUME_TRIES;
1662 u32 scontrol, serror;
1663 int rc;
1664
1665 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
1666 return rc;
1667
1668 - scontrol = (scontrol & 0x0f0) | 0x300;
1669 + /*
1670 + * Writes to SControl sometimes get ignored under certain
1671 + * controllers (ata_piix SIDPR). Make sure DET actually is
1672 + * cleared.
1673 + */
1674 + do {
1675 + scontrol = (scontrol & 0x0f0) | 0x300;
1676 + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
1677 + return rc;
1678 + /*
1679 + * Some PHYs react badly if SStatus is pounded
1680 + * immediately after resuming. Delay 200ms before
1681 + * debouncing.
1682 + */
1683 + msleep(200);
1684
1685 - if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
1686 - return rc;
1687 + /* is SControl restored correctly? */
1688 + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
1689 + return rc;
1690 + } while ((scontrol & 0xf0f) != 0x300 && --tries);
1691
1692 - /* Some PHYs react badly if SStatus is pounded immediately
1693 - * after resuming. Delay 200ms before debouncing.
1694 - */
1695 - msleep(200);
1696 + if ((scontrol & 0xf0f) != 0x300) {
1697 + ata_link_printk(link, KERN_ERR,
1698 + "failed to resume link (SControl %X)\n",
1699 + scontrol);
1700 + return 0;
1701 + }
1702 +
1703 + if (tries < ATA_LINK_RESUME_TRIES)
1704 + ata_link_printk(link, KERN_WARNING,
1705 + "link resume succeeded after %d retries\n",
1706 + ATA_LINK_RESUME_TRIES - tries);
1707
1708 if ((rc = sata_link_debounce(link, params, deadline)))
1709 return rc;
1710 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1711 index bba2ae5..7d8d3c3 100644
1712 --- a/drivers/ata/libata-eh.c
1713 +++ b/drivers/ata/libata-eh.c
1714 @@ -2019,8 +2019,9 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1715 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1716
1717 /* determine whether the command is worth retrying */
1718 - if (!(qc->err_mask & AC_ERR_INVALID) &&
1719 - ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
1720 + if (qc->flags & ATA_QCFLAG_IO ||
1721 + (!(qc->err_mask & AC_ERR_INVALID) &&
1722 + qc->err_mask != AC_ERR_DEV))
1723 qc->flags |= ATA_QCFLAG_RETRY;
1724
1725 /* accumulate error info */
1726 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
1727 index 2ddf03a..68b5957 100644
1728 --- a/drivers/block/pktcdvd.c
1729 +++ b/drivers/block/pktcdvd.c
1730 @@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
1731 pkt_kobj_remove(pd->kobj_stat);
1732 pkt_kobj_remove(pd->kobj_wqueue);
1733 if (class_pktcdvd)
1734 - device_destroy(class_pktcdvd, pd->pkt_dev);
1735 + device_unregister(pd->dev);
1736 }
1737
1738
1739 diff --git a/drivers/char/random.c b/drivers/char/random.c
1740 index 04b505e..908ac1f 100644
1741 --- a/drivers/char/random.c
1742 +++ b/drivers/char/random.c
1743 @@ -1051,12 +1051,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1744 /* like a named pipe */
1745 }
1746
1747 - /*
1748 - * If we gave the user some bytes, update the access time.
1749 - */
1750 - if (count)
1751 - file_accessed(file);
1752 -
1753 return (count ? count : retval);
1754 }
1755
1756 @@ -1107,7 +1101,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
1757 size_t count, loff_t *ppos)
1758 {
1759 size_t ret;
1760 - struct inode *inode = file->f_path.dentry->d_inode;
1761
1762 ret = write_pool(&blocking_pool, buffer, count);
1763 if (ret)
1764 @@ -1116,8 +1109,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
1765 if (ret)
1766 return ret;
1767
1768 - inode->i_mtime = current_fs_time(inode->i_sb);
1769 - mark_inode_dirty(inode);
1770 return (ssize_t)count;
1771 }
1772
1773 diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
1774 index f060246..537c29a 100644
1775 --- a/drivers/connector/connector.c
1776 +++ b/drivers/connector/connector.c
1777 @@ -36,17 +36,6 @@ MODULE_LICENSE("GPL");
1778 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
1779 MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
1780
1781 -static u32 cn_idx = CN_IDX_CONNECTOR;
1782 -static u32 cn_val = CN_VAL_CONNECTOR;
1783 -
1784 -module_param(cn_idx, uint, 0);
1785 -module_param(cn_val, uint, 0);
1786 -MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
1787 -MODULE_PARM_DESC(cn_val, "Connector's main device val.");
1788 -
1789 -static DEFINE_MUTEX(notify_lock);
1790 -static LIST_HEAD(notify_list);
1791 -
1792 static struct cn_dev cdev;
1793
1794 static int cn_already_initialized;
1795 @@ -210,54 +199,6 @@ static void cn_rx_skb(struct sk_buff *__skb)
1796 }
1797
1798 /*
1799 - * Notification routing.
1800 - *
1801 - * Gets id and checks if there are notification request for it's idx
1802 - * and val. If there are such requests notify the listeners with the
1803 - * given notify event.
1804 - *
1805 - */
1806 -static void cn_notify(struct cb_id *id, u32 notify_event)
1807 -{
1808 - struct cn_ctl_entry *ent;
1809 -
1810 - mutex_lock(&notify_lock);
1811 - list_for_each_entry(ent, &notify_list, notify_entry) {
1812 - int i;
1813 - struct cn_notify_req *req;
1814 - struct cn_ctl_msg *ctl = ent->msg;
1815 - int idx_found, val_found;
1816 -
1817 - idx_found = val_found = 0;
1818 -
1819 - req = (struct cn_notify_req *)ctl->data;
1820 - for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
1821 - if (id->idx >= req->first &&
1822 - id->idx < req->first + req->range) {
1823 - idx_found = 1;
1824 - break;
1825 - }
1826 - }
1827 -
1828 - for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
1829 - if (id->val >= req->first &&
1830 - id->val < req->first + req->range) {
1831 - val_found = 1;
1832 - break;
1833 - }
1834 - }
1835 -
1836 - if (idx_found && val_found) {
1837 - struct cn_msg m = { .ack = notify_event, };
1838 -
1839 - memcpy(&m.id, id, sizeof(m.id));
1840 - cn_netlink_send(&m, ctl->group, GFP_KERNEL);
1841 - }
1842 - }
1843 - mutex_unlock(&notify_lock);
1844 -}
1845 -
1846 -/*
1847 * Callback add routing - adds callback with given ID and name.
1848 * If there is registered callback with the same ID it will not be added.
1849 *
1850 @@ -276,8 +217,6 @@ int cn_add_callback(struct cb_id *id, char *name,
1851 if (err)
1852 return err;
1853
1854 - cn_notify(id, 0);
1855 -
1856 return 0;
1857 }
1858 EXPORT_SYMBOL_GPL(cn_add_callback);
1859 @@ -295,111 +234,9 @@ void cn_del_callback(struct cb_id *id)
1860 struct cn_dev *dev = &cdev;
1861
1862 cn_queue_del_callback(dev->cbdev, id);
1863 - cn_notify(id, 1);
1864 }
1865 EXPORT_SYMBOL_GPL(cn_del_callback);
1866
1867 -/*
1868 - * Checks two connector's control messages to be the same.
1869 - * Returns 1 if they are the same or if the first one is corrupted.
1870 - */
1871 -static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
1872 -{
1873 - int i;
1874 - struct cn_notify_req *req1, *req2;
1875 -
1876 - if (m1->idx_notify_num != m2->idx_notify_num)
1877 - return 0;
1878 -
1879 - if (m1->val_notify_num != m2->val_notify_num)
1880 - return 0;
1881 -
1882 - if (m1->len != m2->len)
1883 - return 0;
1884 -
1885 - if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
1886 - m1->len)
1887 - return 1;
1888 -
1889 - req1 = (struct cn_notify_req *)m1->data;
1890 - req2 = (struct cn_notify_req *)m2->data;
1891 -
1892 - for (i = 0; i < m1->idx_notify_num; ++i) {
1893 - if (req1->first != req2->first || req1->range != req2->range)
1894 - return 0;
1895 - req1++;
1896 - req2++;
1897 - }
1898 -
1899 - for (i = 0; i < m1->val_notify_num; ++i) {
1900 - if (req1->first != req2->first || req1->range != req2->range)
1901 - return 0;
1902 - req1++;
1903 - req2++;
1904 - }
1905 -
1906 - return 1;
1907 -}
1908 -
1909 -/*
1910 - * Main connector device's callback.
1911 - *
1912 - * Used for notification of a request's processing.
1913 - */
1914 -static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
1915 -{
1916 - struct cn_ctl_msg *ctl;
1917 - struct cn_ctl_entry *ent;
1918 - u32 size;
1919 -
1920 - if (msg->len < sizeof(*ctl))
1921 - return;
1922 -
1923 - ctl = (struct cn_ctl_msg *)msg->data;
1924 -
1925 - size = (sizeof(*ctl) + ((ctl->idx_notify_num +
1926 - ctl->val_notify_num) *
1927 - sizeof(struct cn_notify_req)));
1928 -
1929 - if (msg->len != size)
1930 - return;
1931 -
1932 - if (ctl->len + sizeof(*ctl) != msg->len)
1933 - return;
1934 -
1935 - /*
1936 - * Remove notification.
1937 - */
1938 - if (ctl->group == 0) {
1939 - struct cn_ctl_entry *n;
1940 -
1941 - mutex_lock(&notify_lock);
1942 - list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
1943 - if (cn_ctl_msg_equals(ent->msg, ctl)) {
1944 - list_del(&ent->notify_entry);
1945 - kfree(ent);
1946 - }
1947 - }
1948 - mutex_unlock(&notify_lock);
1949 -
1950 - return;
1951 - }
1952 -
1953 - size += sizeof(*ent);
1954 -
1955 - ent = kzalloc(size, GFP_KERNEL);
1956 - if (!ent)
1957 - return;
1958 -
1959 - ent->msg = (struct cn_ctl_msg *)(ent + 1);
1960 -
1961 - memcpy(ent->msg, ctl, size - sizeof(*ent));
1962 -
1963 - mutex_lock(&notify_lock);
1964 - list_add(&ent->notify_entry, &notify_list);
1965 - mutex_unlock(&notify_lock);
1966 -}
1967 -
1968 static int cn_proc_show(struct seq_file *m, void *v)
1969 {
1970 struct cn_queue_dev *dev = cdev.cbdev;
1971 @@ -437,11 +274,8 @@ static const struct file_operations cn_file_ops = {
1972 static int __devinit cn_init(void)
1973 {
1974 struct cn_dev *dev = &cdev;
1975 - int err;
1976
1977 dev->input = cn_rx_skb;
1978 - dev->id.idx = cn_idx;
1979 - dev->id.val = cn_val;
1980
1981 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
1982 CN_NETLINK_USERS + 0xf,
1983 @@ -457,14 +291,6 @@ static int __devinit cn_init(void)
1984
1985 cn_already_initialized = 1;
1986
1987 - err = cn_add_callback(&dev->id, "connector", &cn_callback);
1988 - if (err) {
1989 - cn_already_initialized = 0;
1990 - cn_queue_free_dev(dev->cbdev);
1991 - netlink_kernel_release(dev->nls);
1992 - return -EINVAL;
1993 - }
1994 -
1995 proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
1996
1997 return 0;
1998 @@ -478,7 +304,6 @@ static void __devexit cn_fini(void)
1999
2000 proc_net_remove(&init_net, "connector");
2001
2002 - cn_del_callback(&dev->id);
2003 cn_queue_free_dev(dev->cbdev);
2004 netlink_kernel_release(dev->nls);
2005 }
2006 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
2007 index e4864e8..ed635ae 100644
2008 --- a/drivers/firewire/core-card.c
2009 +++ b/drivers/firewire/core-card.c
2010 @@ -57,6 +57,9 @@ static LIST_HEAD(card_list);
2011 static LIST_HEAD(descriptor_list);
2012 static int descriptor_count;
2013
2014 +/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
2015 +static size_t config_rom_length = 1 + 4 + 1 + 1;
2016 +
2017 #define BIB_CRC(v) ((v) << 0)
2018 #define BIB_CRC_LENGTH(v) ((v) << 16)
2019 #define BIB_INFO_LENGTH(v) ((v) << 24)
2020 @@ -72,7 +75,7 @@ static int descriptor_count;
2021 #define BIB_CMC ((1) << 30)
2022 #define BIB_IMC ((1) << 31)
2023
2024 -static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
2025 +static u32 *generate_config_rom(struct fw_card *card)
2026 {
2027 struct fw_descriptor *desc;
2028 static u32 config_rom[256];
2029 @@ -131,7 +134,7 @@ static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
2030 for (i = 0; i < j; i += length + 1)
2031 length = fw_compute_block_crc(config_rom + i);
2032
2033 - *config_rom_length = j;
2034 + WARN_ON(j != config_rom_length);
2035
2036 return config_rom;
2037 }
2038 @@ -140,17 +143,24 @@ static void update_config_roms(void)
2039 {
2040 struct fw_card *card;
2041 u32 *config_rom;
2042 - size_t length;
2043
2044 list_for_each_entry (card, &card_list, link) {
2045 - config_rom = generate_config_rom(card, &length);
2046 - card->driver->set_config_rom(card, config_rom, length);
2047 + config_rom = generate_config_rom(card);
2048 + card->driver->set_config_rom(card, config_rom,
2049 + config_rom_length);
2050 }
2051 }
2052
2053 +static size_t required_space(struct fw_descriptor *desc)
2054 +{
2055 + /* descriptor + entry into root dir + optional immediate entry */
2056 + return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
2057 +}
2058 +
2059 int fw_core_add_descriptor(struct fw_descriptor *desc)
2060 {
2061 size_t i;
2062 + int ret;
2063
2064 /*
2065 * Check descriptor is valid; the length of all blocks in the
2066 @@ -166,15 +176,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
2067
2068 mutex_lock(&card_mutex);
2069
2070 - list_add_tail(&desc->link, &descriptor_list);
2071 - descriptor_count++;
2072 - if (desc->immediate > 0)
2073 + if (config_rom_length + required_space(desc) > 256) {
2074 + ret = -EBUSY;
2075 + } else {
2076 + list_add_tail(&desc->link, &descriptor_list);
2077 + config_rom_length += required_space(desc);
2078 descriptor_count++;
2079 - update_config_roms();
2080 + if (desc->immediate > 0)
2081 + descriptor_count++;
2082 + update_config_roms();
2083 + ret = 0;
2084 + }
2085
2086 mutex_unlock(&card_mutex);
2087
2088 - return 0;
2089 + return ret;
2090 }
2091 EXPORT_SYMBOL(fw_core_add_descriptor);
2092
2093 @@ -183,6 +199,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
2094 mutex_lock(&card_mutex);
2095
2096 list_del(&desc->link);
2097 + config_rom_length -= required_space(desc);
2098 descriptor_count--;
2099 if (desc->immediate > 0)
2100 descriptor_count--;
2101 @@ -436,7 +453,6 @@ int fw_card_add(struct fw_card *card,
2102 u32 max_receive, u32 link_speed, u64 guid)
2103 {
2104 u32 *config_rom;
2105 - size_t length;
2106 int ret;
2107
2108 card->max_receive = max_receive;
2109 @@ -445,8 +461,8 @@ int fw_card_add(struct fw_card *card,
2110
2111 mutex_lock(&card_mutex);
2112
2113 - config_rom = generate_config_rom(card, &length);
2114 - ret = card->driver->enable(card, config_rom, length);
2115 + config_rom = generate_config_rom(card);
2116 + ret = card->driver->enable(card, config_rom, config_rom_length);
2117 if (ret == 0)
2118 list_add_tail(&card->link, &card_list);
2119
2120 diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
2121 index 1e504de..720b39b 100644
2122 --- a/drivers/firewire/ohci.c
2123 +++ b/drivers/firewire/ohci.c
2124 @@ -2412,6 +2412,7 @@ static void ohci_pmac_off(struct pci_dev *dev)
2125
2126 #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2127 #define PCI_DEVICE_ID_AGERE_FW643 0x5901
2128 +#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
2129
2130 static int __devinit pci_probe(struct pci_dev *dev,
2131 const struct pci_device_id *ent)
2132 @@ -2477,7 +2478,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
2133 #if !defined(CONFIG_X86_32)
2134 /* dual-buffer mode is broken with descriptor addresses above 2G */
2135 if (dev->vendor == PCI_VENDOR_ID_TI &&
2136 - dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
2137 + (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
2138 + dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
2139 ohci->use_dualbuffer = false;
2140 #endif
2141
2142 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
2143 index e9dbb48..8bf3770 100644
2144 --- a/drivers/gpu/drm/drm_gem.c
2145 +++ b/drivers/gpu/drm/drm_gem.c
2146 @@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
2147 if (IS_ERR(obj->filp))
2148 goto free;
2149
2150 - /* Basically we want to disable the OOM killer and handle ENOMEM
2151 - * ourselves by sacrificing pages from cached buffers.
2152 - * XXX shmem_file_[gs]et_gfp_mask()
2153 - */
2154 - mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
2155 - GFP_HIGHUSER |
2156 - __GFP_COLD |
2157 - __GFP_FS |
2158 - __GFP_RECLAIMABLE |
2159 - __GFP_NORETRY |
2160 - __GFP_NOWARN |
2161 - __GFP_NOMEMALLOC);
2162 -
2163 kref_init(&obj->refcount);
2164 kref_init(&obj->handlecount);
2165 obj->size = size;
2166 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
2167 index 26bf055..af655e8 100644
2168 --- a/drivers/gpu/drm/i915/i915_debugfs.c
2169 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
2170 @@ -288,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
2171 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
2172 obj = obj_priv->obj;
2173 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
2174 - ret = i915_gem_object_get_pages(obj);
2175 + ret = i915_gem_object_get_pages(obj, 0);
2176 if (ret) {
2177 DRM_ERROR("Failed to get pages: %d\n", ret);
2178 spin_unlock(&dev_priv->mm.active_list_lock);
2179 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
2180 index bc2db7d..eaa1893 100644
2181 --- a/drivers/gpu/drm/i915/i915_dma.c
2182 +++ b/drivers/gpu/drm/i915/i915_dma.c
2183 @@ -1252,6 +1252,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
2184 if (ret)
2185 goto destroy_ringbuffer;
2186
2187 + intel_modeset_init(dev);
2188 +
2189 ret = drm_irq_install(dev);
2190 if (ret)
2191 goto destroy_ringbuffer;
2192 @@ -1266,8 +1268,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
2193
2194 I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
2195
2196 - intel_modeset_init(dev);
2197 -
2198 drm_helper_initial_config(dev);
2199
2200 return 0;
2201 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2202 index 791fded..7277246 100644
2203 --- a/drivers/gpu/drm/i915/i915_drv.h
2204 +++ b/drivers/gpu/drm/i915/i915_drv.h
2205 @@ -822,7 +822,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
2206 void i915_gem_detach_phys_object(struct drm_device *dev,
2207 struct drm_gem_object *obj);
2208 void i915_gem_free_all_phys_object(struct drm_device *dev);
2209 -int i915_gem_object_get_pages(struct drm_gem_object *obj);
2210 +int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
2211 void i915_gem_object_put_pages(struct drm_gem_object *obj);
2212 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
2213
2214 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2215 index df2c625..8ad244a 100644
2216 --- a/drivers/gpu/drm/i915/i915_gem.c
2217 +++ b/drivers/gpu/drm/i915/i915_gem.c
2218 @@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
2219
2220 mutex_lock(&dev->struct_mutex);
2221
2222 - ret = i915_gem_object_get_pages(obj);
2223 + ret = i915_gem_object_get_pages(obj, 0);
2224 if (ret != 0)
2225 goto fail_unlock;
2226
2227 @@ -321,40 +321,24 @@ fail_unlock:
2228 return ret;
2229 }
2230
2231 -static inline gfp_t
2232 -i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
2233 -{
2234 - return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
2235 -}
2236 -
2237 -static inline void
2238 -i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
2239 -{
2240 - mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
2241 -}
2242 -
2243 static int
2244 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
2245 {
2246 int ret;
2247
2248 - ret = i915_gem_object_get_pages(obj);
2249 + ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
2250
2251 /* If we've insufficient memory to map in the pages, attempt
2252 * to make some space by throwing out some old buffers.
2253 */
2254 if (ret == -ENOMEM) {
2255 struct drm_device *dev = obj->dev;
2256 - gfp_t gfp;
2257
2258 ret = i915_gem_evict_something(dev, obj->size);
2259 if (ret)
2260 return ret;
2261
2262 - gfp = i915_gem_object_get_page_gfp_mask(obj);
2263 - i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
2264 - ret = i915_gem_object_get_pages(obj);
2265 - i915_gem_object_set_page_gfp_mask (obj, gfp);
2266 + ret = i915_gem_object_get_pages(obj, 0);
2267 }
2268
2269 return ret;
2270 @@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
2271
2272 mutex_lock(&dev->struct_mutex);
2273
2274 - ret = i915_gem_object_get_pages(obj);
2275 + ret = i915_gem_object_get_pages(obj, 0);
2276 if (ret != 0)
2277 goto fail_unlock;
2278
2279 @@ -2219,7 +2203,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
2280 }
2281
2282 int
2283 -i915_gem_object_get_pages(struct drm_gem_object *obj)
2284 +i915_gem_object_get_pages(struct drm_gem_object *obj,
2285 + gfp_t gfpmask)
2286 {
2287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2288 int page_count, i;
2289 @@ -2245,7 +2230,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2290 inode = obj->filp->f_path.dentry->d_inode;
2291 mapping = inode->i_mapping;
2292 for (i = 0; i < page_count; i++) {
2293 - page = read_mapping_page(mapping, i, NULL);
2294 + page = read_cache_page_gfp(mapping, i,
2295 + mapping_gfp_mask (mapping) |
2296 + __GFP_COLD |
2297 + gfpmask);
2298 if (IS_ERR(page)) {
2299 ret = PTR_ERR(page);
2300 i915_gem_object_put_pages(obj);
2301 @@ -2568,7 +2556,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2302 drm_i915_private_t *dev_priv = dev->dev_private;
2303 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2304 struct drm_mm_node *free_space;
2305 - bool retry_alloc = false;
2306 + gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2307 int ret;
2308
2309 if (obj_priv->madv != I915_MADV_WILLNEED) {
2310 @@ -2612,15 +2600,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2311 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2312 obj->size, obj_priv->gtt_offset);
2313 #endif
2314 - if (retry_alloc) {
2315 - i915_gem_object_set_page_gfp_mask (obj,
2316 - i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2317 - }
2318 - ret = i915_gem_object_get_pages(obj);
2319 - if (retry_alloc) {
2320 - i915_gem_object_set_page_gfp_mask (obj,
2321 - i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2322 - }
2323 + ret = i915_gem_object_get_pages(obj, gfpmask);
2324 if (ret) {
2325 drm_mm_put_block(obj_priv->gtt_space);
2326 obj_priv->gtt_space = NULL;
2327 @@ -2630,9 +2610,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2328 ret = i915_gem_evict_something(dev, obj->size);
2329 if (ret) {
2330 /* now try to shrink everyone else */
2331 - if (! retry_alloc) {
2332 - retry_alloc = true;
2333 - goto search_free;
2334 + if (gfpmask) {
2335 + gfpmask = 0;
2336 + goto search_free;
2337 }
2338
2339 return ret;
2340 @@ -4695,7 +4675,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
2341 if (!obj_priv->phys_obj)
2342 return;
2343
2344 - ret = i915_gem_object_get_pages(obj);
2345 + ret = i915_gem_object_get_pages(obj, 0);
2346 if (ret)
2347 goto out;
2348
2349 @@ -4753,7 +4733,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
2350 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
2351 obj_priv->phys_obj->cur_obj = obj;
2352
2353 - ret = i915_gem_object_get_pages(obj);
2354 + ret = i915_gem_object_get_pages(obj, 0);
2355 if (ret) {
2356 DRM_ERROR("failed to get page list\n");
2357 goto out;
2358 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
2359 index 7d1357e..63f28ad 100644
2360 --- a/drivers/gpu/drm/i915/i915_irq.c
2361 +++ b/drivers/gpu/drm/i915/i915_irq.c
2362 @@ -282,6 +282,8 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
2363 dev_priv->mm.irq_gem_seqno = seqno;
2364 trace_i915_gem_request_complete(dev, seqno);
2365 DRM_WAKEUP(&dev_priv->irq_queue);
2366 + dev_priv->hangcheck_count = 0;
2367 + mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
2368 }
2369
2370 I915_WRITE(GTIIR, gt_iir);
2371 @@ -1042,6 +1044,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
2372 (void) I915_READ(IER);
2373 }
2374
2375 +/*
2376 + * Must be called after intel_modeset_init or hotplug interrupts won't be
2377 + * enabled correctly.
2378 + */
2379 int i915_driver_irq_postinstall(struct drm_device *dev)
2380 {
2381 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2382 @@ -1064,19 +1070,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
2383 if (I915_HAS_HOTPLUG(dev)) {
2384 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2385
2386 - /* Leave other bits alone */
2387 - hotplug_en |= HOTPLUG_EN_MASK;
2388 + /* Note HDMI and DP share bits */
2389 + if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2390 + hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2391 + if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2392 + hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2393 + if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2394 + hotplug_en |= HDMID_HOTPLUG_INT_EN;
2395 + if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2396 + hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2397 + if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2398 + hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2399 + if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
2400 + hotplug_en |= CRT_HOTPLUG_INT_EN;
2401 + /* Ignore TV since it's buggy */
2402 +
2403 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2404
2405 - dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
2406 - TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
2407 - SDVOB_HOTPLUG_INT_STATUS;
2408 - if (IS_G4X(dev)) {
2409 - dev_priv->hotplug_supported_mask |=
2410 - HDMIB_HOTPLUG_INT_STATUS |
2411 - HDMIC_HOTPLUG_INT_STATUS |
2412 - HDMID_HOTPLUG_INT_STATUS;
2413 - }
2414 /* Enable in IER... */
2415 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2416 /* and unmask in IMR */
2417 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2418 index 54e5907..fd537f4 100644
2419 --- a/drivers/gpu/drm/i915/i915_reg.h
2420 +++ b/drivers/gpu/drm/i915/i915_reg.h
2421 @@ -863,14 +863,6 @@
2422 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
2423 #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
2424 #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
2425 -#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
2426 - HDMIC_HOTPLUG_INT_EN | \
2427 - HDMID_HOTPLUG_INT_EN | \
2428 - SDVOB_HOTPLUG_INT_EN | \
2429 - SDVOC_HOTPLUG_INT_EN | \
2430 - TV_HOTPLUG_INT_EN | \
2431 - CRT_HOTPLUG_INT_EN)
2432 -
2433
2434 #define PORT_HOTPLUG_STAT 0x61114
2435 #define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
2436 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
2437 index e505144..6d3730f 100644
2438 --- a/drivers/gpu/drm/i915/intel_crt.c
2439 +++ b/drivers/gpu/drm/i915/intel_crt.c
2440 @@ -576,4 +576,6 @@ void intel_crt_init(struct drm_device *dev)
2441 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
2442
2443 drm_sysfs_connector_add(connector);
2444 +
2445 + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
2446 }
2447 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2448 index 121b92e..601415d 100644
2449 --- a/drivers/gpu/drm/i915/intel_display.c
2450 +++ b/drivers/gpu/drm/i915/intel_display.c
2451 @@ -4068,29 +4068,43 @@ static void intel_setup_outputs(struct drm_device *dev)
2452 bool found = false;
2453
2454 if (I915_READ(SDVOB) & SDVO_DETECTED) {
2455 + DRM_DEBUG_KMS("probing SDVOB\n");
2456 found = intel_sdvo_init(dev, SDVOB);
2457 - if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
2458 + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
2459 + DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
2460 intel_hdmi_init(dev, SDVOB);
2461 + }
2462
2463 - if (!found && SUPPORTS_INTEGRATED_DP(dev))
2464 + if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
2465 + DRM_DEBUG_KMS("probing DP_B\n");
2466 intel_dp_init(dev, DP_B);
2467 + }
2468 }
2469
2470 /* Before G4X SDVOC doesn't have its own detect register */
2471
2472 - if (I915_READ(SDVOB) & SDVO_DETECTED)
2473 + if (I915_READ(SDVOB) & SDVO_DETECTED) {
2474 + DRM_DEBUG_KMS("probing SDVOC\n");
2475 found = intel_sdvo_init(dev, SDVOC);
2476 + }
2477
2478 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
2479
2480 - if (SUPPORTS_INTEGRATED_HDMI(dev))
2481 + if (SUPPORTS_INTEGRATED_HDMI(dev)) {
2482 + DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
2483 intel_hdmi_init(dev, SDVOC);
2484 - if (SUPPORTS_INTEGRATED_DP(dev))
2485 + }
2486 + if (SUPPORTS_INTEGRATED_DP(dev)) {
2487 + DRM_DEBUG_KMS("probing DP_C\n");
2488 intel_dp_init(dev, DP_C);
2489 + }
2490 }
2491
2492 - if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
2493 + if (SUPPORTS_INTEGRATED_DP(dev) &&
2494 + (I915_READ(DP_D) & DP_DETECTED)) {
2495 + DRM_DEBUG_KMS("probing DP_D\n");
2496 intel_dp_init(dev, DP_D);
2497 + }
2498 } else if (IS_I8XX(dev))
2499 intel_dvo_init(dev);
2500
2501 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2502 index 92a3d7b..d487771 100644
2503 --- a/drivers/gpu/drm/i915/intel_dp.c
2504 +++ b/drivers/gpu/drm/i915/intel_dp.c
2505 @@ -1290,14 +1290,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2506 break;
2507 case DP_B:
2508 case PCH_DP_B:
2509 + dev_priv->hotplug_supported_mask |=
2510 + HDMIB_HOTPLUG_INT_STATUS;
2511 name = "DPDDC-B";
2512 break;
2513 case DP_C:
2514 case PCH_DP_C:
2515 + dev_priv->hotplug_supported_mask |=
2516 + HDMIC_HOTPLUG_INT_STATUS;
2517 name = "DPDDC-C";
2518 break;
2519 case DP_D:
2520 case PCH_DP_D:
2521 + dev_priv->hotplug_supported_mask |=
2522 + HDMID_HOTPLUG_INT_STATUS;
2523 name = "DPDDC-D";
2524 break;
2525 }
2526 diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
2527 index c33451a..85760bf 100644
2528 --- a/drivers/gpu/drm/i915/intel_hdmi.c
2529 +++ b/drivers/gpu/drm/i915/intel_hdmi.c
2530 @@ -254,21 +254,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
2531 if (sdvox_reg == SDVOB) {
2532 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
2533 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
2534 + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
2535 } else if (sdvox_reg == SDVOC) {
2536 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
2537 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
2538 + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
2539 } else if (sdvox_reg == HDMIB) {
2540 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
2541 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
2542 "HDMIB");
2543 + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
2544 } else if (sdvox_reg == HDMIC) {
2545 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
2546 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
2547 "HDMIC");
2548 + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
2549 } else if (sdvox_reg == HDMID) {
2550 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
2551 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
2552 "HDMID");
2553 + dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
2554 }
2555 if (!intel_output->ddc_bus)
2556 goto err_connector;
2557 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
2558 index 29e21d3..3f5aaf1 100644
2559 --- a/drivers/gpu/drm/i915/intel_sdvo.c
2560 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
2561 @@ -2743,6 +2743,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2562
2563 bool intel_sdvo_init(struct drm_device *dev, int output_device)
2564 {
2565 + struct drm_i915_private *dev_priv = dev->dev_private;
2566 struct drm_connector *connector;
2567 struct intel_output *intel_output;
2568 struct intel_sdvo_priv *sdvo_priv;
2569 @@ -2789,10 +2790,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2570 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2571 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2572 "SDVOB/VGA DDC BUS");
2573 + dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2574 } else {
2575 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2576 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2577 "SDVOC/VGA DDC BUS");
2578 + dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2579 }
2580
2581 if (intel_output->ddc_bus == NULL)
2582 diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
2583 index 5b28b4e..ce026f0 100644
2584 --- a/drivers/gpu/drm/i915/intel_tv.c
2585 +++ b/drivers/gpu/drm/i915/intel_tv.c
2586 @@ -1801,6 +1801,8 @@ intel_tv_init(struct drm_device *dev)
2587 drm_connector_attach_property(connector,
2588 dev->mode_config.tv_bottom_margin_property,
2589 tv_priv->margin[TV_MARGIN_BOTTOM]);
2590 +
2591 + dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
2592 out:
2593 drm_sysfs_connector_add(connector);
2594 }
2595 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
2596 index b368406..100da85 100644
2597 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
2598 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
2599 @@ -346,10 +346,8 @@ static int ipathfs_fill_super(struct super_block *sb, void *data,
2600 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
2601 spin_unlock_irqrestore(&ipath_devs_lock, flags);
2602 ret = create_device_files(sb, dd);
2603 - if (ret) {
2604 - deactivate_locked_super(sb);
2605 + if (ret)
2606 goto bail;
2607 - }
2608 spin_lock_irqsave(&ipath_devs_lock, flags);
2609 }
2610
2611 diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
2612 index 33309fe..c8f5a9a 100644
2613 --- a/drivers/input/misc/winbond-cir.c
2614 +++ b/drivers/input/misc/winbond-cir.c
2615 @@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
2616 return;
2617 }
2618
2619 - dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
2620 + dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
2621 "toggle %u mode %u scan 0x%08X\n",
2622 address,
2623 command,
2624 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
2625 index 610e914..b6992b7 100644
2626 --- a/drivers/message/fusion/mptbase.c
2627 +++ b/drivers/message/fusion/mptbase.c
2628 @@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc)
2629
2630 if (ioc->bus_type == SPI)
2631 num_chain *= MPT_SCSI_CAN_QUEUE;
2632 + else if (ioc->bus_type == SAS)
2633 + num_chain *= MPT_SAS_CAN_QUEUE;
2634 else
2635 num_chain *= MPT_FC_CAN_QUEUE;
2636
2637 diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
2638 index f237ddb..111ea41 100644
2639 --- a/drivers/mtd/ubi/cdev.c
2640 +++ b/drivers/mtd/ubi/cdev.c
2641 @@ -853,7 +853,6 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
2642 break;
2643 }
2644
2645 - req.name[req.name_len] = '\0';
2646 err = verify_mkvol_req(ubi, &req);
2647 if (err)
2648 break;
2649 diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
2650 index a84f1c5..511b922 100644
2651 --- a/drivers/net/benet/be.h
2652 +++ b/drivers/net/benet/be.h
2653 @@ -272,8 +272,13 @@ struct be_adapter {
2654 u32 cap;
2655 u32 rx_fc; /* Rx flow control */
2656 u32 tx_fc; /* Tx flow control */
2657 + u8 generation; /* BladeEngine ASIC generation */
2658 };
2659
2660 +/* BladeEngine Generation numbers */
2661 +#define BE_GEN2 2
2662 +#define BE_GEN3 3
2663 +
2664 extern const struct ethtool_ops be_ethtool_ops;
2665
2666 #define drvr_stats(adapter) (&adapter->stats.drvr_stats)
2667 diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
2668 index e5f9676..ad33d55 100644
2669 --- a/drivers/net/benet/be_cmds.h
2670 +++ b/drivers/net/benet/be_cmds.h
2671 @@ -154,7 +154,8 @@ struct be_cmd_req_hdr {
2672 u8 domain; /* dword 0 */
2673 u32 timeout; /* dword 1 */
2674 u32 request_length; /* dword 2 */
2675 - u32 rsvd; /* dword 3 */
2676 + u8 version; /* dword 3 */
2677 + u8 rsvd[3]; /* dword 3 */
2678 };
2679
2680 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
2681 diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
2682 index 3749bb1..ec983cb 100644
2683 --- a/drivers/net/benet/be_main.c
2684 +++ b/drivers/net/benet/be_main.c
2685 @@ -1944,6 +1944,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
2686 static int be_map_pci_bars(struct be_adapter *adapter)
2687 {
2688 u8 __iomem *addr;
2689 + int pcicfg_reg;
2690
2691 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2692 pci_resource_len(adapter->pdev, 2));
2693 @@ -1957,8 +1958,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2694 goto pci_map_err;
2695 adapter->db = addr;
2696
2697 - addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
2698 - pci_resource_len(adapter->pdev, 1));
2699 + if (adapter->generation == BE_GEN2)
2700 + pcicfg_reg = 1;
2701 + else
2702 + pcicfg_reg = 0;
2703 +
2704 + addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
2705 + pci_resource_len(adapter->pdev, pcicfg_reg));
2706 if (addr == NULL)
2707 goto pci_map_err;
2708 adapter->pcicfg = addr;
2709 @@ -2028,6 +2034,7 @@ static int be_stats_init(struct be_adapter *adapter)
2710 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2711 if (cmd->va == NULL)
2712 return -1;
2713 + memset(cmd->va, 0, cmd->size);
2714 return 0;
2715 }
2716
2717 @@ -2101,6 +2108,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
2718 goto rel_reg;
2719 }
2720 adapter = netdev_priv(netdev);
2721 +
2722 + switch (pdev->device) {
2723 + case BE_DEVICE_ID1:
2724 + case OC_DEVICE_ID1:
2725 + adapter->generation = BE_GEN2;
2726 + break;
2727 + case BE_DEVICE_ID2:
2728 + case OC_DEVICE_ID2:
2729 + adapter->generation = BE_GEN3;
2730 + break;
2731 + default:
2732 + adapter->generation = 0;
2733 + }
2734 +
2735 adapter->pdev = pdev;
2736 pci_set_drvdata(pdev, adapter);
2737 adapter->netdev = netdev;
2738 diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
2739 index 42e2b7e..4a2ee85 100644
2740 --- a/drivers/net/e1000/e1000.h
2741 +++ b/drivers/net/e1000/e1000.h
2742 @@ -326,6 +326,8 @@ struct e1000_adapter {
2743 /* for ioport free */
2744 int bars;
2745 int need_ioport;
2746 +
2747 + bool discarding;
2748 };
2749
2750 enum e1000_state_t {
2751 diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
2752 index bcd192c..1a23f16 100644
2753 --- a/drivers/net/e1000/e1000_main.c
2754 +++ b/drivers/net/e1000/e1000_main.c
2755 @@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2756 rctl &= ~E1000_RCTL_SZ_4096;
2757 rctl |= E1000_RCTL_BSEX;
2758 switch (adapter->rx_buffer_len) {
2759 - case E1000_RXBUFFER_256:
2760 - rctl |= E1000_RCTL_SZ_256;
2761 - rctl &= ~E1000_RCTL_BSEX;
2762 - break;
2763 - case E1000_RXBUFFER_512:
2764 - rctl |= E1000_RCTL_SZ_512;
2765 - rctl &= ~E1000_RCTL_BSEX;
2766 - break;
2767 - case E1000_RXBUFFER_1024:
2768 - rctl |= E1000_RCTL_SZ_1024;
2769 - rctl &= ~E1000_RCTL_BSEX;
2770 - break;
2771 case E1000_RXBUFFER_2048:
2772 default:
2773 rctl |= E1000_RCTL_SZ_2048;
2774 @@ -3154,13 +3142,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
2775 * however with the new *_jumbo_rx* routines, jumbo receives will use
2776 * fragmented skbs */
2777
2778 - if (max_frame <= E1000_RXBUFFER_256)
2779 - adapter->rx_buffer_len = E1000_RXBUFFER_256;
2780 - else if (max_frame <= E1000_RXBUFFER_512)
2781 - adapter->rx_buffer_len = E1000_RXBUFFER_512;
2782 - else if (max_frame <= E1000_RXBUFFER_1024)
2783 - adapter->rx_buffer_len = E1000_RXBUFFER_1024;
2784 - else if (max_frame <= E1000_RXBUFFER_2048)
2785 + if (max_frame <= E1000_RXBUFFER_2048)
2786 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2787 else
2788 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
2789 @@ -3827,13 +3809,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
2790
2791 length = le16_to_cpu(rx_desc->length);
2792 /* !EOP means multiple descriptors were used to store a single
2793 - * packet, also make sure the frame isn't just CRC only */
2794 - if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
2795 + * packet, if thats the case we need to toss it. In fact, we
2796 + * to toss every packet with the EOP bit clear and the next
2797 + * frame that _does_ have the EOP bit set, as it is by
2798 + * definition only a frame fragment
2799 + */
2800 + if (unlikely(!(status & E1000_RXD_STAT_EOP)))
2801 + adapter->discarding = true;
2802 +
2803 + if (adapter->discarding) {
2804 /* All receives must fit into a single buffer */
2805 E1000_DBG("%s: Receive packet consumed multiple"
2806 " buffers\n", netdev->name);
2807 /* recycle */
2808 buffer_info->skb = skb;
2809 + if (status & E1000_RXD_STAT_EOP)
2810 + adapter->discarding = false;
2811 goto next_desc;
2812 }
2813
2814 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
2815 index 3e187b0..47db9bd 100644
2816 --- a/drivers/net/e1000e/e1000.h
2817 +++ b/drivers/net/e1000e/e1000.h
2818 @@ -417,6 +417,7 @@ struct e1000_info {
2819 /* CRC Stripping defines */
2820 #define FLAG2_CRC_STRIPPING (1 << 0)
2821 #define FLAG2_HAS_PHY_WAKEUP (1 << 1)
2822 +#define FLAG2_IS_DISCARDING (1 << 2)
2823
2824 #define E1000_RX_DESC_PS(R, i) \
2825 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
2826 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
2827 index fad8f9e..2154530 100644
2828 --- a/drivers/net/e1000e/netdev.c
2829 +++ b/drivers/net/e1000e/netdev.c
2830 @@ -482,14 +482,24 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
2831
2832 length = le16_to_cpu(rx_desc->length);
2833
2834 - /* !EOP means multiple descriptors were used to store a single
2835 - * packet, also make sure the frame isn't just CRC only */
2836 - if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
2837 + /*
2838 + * !EOP means multiple descriptors were used to store a single
2839 + * packet, if that's the case we need to toss it. In fact, we
2840 + * need to toss every packet with the EOP bit clear and the
2841 + * next frame that _does_ have the EOP bit set, as it is by
2842 + * definition only a frame fragment
2843 + */
2844 + if (unlikely(!(status & E1000_RXD_STAT_EOP)))
2845 + adapter->flags2 |= FLAG2_IS_DISCARDING;
2846 +
2847 + if (adapter->flags2 & FLAG2_IS_DISCARDING) {
2848 /* All receives must fit into a single buffer */
2849 e_dbg("%s: Receive packet consumed multiple buffers\n",
2850 netdev->name);
2851 /* recycle */
2852 buffer_info->skb = skb;
2853 + if (status & E1000_RXD_STAT_EOP)
2854 + adapter->flags2 &= ~FLAG2_IS_DISCARDING;
2855 goto next_desc;
2856 }
2857
2858 @@ -747,10 +757,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
2859 PCI_DMA_FROMDEVICE);
2860 buffer_info->dma = 0;
2861
2862 - if (!(staterr & E1000_RXD_STAT_EOP)) {
2863 + /* see !EOP comment in other rx routine */
2864 + if (!(staterr & E1000_RXD_STAT_EOP))
2865 + adapter->flags2 |= FLAG2_IS_DISCARDING;
2866 +
2867 + if (adapter->flags2 & FLAG2_IS_DISCARDING) {
2868 e_dbg("%s: Packet Split buffers didn't pick up the "
2869 "full packet\n", netdev->name);
2870 dev_kfree_skb_irq(skb);
2871 + if (staterr & E1000_RXD_STAT_EOP)
2872 + adapter->flags2 &= ~FLAG2_IS_DISCARDING;
2873 goto next_desc;
2874 }
2875
2876 @@ -1120,6 +1136,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
2877
2878 rx_ring->next_to_clean = 0;
2879 rx_ring->next_to_use = 0;
2880 + adapter->flags2 &= ~FLAG2_IS_DISCARDING;
2881
2882 writel(0, adapter->hw.hw_addr + rx_ring->head);
2883 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2884 @@ -2330,18 +2347,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2885 rctl &= ~E1000_RCTL_SZ_4096;
2886 rctl |= E1000_RCTL_BSEX;
2887 switch (adapter->rx_buffer_len) {
2888 - case 256:
2889 - rctl |= E1000_RCTL_SZ_256;
2890 - rctl &= ~E1000_RCTL_BSEX;
2891 - break;
2892 - case 512:
2893 - rctl |= E1000_RCTL_SZ_512;
2894 - rctl &= ~E1000_RCTL_BSEX;
2895 - break;
2896 - case 1024:
2897 - rctl |= E1000_RCTL_SZ_1024;
2898 - rctl &= ~E1000_RCTL_BSEX;
2899 - break;
2900 case 2048:
2901 default:
2902 rctl |= E1000_RCTL_SZ_2048;
2903 @@ -4321,13 +4326,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
2904 * fragmented skbs
2905 */
2906
2907 - if (max_frame <= 256)
2908 - adapter->rx_buffer_len = 256;
2909 - else if (max_frame <= 512)
2910 - adapter->rx_buffer_len = 512;
2911 - else if (max_frame <= 1024)
2912 - adapter->rx_buffer_len = 1024;
2913 - else if (max_frame <= 2048)
2914 + if (max_frame <= 2048)
2915 adapter->rx_buffer_len = 2048;
2916 else
2917 adapter->rx_buffer_len = 4096;
2918 diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
2919 index 6a10d7b..f3600b3 100644
2920 --- a/drivers/net/sky2.c
2921 +++ b/drivers/net/sky2.c
2922 @@ -1806,7 +1806,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
2923 sky2->tx_cons = idx;
2924 smp_mb();
2925
2926 - if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
2927 + /* Wake unless it's detached, and called e.g. from sky2_down() */
2928 + if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
2929 netif_wake_queue(dev);
2930 }
2931
2932 diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
2933 index a36e2b5..e65ee4d 100644
2934 --- a/drivers/net/starfire.c
2935 +++ b/drivers/net/starfire.c
2936 @@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
2937 if (retval) {
2938 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
2939 FIRMWARE_RX);
2940 - return retval;
2941 + goto out_init;
2942 }
2943 if (fw_rx->size % 4) {
2944 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
2945 @@ -1108,6 +1108,9 @@ out_tx:
2946 release_firmware(fw_tx);
2947 out_rx:
2948 release_firmware(fw_rx);
2949 +out_init:
2950 + if (retval)
2951 + netdev_close(dev);
2952 return retval;
2953 }
2954
2955 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2956 index c7aa05a..0905b38 100644
2957 --- a/drivers/net/wireless/ath/ath9k/hw.c
2958 +++ b/drivers/net/wireless/ath/ath9k/hw.c
2959 @@ -880,12 +880,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
2960 }
2961 }
2962
2963 -static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
2964 +static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
2965 {
2966 u32 i, j;
2967
2968 - if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
2969 - test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
2970 + if (ah->hw_version.devid == AR9280_DEVID_PCI) {
2971
2972 /* EEPROM Fixup */
2973 for (i = 0; i < ah->iniModes.ia_rows; i++) {
2974 @@ -980,7 +979,7 @@ int ath9k_hw_init(struct ath_hw *ah)
2975
2976 ath9k_hw_init_mode_gain_regs(ah);
2977 ath9k_hw_fill_cap_info(ah);
2978 - ath9k_hw_init_11a_eeprom_fix(ah);
2979 + ath9k_hw_init_eeprom_fix(ah);
2980
2981 r = ath9k_hw_init_macaddr(ah);
2982 if (r) {
2983 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2984 index 80df8f3..5864eaa 100644
2985 --- a/drivers/net/wireless/ath/ath9k/main.c
2986 +++ b/drivers/net/wireless/ath/ath9k/main.c
2987 @@ -2285,10 +2285,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2988 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2989 ath9k_ps_wakeup(sc);
2990 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2991 - ath_beacon_return(sc, avp);
2992 ath9k_ps_restore(sc);
2993 }
2994
2995 + ath_beacon_return(sc, avp);
2996 sc->sc_flags &= ~SC_OP_BEACONS;
2997
2998 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2999 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3000 index 81726ee..0eb2591 100644
3001 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3002 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
3003 @@ -2808,7 +2808,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3004 repeat_rate--;
3005 }
3006
3007 - lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX;
3008 + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3009 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3010 lq_cmd->agg_params.agg_time_limit =
3011 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
3012 diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
3013 index 768bd0e..43ed81e 100644
3014 --- a/drivers/regulator/wm8350-regulator.c
3015 +++ b/drivers/regulator/wm8350-regulator.c
3016 @@ -1504,7 +1504,8 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
3017 led->isink_init.consumer_supplies = &led->isink_consumer;
3018 led->isink_init.constraints.min_uA = 0;
3019 led->isink_init.constraints.max_uA = pdata->max_uA;
3020 - led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
3021 + led->isink_init.constraints.valid_ops_mask
3022 + = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
3023 led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
3024 ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
3025 if (ret != 0) {
3026 @@ -1517,6 +1518,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
3027 led->dcdc_init.num_consumer_supplies = 1;
3028 led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
3029 led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
3030 + led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
3031 ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
3032 if (ret != 0) {
3033 platform_device_put(pdev);
3034 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
3035 index aaccc8e..513dec9 100644
3036 --- a/drivers/s390/block/dasd.c
3037 +++ b/drivers/s390/block/dasd.c
3038 @@ -1005,8 +1005,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
3039 if (device == NULL ||
3040 device != dasd_device_from_cdev_locked(cdev) ||
3041 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
3042 - DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
3043 - "bus_id %s", dev_name(&cdev->dev));
3044 + DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
3045 + "invalid device in request");
3046 return;
3047 }
3048
3049 @@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
3050 device = (struct dasd_device *) cqr->startdev;
3051 if (!device ||
3052 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
3053 - DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
3054 - "bus_id %s", dev_name(&cdev->dev));
3055 + DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
3056 + "invalid device in request");
3057 return;
3058 }
3059
3060 diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
3061 index 417b97c..80c205b 100644
3062 --- a/drivers/s390/block/dasd_eckd.c
3063 +++ b/drivers/s390/block/dasd_eckd.c
3064 @@ -2980,7 +2980,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3065 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3066 " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3067 req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3068 - scsw_cc(&irb->scsw), req->intrc);
3069 + scsw_cc(&irb->scsw), req ? req->intrc : 0);
3070 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3071 " device %s: Failing CCW: %p\n",
3072 dev_name(&device->cdev->dev),
3073 diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
3074 index f756a1b..a5354b8 100644
3075 --- a/drivers/s390/block/dasd_ioctl.c
3076 +++ b/drivers/s390/block/dasd_ioctl.c
3077 @@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
3078 struct ccw_dev_id dev_id;
3079
3080 base = block->base;
3081 - if (!base->discipline->fill_info)
3082 + if (!base->discipline || !base->discipline->fill_info)
3083 return -EINVAL;
3084
3085 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
3086 @@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
3087 dasd_info->features |=
3088 ((base->features & DASD_FEATURE_READONLY) != 0);
3089
3090 - if (base->discipline)
3091 - memcpy(dasd_info->type, base->discipline->name, 4);
3092 - else
3093 - memcpy(dasd_info->type, "none", 4);
3094 + memcpy(dasd_info->type, base->discipline->name, 4);
3095
3096 if (block->request_queue->request_fn) {
3097 struct list_head *l;
3098 diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
3099 index 654daa3..f9d7d38 100644
3100 --- a/drivers/s390/block/dasd_proc.c
3101 +++ b/drivers/s390/block/dasd_proc.c
3102 @@ -71,7 +71,7 @@ dasd_devices_show(struct seq_file *m, void *v)
3103 /* Print device number. */
3104 seq_printf(m, "%s", dev_name(&device->cdev->dev));
3105 /* Print discipline string. */
3106 - if (device != NULL && device->discipline != NULL)
3107 + if (device->discipline != NULL)
3108 seq_printf(m, "(%s)", device->discipline->name);
3109 else
3110 seq_printf(m, "(none)");
3111 @@ -91,10 +91,7 @@ dasd_devices_show(struct seq_file *m, void *v)
3112 substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
3113 seq_printf(m, "%4s: ", substr);
3114 /* Print device status information. */
3115 - switch ((device != NULL) ? device->state : -1) {
3116 - case -1:
3117 - seq_printf(m, "unknown");
3118 - break;
3119 + switch (device->state) {
3120 case DASD_STATE_NEW:
3121 seq_printf(m, "new");
3122 break;
3123 diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
3124 index f4b0c47..7f1e3ba 100644
3125 --- a/drivers/s390/crypto/zcrypt_pcicc.c
3126 +++ b/drivers/s390/crypto/zcrypt_pcicc.c
3127 @@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev,
3128 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
3129 return -EAGAIN;
3130 }
3131 + if (service_rc == 8 && service_rs == 72)
3132 + return -EINVAL;
3133 zdev->online = 0;
3134 return -EAGAIN; /* repeat the request on a different device. */
3135 }
3136 diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
3137 index 5677b40..1f9e923 100644
3138 --- a/drivers/s390/crypto/zcrypt_pcixcc.c
3139 +++ b/drivers/s390/crypto/zcrypt_pcixcc.c
3140 @@ -462,6 +462,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
3141 }
3142 if (service_rc == 12 && service_rs == 769)
3143 return -EINVAL;
3144 + if (service_rc == 8 && service_rs == 72)
3145 + return -EINVAL;
3146 zdev->online = 0;
3147 return -EAGAIN; /* repeat the request on a different device. */
3148 }
3149 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
3150 index 5987da8..bc9a881 100644
3151 --- a/drivers/scsi/scsi_lib.c
3152 +++ b/drivers/scsi/scsi_lib.c
3153 @@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
3154 */
3155 req->next_rq->resid_len = scsi_in(cmd)->resid;
3156
3157 + scsi_release_buffers(cmd);
3158 blk_end_request_all(req, 0);
3159
3160 - scsi_release_buffers(cmd);
3161 scsi_next_command(cmd);
3162 return;
3163 }
3164 diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
3165 index 377f271..ab2ab3c 100644
3166 --- a/drivers/serial/uartlite.c
3167 +++ b/drivers/serial/uartlite.c
3168 @@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s,
3169 spin_unlock_irqrestore(&port->lock, flags);
3170 }
3171
3172 -static int __init ulite_console_setup(struct console *co, char *options)
3173 +static int __devinit ulite_console_setup(struct console *co, char *options)
3174 {
3175 struct uart_port *port;
3176 int baud = 9600;
3177 diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
3178 index e33d362..5b56f53 100644
3179 --- a/drivers/usb/host/r8a66597-hcd.c
3180 +++ b/drivers/usb/host/r8a66597-hcd.c
3181 @@ -216,8 +216,17 @@ static void disable_controller(struct r8a66597 *r8a66597)
3182 {
3183 int port;
3184
3185 + /* disable interrupts */
3186 r8a66597_write(r8a66597, 0, INTENB0);
3187 - r8a66597_write(r8a66597, 0, INTSTS0);
3188 + r8a66597_write(r8a66597, 0, INTENB1);
3189 + r8a66597_write(r8a66597, 0, BRDYENB);
3190 + r8a66597_write(r8a66597, 0, BEMPENB);
3191 + r8a66597_write(r8a66597, 0, NRDYENB);
3192 +
3193 + /* clear status */
3194 + r8a66597_write(r8a66597, 0, BRDYSTS);
3195 + r8a66597_write(r8a66597, 0, NRDYSTS);
3196 + r8a66597_write(r8a66597, 0, BEMPSTS);
3197
3198 for (port = 0; port < r8a66597->max_root_hub; port++)
3199 r8a66597_disable_port(r8a66597, port);
3200 @@ -2470,6 +2479,12 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
3201 r8a66597->rh_timer.data = (unsigned long)r8a66597;
3202 r8a66597->reg = (unsigned long)reg;
3203
3204 + /* make sure no interrupts are pending */
3205 + ret = r8a66597_clock_enable(r8a66597);
3206 + if (ret < 0)
3207 + goto clean_up3;
3208 + disable_controller(r8a66597);
3209 +
3210 for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
3211 INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
3212 init_timer(&r8a66597->td_timer[i]);
3213 diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
3214 index 66358fa..b4b6dec 100644
3215 --- a/drivers/video/imxfb.c
3216 +++ b/drivers/video/imxfb.c
3217 @@ -593,7 +593,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
3218 */
3219 static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
3220 {
3221 - struct imxfb_info *fbi = platform_get_drvdata(dev);
3222 + struct fb_info *info = platform_get_drvdata(dev);
3223 + struct imxfb_info *fbi = info->par;
3224
3225 pr_debug("%s\n", __func__);
3226
3227 @@ -603,7 +604,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
3228
3229 static int imxfb_resume(struct platform_device *dev)
3230 {
3231 - struct imxfb_info *fbi = platform_get_drvdata(dev);
3232 + struct fb_info *info = platform_get_drvdata(dev);
3233 + struct imxfb_info *fbi = info->par;
3234
3235 pr_debug("%s\n", __func__);
3236
3237 diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
3238 index 054ef29..772ba3f 100644
3239 --- a/drivers/video/mx3fb.c
3240 +++ b/drivers/video/mx3fb.c
3241 @@ -324,8 +324,11 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
3242 unsigned long flags;
3243 dma_cookie_t cookie;
3244
3245 - dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
3246 - to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
3247 + if (mx3_fbi->txd)
3248 + dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
3249 + to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
3250 + else
3251 + dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
3252
3253 /* This enables the channel */
3254 if (mx3_fbi->cookie < 0) {
3255 @@ -646,6 +649,7 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a
3256
3257 static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
3258 {
3259 + dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
3260 /* This might be board-specific */
3261 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
3262 return;
3263 @@ -1486,12 +1490,12 @@ static int mx3fb_probe(struct platform_device *pdev)
3264 goto ersdc0;
3265 }
3266
3267 + mx3fb->backlight_level = 255;
3268 +
3269 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
3270 if (ret < 0)
3271 goto eisdc0;
3272
3273 - mx3fb->backlight_level = 255;
3274 -
3275 return 0;
3276
3277 eisdc0:
3278 diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
3279 index 14a8644..69357c0 100644
3280 --- a/fs/9p/vfs_super.c
3281 +++ b/fs/9p/vfs_super.c
3282 @@ -188,7 +188,8 @@ static void v9fs_kill_super(struct super_block *s)
3283
3284 P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
3285
3286 - v9fs_dentry_release(s->s_root); /* clunk root */
3287 + if (s->s_root)
3288 + v9fs_dentry_release(s->s_root); /* clunk root */
3289
3290 kill_anon_super(s);
3291
3292 diff --git a/fs/affs/affs.h b/fs/affs/affs.h
3293 index e511dc6..0e40caa 100644
3294 --- a/fs/affs/affs.h
3295 +++ b/fs/affs/affs.h
3296 @@ -106,8 +106,8 @@ struct affs_sb_info {
3297 u32 s_last_bmap;
3298 struct buffer_head *s_bmap_bh;
3299 char *s_prefix; /* Prefix for volumes and assigns. */
3300 - int s_prefix_len; /* Length of prefix. */
3301 char s_volume[32]; /* Volume prefix for absolute symlinks. */
3302 + spinlock_t symlink_lock; /* protects the previous two */
3303 };
3304
3305 #define SF_INTL 0x0001 /* International filesystem. */
3306 diff --git a/fs/affs/namei.c b/fs/affs/namei.c
3307 index 960d336..d70bbba 100644
3308 --- a/fs/affs/namei.c
3309 +++ b/fs/affs/namei.c
3310 @@ -341,10 +341,13 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3311 p = (char *)AFFS_HEAD(bh)->table;
3312 lc = '/';
3313 if (*symname == '/') {
3314 + struct affs_sb_info *sbi = AFFS_SB(sb);
3315 while (*symname == '/')
3316 symname++;
3317 - while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */
3318 - *p++ = AFFS_SB(sb)->s_volume[i++];
3319 + spin_lock(&sbi->symlink_lock);
3320 + while (sbi->s_volume[i]) /* Cannot overflow */
3321 + *p++ = sbi->s_volume[i++];
3322 + spin_unlock(&sbi->symlink_lock);
3323 }
3324 while (i < maxlen && (c = *symname++)) {
3325 if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
3326 diff --git a/fs/affs/super.c b/fs/affs/super.c
3327 index 104fdcb..d41e967 100644
3328 --- a/fs/affs/super.c
3329 +++ b/fs/affs/super.c
3330 @@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3331 switch (token) {
3332 case Opt_bs:
3333 if (match_int(&args[0], &n))
3334 - return -EINVAL;
3335 + return 0;
3336 if (n != 512 && n != 1024 && n != 2048
3337 && n != 4096) {
3338 printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
3339 @@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3340 break;
3341 case Opt_mode:
3342 if (match_octal(&args[0], &option))
3343 - return 1;
3344 + return 0;
3345 *mode = option & 0777;
3346 *mount_opts |= SF_SETMODE;
3347 break;
3348 @@ -221,8 +221,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3349 *mount_opts |= SF_MUFS;
3350 break;
3351 case Opt_prefix:
3352 - /* Free any previous prefix */
3353 - kfree(*prefix);
3354 *prefix = match_strdup(&args[0]);
3355 if (!*prefix)
3356 return 0;
3357 @@ -233,21 +231,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
3358 break;
3359 case Opt_reserved:
3360 if (match_int(&args[0], reserved))
3361 - return 1;
3362 + return 0;
3363 break;
3364 case Opt_root:
3365 if (match_int(&args[0], root))
3366 - return 1;
3367 + return 0;
3368 break;
3369 case Opt_setgid:
3370 if (match_int(&args[0], &option))
3371 - return 1;
3372 + return 0;
3373 *gid = option;
3374 *mount_opts |= SF_SETGID;
3375 break;
3376 case Opt_setuid:
3377 if (match_int(&args[0], &option))
3378 - return -EINVAL;
3379 + return 0;
3380 *uid = option;
3381 *mount_opts |= SF_SETUID;
3382 break;
3383 @@ -311,11 +309,14 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
3384 return -ENOMEM;
3385 sb->s_fs_info = sbi;
3386 mutex_init(&sbi->s_bmlock);
3387 + spin_lock_init(&sbi->symlink_lock);
3388
3389 if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
3390 &blocksize,&sbi->s_prefix,
3391 sbi->s_volume, &mount_flags)) {
3392 printk(KERN_ERR "AFFS: Error parsing options\n");
3393 + kfree(sbi->s_prefix);
3394 + kfree(sbi);
3395 return -EINVAL;
3396 }
3397 /* N.B. after this point s_prefix must be released */
3398 @@ -516,14 +517,18 @@ affs_remount(struct super_block *sb, int *flags, char *data)
3399 unsigned long mount_flags;
3400 int res = 0;
3401 char *new_opts = kstrdup(data, GFP_KERNEL);
3402 + char volume[32];
3403 + char *prefix = NULL;
3404
3405 pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
3406
3407 *flags |= MS_NODIRATIME;
3408
3409 + memcpy(volume, sbi->s_volume, 32);
3410 if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
3411 - &blocksize, &sbi->s_prefix, sbi->s_volume,
3412 + &blocksize, &prefix, volume,
3413 &mount_flags)) {
3414 + kfree(prefix);
3415 kfree(new_opts);
3416 return -EINVAL;
3417 }
3418 @@ -534,6 +539,14 @@ affs_remount(struct super_block *sb, int *flags, char *data)
3419 sbi->s_mode = mode;
3420 sbi->s_uid = uid;
3421 sbi->s_gid = gid;
3422 + /* protect against readers */
3423 + spin_lock(&sbi->symlink_lock);
3424 + if (prefix) {
3425 + kfree(sbi->s_prefix);
3426 + sbi->s_prefix = prefix;
3427 + }
3428 + memcpy(sbi->s_volume, volume, 32);
3429 + spin_unlock(&sbi->symlink_lock);
3430
3431 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
3432 unlock_kernel();
3433 diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
3434 index 4178253..ee00f08 100644
3435 --- a/fs/affs/symlink.c
3436 +++ b/fs/affs/symlink.c
3437 @@ -20,7 +20,6 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
3438 int i, j;
3439 char c;
3440 char lc;
3441 - char *pf;
3442
3443 pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
3444
3445 @@ -32,11 +31,15 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
3446 j = 0;
3447 lf = (struct slink_front *)bh->b_data;
3448 lc = 0;
3449 - pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
3450
3451 if (strchr(lf->symname,':')) { /* Handle assign or volume name */
3452 + struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
3453 + char *pf;
3454 + spin_lock(&sbi->symlink_lock);
3455 + pf = sbi->s_prefix ? sbi->s_prefix : "/";
3456 while (i < 1023 && (c = pf[i]))
3457 link[i++] = c;
3458 + spin_unlock(&sbi->symlink_lock);
3459 while (i < 1023 && lf->symname[j] != ':')
3460 link[i++] = lf->symname[j++];
3461 if (i < 1023)
3462 diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
3463 index 6f60336..8f3d9fd 100644
3464 --- a/fs/bfs/inode.c
3465 +++ b/fs/bfs/inode.c
3466 @@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3467 struct inode *inode;
3468 unsigned i, imap_len;
3469 struct bfs_sb_info *info;
3470 - long ret = -EINVAL;
3471 + int ret = -EINVAL;
3472 unsigned long i_sblock, i_eblock, i_eoff, s_size;
3473
3474 info = kzalloc(sizeof(*info), GFP_KERNEL);
3475 if (!info)
3476 return -ENOMEM;
3477 + mutex_init(&info->bfs_lock);
3478 s->s_fs_info = info;
3479
3480 sb_set_blocksize(s, BFS_BSIZE);
3481
3482 - bh = sb_bread(s, 0);
3483 - if(!bh)
3484 + info->si_sbh = sb_bread(s, 0);
3485 + if (!info->si_sbh)
3486 goto out;
3487 - bfs_sb = (struct bfs_super_block *)bh->b_data;
3488 + bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
3489 if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
3490 if (!silent)
3491 printf("No BFS filesystem on %s (magic=%08x)\n",
3492 s->s_id, le32_to_cpu(bfs_sb->s_magic));
3493 - goto out;
3494 + goto out1;
3495 }
3496 if (BFS_UNCLEAN(bfs_sb, s) && !silent)
3497 printf("%s is unclean, continuing\n", s->s_id);
3498
3499 s->s_magic = BFS_MAGIC;
3500 - info->si_sbh = bh;
3501
3502 if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
3503 printf("Superblock is corrupted\n");
3504 - goto out;
3505 + goto out1;
3506 }
3507
3508 info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
3509 @@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3510 imap_len = (info->si_lasti / 8) + 1;
3511 info->si_imap = kzalloc(imap_len, GFP_KERNEL);
3512 if (!info->si_imap)
3513 - goto out;
3514 + goto out1;
3515 for (i = 0; i < BFS_ROOT_INO; i++)
3516 set_bit(i, info->si_imap);
3517
3518 @@ -398,15 +398,13 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3519 inode = bfs_iget(s, BFS_ROOT_INO);
3520 if (IS_ERR(inode)) {
3521 ret = PTR_ERR(inode);
3522 - kfree(info->si_imap);
3523 - goto out;
3524 + goto out2;
3525 }
3526 s->s_root = d_alloc_root(inode);
3527 if (!s->s_root) {
3528 iput(inode);
3529 ret = -ENOMEM;
3530 - kfree(info->si_imap);
3531 - goto out;
3532 + goto out2;
3533 }
3534
3535 info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
3536 @@ -419,10 +417,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3537 bh = sb_bread(s, info->si_blocks - 1);
3538 if (!bh) {
3539 printf("Last block not available: %lu\n", info->si_blocks - 1);
3540 - iput(inode);
3541 ret = -EIO;
3542 - kfree(info->si_imap);
3543 - goto out;
3544 + goto out3;
3545 }
3546 brelse(bh);
3547
3548 @@ -459,11 +455,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3549 printf("Inode 0x%08x corrupted\n", i);
3550
3551 brelse(bh);
3552 - s->s_root = NULL;
3553 - kfree(info->si_imap);
3554 - kfree(info);
3555 - s->s_fs_info = NULL;
3556 - return -EIO;
3557 + ret = -EIO;
3558 + goto out3;
3559 }
3560
3561 if (!di->i_ino) {
3562 @@ -483,11 +476,17 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
3563 s->s_dirt = 1;
3564 }
3565 dump_imap("read_super", s);
3566 - mutex_init(&info->bfs_lock);
3567 return 0;
3568
3569 +out3:
3570 + dput(s->s_root);
3571 + s->s_root = NULL;
3572 +out2:
3573 + kfree(info->si_imap);
3574 +out1:
3575 + brelse(info->si_sbh);
3576 out:
3577 - brelse(bh);
3578 + mutex_destroy(&info->bfs_lock);
3579 kfree(info);
3580 s->s_fs_info = NULL;
3581 return ret;
3582 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
3583 index b639dcf..0133b5a 100644
3584 --- a/fs/binfmt_aout.c
3585 +++ b/fs/binfmt_aout.c
3586 @@ -263,6 +263,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
3587 #else
3588 set_personality(PER_LINUX);
3589 #endif
3590 + setup_new_exec(bprm);
3591
3592 current->mm->end_code = ex.a_text +
3593 (current->mm->start_code = N_TXTADDR(ex));
3594 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
3595 index b9b3bb5..1ed37ba 100644
3596 --- a/fs/binfmt_elf.c
3597 +++ b/fs/binfmt_elf.c
3598 @@ -662,27 +662,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3599 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
3600 goto out_free_interp;
3601
3602 - /*
3603 - * The early SET_PERSONALITY here is so that the lookup
3604 - * for the interpreter happens in the namespace of the
3605 - * to-be-execed image. SET_PERSONALITY can select an
3606 - * alternate root.
3607 - *
3608 - * However, SET_PERSONALITY is NOT allowed to switch
3609 - * this task into the new images's memory mapping
3610 - * policy - that is, TASK_SIZE must still evaluate to
3611 - * that which is appropriate to the execing application.
3612 - * This is because exit_mmap() needs to have TASK_SIZE
3613 - * evaluate to the size of the old image.
3614 - *
3615 - * So if (say) a 64-bit application is execing a 32-bit
3616 - * application it is the architecture's responsibility
3617 - * to defer changing the value of TASK_SIZE until the
3618 - * switch really is going to happen - do this in
3619 - * flush_thread(). - akpm
3620 - */
3621 - SET_PERSONALITY(loc->elf_ex);
3622 -
3623 interpreter = open_exec(elf_interpreter);
3624 retval = PTR_ERR(interpreter);
3625 if (IS_ERR(interpreter))
3626 @@ -730,9 +709,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3627 /* Verify the interpreter has a valid arch */
3628 if (!elf_check_arch(&loc->interp_elf_ex))
3629 goto out_free_dentry;
3630 - } else {
3631 - /* Executables without an interpreter also need a personality */
3632 - SET_PERSONALITY(loc->elf_ex);
3633 }
3634
3635 /* Flush all traces of the currently running executable */
3636 @@ -752,7 +728,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
3637
3638 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3639 current->flags |= PF_RANDOMIZE;
3640 - arch_pick_mmap_layout(current->mm);
3641 +
3642 + setup_new_exec(bprm);
3643
3644 /* Do this so that we can load the interpreter, if need be. We will
3645 change some of these later */
3646 diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
3647 index 38502c6..e7a0bb4 100644
3648 --- a/fs/binfmt_elf_fdpic.c
3649 +++ b/fs/binfmt_elf_fdpic.c
3650 @@ -171,6 +171,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
3651 #ifdef ELF_FDPIC_PLAT_INIT
3652 unsigned long dynaddr;
3653 #endif
3654 +#ifndef CONFIG_MMU
3655 + unsigned long stack_prot;
3656 +#endif
3657 struct file *interpreter = NULL; /* to shut gcc up */
3658 char *interpreter_name = NULL;
3659 int executable_stack;
3660 @@ -316,6 +319,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
3661 * defunct, deceased, etc. after this point we have to exit via
3662 * error_kill */
3663 set_personality(PER_LINUX_FDPIC);
3664 + if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
3665 + current->personality |= READ_IMPLIES_EXEC;
3666 +
3667 + setup_new_exec(bprm);
3668 +
3669 set_binfmt(&elf_fdpic_format);
3670
3671 current->mm->start_code = 0;
3672 @@ -377,9 +385,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
3673 if (stack_size < PAGE_SIZE * 2)
3674 stack_size = PAGE_SIZE * 2;
3675
3676 + stack_prot = PROT_READ | PROT_WRITE;
3677 + if (executable_stack == EXSTACK_ENABLE_X ||
3678 + (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
3679 + stack_prot |= PROT_EXEC;
3680 +
3681 down_write(&current->mm->mmap_sem);
3682 - current->mm->start_brk = do_mmap(NULL, 0, stack_size,
3683 - PROT_READ | PROT_WRITE | PROT_EXEC,
3684 + current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
3685 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
3686 0);
3687
3688 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
3689 index a279665..ca88c46 100644
3690 --- a/fs/binfmt_flat.c
3691 +++ b/fs/binfmt_flat.c
3692 @@ -519,6 +519,7 @@ static int load_flat_file(struct linux_binprm * bprm,
3693
3694 /* OK, This is the point of no return */
3695 set_personality(PER_LINUX_32BIT);
3696 + setup_new_exec(bprm);
3697 }
3698
3699 /*
3700 diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
3701 index eff74b9..35cf002 100644
3702 --- a/fs/binfmt_som.c
3703 +++ b/fs/binfmt_som.c
3704 @@ -227,6 +227,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
3705 /* OK, This is the point of no return */
3706 current->flags &= ~PF_FORKNOEXEC;
3707 current->personality = PER_HPUX;
3708 + setup_new_exec(bprm);
3709
3710 /* Set the task size for HP-UX processes such that
3711 * the gateway page is outside the address space.
3712 diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
3713 index 49a34e7..a16f29e 100644
3714 --- a/fs/bio-integrity.c
3715 +++ b/fs/bio-integrity.c
3716 @@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
3717
3718 static inline int use_bip_pool(unsigned int idx)
3719 {
3720 - if (idx == BIOVEC_NR_POOLS)
3721 + if (idx == BIOVEC_MAX_IDX)
3722 return 1;
3723
3724 return 0;
3725 @@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
3726
3727 /* Use mempool if lower order alloc failed or max vecs were requested */
3728 if (bip == NULL) {
3729 + idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
3730 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
3731
3732 if (unlikely(bip == NULL)) {
3733 diff --git a/fs/bio.c b/fs/bio.c
3734 index 12da5db..e0c9e71 100644
3735 --- a/fs/bio.c
3736 +++ b/fs/bio.c
3737 @@ -542,13 +542,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
3738
3739 if (page == prev->bv_page &&
3740 offset == prev->bv_offset + prev->bv_len) {
3741 + unsigned int prev_bv_len = prev->bv_len;
3742 prev->bv_len += len;
3743
3744 if (q->merge_bvec_fn) {
3745 struct bvec_merge_data bvm = {
3746 + /* prev_bvec is already charged in
3747 + bi_size, discharge it in order to
3748 + simulate merging updated prev_bvec
3749 + as new bvec. */
3750 .bi_bdev = bio->bi_bdev,
3751 .bi_sector = bio->bi_sector,
3752 - .bi_size = bio->bi_size,
3753 + .bi_size = bio->bi_size - prev_bv_len,
3754 .bi_rw = bio->bi_rw,
3755 };
3756
3757 diff --git a/fs/exec.c b/fs/exec.c
3758 index ba112bd..7fa4efd 100644
3759 --- a/fs/exec.c
3760 +++ b/fs/exec.c
3761 @@ -931,9 +931,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
3762
3763 int flush_old_exec(struct linux_binprm * bprm)
3764 {
3765 - char * name;
3766 - int i, ch, retval;
3767 - char tcomm[sizeof(current->comm)];
3768 + int retval;
3769
3770 /*
3771 * Make sure we have a private signal table and that
3772 @@ -954,6 +952,25 @@ int flush_old_exec(struct linux_binprm * bprm)
3773
3774 bprm->mm = NULL; /* We're using it now */
3775
3776 + current->flags &= ~PF_RANDOMIZE;
3777 + flush_thread();
3778 + current->personality &= ~bprm->per_clear;
3779 +
3780 + return 0;
3781 +
3782 +out:
3783 + return retval;
3784 +}
3785 +EXPORT_SYMBOL(flush_old_exec);
3786 +
3787 +void setup_new_exec(struct linux_binprm * bprm)
3788 +{
3789 + int i, ch;
3790 + char * name;
3791 + char tcomm[sizeof(current->comm)];
3792 +
3793 + arch_pick_mmap_layout(current->mm);
3794 +
3795 /* This is the point of no return */
3796 current->sas_ss_sp = current->sas_ss_size = 0;
3797
3798 @@ -975,9 +992,6 @@ int flush_old_exec(struct linux_binprm * bprm)
3799 tcomm[i] = '\0';
3800 set_task_comm(current, tcomm);
3801
3802 - current->flags &= ~PF_RANDOMIZE;
3803 - flush_thread();
3804 -
3805 /* Set the new mm task size. We have to do that late because it may
3806 * depend on TIF_32BIT which is only updated in flush_thread() on
3807 * some architectures like powerpc
3808 @@ -993,8 +1007,6 @@ int flush_old_exec(struct linux_binprm * bprm)
3809 set_dumpable(current->mm, suid_dumpable);
3810 }
3811
3812 - current->personality &= ~bprm->per_clear;
3813 -
3814 /*
3815 * Flush performance counters when crossing a
3816 * security domain:
3817 @@ -1009,14 +1021,8 @@ int flush_old_exec(struct linux_binprm * bprm)
3818
3819 flush_signal_handlers(current, 0);
3820 flush_old_files(current->files);
3821 -
3822 - return 0;
3823 -
3824 -out:
3825 - return retval;
3826 }
3827 -
3828 -EXPORT_SYMBOL(flush_old_exec);
3829 +EXPORT_SYMBOL(setup_new_exec);
3830
3831 /*
3832 * Prepare credentials and lock ->cred_guard_mutex.
3833 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3834 index c18913a..a9f5e13 100644
3835 --- a/fs/fuse/file.c
3836 +++ b/fs/fuse/file.c
3837 @@ -828,6 +828,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
3838 if (!page)
3839 break;
3840
3841 + if (mapping_writably_mapped(mapping))
3842 + flush_dcache_page(page);
3843 +
3844 pagefault_disable();
3845 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
3846 pagefault_enable();
3847 diff --git a/fs/romfs/super.c b/fs/romfs/super.c
3848 index c117fa8..42d2135 100644
3849 --- a/fs/romfs/super.c
3850 +++ b/fs/romfs/super.c
3851 @@ -544,6 +544,7 @@ error:
3852 error_rsb_inval:
3853 ret = -EINVAL;
3854 error_rsb:
3855 + kfree(rsb);
3856 return ret;
3857 }
3858
3859 diff --git a/include/linux/acpi.h b/include/linux/acpi.h
3860 index dfcd920..c010b94 100644
3861 --- a/include/linux/acpi.h
3862 +++ b/include/linux/acpi.h
3863 @@ -253,6 +253,13 @@ void __init acpi_old_suspend_ordering(void);
3864 void __init acpi_s4_no_nvs(void);
3865 #endif /* CONFIG_PM_SLEEP */
3866
3867 +struct acpi_osc_context {
3868 + char *uuid_str; /* uuid string */
3869 + int rev;
3870 + struct acpi_buffer cap; /* arg2/arg3 */
3871 + struct acpi_buffer ret; /* free by caller if success */
3872 +};
3873 +
3874 #define OSC_QUERY_TYPE 0
3875 #define OSC_SUPPORT_TYPE 1
3876 #define OSC_CONTROL_TYPE 2
3877 @@ -265,6 +272,15 @@ void __init acpi_s4_no_nvs(void);
3878 #define OSC_INVALID_REVISION_ERROR 8
3879 #define OSC_CAPABILITIES_MASK_ERROR 16
3880
3881 +acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
3882 +
3883 +/* platform-wide _OSC bits */
3884 +#define OSC_SB_PAD_SUPPORT 1
3885 +#define OSC_SB_PPC_OST_SUPPORT 2
3886 +#define OSC_SB_PR3_SUPPORT 4
3887 +#define OSC_SB_CPUHP_OST_SUPPORT 8
3888 +#define OSC_SB_APEI_SUPPORT 16
3889 +
3890 /* _OSC DW1 Definition (OS Support Fields) */
3891 #define OSC_EXT_PCI_CONFIG_SUPPORT 1
3892 #define OSC_ACTIVE_STATE_PWR_SUPPORT 2
3893 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
3894 index aece486..340f441 100644
3895 --- a/include/linux/binfmts.h
3896 +++ b/include/linux/binfmts.h
3897 @@ -101,6 +101,7 @@ extern int prepare_binprm(struct linux_binprm *);
3898 extern int __must_check remove_arg_zero(struct linux_binprm *);
3899 extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
3900 extern int flush_old_exec(struct linux_binprm * bprm);
3901 +extern void setup_new_exec(struct linux_binprm * bprm);
3902
3903 extern int suid_dumpable;
3904 #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
3905 diff --git a/include/linux/connector.h b/include/linux/connector.h
3906 index 3a14615..ecb61c4 100644
3907 --- a/include/linux/connector.h
3908 +++ b/include/linux/connector.h
3909 @@ -24,9 +24,6 @@
3910
3911 #include <linux/types.h>
3912
3913 -#define CN_IDX_CONNECTOR 0xffffffff
3914 -#define CN_VAL_CONNECTOR 0xffffffff
3915 -
3916 /*
3917 * Process Events connector unique ids -- used for message routing
3918 */
3919 @@ -73,30 +70,6 @@ struct cn_msg {
3920 __u8 data[0];
3921 };
3922
3923 -/*
3924 - * Notify structure - requests notification about
3925 - * registering/unregistering idx/val in range [first, first+range].
3926 - */
3927 -struct cn_notify_req {
3928 - __u32 first;
3929 - __u32 range;
3930 -};
3931 -
3932 -/*
3933 - * Main notification control message
3934 - * *_notify_num - number of appropriate cn_notify_req structures after
3935 - * this struct.
3936 - * group - notification receiver's idx.
3937 - * len - total length of the attached data.
3938 - */
3939 -struct cn_ctl_msg {
3940 - __u32 idx_notify_num;
3941 - __u32 val_notify_num;
3942 - __u32 group;
3943 - __u32 len;
3944 - __u8 data[0];
3945 -};
3946 -
3947 #ifdef __KERNEL__
3948
3949 #include <asm/atomic.h>
3950 @@ -149,11 +122,6 @@ struct cn_callback_entry {
3951 u32 seq, group;
3952 };
3953
3954 -struct cn_ctl_entry {
3955 - struct list_head notify_entry;
3956 - struct cn_ctl_msg *msg;
3957 -};
3958 -
3959 struct cn_dev {
3960 struct cb_id id;
3961
3962 diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
3963 index ad27c7d..9cd0bcf 100644
3964 --- a/include/linux/inetdevice.h
3965 +++ b/include/linux/inetdevice.h
3966 @@ -83,6 +83,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
3967 #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
3968 #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
3969 #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
3970 +#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
3971 #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
3972 ACCEPT_SOURCE_ROUTE)
3973 #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
3974 diff --git a/include/linux/kvm.h b/include/linux/kvm.h
3975 index 8908dd6..0eadd71 100644
3976 --- a/include/linux/kvm.h
3977 +++ b/include/linux/kvm.h
3978 @@ -439,6 +439,7 @@ struct kvm_ioeventfd {
3979 #endif
3980 #define KVM_CAP_IOEVENTFD 36
3981 #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
3982 +#define KVM_CAP_ADJUST_CLOCK 39
3983
3984 #ifdef KVM_CAP_IRQ_ROUTING
3985
3986 @@ -501,6 +502,12 @@ struct kvm_irqfd {
3987 __u8 pad[20];
3988 };
3989
3990 +struct kvm_clock_data {
3991 + __u64 clock;
3992 + __u32 flags;
3993 + __u32 pad[9];
3994 +};
3995 +
3996 /*
3997 * ioctls for VM fds
3998 */
3999 @@ -550,6 +557,8 @@ struct kvm_irqfd {
4000 #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
4001 #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
4002 #define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
4003 +#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
4004 +#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
4005
4006 /*
4007 * ioctls for vcpu fds
4008 diff --git a/include/linux/libata.h b/include/linux/libata.h
4009 index 8769864..b0f6d97 100644
4010 --- a/include/linux/libata.h
4011 +++ b/include/linux/libata.h
4012 @@ -354,6 +354,9 @@ enum {
4013 /* max tries if error condition is still set after ->error_handler */
4014 ATA_EH_MAX_TRIES = 5,
4015
4016 + /* sometimes resuming a link requires several retries */
4017 + ATA_LINK_RESUME_TRIES = 5,
4018 +
4019 /* how hard are we gonna try to probe/recover devices */
4020 ATA_PROBE_MAX_TRIES = 3,
4021 ATA_EH_DEV_TRIES = 3,
4022 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
4023 index ed5d750..3c62ed4 100644
4024 --- a/include/linux/pagemap.h
4025 +++ b/include/linux/pagemap.h
4026 @@ -253,6 +253,8 @@ extern struct page * read_cache_page_async(struct address_space *mapping,
4027 extern struct page * read_cache_page(struct address_space *mapping,
4028 pgoff_t index, filler_t *filler,
4029 void *data);
4030 +extern struct page * read_cache_page_gfp(struct address_space *mapping,
4031 + pgoff_t index, gfp_t gfp_mask);
4032 extern int read_cache_pages(struct address_space *mapping,
4033 struct list_head *pages, filler_t *filler, void *data);
4034
4035 diff --git a/include/linux/sched.h b/include/linux/sched.h
4036 index 0f67914..d3dce7d 100644
4037 --- a/include/linux/sched.h
4038 +++ b/include/linux/sched.h
4039 @@ -1354,7 +1354,7 @@ struct task_struct {
4040 char comm[TASK_COMM_LEN]; /* executable name excluding path
4041 - access with [gs]et_task_comm (which lock
4042 it with task_lock())
4043 - - initialized normally by flush_old_exec */
4044 + - initialized normally by setup_new_exec */
4045 /* file system info */
4046 int link_count, total_link_count;
4047 #ifdef CONFIG_SYSVIPC
4048 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
4049 index 1e4743e..0eb6942 100644
4050 --- a/include/linux/sysctl.h
4051 +++ b/include/linux/sysctl.h
4052 @@ -490,6 +490,7 @@ enum
4053 NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
4054 NET_IPV4_CONF_ARP_ACCEPT=21,
4055 NET_IPV4_CONF_ARP_NOTIFY=22,
4056 + NET_IPV4_CONF_SRC_VMARK=24,
4057 __NET_IPV4_CONF_MAX
4058 };
4059
4060 diff --git a/include/net/netrom.h b/include/net/netrom.h
4061 index 15696b1..ab170a6 100644
4062 --- a/include/net/netrom.h
4063 +++ b/include/net/netrom.h
4064 @@ -132,6 +132,8 @@ static __inline__ void nr_node_put(struct nr_node *nr_node)
4065 static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
4066 {
4067 if (atomic_dec_and_test(&nr_neigh->refcount)) {
4068 + if (nr_neigh->ax25)
4069 + ax25_cb_put(nr_neigh->ax25);
4070 kfree(nr_neigh->digipeat);
4071 kfree(nr_neigh);
4072 }
4073 diff --git a/kernel/cred.c b/kernel/cred.c
4074 index dd76cfe..1ed8ca1 100644
4075 --- a/kernel/cred.c
4076 +++ b/kernel/cred.c
4077 @@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
4078 #ifdef CONFIG_KEYS
4079 new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
4080 if (!new->tgcred) {
4081 - kfree(new);
4082 + kmem_cache_free(cred_jar, new);
4083 return NULL;
4084 }
4085 atomic_set(&new->tgcred->usage, 1);
4086 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
4087 index b6e7aae..469193c 100644
4088 --- a/kernel/sysctl_check.c
4089 +++ b/kernel/sysctl_check.c
4090 @@ -220,6 +220,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
4091 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
4092 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
4093 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
4094 + { NET_IPV4_CONF_SRC_VMARK, "src_valid_mark" },
4095 {}
4096 };
4097
4098 diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4099 index 5155dc3..ecc7adb 100644
4100 --- a/kernel/time/clocksource.c
4101 +++ b/kernel/time/clocksource.c
4102 @@ -413,8 +413,6 @@ void clocksource_touch_watchdog(void)
4103 clocksource_resume_watchdog();
4104 }
4105
4106 -#ifdef CONFIG_GENERIC_TIME
4107 -
4108 /**
4109 * clocksource_max_deferment - Returns max time the clocksource can be deferred
4110 * @cs: Pointer to clocksource
4111 @@ -456,6 +454,8 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
4112 return max_nsecs - (max_nsecs >> 5);
4113 }
4114
4115 +#ifdef CONFIG_GENERIC_TIME
4116 +
4117 /**
4118 * clocksource_select - Select the best clocksource available
4119 *
4120 diff --git a/mm/filemap.c b/mm/filemap.c
4121 index ef169f3..8e96c90 100644
4122 --- a/mm/filemap.c
4123 +++ b/mm/filemap.c
4124 @@ -1655,14 +1655,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap);
4125 static struct page *__read_cache_page(struct address_space *mapping,
4126 pgoff_t index,
4127 int (*filler)(void *,struct page*),
4128 - void *data)
4129 + void *data,
4130 + gfp_t gfp)
4131 {
4132 struct page *page;
4133 int err;
4134 repeat:
4135 page = find_get_page(mapping, index);
4136 if (!page) {
4137 - page = page_cache_alloc_cold(mapping);
4138 + page = __page_cache_alloc(gfp | __GFP_COLD);
4139 if (!page)
4140 return ERR_PTR(-ENOMEM);
4141 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
4142 @@ -1682,31 +1683,18 @@ repeat:
4143 return page;
4144 }
4145
4146 -/**
4147 - * read_cache_page_async - read into page cache, fill it if needed
4148 - * @mapping: the page's address_space
4149 - * @index: the page index
4150 - * @filler: function to perform the read
4151 - * @data: destination for read data
4152 - *
4153 - * Same as read_cache_page, but don't wait for page to become unlocked
4154 - * after submitting it to the filler.
4155 - *
4156 - * Read into the page cache. If a page already exists, and PageUptodate() is
4157 - * not set, try to fill the page but don't wait for it to become unlocked.
4158 - *
4159 - * If the page does not get brought uptodate, return -EIO.
4160 - */
4161 -struct page *read_cache_page_async(struct address_space *mapping,
4162 +static struct page *do_read_cache_page(struct address_space *mapping,
4163 pgoff_t index,
4164 int (*filler)(void *,struct page*),
4165 - void *data)
4166 + void *data,
4167 + gfp_t gfp)
4168 +
4169 {
4170 struct page *page;
4171 int err;
4172
4173 retry:
4174 - page = __read_cache_page(mapping, index, filler, data);
4175 + page = __read_cache_page(mapping, index, filler, data, gfp);
4176 if (IS_ERR(page))
4177 return page;
4178 if (PageUptodate(page))
4179 @@ -1731,8 +1719,67 @@ out:
4180 mark_page_accessed(page);
4181 return page;
4182 }
4183 +
4184 +/**
4185 + * read_cache_page_async - read into page cache, fill it if needed
4186 + * @mapping: the page's address_space
4187 + * @index: the page index
4188 + * @filler: function to perform the read
4189 + * @data: destination for read data
4190 + *
4191 + * Same as read_cache_page, but don't wait for page to become unlocked
4192 + * after submitting it to the filler.
4193 + *
4194 + * Read into the page cache. If a page already exists, and PageUptodate() is
4195 + * not set, try to fill the page but don't wait for it to become unlocked.
4196 + *
4197 + * If the page does not get brought uptodate, return -EIO.
4198 + */
4199 +struct page *read_cache_page_async(struct address_space *mapping,
4200 + pgoff_t index,
4201 + int (*filler)(void *,struct page*),
4202 + void *data)
4203 +{
4204 + return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
4205 +}
4206 EXPORT_SYMBOL(read_cache_page_async);
4207
4208 +static struct page *wait_on_page_read(struct page *page)
4209 +{
4210 + if (!IS_ERR(page)) {
4211 + wait_on_page_locked(page);
4212 + if (!PageUptodate(page)) {
4213 + page_cache_release(page);
4214 + page = ERR_PTR(-EIO);
4215 + }
4216 + }
4217 + return page;
4218 +}
4219 +
4220 +/**
4221 + * read_cache_page_gfp - read into page cache, using specified page allocation flags.
4222 + * @mapping: the page's address_space
4223 + * @index: the page index
4224 + * @gfp: the page allocator flags to use if allocating
4225 + *
4226 + * This is the same as "read_mapping_page(mapping, index, NULL)", but with
4227 + * any new page allocations done using the specified allocation flags. Note
4228 + * that the Radix tree operations will still use GFP_KERNEL, so you can't
4229 + * expect to do this atomically or anything like that - but you can pass in
4230 + * other page requirements.
4231 + *
4232 + * If the page does not get brought uptodate, return -EIO.
4233 + */
4234 +struct page *read_cache_page_gfp(struct address_space *mapping,
4235 + pgoff_t index,
4236 + gfp_t gfp)
4237 +{
4238 + filler_t *filler = (filler_t *)mapping->a_ops->readpage;
4239 +
4240 + return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
4241 +}
4242 +EXPORT_SYMBOL(read_cache_page_gfp);
4243 +
4244 /**
4245 * read_cache_page - read into page cache, fill it if needed
4246 * @mapping: the page's address_space
4247 @@ -1750,18 +1797,7 @@ struct page *read_cache_page(struct address_space *mapping,
4248 int (*filler)(void *,struct page*),
4249 void *data)
4250 {
4251 - struct page *page;
4252 -
4253 - page = read_cache_page_async(mapping, index, filler, data);
4254 - if (IS_ERR(page))
4255 - goto out;
4256 - wait_on_page_locked(page);
4257 - if (!PageUptodate(page)) {
4258 - page_cache_release(page);
4259 - page = ERR_PTR(-EIO);
4260 - }
4261 - out:
4262 - return page;
4263 + return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
4264 }
4265 EXPORT_SYMBOL(read_cache_page);
4266
4267 @@ -2217,6 +2253,9 @@ again:
4268 if (unlikely(status))
4269 break;
4270
4271 + if (mapping_writably_mapped(mapping))
4272 + flush_dcache_page(page);
4273 +
4274 pagefault_disable();
4275 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
4276 pagefault_enable();
4277 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4278 index 3a78e2e..36992b6 100644
4279 --- a/mm/page_alloc.c
4280 +++ b/mm/page_alloc.c
4281 @@ -559,8 +559,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
4282 page = list_entry(list->prev, struct page, lru);
4283 /* must delete as __free_one_page list manipulates */
4284 list_del(&page->lru);
4285 - __free_one_page(page, zone, 0, migratetype);
4286 - trace_mm_page_pcpu_drain(page, 0, migratetype);
4287 + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
4288 + __free_one_page(page, zone, 0, page_private(page));
4289 + trace_mm_page_pcpu_drain(page, 0, page_private(page));
4290 } while (--count && --batch_free && !list_empty(list));
4291 }
4292 spin_unlock(&zone->lock);
4293 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
4294 index a3a99d3..c228731 100644
4295 --- a/mm/vmalloc.c
4296 +++ b/mm/vmalloc.c
4297 @@ -509,6 +509,9 @@ static unsigned long lazy_max_pages(void)
4298
4299 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
4300
4301 +/* for per-CPU blocks */
4302 +static void purge_fragmented_blocks_allcpus(void);
4303 +
4304 /*
4305 * Purges all lazily-freed vmap areas.
4306 *
4307 @@ -539,6 +542,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
4308 } else
4309 spin_lock(&purge_lock);
4310
4311 + if (sync)
4312 + purge_fragmented_blocks_allcpus();
4313 +
4314 rcu_read_lock();
4315 list_for_each_entry_rcu(va, &vmap_area_list, list) {
4316 if (va->flags & VM_LAZY_FREE) {
4317 @@ -667,8 +673,6 @@ static bool vmap_initialized __read_mostly = false;
4318 struct vmap_block_queue {
4319 spinlock_t lock;
4320 struct list_head free;
4321 - struct list_head dirty;
4322 - unsigned int nr_dirty;
4323 };
4324
4325 struct vmap_block {
4326 @@ -678,10 +682,9 @@ struct vmap_block {
4327 unsigned long free, dirty;
4328 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
4329 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
4330 - union {
4331 - struct list_head free_list;
4332 - struct rcu_head rcu_head;
4333 - };
4334 + struct list_head free_list;
4335 + struct rcu_head rcu_head;
4336 + struct list_head purge;
4337 };
4338
4339 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
4340 @@ -757,7 +760,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
4341 vbq = &get_cpu_var(vmap_block_queue);
4342 vb->vbq = vbq;
4343 spin_lock(&vbq->lock);
4344 - list_add(&vb->free_list, &vbq->free);
4345 + list_add_rcu(&vb->free_list, &vbq->free);
4346 spin_unlock(&vbq->lock);
4347 put_cpu_var(vmap_cpu_blocks);
4348
4349 @@ -776,8 +779,6 @@ static void free_vmap_block(struct vmap_block *vb)
4350 struct vmap_block *tmp;
4351 unsigned long vb_idx;
4352
4353 - BUG_ON(!list_empty(&vb->free_list));
4354 -
4355 vb_idx = addr_to_vb_idx(vb->va->va_start);
4356 spin_lock(&vmap_block_tree_lock);
4357 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
4358 @@ -788,12 +789,61 @@ static void free_vmap_block(struct vmap_block *vb)
4359 call_rcu(&vb->rcu_head, rcu_free_vb);
4360 }
4361
4362 +static void purge_fragmented_blocks(int cpu)
4363 +{
4364 + LIST_HEAD(purge);
4365 + struct vmap_block *vb;
4366 + struct vmap_block *n_vb;
4367 + struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
4368 +
4369 + rcu_read_lock();
4370 + list_for_each_entry_rcu(vb, &vbq->free, free_list) {
4371 +
4372 + if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
4373 + continue;
4374 +
4375 + spin_lock(&vb->lock);
4376 + if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
4377 + vb->free = 0; /* prevent further allocs after releasing lock */
4378 + vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
4379 + bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
4380 + bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
4381 + spin_lock(&vbq->lock);
4382 + list_del_rcu(&vb->free_list);
4383 + spin_unlock(&vbq->lock);
4384 + spin_unlock(&vb->lock);
4385 + list_add_tail(&vb->purge, &purge);
4386 + } else
4387 + spin_unlock(&vb->lock);
4388 + }
4389 + rcu_read_unlock();
4390 +
4391 + list_for_each_entry_safe(vb, n_vb, &purge, purge) {
4392 + list_del(&vb->purge);
4393 + free_vmap_block(vb);
4394 + }
4395 +}
4396 +
4397 +static void purge_fragmented_blocks_thiscpu(void)
4398 +{
4399 + purge_fragmented_blocks(smp_processor_id());
4400 +}
4401 +
4402 +static void purge_fragmented_blocks_allcpus(void)
4403 +{
4404 + int cpu;
4405 +
4406 + for_each_possible_cpu(cpu)
4407 + purge_fragmented_blocks(cpu);
4408 +}
4409 +
4410 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
4411 {
4412 struct vmap_block_queue *vbq;
4413 struct vmap_block *vb;
4414 unsigned long addr = 0;
4415 unsigned int order;
4416 + int purge = 0;
4417
4418 BUG_ON(size & ~PAGE_MASK);
4419 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
4420 @@ -806,24 +856,37 @@ again:
4421 int i;
4422
4423 spin_lock(&vb->lock);
4424 + if (vb->free < 1UL << order)
4425 + goto next;
4426 i = bitmap_find_free_region(vb->alloc_map,
4427 VMAP_BBMAP_BITS, order);
4428
4429 - if (i >= 0) {
4430 - addr = vb->va->va_start + (i << PAGE_SHIFT);
4431 - BUG_ON(addr_to_vb_idx(addr) !=
4432 - addr_to_vb_idx(vb->va->va_start));
4433 - vb->free -= 1UL << order;
4434 - if (vb->free == 0) {
4435 - spin_lock(&vbq->lock);
4436 - list_del_init(&vb->free_list);
4437 - spin_unlock(&vbq->lock);
4438 + if (i < 0) {
4439 + if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
4440 + /* fragmented and no outstanding allocations */
4441 + BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
4442 + purge = 1;
4443 }
4444 - spin_unlock(&vb->lock);
4445 - break;
4446 + goto next;
4447 }
4448 + addr = vb->va->va_start + (i << PAGE_SHIFT);
4449 + BUG_ON(addr_to_vb_idx(addr) !=
4450 + addr_to_vb_idx(vb->va->va_start));
4451 + vb->free -= 1UL << order;
4452 + if (vb->free == 0) {
4453 + spin_lock(&vbq->lock);
4454 + list_del_rcu(&vb->free_list);
4455 + spin_unlock(&vbq->lock);
4456 + }
4457 + spin_unlock(&vb->lock);
4458 + break;
4459 +next:
4460 spin_unlock(&vb->lock);
4461 }
4462 +
4463 + if (purge)
4464 + purge_fragmented_blocks_thiscpu();
4465 +
4466 put_cpu_var(vmap_cpu_blocks);
4467 rcu_read_unlock();
4468
4469 @@ -860,11 +923,11 @@ static void vb_free(const void *addr, unsigned long size)
4470 BUG_ON(!vb);
4471
4472 spin_lock(&vb->lock);
4473 - bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
4474 + BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
4475
4476 vb->dirty += 1UL << order;
4477 if (vb->dirty == VMAP_BBMAP_BITS) {
4478 - BUG_ON(vb->free || !list_empty(&vb->free_list));
4479 + BUG_ON(vb->free);
4480 spin_unlock(&vb->lock);
4481 free_vmap_block(vb);
4482 } else
4483 @@ -1033,8 +1096,6 @@ void __init vmalloc_init(void)
4484 vbq = &per_cpu(vmap_block_queue, i);
4485 spin_lock_init(&vbq->lock);
4486 INIT_LIST_HEAD(&vbq->free);
4487 - INIT_LIST_HEAD(&vbq->dirty);
4488 - vbq->nr_dirty = 0;
4489 }
4490
4491 /* Import existing vmlist entries. */
4492 diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
4493 index bf706f8..1491260 100644
4494 --- a/net/ax25/ax25_out.c
4495 +++ b/net/ax25/ax25_out.c
4496 @@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
4497 #endif
4498 }
4499
4500 + /*
4501 + * There is one ref for the state machine; a caller needs
4502 + * one more to put it back, just like with the existing one.
4503 + */
4504 + ax25_cb_hold(ax25);
4505 +
4506 ax25_cb_add(ax25);
4507
4508 ax25->state = AX25_STATE_1;
4509 diff --git a/net/core/sock.c b/net/core/sock.c
4510 index 7626b6a..6605e75 100644
4511 --- a/net/core/sock.c
4512 +++ b/net/core/sock.c
4513 @@ -1181,6 +1181,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
4514
4515 if (newsk->sk_prot->sockets_allocated)
4516 percpu_counter_inc(newsk->sk_prot->sockets_allocated);
4517 +
4518 + if (sock_flag(newsk, SOCK_TIMESTAMP) ||
4519 + sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
4520 + net_enable_timestamp();
4521 }
4522 out:
4523 return newsk;
4524 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
4525 index 5df2f6a..0030e73 100644
4526 --- a/net/ipv4/devinet.c
4527 +++ b/net/ipv4/devinet.c
4528 @@ -1450,6 +1450,7 @@ static struct devinet_sysctl_table {
4529 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
4530 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
4531 "accept_source_route"),
4532 + DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
4533 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
4534 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
4535 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
4536 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
4537 index aa00398..29391ee 100644
4538 --- a/net/ipv4/fib_frontend.c
4539 +++ b/net/ipv4/fib_frontend.c
4540 @@ -251,6 +251,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
4541 if (in_dev) {
4542 no_addr = in_dev->ifa_list == NULL;
4543 rpf = IN_DEV_RPFILTER(in_dev);
4544 + if (mark && !IN_DEV_SRC_VMARK(in_dev))
4545 + fl.mark = 0;
4546 }
4547 rcu_read_unlock();
4548
4549 diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
4550 index 37b9051..d87645e 100644
4551 --- a/net/mac80211/driver-trace.h
4552 +++ b/net/mac80211/driver-trace.h
4553 @@ -655,7 +655,7 @@ TRACE_EVENT(drv_ampdu_action,
4554 __entry->ret = ret;
4555 __entry->action = action;
4556 __entry->tid = tid;
4557 - __entry->ssn = *ssn;
4558 + __entry->ssn = ssn ? *ssn : 0;
4559 ),
4560
4561 TP_printk(
4562 diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
4563 index 4eb1ac9..850ffc0 100644
4564 --- a/net/netrom/nr_route.c
4565 +++ b/net/netrom/nr_route.c
4566 @@ -842,12 +842,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
4567 dptr = skb_push(skb, 1);
4568 *dptr = AX25_P_NETROM;
4569
4570 - ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
4571 - if (nr_neigh->ax25 && ax25s) {
4572 - /* We were already holding this ax25_cb */
4573 + ax25s = nr_neigh->ax25;
4574 + nr_neigh->ax25 = ax25_send_frame(skb, 256,
4575 + (ax25_address *)dev->dev_addr,
4576 + &nr_neigh->callsign,
4577 + nr_neigh->digipeat, nr_neigh->dev);
4578 + if (ax25s)
4579 ax25_cb_put(ax25s);
4580 - }
4581 - nr_neigh->ax25 = ax25s;
4582
4583 dev_put(dev);
4584 ret = (nr_neigh->ax25 != NULL);
4585 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
4586 index f2d116a..41866eb 100644
4587 --- a/net/packet/af_packet.c
4588 +++ b/net/packet/af_packet.c
4589 @@ -1028,8 +1028,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
4590
4591 status = TP_STATUS_SEND_REQUEST;
4592 err = dev_queue_xmit(skb);
4593 - if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
4594 - goto out_xmit;
4595 + if (unlikely(err > 0)) {
4596 + err = net_xmit_errno(err);
4597 + if (err && __packet_get_status(po, ph) ==
4598 + TP_STATUS_AVAILABLE) {
4599 + /* skb was destructed already */
4600 + skb = NULL;
4601 + goto out_status;
4602 + }
4603 + /*
4604 + * skb was dropped but not destructed yet;
4605 + * let's treat it like congestion or err < 0
4606 + */
4607 + err = 0;
4608 + }
4609 packet_increment_head(&po->tx_ring);
4610 len_sum += tp_len;
4611 } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
4612 @@ -1039,9 +1051,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
4613 err = len_sum;
4614 goto out_put;
4615
4616 -out_xmit:
4617 - skb->destructor = sock_wfree;
4618 - atomic_dec(&po->tx_ring.pending);
4619 out_status:
4620 __packet_set_status(po, ph, status);
4621 kfree_skb(skb);
4622 diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
4623 index bd86a63..5ef5f69 100644
4624 --- a/net/rose/rose_link.c
4625 +++ b/net/rose/rose_link.c
4626 @@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param)
4627 static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
4628 {
4629 ax25_address *rose_call;
4630 + ax25_cb *ax25s;
4631
4632 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
4633 rose_call = (ax25_address *)neigh->dev->dev_addr;
4634 else
4635 rose_call = &rose_callsign;
4636
4637 + ax25s = neigh->ax25;
4638 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
4639 + if (ax25s)
4640 + ax25_cb_put(ax25s);
4641
4642 return (neigh->ax25 != NULL);
4643 }
4644 @@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
4645 static int rose_link_up(struct rose_neigh *neigh)
4646 {
4647 ax25_address *rose_call;
4648 + ax25_cb *ax25s;
4649
4650 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
4651 rose_call = (ax25_address *)neigh->dev->dev_addr;
4652 else
4653 rose_call = &rose_callsign;
4654
4655 + ax25s = neigh->ax25;
4656 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
4657 + if (ax25s)
4658 + ax25_cb_put(ax25s);
4659
4660 return (neigh->ax25 != NULL);
4661 }
4662 diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
4663 index f3e2198..08230fa 100644
4664 --- a/net/rose/rose_route.c
4665 +++ b/net/rose/rose_route.c
4666 @@ -234,6 +234,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
4667
4668 if ((s = rose_neigh_list) == rose_neigh) {
4669 rose_neigh_list = rose_neigh->next;
4670 + if (rose_neigh->ax25)
4671 + ax25_cb_put(rose_neigh->ax25);
4672 kfree(rose_neigh->digipeat);
4673 kfree(rose_neigh);
4674 return;
4675 @@ -242,6 +244,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
4676 while (s != NULL && s->next != NULL) {
4677 if (s->next == rose_neigh) {
4678 s->next = rose_neigh->next;
4679 + if (rose_neigh->ax25)
4680 + ax25_cb_put(rose_neigh->ax25);
4681 kfree(rose_neigh->digipeat);
4682 kfree(rose_neigh);
4683 return;
4684 @@ -810,6 +814,7 @@ void rose_link_failed(ax25_cb *ax25, int reason)
4685
4686 if (rose_neigh != NULL) {
4687 rose_neigh->ax25 = NULL;
4688 + ax25_cb_put(ax25);
4689
4690 rose_del_route_by_neigh(rose_neigh);
4691 rose_kill_by_neigh(rose_neigh);
4692 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4693 index bb230d5..36d9e25 100644
4694 --- a/security/selinux/hooks.c
4695 +++ b/security/selinux/hooks.c
4696 @@ -2366,7 +2366,7 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
4697 initrlim = init_task.signal->rlim + i;
4698 rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
4699 }
4700 - update_rlimit_cpu(rlim->rlim_cur);
4701 + update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur);
4702 }
4703 }
4704

  ViewVC Help
Powered by ViewVC 1.1.20