/[linux-patches]/genpatches-2.6/trunk/3.4/1013_linux-3.4.14.patch
Gentoo

Contents of /genpatches-2.6/trunk/3.4/1013_linux-3.4.14.patch

Parent Directory | Revision Log


Revision 2254 - (show annotations) (download)
Wed Dec 19 19:51:16 2012 UTC (18 months, 3 weeks ago) by mpagano
File size: 123768 byte(s)
Linux patches 3.4.12 through and including 3.4.24
1 diff --git a/Makefile b/Makefile
2 index 75b37ce..d174c84 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 4
8 -SUBLEVEL = 13
9 +SUBLEVEL = 14
10 EXTRAVERSION =
11 NAME = Saber-toothed Squirrel
12
13 diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
14 index 153d3fc..8e3d91b 100644
15 --- a/arch/alpha/kernel/process.c
16 +++ b/arch/alpha/kernel/process.c
17 @@ -28,6 +28,7 @@
18 #include <linux/tty.h>
19 #include <linux/console.h>
20 #include <linux/slab.h>
21 +#include <linux/rcupdate.h>
22
23 #include <asm/reg.h>
24 #include <asm/uaccess.h>
25 @@ -54,8 +55,11 @@ cpu_idle(void)
26 /* FIXME -- EV6 and LCA45 know how to power down
27 the CPU. */
28
29 + rcu_idle_enter();
30 while (!need_resched())
31 cpu_relax();
32 +
33 + rcu_idle_exit();
34 schedule();
35 }
36 }
37 diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
38 index 891dad8..c722027 100644
39 --- a/arch/cris/kernel/process.c
40 +++ b/arch/cris/kernel/process.c
41 @@ -25,6 +25,7 @@
42 #include <linux/elfcore.h>
43 #include <linux/mqueue.h>
44 #include <linux/reboot.h>
45 +#include <linux/rcupdate.h>
46
47 //#define DEBUG
48
49 @@ -102,6 +103,7 @@ void cpu_idle (void)
50 {
51 /* endless idle loop with no priority at all */
52 while (1) {
53 + rcu_idle_enter();
54 while (!need_resched()) {
55 void (*idle)(void);
56 /*
57 @@ -114,6 +116,7 @@ void cpu_idle (void)
58 idle = default_idle;
59 idle();
60 }
61 + rcu_idle_exit();
62 schedule_preempt_disabled();
63 }
64 }
65 diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
66 index d4de48b..3941cbc 100644
67 --- a/arch/frv/kernel/process.c
68 +++ b/arch/frv/kernel/process.c
69 @@ -25,6 +25,7 @@
70 #include <linux/reboot.h>
71 #include <linux/interrupt.h>
72 #include <linux/pagemap.h>
73 +#include <linux/rcupdate.h>
74
75 #include <asm/asm-offsets.h>
76 #include <asm/uaccess.h>
77 @@ -84,12 +85,14 @@ void cpu_idle(void)
78 {
79 /* endless idle loop with no priority at all */
80 while (1) {
81 + rcu_idle_enter();
82 while (!need_resched()) {
83 check_pgt_cache();
84
85 if (!frv_dma_inprogress && idle)
86 idle();
87 }
88 + rcu_idle_exit();
89
90 schedule_preempt_disabled();
91 }
92 diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
93 index 0e9c315..f153ed1 100644
94 --- a/arch/h8300/kernel/process.c
95 +++ b/arch/h8300/kernel/process.c
96 @@ -36,6 +36,7 @@
97 #include <linux/reboot.h>
98 #include <linux/fs.h>
99 #include <linux/slab.h>
100 +#include <linux/rcupdate.h>
101
102 #include <asm/uaccess.h>
103 #include <asm/traps.h>
104 @@ -78,8 +79,10 @@ void (*idle)(void) = default_idle;
105 void cpu_idle(void)
106 {
107 while (1) {
108 + rcu_idle_enter();
109 while (!need_resched())
110 idle();
111 + rcu_idle_exit();
112 schedule_preempt_disabled();
113 }
114 }
115 diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
116 index ce74e14..86d74ab 100644
117 --- a/arch/ia64/kernel/process.c
118 +++ b/arch/ia64/kernel/process.c
119 @@ -29,6 +29,7 @@
120 #include <linux/kdebug.h>
121 #include <linux/utsname.h>
122 #include <linux/tracehook.h>
123 +#include <linux/rcupdate.h>
124
125 #include <asm/cpu.h>
126 #include <asm/delay.h>
127 @@ -301,6 +302,7 @@ cpu_idle (void)
128
129 /* endless idle loop with no priority at all */
130 while (1) {
131 + rcu_idle_enter();
132 if (can_do_pal_halt) {
133 current_thread_info()->status &= ~TS_POLLING;
134 /*
135 @@ -331,6 +333,7 @@ cpu_idle (void)
136 normal_xtp();
137 #endif
138 }
139 + rcu_idle_exit();
140 schedule_preempt_disabled();
141 check_pgt_cache();
142 if (cpu_is_offline(cpu))
143 diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
144 index 3a4a32b..384e63f 100644
145 --- a/arch/m32r/kernel/process.c
146 +++ b/arch/m32r/kernel/process.c
147 @@ -26,6 +26,7 @@
148 #include <linux/ptrace.h>
149 #include <linux/unistd.h>
150 #include <linux/hardirq.h>
151 +#include <linux/rcupdate.h>
152
153 #include <asm/io.h>
154 #include <asm/uaccess.h>
155 @@ -82,6 +83,7 @@ void cpu_idle (void)
156 {
157 /* endless idle loop with no priority at all */
158 while (1) {
159 + rcu_idle_enter();
160 while (!need_resched()) {
161 void (*idle)(void) = pm_idle;
162
163 @@ -90,6 +92,7 @@ void cpu_idle (void)
164
165 idle();
166 }
167 + rcu_idle_exit();
168 schedule_preempt_disabled();
169 }
170 }
171 diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
172 index c488e3c..ac2892e 100644
173 --- a/arch/m68k/kernel/process.c
174 +++ b/arch/m68k/kernel/process.c
175 @@ -25,6 +25,7 @@
176 #include <linux/reboot.h>
177 #include <linux/init_task.h>
178 #include <linux/mqueue.h>
179 +#include <linux/rcupdate.h>
180
181 #include <asm/uaccess.h>
182 #include <asm/traps.h>
183 @@ -75,8 +76,10 @@ void cpu_idle(void)
184 {
185 /* endless idle loop with no priority at all */
186 while (1) {
187 + rcu_idle_enter();
188 while (!need_resched())
189 idle();
190 + rcu_idle_exit();
191 schedule_preempt_disabled();
192 }
193 }
194 diff --git a/arch/mips/Makefile b/arch/mips/Makefile
195 index 4fedf5a..5c1e75d 100644
196 --- a/arch/mips/Makefile
197 +++ b/arch/mips/Makefile
198 @@ -225,7 +225,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
199 LDFLAGS += -m $(ld-emul)
200
201 ifdef CONFIG_MIPS
202 -CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
203 +CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
204 egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
205 sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
206 ifdef CONFIG_64BIT
207 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
208 index 0c6877e..d3d6fa9 100644
209 --- a/arch/mips/kernel/Makefile
210 +++ b/arch/mips/kernel/Makefile
211 @@ -104,7 +104,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
212
213 obj-$(CONFIG_OF) += prom.o
214
215 -CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
216 +CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
217
218 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
219
220 diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
221 index 7120282..3eb4a52 100644
222 --- a/arch/mn10300/Makefile
223 +++ b/arch/mn10300/Makefile
224 @@ -26,7 +26,7 @@ CHECKFLAGS +=
225 PROCESSOR := unset
226 UNIT := unset
227
228 -KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33
229 +KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
230 KBUILD_AFLAGS += -mam33 -DCPU=AM33
231
232 ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
233 diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
234 index 14707f2..675d8f2 100644
235 --- a/arch/mn10300/kernel/process.c
236 +++ b/arch/mn10300/kernel/process.c
237 @@ -25,6 +25,7 @@
238 #include <linux/err.h>
239 #include <linux/fs.h>
240 #include <linux/slab.h>
241 +#include <linux/rcupdate.h>
242 #include <asm/uaccess.h>
243 #include <asm/pgtable.h>
244 #include <asm/io.h>
245 @@ -107,6 +108,7 @@ void cpu_idle(void)
246 {
247 /* endless idle loop with no priority at all */
248 for (;;) {
249 + rcu_idle_enter();
250 while (!need_resched()) {
251 void (*idle)(void);
252
253 @@ -121,6 +123,7 @@ void cpu_idle(void)
254 }
255 idle();
256 }
257 + rcu_idle_exit();
258
259 schedule_preempt_disabled();
260 }
261 diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
262 index d4b94b3..c54a4db 100644
263 --- a/arch/parisc/kernel/process.c
264 +++ b/arch/parisc/kernel/process.c
265 @@ -48,6 +48,7 @@
266 #include <linux/unistd.h>
267 #include <linux/kallsyms.h>
268 #include <linux/uaccess.h>
269 +#include <linux/rcupdate.h>
270
271 #include <asm/io.h>
272 #include <asm/asm-offsets.h>
273 @@ -69,8 +70,10 @@ void cpu_idle(void)
274
275 /* endless idle loop with no priority at all */
276 while (1) {
277 + rcu_idle_enter();
278 while (!need_resched())
279 barrier();
280 + rcu_idle_exit();
281 schedule_preempt_disabled();
282 check_pgt_cache();
283 }
284 diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
285 index ac39e6a..2974edd 100644
286 --- a/arch/powerpc/include/asm/pci-bridge.h
287 +++ b/arch/powerpc/include/asm/pci-bridge.h
288 @@ -181,6 +181,14 @@ static inline int pci_device_from_OF_node(struct device_node *np,
289 #if defined(CONFIG_EEH)
290 static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
291 {
292 + /*
293 + * For those OF nodes whose parent isn't PCI bridge, they
294 + * don't have PCI_DN actually. So we have to skip them for
295 + * any EEH operations.
296 + */
297 + if (!dn || !PCI_DN(dn))
298 + return NULL;
299 +
300 return PCI_DN(dn)->edev;
301 }
302 #endif
303 diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
304 index a75e37d..41d4b16 100644
305 --- a/arch/powerpc/platforms/pseries/eeh.c
306 +++ b/arch/powerpc/platforms/pseries/eeh.c
307 @@ -1029,7 +1029,7 @@ static void eeh_add_device_early(struct device_node *dn)
308 {
309 struct pci_controller *phb;
310
311 - if (!dn || !of_node_to_eeh_dev(dn))
312 + if (!of_node_to_eeh_dev(dn))
313 return;
314 phb = of_node_to_eeh_dev(dn)->phb;
315
316 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
317 index 2707023..637970c 100644
318 --- a/arch/score/kernel/process.c
319 +++ b/arch/score/kernel/process.c
320 @@ -27,6 +27,7 @@
321 #include <linux/reboot.h>
322 #include <linux/elfcore.h>
323 #include <linux/pm.h>
324 +#include <linux/rcupdate.h>
325
326 void (*pm_power_off)(void);
327 EXPORT_SYMBOL(pm_power_off);
328 @@ -50,9 +51,10 @@ void __noreturn cpu_idle(void)
329 {
330 /* endless idle loop with no priority at all */
331 while (1) {
332 + rcu_idle_enter();
333 while (!need_resched())
334 barrier();
335 -
336 + rcu_idle_exit();
337 schedule_preempt_disabled();
338 }
339 }
340 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
341 index b1c611e..f1276aa 100644
342 --- a/arch/x86/Makefile
343 +++ b/arch/x86/Makefile
344 @@ -85,7 +85,7 @@ endif
345 ifdef CONFIG_X86_X32
346 x32_ld_ok := $(call try-run,\
347 /bin/echo -e '1: .quad 1b' | \
348 - $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
349 + $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \
350 $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
351 $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
352 ifeq ($(x32_ld_ok),y)
353 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
354 index e398bb5..8a84501 100644
355 --- a/arch/x86/boot/compressed/Makefile
356 +++ b/arch/x86/boot/compressed/Makefile
357 @@ -28,6 +28,9 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
358 $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
359 $(obj)/piggy.o
360
361 +$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
362 +$(obj)/efi_stub_$(BITS).o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
363 +
364 ifeq ($(CONFIG_EFI_STUB), y)
365 VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
366 endif
367 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
368 index 49afb3f..c3520d7 100644
369 --- a/arch/x86/include/asm/pgtable.h
370 +++ b/arch/x86/include/asm/pgtable.h
371 @@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
372
373 static inline int pmd_large(pmd_t pte)
374 {
375 - return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
376 - (_PAGE_PSE | _PAGE_PRESENT);
377 + return pmd_flags(pte) & _PAGE_PSE;
378 }
379
380 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
381 @@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
382
383 static inline int pmd_present(pmd_t pmd)
384 {
385 - return pmd_flags(pmd) & _PAGE_PRESENT;
386 + /*
387 + * Checking for _PAGE_PSE is needed too because
388 + * split_huge_page will temporarily clear the present bit (but
389 + * the _PAGE_PSE flag will remain set at all times while the
390 + * _PAGE_PRESENT bit is clear).
391 + */
392 + return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
393 }
394
395 static inline int pmd_none(pmd_t pmd)
396 diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
397 index 92660eda..f55a4ce 100644
398 --- a/arch/x86/platform/efi/efi.c
399 +++ b/arch/x86/platform/efi/efi.c
400 @@ -890,6 +890,7 @@ void __init efi_enter_virtual_mode(void)
401 *
402 * Call EFI services through wrapper functions.
403 */
404 + efi.runtime_version = efi_systab.fw_revision;
405 efi.get_time = virt_efi_get_time;
406 efi.set_time = virt_efi_set_time;
407 efi.get_wakeup_time = virt_efi_get_wakeup_time;
408 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
409 index 6a2d6ed..7a41d9e 100644
410 --- a/arch/xtensa/kernel/process.c
411 +++ b/arch/xtensa/kernel/process.c
412 @@ -31,6 +31,7 @@
413 #include <linux/mqueue.h>
414 #include <linux/fs.h>
415 #include <linux/slab.h>
416 +#include <linux/rcupdate.h>
417
418 #include <asm/pgtable.h>
419 #include <asm/uaccess.h>
420 @@ -110,8 +111,10 @@ void cpu_idle(void)
421
422 /* endless idle loop with no priority at all */
423 while (1) {
424 + rcu_idle_enter();
425 while (!need_resched())
426 platform_idle();
427 + rcu_idle_exit();
428 schedule_preempt_disabled();
429 }
430 }
431 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
432 index 3188da3..cf02e97 100644
433 --- a/drivers/acpi/bus.c
434 +++ b/drivers/acpi/bus.c
435 @@ -954,8 +954,6 @@ static int __init acpi_bus_init(void)
436 status = acpi_ec_ecdt_probe();
437 /* Ignore result. Not having an ECDT is not fatal. */
438
439 - acpi_bus_osc_support();
440 -
441 status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
442 if (ACPI_FAILURE(status)) {
443 printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
444 @@ -963,6 +961,12 @@ static int __init acpi_bus_init(void)
445 }
446
447 /*
448 + * _OSC method may exist in module level code,
449 + * so it must be run after ACPI_FULL_INITIALIZATION
450 + */
451 + acpi_bus_osc_support();
452 +
453 + /*
454 * _PDC control method may load dynamic SSDT tables,
455 * and we need to install the table handler before that.
456 */
457 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
458 index f7eff25..ebc272f 100644
459 --- a/drivers/base/power/main.c
460 +++ b/drivers/base/power/main.c
461 @@ -984,7 +984,7 @@ int dpm_suspend_end(pm_message_t state)
462
463 error = dpm_suspend_noirq(state);
464 if (error) {
465 - dpm_resume_early(state);
466 + dpm_resume_early(resume_event(state));
467 return error;
468 }
469
470 diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
471 index de0435e..887f68f 100644
472 --- a/drivers/block/aoe/aoecmd.c
473 +++ b/drivers/block/aoe/aoecmd.c
474 @@ -35,6 +35,7 @@ new_skb(ulong len)
475 skb_reset_mac_header(skb);
476 skb_reset_network_header(skb);
477 skb->protocol = __constant_htons(ETH_P_AOE);
478 + skb_checksum_none_assert(skb);
479 }
480 return skb;
481 }
482 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
483 index 2397f6f..6c87d67 100644
484 --- a/drivers/dma/dmaengine.c
485 +++ b/drivers/dma/dmaengine.c
486 @@ -578,7 +578,7 @@ void dmaengine_get(void)
487 list_del_rcu(&device->global_node);
488 break;
489 } else if (err)
490 - pr_err("%s: failed to get %s: (%d)\n",
491 + pr_debug("%s: failed to get %s: (%d)\n",
492 __func__, dma_chan_name(chan), err);
493 }
494 }
495 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
496 index 7e479a4..4fd363f 100644
497 --- a/drivers/gpu/drm/drm_crtc.c
498 +++ b/drivers/gpu/drm/drm_crtc.c
499 @@ -1028,15 +1028,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
500 fb->funcs->destroy(fb);
501 }
502
503 - list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
504 - crtc->funcs->destroy(crtc);
505 - }
506 -
507 list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
508 head) {
509 plane->funcs->destroy(plane);
510 }
511
512 + list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
513 + crtc->funcs->destroy(crtc);
514 + }
515 +
516 idr_remove_all(&dev->mode_config.crtc_idr);
517 idr_destroy(&dev->mode_config.crtc_idr);
518 }
519 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
520 index 65060b7..645dcbf 100644
521 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
522 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
523 @@ -147,6 +147,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
524 (rdev->pdev->subsystem_device == 0x01fd))
525 return true;
526
527 + /* Gateway RS690 only seems to work with MSIs. */
528 + if ((rdev->pdev->device == 0x791f) &&
529 + (rdev->pdev->subsystem_vendor == 0x107b) &&
530 + (rdev->pdev->subsystem_device == 0x0185))
531 + return true;
532 +
533 + /* try and enable MSIs by default on all RS690s */
534 + if (rdev->family == CHIP_RS690)
535 + return true;
536 +
537 /* RV515 seems to have MSI issues where it loses
538 * MSI rearms occasionally. This leads to lockups and freezes.
539 * disable it by default.
540 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
541 index caa55d6..b8459bd 100644
542 --- a/drivers/gpu/drm/radeon/radeon_pm.c
543 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
544 @@ -567,7 +567,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
545 void radeon_pm_resume(struct radeon_device *rdev)
546 {
547 /* set up the default clocks if the MC ucode is loaded */
548 - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
549 + if ((rdev->family >= CHIP_BARTS) &&
550 + (rdev->family <= CHIP_CAYMAN) &&
551 + rdev->mc_fw) {
552 if (rdev->pm.default_vddc)
553 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
554 SET_VOLTAGE_TYPE_ASIC_VDDC);
555 @@ -622,7 +624,9 @@ int radeon_pm_init(struct radeon_device *rdev)
556 radeon_pm_print_states(rdev);
557 radeon_pm_init_profile(rdev);
558 /* set up the default clocks if the MC ucode is loaded */
559 - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
560 + if ((rdev->family >= CHIP_BARTS) &&
561 + (rdev->family <= CHIP_CAYMAN) &&
562 + rdev->mc_fw) {
563 if (rdev->pm.default_vddc)
564 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
565 SET_VOLTAGE_TYPE_ASIC_VDDC);
566 diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
567 index cb1ee4e..2a25888 100644
568 --- a/drivers/gpu/drm/savage/savage_bci.c
569 +++ b/drivers/gpu/drm/savage/savage_bci.c
570 @@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
571
572 dev_priv->chipset = (enum savage_family)chipset;
573
574 + pci_set_master(dev->pdev);
575 +
576 return 0;
577 }
578
579 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
580 index 5fda348..0d251d3 100644
581 --- a/drivers/iommu/intel-iommu.c
582 +++ b/drivers/iommu/intel-iommu.c
583 @@ -588,7 +588,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
584 {
585 int i;
586
587 - domain->iommu_coherency = 1;
588 + i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
589 +
590 + domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
591
592 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
593 if (!ecap_coherent(g_iommus[i]->ecap)) {
594 diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
595 index 0e49c99..c06992e 100644
596 --- a/drivers/media/rc/ite-cir.c
597 +++ b/drivers/media/rc/ite-cir.c
598 @@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
599 rdev = rc_allocate_device();
600 if (!rdev)
601 goto failure;
602 + itdev->rdev = rdev;
603
604 ret = -ENODEV;
605
606 @@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
607 if (ret)
608 goto failure;
609
610 - itdev->rdev = rdev;
611 ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
612
613 return 0;
614 diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
615 index 30662fc..63f571b 100644
616 --- a/drivers/media/video/gspca/pac7302.c
617 +++ b/drivers/media/video/gspca/pac7302.c
618 @@ -945,6 +945,7 @@ static const struct usb_device_id device_table[] = {
619 {USB_DEVICE(0x093a, 0x262a)},
620 {USB_DEVICE(0x093a, 0x262c)},
621 {USB_DEVICE(0x145f, 0x013c)},
622 + {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */
623 {}
624 };
625 MODULE_DEVICE_TABLE(usb, device_table);
626 diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
627 index ca881ef..746a59c 100644
628 --- a/drivers/mfd/max8925-core.c
629 +++ b/drivers/mfd/max8925-core.c
630 @@ -18,12 +18,19 @@
631 #include <linux/mfd/core.h>
632 #include <linux/mfd/max8925.h>
633
634 +static struct resource io_parent = {
635 + .start = 0,
636 + .end = 0xffffffff,
637 + .flags = IORESOURCE_IO,
638 +};
639 +
640 static struct resource backlight_resources[] = {
641 {
642 .name = "max8925-backlight",
643 .start = MAX8925_WLED_MODE_CNTL,
644 .end = MAX8925_WLED_CNTL,
645 .flags = IORESOURCE_IO,
646 + .parent = &io_parent,
647 },
648 };
649
650 @@ -42,6 +49,7 @@ static struct resource touch_resources[] = {
651 .start = MAX8925_TSC_IRQ,
652 .end = MAX8925_ADC_RES_END,
653 .flags = IORESOURCE_IO,
654 + .parent = &io_parent,
655 },
656 };
657
658 @@ -60,6 +68,7 @@ static struct resource power_supply_resources[] = {
659 .start = MAX8925_CHG_IRQ1,
660 .end = MAX8925_CHG_IRQ1_MASK,
661 .flags = IORESOURCE_IO,
662 + .parent = &io_parent,
663 },
664 };
665
666 @@ -118,6 +127,7 @@ static struct mfd_cell onkey_devs[] = {
667 .start = MAX8925_##_start, \
668 .end = MAX8925_##_end, \
669 .flags = IORESOURCE_IO, \
670 + .parent = &io_parent, \
671 }
672
673 static struct resource regulator_resources[] = {
674 diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
675 index 71a0c4e..1025377 100644
676 --- a/drivers/mmc/host/omap_hsmmc.c
677 +++ b/drivers/mmc/host/omap_hsmmc.c
678 @@ -2097,8 +2097,7 @@ static int omap_hsmmc_suspend(struct device *dev)
679 if (ret) {
680 host->suspended = 0;
681 if (host->pdata->resume) {
682 - ret = host->pdata->resume(dev, host->slot_id);
683 - if (ret)
684 + if (host->pdata->resume(dev, host->slot_id))
685 dev_dbg(dev, "Unmask interrupt failed\n");
686 }
687 goto err;
688 diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
689 index 724b35e..3b8236b 100644
690 --- a/drivers/mmc/host/sh_mmcif.c
691 +++ b/drivers/mmc/host/sh_mmcif.c
692 @@ -1191,6 +1191,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
693 host->sd_error = true;
694 dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
695 }
696 + if (host->state == STATE_IDLE) {
697 + dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
698 + return IRQ_HANDLED;
699 + }
700 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
701 if (!host->dma_active)
702 return IRQ_WAKE_THREAD;
703 diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
704 index e5bfd0e..0598d52 100644
705 --- a/drivers/mtd/maps/autcpu12-nvram.c
706 +++ b/drivers/mtd/maps/autcpu12-nvram.c
707 @@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
708
709 static int __init init_autcpu12_sram (void)
710 {
711 - int err, save0, save1;
712 + map_word tmp, save0, save1;
713 + int err;
714
715 autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
716 if (!autcpu12_sram_map.virt) {
717 @@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
718 err = -EIO;
719 goto out;
720 }
721 - simple_map_init(&autcpu_sram_map);
722 + simple_map_init(&autcpu12_sram_map);
723
724 /*
725 * Check for 32K/128K
726 @@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
727 * Read and check result on ofs 0x0
728 * Restore contents
729 */
730 - save0 = map_read32(&autcpu12_sram_map,0);
731 - save1 = map_read32(&autcpu12_sram_map,0x10000);
732 - map_write32(&autcpu12_sram_map,~save0,0x10000);
733 + save0 = map_read(&autcpu12_sram_map, 0);
734 + save1 = map_read(&autcpu12_sram_map, 0x10000);
735 + tmp.x[0] = ~save0.x[0];
736 + map_write(&autcpu12_sram_map, tmp, 0x10000);
737 /* if we find this pattern on 0x0, we have 32K size
738 * restore contents and exit
739 */
740 - if ( map_read32(&autcpu12_sram_map,0) != save0) {
741 - map_write32(&autcpu12_sram_map,save0,0x0);
742 + tmp = map_read(&autcpu12_sram_map, 0);
743 + if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
744 + map_write(&autcpu12_sram_map, save0, 0x0);
745 goto map;
746 }
747 /* We have a 128K found, restore 0x10000 and set size
748 * to 128K
749 */
750 - map_write32(&autcpu12_sram_map,save1,0x10000);
751 + map_write(&autcpu12_sram_map, save1, 0x10000);
752 autcpu12_sram_map.size = SZ_128K;
753
754 map:
755 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
756 index 9651c06..bf24aa7 100644
757 --- a/drivers/mtd/mtdpart.c
758 +++ b/drivers/mtd/mtdpart.c
759 @@ -709,6 +709,8 @@ static const char *default_mtd_part_types[] = {
760 * partition parsers, specified in @types. However, if @types is %NULL, then
761 * the default list of parsers is used. The default list contains only the
762 * "cmdlinepart" and "ofpart" parsers ATM.
763 + * Note: If there are more then one parser in @types, the kernel only takes the
764 + * partitions parsed out by the first parser.
765 *
766 * This function may return:
767 * o a negative error code in case of failure
768 @@ -733,11 +735,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
769 if (!parser)
770 continue;
771 ret = (*parser->parse_fn)(master, pparts, data);
772 + put_partition_parser(parser);
773 if (ret > 0) {
774 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
775 ret, parser->name, master->name);
776 + break;
777 }
778 - put_partition_parser(parser);
779 }
780 return ret;
781 }
782 diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
783 index 30d1319..c126469 100644
784 --- a/drivers/mtd/nand/nand_bbt.c
785 +++ b/drivers/mtd/nand/nand_bbt.c
786 @@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
787 /* Read the mirror version, if available */
788 if (md && (md->options & NAND_BBT_VERSION)) {
789 scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
790 - mtd->writesize, td);
791 + mtd->writesize, md);
792 md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
793 pr_info("Bad block table at page %d, version 0x%02X\n",
794 md->pages[0], md->version[0]);
795 diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
796 index c606b6a..b9cbd65 100644
797 --- a/drivers/mtd/nand/nandsim.c
798 +++ b/drivers/mtd/nand/nandsim.c
799 @@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
800 uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
801 if (new_size >> overridesize != nsmtd->erasesize) {
802 NS_ERR("overridesize is too big\n");
803 + retval = -EINVAL;
804 goto err_exit;
805 }
806 /* N.B. This relies on nand_scan not doing anything with the size before we change it */
807 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
808 index c2b0bba..62d039a 100644
809 --- a/drivers/mtd/nand/omap2.c
810 +++ b/drivers/mtd/nand/omap2.c
811 @@ -1133,7 +1133,8 @@ static int omap_nand_remove(struct platform_device *pdev)
812 /* Release NAND device, its internal structures and partitions */
813 nand_release(&info->mtd);
814 iounmap(info->nand.IO_ADDR_R);
815 - kfree(&info->mtd);
816 + release_mem_region(info->phys_base, NAND_IO_SIZE);
817 + kfree(info);
818 return 0;
819 }
820
821 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
822 index 41bb34f..acd8246 100644
823 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
824 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
825 @@ -571,14 +571,16 @@ drop:
826 static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
827 struct bnx2x_fastpath *fp)
828 {
829 - /* Do nothing if no IP/L4 csum validation was done */
830 -
831 + /* Do nothing if no L4 csum validation was done.
832 + * We do not check whether IP csum was validated. For IPv4 we assume
833 + * that if the card got as far as validating the L4 csum, it also
834 + * validated the IP csum. IPv6 has no IP csum.
835 + */
836 if (cqe->fast_path_cqe.status_flags &
837 - (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
838 - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
839 + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
840 return;
841
842 - /* If both IP/L4 validation were done, check if an error was found. */
843 + /* If L4 validation was done, check if an error was found. */
844
845 if (cqe->fast_path_cqe.type_error_flags &
846 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
847 diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
848 index 65a718f..22b399a 100644
849 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
850 +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
851 @@ -1370,6 +1370,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
852 struct pci_dev *root = pdev->bus->self;
853 u32 aer_pos;
854
855 + /* root bus? */
856 + if (!root)
857 + return;
858 +
859 if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
860 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
861 return;
862 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
863 index 5fb74c4..482dcd3 100644
864 --- a/drivers/net/ethernet/realtek/r8169.c
865 +++ b/drivers/net/ethernet/realtek/r8169.c
866 @@ -319,6 +319,8 @@ enum rtl_registers {
867 Config0 = 0x51,
868 Config1 = 0x52,
869 Config2 = 0x53,
870 +#define PME_SIGNAL (1 << 5) /* 8168c and later */
871 +
872 Config3 = 0x54,
873 Config4 = 0x55,
874 Config5 = 0x56,
875 @@ -1400,7 +1402,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
876 u16 reg;
877 u8 mask;
878 } cfg[] = {
879 - { WAKE_ANY, Config1, PMEnable },
880 { WAKE_PHY, Config3, LinkUp },
881 { WAKE_MAGIC, Config3, MagicPacket },
882 { WAKE_UCAST, Config5, UWF },
883 @@ -1408,16 +1409,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
884 { WAKE_MCAST, Config5, MWF },
885 { WAKE_ANY, Config5, LanWake }
886 };
887 + u8 options;
888
889 RTL_W8(Cfg9346, Cfg9346_Unlock);
890
891 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
892 - u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
893 + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
894 if (wolopts & cfg[i].opt)
895 options |= cfg[i].mask;
896 RTL_W8(cfg[i].reg, options);
897 }
898
899 + switch (tp->mac_version) {
900 + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
901 + options = RTL_R8(Config1) & ~PMEnable;
902 + if (wolopts)
903 + options |= PMEnable;
904 + RTL_W8(Config1, options);
905 + break;
906 + default:
907 + options = RTL_R8(Config2) & ~PME_SIGNAL;
908 + if (wolopts)
909 + options |= PME_SIGNAL;
910 + RTL_W8(Config2, options);
911 + break;
912 + }
913 +
914 RTL_W8(Cfg9346, Cfg9346_Lock);
915 }
916
917 diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
918 index 3455876..06f2b49 100644
919 --- a/drivers/net/ethernet/ti/davinci_cpdma.c
920 +++ b/drivers/net/ethernet/ti/davinci_cpdma.c
921 @@ -851,6 +851,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
922
923 next_dma = desc_read(desc, hw_next);
924 chan->head = desc_from_phys(pool, next_dma);
925 + chan->count--;
926 chan->stats.teardown_dequeue++;
927
928 /* issue callback without locks held */
929 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
930 index 2fa1a9b..2e0d876 100644
931 --- a/drivers/net/ppp/pppoe.c
932 +++ b/drivers/net/ppp/pppoe.c
933 @@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock)
934
935 po = pppox_sk(sk);
936
937 - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
938 + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
939 dev_put(po->pppoe_dev);
940 po->pppoe_dev = NULL;
941 }
942 diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
943 index 91d2588..1470d3e 100644
944 --- a/drivers/net/rionet.c
945 +++ b/drivers/net/rionet.c
946 @@ -79,6 +79,7 @@ static int rionet_capable = 1;
947 * on system trade-offs.
948 */
949 static struct rio_dev **rionet_active;
950 +static int nact; /* total number of active rionet peers */
951
952 #define is_rionet_capable(src_ops, dst_ops) \
953 ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
954 @@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
955 struct ethhdr *eth = (struct ethhdr *)skb->data;
956 u16 destid;
957 unsigned long flags;
958 + int add_num = 1;
959
960 local_irq_save(flags);
961 if (!spin_trylock(&rnet->tx_lock)) {
962 @@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
963 return NETDEV_TX_LOCKED;
964 }
965
966 - if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
967 + if (is_multicast_ether_addr(eth->h_dest))
968 + add_num = nact;
969 +
970 + if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
971 netif_stop_queue(ndev);
972 spin_unlock_irqrestore(&rnet->tx_lock, flags);
973 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
974 @@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
975 }
976
977 if (is_multicast_ether_addr(eth->h_dest)) {
978 + int count = 0;
979 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
980 i++)
981 - if (rionet_active[i])
982 + if (rionet_active[i]) {
983 rionet_queue_tx_msg(skb, ndev,
984 rionet_active[i]);
985 + if (count)
986 + atomic_inc(&skb->users);
987 + count++;
988 + }
989 } else if (RIONET_MAC_MATCH(eth->h_dest)) {
990 destid = RIONET_GET_DESTID(eth->h_dest);
991 if (rionet_active[destid])
992 @@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
993 if (info == RIONET_DOORBELL_JOIN) {
994 if (!rionet_active[sid]) {
995 list_for_each_entry(peer, &rionet_peers, node) {
996 - if (peer->rdev->destid == sid)
997 + if (peer->rdev->destid == sid) {
998 rionet_active[sid] = peer->rdev;
999 + nact++;
1000 + }
1001 }
1002 rio_mport_send_doorbell(mport, sid,
1003 RIONET_DOORBELL_JOIN);
1004 }
1005 } else if (info == RIONET_DOORBELL_LEAVE) {
1006 rionet_active[sid] = NULL;
1007 + nact--;
1008 } else {
1009 if (netif_msg_intr(rnet))
1010 printk(KERN_WARNING "%s: unhandled doorbell\n",
1011 @@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
1012
1013 rc = rionet_setup_netdev(rdev->net->hport, ndev);
1014 rionet_check = 1;
1015 + nact = 0;
1016 }
1017
1018 /*
1019 diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
1020 index cc9776c..8789bc5 100644
1021 --- a/drivers/net/usb/sierra_net.c
1022 +++ b/drivers/net/usb/sierra_net.c
1023 @@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
1024 return -EIO;
1025 }
1026
1027 - *datap = *attrdata;
1028 + *datap = le16_to_cpu(*attrdata);
1029
1030 kfree(attrdata);
1031 return result;
1032 diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
1033 index aaaca9a..3f575af 100644
1034 --- a/drivers/net/wan/ixp4xx_hss.c
1035 +++ b/drivers/net/wan/ixp4xx_hss.c
1036 @@ -10,6 +10,7 @@
1037
1038 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1039
1040 +#include <linux/module.h>
1041 #include <linux/bitops.h>
1042 #include <linux/cdev.h>
1043 #include <linux/dma-mapping.h>
1044 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
1045 index 806c44f..09bf377 100644
1046 --- a/drivers/pci/hotplug/acpiphp_glue.c
1047 +++ b/drivers/pci/hotplug/acpiphp_glue.c
1048 @@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
1049 if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
1050 return AE_OK;
1051
1052 + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
1053 + if (ACPI_FAILURE(status)) {
1054 + warn("can't evaluate _ADR (%#x)\n", status);
1055 + return AE_OK;
1056 + }
1057 +
1058 + device = (adr >> 16) & 0xffff;
1059 + function = adr & 0xffff;
1060 +
1061 pdev = pbus->self;
1062 if (pdev && pci_is_pcie(pdev)) {
1063 tmp = acpi_find_root_bridge_handle(pdev);
1064 @@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
1065 }
1066 }
1067
1068 - acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
1069 - device = (adr >> 16) & 0xffff;
1070 - function = adr & 0xffff;
1071 -
1072 newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
1073 if (!newfunc)
1074 return AE_NO_MEMORY;
1075 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
1076 index 5e1ca3c..63e0199 100644
1077 --- a/drivers/pci/probe.c
1078 +++ b/drivers/pci/probe.c
1079 @@ -749,8 +749,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1080
1081 /* Check if setup is sensible at all */
1082 if (!pass &&
1083 - (primary != bus->number || secondary <= bus->number)) {
1084 - dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
1085 + (primary != bus->number || secondary <= bus->number ||
1086 + secondary > subordinate)) {
1087 + dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1088 + secondary, subordinate);
1089 broken = 1;
1090 }
1091
1092 diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
1093 index 0860181..4f1b10b 100644
1094 --- a/drivers/s390/scsi/zfcp_aux.c
1095 +++ b/drivers/s390/scsi/zfcp_aux.c
1096 @@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
1097
1098 rwlock_init(&port->unit_list_lock);
1099 INIT_LIST_HEAD(&port->unit_list);
1100 + atomic_set(&port->units, 0);
1101
1102 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
1103 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
1104 diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
1105 index 96f13ad8..79a6afe 100644
1106 --- a/drivers/s390/scsi/zfcp_ccw.c
1107 +++ b/drivers/s390/scsi/zfcp_ccw.c
1108 @@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
1109 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
1110 }
1111
1112 -static int zfcp_ccw_activate(struct ccw_device *cdev)
1113 -
1114 +/**
1115 + * zfcp_ccw_activate - activate adapter and wait for it to finish
1116 + * @cdev: pointer to belonging ccw device
1117 + * @clear: Status flags to clear.
1118 + * @tag: s390dbf trace record tag
1119 + */
1120 +static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
1121 {
1122 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
1123
1124 if (!adapter)
1125 return 0;
1126
1127 + zfcp_erp_clear_adapter_status(adapter, clear);
1128 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
1129 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
1130 - "ccresu2");
1131 + tag);
1132 zfcp_erp_wait(adapter);
1133 flush_work(&adapter->scan_work);
1134
1135 @@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
1136 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
1137 adapter->req_no = 0;
1138
1139 - zfcp_ccw_activate(cdev);
1140 + zfcp_ccw_activate(cdev, 0, "ccsonl1");
1141 zfcp_ccw_adapter_put(adapter);
1142 return 0;
1143 }
1144
1145 /**
1146 - * zfcp_ccw_set_offline - set_offline function of zfcp driver
1147 + * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
1148 * @cdev: pointer to belonging ccw device
1149 + * @set: Status flags to set.
1150 + * @tag: s390dbf trace record tag
1151 *
1152 * This function gets called by the common i/o layer and sets an adapter
1153 * into state offline.
1154 */
1155 -static int zfcp_ccw_set_offline(struct ccw_device *cdev)
1156 +static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
1157 {
1158 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
1159
1160 if (!adapter)
1161 return 0;
1162
1163 - zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
1164 + zfcp_erp_set_adapter_status(adapter, set);
1165 + zfcp_erp_adapter_shutdown(adapter, 0, tag);
1166 zfcp_erp_wait(adapter);
1167
1168 zfcp_ccw_adapter_put(adapter);
1169 @@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
1170 }
1171
1172 /**
1173 + * zfcp_ccw_set_offline - set_offline function of zfcp driver
1174 + * @cdev: pointer to belonging ccw device
1175 + *
1176 + * This function gets called by the common i/o layer and sets an adapter
1177 + * into state offline.
1178 + */
1179 +static int zfcp_ccw_set_offline(struct ccw_device *cdev)
1180 +{
1181 + return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
1182 +}
1183 +
1184 +/**
1185 * zfcp_ccw_notify - ccw notify function
1186 * @cdev: pointer to belonging ccw device
1187 * @event: indicates if adapter was detached or attached
1188 @@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
1189
1190 switch (event) {
1191 case CIO_GONE:
1192 + if (atomic_read(&adapter->status) &
1193 + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
1194 + zfcp_dbf_hba_basic("ccnigo1", adapter);
1195 + break;
1196 + }
1197 dev_warn(&cdev->dev, "The FCP device has been detached\n");
1198 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
1199 break;
1200 @@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
1201 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
1202 break;
1203 case CIO_OPER:
1204 + if (atomic_read(&adapter->status) &
1205 + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
1206 + zfcp_dbf_hba_basic("ccniop1", adapter);
1207 + break;
1208 + }
1209 dev_info(&cdev->dev, "The FCP device is operational again\n");
1210 zfcp_erp_set_adapter_status(adapter,
1211 ZFCP_STATUS_COMMON_RUNNING);
1212 @@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
1213 zfcp_ccw_adapter_put(adapter);
1214 }
1215
1216 +static int zfcp_ccw_suspend(struct ccw_device *cdev)
1217 +{
1218 + zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
1219 + return 0;
1220 +}
1221 +
1222 +static int zfcp_ccw_thaw(struct ccw_device *cdev)
1223 +{
1224 + /* trace records for thaw and final shutdown during suspend
1225 + can only be found in system dump until the end of suspend
1226 + but not after resume because it's based on the memory image
1227 + right after the very first suspend (freeze) callback */
1228 + zfcp_ccw_activate(cdev, 0, "ccthaw1");
1229 + return 0;
1230 +}
1231 +
1232 +static int zfcp_ccw_resume(struct ccw_device *cdev)
1233 +{
1234 + zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
1235 + return 0;
1236 +}
1237 +
1238 struct ccw_driver zfcp_ccw_driver = {
1239 .driver = {
1240 .owner = THIS_MODULE,
1241 @@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = {
1242 .set_offline = zfcp_ccw_set_offline,
1243 .notify = zfcp_ccw_notify,
1244 .shutdown = zfcp_ccw_shutdown,
1245 - .freeze = zfcp_ccw_set_offline,
1246 - .thaw = zfcp_ccw_activate,
1247 - .restore = zfcp_ccw_activate,
1248 + .freeze = zfcp_ccw_suspend,
1249 + .thaw = zfcp_ccw_thaw,
1250 + .restore = zfcp_ccw_resume,
1251 };
1252 diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
1253 index fab2c25..8ed63aa 100644
1254 --- a/drivers/s390/scsi/zfcp_cfdc.c
1255 +++ b/drivers/s390/scsi/zfcp_cfdc.c
1256 @@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
1257 }
1258 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1259
1260 - shost_for_each_device(sdev, port->adapter->scsi_host) {
1261 + shost_for_each_device(sdev, adapter->scsi_host) {
1262 zfcp_sdev = sdev_to_zfcp(sdev);
1263 status = atomic_read(&zfcp_sdev->status);
1264 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
1265 diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1266 index a9a816e..79b9848 100644
1267 --- a/drivers/s390/scsi/zfcp_dbf.c
1268 +++ b/drivers/s390/scsi/zfcp_dbf.c
1269 @@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
1270 length = min((u16)sizeof(struct qdio_buffer),
1271 (u16)ZFCP_DBF_PAY_MAX_REC);
1272
1273 - while ((char *)pl[payload->counter] && payload->counter < scount) {
1274 + while (payload->counter < scount && (char *)pl[payload->counter]) {
1275 memcpy(payload->data, (char *)pl[payload->counter], length);
1276 debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
1277 payload->counter++;
1278 @@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
1279 spin_unlock_irqrestore(&dbf->pay_lock, flags);
1280 }
1281
1282 +/**
1283 + * zfcp_dbf_hba_basic - trace event for basic adapter events
1284 + * @adapter: pointer to struct zfcp_adapter
1285 + */
1286 +void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
1287 +{
1288 + struct zfcp_dbf *dbf = adapter->dbf;
1289 + struct zfcp_dbf_hba *rec = &dbf->hba_buf;
1290 + unsigned long flags;
1291 +
1292 + spin_lock_irqsave(&dbf->hba_lock, flags);
1293 + memset(rec, 0, sizeof(*rec));
1294 +
1295 + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1296 + rec->id = ZFCP_DBF_HBA_BASIC;
1297 +
1298 + debug_event(dbf->hba, 1, rec, sizeof(*rec));
1299 + spin_unlock_irqrestore(&dbf->hba_lock, flags);
1300 +}
1301 +
1302 static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
1303 struct zfcp_adapter *adapter,
1304 struct zfcp_port *port,
1305 diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
1306 index 714f087..3ac7a4b 100644
1307 --- a/drivers/s390/scsi/zfcp_dbf.h
1308 +++ b/drivers/s390/scsi/zfcp_dbf.h
1309 @@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
1310 ZFCP_DBF_HBA_RES = 1,
1311 ZFCP_DBF_HBA_USS = 2,
1312 ZFCP_DBF_HBA_BIT = 3,
1313 + ZFCP_DBF_HBA_BASIC = 4,
1314 };
1315
1316 /**
1317 diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
1318 index ed5d921..f172b84 100644
1319 --- a/drivers/s390/scsi/zfcp_def.h
1320 +++ b/drivers/s390/scsi/zfcp_def.h
1321 @@ -77,6 +77,7 @@ struct zfcp_reqlist;
1322 #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
1323 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
1324 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
1325 +#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
1326 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
1327 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
1328 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
1329 @@ -204,6 +205,7 @@ struct zfcp_port {
1330 struct zfcp_adapter *adapter; /* adapter used to access port */
1331 struct list_head unit_list; /* head of logical unit list */
1332 rwlock_t unit_list_lock; /* unit list lock */
1333 + atomic_t units; /* zfcp_unit count */
1334 atomic_t status; /* status of this remote port */
1335 u64 wwnn; /* WWNN if known */
1336 u64 wwpn; /* WWPN */
1337 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
1338 index 2302e1c..ef9e502 100644
1339 --- a/drivers/s390/scsi/zfcp_ext.h
1340 +++ b/drivers/s390/scsi/zfcp_ext.h
1341 @@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
1342 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
1343 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
1344 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
1345 +extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
1346 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
1347 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
1348 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
1349 @@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
1350 extern struct attribute_group zfcp_sysfs_unit_attrs;
1351 extern struct attribute_group zfcp_sysfs_adapter_attrs;
1352 extern struct attribute_group zfcp_sysfs_port_attrs;
1353 +extern struct mutex zfcp_sysfs_port_units_mutex;
1354 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
1355 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
1356
1357 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1358 index e9a787e..2136fc2 100644
1359 --- a/drivers/s390/scsi/zfcp_fsf.c
1360 +++ b/drivers/s390/scsi/zfcp_fsf.c
1361 @@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
1362 return;
1363 }
1364
1365 - zfcp_dbf_hba_fsf_uss("fssrh_2", req);
1366 + zfcp_dbf_hba_fsf_uss("fssrh_4", req);
1367
1368 switch (sr_buf->status_type) {
1369 case FSF_STATUS_READ_PORT_CLOSED:
1370 @@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
1371 }
1372 }
1373
1374 +#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
1375 +#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
1376 +#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
1377 +#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
1378 +#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
1379 +#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
1380 +#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
1381 +
1382 +static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
1383 +{
1384 + u32 fdmi_speed = 0;
1385 + if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
1386 + fdmi_speed |= FC_PORTSPEED_1GBIT;
1387 + if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
1388 + fdmi_speed |= FC_PORTSPEED_2GBIT;
1389 + if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
1390 + fdmi_speed |= FC_PORTSPEED_4GBIT;
1391 + if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
1392 + fdmi_speed |= FC_PORTSPEED_10GBIT;
1393 + if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
1394 + fdmi_speed |= FC_PORTSPEED_8GBIT;
1395 + if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
1396 + fdmi_speed |= FC_PORTSPEED_16GBIT;
1397 + if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
1398 + fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
1399 + return fdmi_speed;
1400 +}
1401 +
1402 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
1403 {
1404 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
1405 @@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
1406 fc_host_port_name(shost) = nsp->fl_wwpn;
1407 fc_host_node_name(shost) = nsp->fl_wwnn;
1408 fc_host_port_id(shost) = ntoh24(bottom->s_id);
1409 - fc_host_speed(shost) = bottom->fc_link_speed;
1410 + fc_host_speed(shost) =
1411 + zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
1412 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
1413
1414 adapter->hydra_version = bottom->adapter_type;
1415 @@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
1416 } else
1417 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
1418 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
1419 - fc_host_supported_speeds(shost) = bottom->supported_speed;
1420 + fc_host_supported_speeds(shost) =
1421 + zfcp_fsf_convert_portspeed(bottom->supported_speed);
1422 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
1423 FC_FC4_LIST_SIZE);
1424 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
1425 @@ -771,12 +801,14 @@ out:
1426 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
1427 {
1428 struct scsi_device *sdev = req->data;
1429 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1430 + struct zfcp_scsi_dev *zfcp_sdev;
1431 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
1432
1433 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1434 return;
1435
1436 + zfcp_sdev = sdev_to_zfcp(sdev);
1437 +
1438 switch (req->qtcb->header.fsf_status) {
1439 case FSF_PORT_HANDLE_NOT_VALID:
1440 if (fsq->word[0] == fsq->word[1]) {
1441 @@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1442
1443 switch (header->fsf_status) {
1444 case FSF_GOOD:
1445 - zfcp_dbf_san_res("fsscth1", req);
1446 + zfcp_dbf_san_res("fsscth2", req);
1447 ct->status = 0;
1448 break;
1449 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1450 @@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1451 {
1452 struct zfcp_adapter *adapter = req->adapter;
1453 struct scsi_device *sdev = req->data;
1454 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1455 + struct zfcp_scsi_dev *zfcp_sdev;
1456 struct fsf_qtcb_header *header = &req->qtcb->header;
1457 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1458
1459 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1460 return;
1461
1462 + zfcp_sdev = sdev_to_zfcp(sdev);
1463 +
1464 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1465 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1466 ZFCP_STATUS_LUN_SHARED |
1467 @@ -1856,11 +1890,13 @@ out:
1468 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1469 {
1470 struct scsi_device *sdev = req->data;
1471 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1472 + struct zfcp_scsi_dev *zfcp_sdev;
1473
1474 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1475 return;
1476
1477 + zfcp_sdev = sdev_to_zfcp(sdev);
1478 +
1479 switch (req->qtcb->header.fsf_status) {
1480 case FSF_PORT_HANDLE_NOT_VALID:
1481 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1482 @@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1483 {
1484 struct fsf_qual_latency_info *lat_in;
1485 struct latency_cont *lat = NULL;
1486 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
1487 + struct zfcp_scsi_dev *zfcp_sdev;
1488 struct zfcp_blk_drv_data blktrc;
1489 int ticks = req->adapter->timer_ticks;
1490
1491 @@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1492
1493 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
1494 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
1495 + zfcp_sdev = sdev_to_zfcp(scsi->device);
1496 blktrc.flags |= ZFCP_BLK_LAT_VALID;
1497 blktrc.channel_lat = lat_in->channel_lat * ticks;
1498 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
1499 @@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1500 {
1501 struct scsi_cmnd *scmnd = req->data;
1502 struct scsi_device *sdev = scmnd->device;
1503 - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1504 + struct zfcp_scsi_dev *zfcp_sdev;
1505 struct fsf_qtcb_header *header = &req->qtcb->header;
1506
1507 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
1508 return;
1509
1510 + zfcp_sdev = sdev_to_zfcp(sdev);
1511 +
1512 switch (header->fsf_status) {
1513 case FSF_HANDLE_MISMATCH:
1514 case FSF_PORT_HANDLE_NOT_VALID:
1515 diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
1516 index e14da57..e76d003 100644
1517 --- a/drivers/s390/scsi/zfcp_qdio.c
1518 +++ b/drivers/s390/scsi/zfcp_qdio.c
1519 @@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
1520 {
1521 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
1522 struct zfcp_adapter *adapter = qdio->adapter;
1523 - struct qdio_buffer_element *sbale;
1524 int sbal_no, sbal_idx;
1525 - void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
1526 - u64 req_id;
1527 - u8 scount;
1528
1529 if (unlikely(qdio_err)) {
1530 - memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
1531 if (zfcp_adapter_multi_buffer_active(adapter)) {
1532 + void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
1533 + struct qdio_buffer_element *sbale;
1534 + u64 req_id;
1535 + u8 scount;
1536 +
1537 + memset(pl, 0,
1538 + ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
1539 sbale = qdio->res_q[idx]->element;
1540 req_id = (u64) sbale->addr;
1541 - scount = sbale->scount + 1; /* incl. signaling SBAL */
1542 + scount = min(sbale->scount + 1,
1543 + ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
1544 + /* incl. signaling SBAL */
1545
1546 for (sbal_no = 0; sbal_no < scount; sbal_no++) {
1547 sbal_idx = (idx + sbal_no) %
1548 diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
1549 index cdc4ff7..9e62210 100644
1550 --- a/drivers/s390/scsi/zfcp_sysfs.c
1551 +++ b/drivers/s390/scsi/zfcp_sysfs.c
1552 @@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
1553 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
1554 zfcp_sysfs_port_rescan_store);
1555
1556 +DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
1557 +
1558 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
1559 struct device_attribute *attr,
1560 const char *buf, size_t count)
1561 @@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
1562 else
1563 retval = 0;
1564
1565 + mutex_lock(&zfcp_sysfs_port_units_mutex);
1566 + if (atomic_read(&port->units) > 0) {
1567 + retval = -EBUSY;
1568 + mutex_unlock(&zfcp_sysfs_port_units_mutex);
1569 + goto out;
1570 + }
1571 + /* port is about to be removed, so no more unit_add */
1572 + atomic_set(&port->units, -1);
1573 + mutex_unlock(&zfcp_sysfs_port_units_mutex);
1574 +
1575 write_lock_irq(&adapter->port_list_lock);
1576 list_del(&port->list);
1577 write_unlock_irq(&adapter->port_list_lock);
1578 @@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
1579 {
1580 struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
1581 u64 fcp_lun;
1582 + int retval;
1583
1584 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
1585 return -EINVAL;
1586
1587 - if (zfcp_unit_add(port, fcp_lun))
1588 - return -EINVAL;
1589 + retval = zfcp_unit_add(port, fcp_lun);
1590 + if (retval)
1591 + return retval;
1592
1593 return count;
1594 }
1595 diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
1596 index 20796eb..4e6a535 100644
1597 --- a/drivers/s390/scsi/zfcp_unit.c
1598 +++ b/drivers/s390/scsi/zfcp_unit.c
1599 @@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
1600 {
1601 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
1602
1603 - put_device(&unit->port->dev);
1604 + atomic_dec(&unit->port->units);
1605 kfree(unit);
1606 }
1607
1608 @@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
1609 int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
1610 {
1611 struct zfcp_unit *unit;
1612 + int retval = 0;
1613 +
1614 + mutex_lock(&zfcp_sysfs_port_units_mutex);
1615 + if (atomic_read(&port->units) == -1) {
1616 + /* port is already gone */
1617 + retval = -ENODEV;
1618 + goto out;
1619 + }
1620
1621 unit = zfcp_unit_find(port, fcp_lun);
1622 if (unit) {
1623 put_device(&unit->dev);
1624 - return -EEXIST;
1625 + retval = -EEXIST;
1626 + goto out;
1627 }
1628
1629 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
1630 - if (!unit)
1631 - return -ENOMEM;
1632 + if (!unit) {
1633 + retval = -ENOMEM;
1634 + goto out;
1635 + }
1636
1637 unit->port = port;
1638 unit->fcp_lun = fcp_lun;
1639 @@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
1640 if (dev_set_name(&unit->dev, "0x%016llx",
1641 (unsigned long long) fcp_lun)) {
1642 kfree(unit);
1643 - return -ENOMEM;
1644 + retval = -ENOMEM;
1645 + goto out;
1646 }
1647
1648 - get_device(&port->dev);
1649 -
1650 if (device_register(&unit->dev)) {
1651 put_device(&unit->dev);
1652 - return -ENOMEM;
1653 + retval = -ENOMEM;
1654 + goto out;
1655 }
1656
1657 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
1658 device_unregister(&unit->dev);
1659 - return -EINVAL;
1660 + retval = -EINVAL;
1661 + goto out;
1662 }
1663
1664 + atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
1665 +
1666 write_lock_irq(&port->unit_list_lock);
1667 list_add_tail(&unit->list, &port->unit_list);
1668 write_unlock_irq(&port->unit_list_lock);
1669
1670 zfcp_unit_scsi_scan(unit);
1671
1672 - return 0;
1673 +out:
1674 + mutex_unlock(&zfcp_sysfs_port_units_mutex);
1675 + return retval;
1676 }
1677
1678 /**
1679 diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
1680 index 68ce085..a540162 100644
1681 --- a/drivers/scsi/atp870u.c
1682 +++ b/drivers/scsi/atp870u.c
1683 @@ -1173,7 +1173,16 @@ wait_io1:
1684 outw(val, tmport);
1685 outb(2, 0x80);
1686 TCM_SYNC:
1687 - udelay(0x800);
1688 + /*
1689 + * The funny division into multiple delays is to accomodate
1690 + * arches like ARM where udelay() multiplies its argument by
1691 + * a large number to initialize a loop counter. To avoid
1692 + * overflow, the maximum supported udelay is 2000 microseconds.
1693 + *
1694 + * XXX it would be more polite to find a way to use msleep()
1695 + */
1696 + mdelay(2);
1697 + udelay(48);
1698 if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
1699 outw(0, tmport--);
1700 outb(0, tmport);
1701 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
1702 index 33ef60d..6a8568c 100644
1703 --- a/fs/cifs/cifs_unicode.c
1704 +++ b/fs/cifs/cifs_unicode.c
1705 @@ -203,6 +203,27 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
1706 int i;
1707 wchar_t wchar_to; /* needed to quiet sparse */
1708
1709 + /* special case for utf8 to handle no plane0 chars */
1710 + if (!strcmp(codepage->charset, "utf8")) {
1711 + /*
1712 + * convert utf8 -> utf16, we assume we have enough space
1713 + * as caller should have assumed conversion does not overflow
1714 + * in destination len is length in wchar_t units (16bits)
1715 + */
1716 + i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
1717 + (wchar_t *) to, len);
1718 +
1719 + /* if success terminate and exit */
1720 + if (i >= 0)
1721 + goto success;
1722 + /*
1723 + * if fails fall back to UCS encoding as this
1724 + * function should not return negative values
1725 + * currently can fail only if source contains
1726 + * invalid encoded characters
1727 + */
1728 + }
1729 +
1730 for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
1731 charlen = codepage->char2uni(from, len, &wchar_to);
1732 if (charlen < 1) {
1733 @@ -215,6 +236,7 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
1734 put_unaligned_le16(wchar_to, &to[i]);
1735 }
1736
1737 +success:
1738 put_unaligned_le16(0, &to[i]);
1739 return i;
1740 }
1741 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1742 index 65a78e9..f771e9f 100644
1743 --- a/fs/cifs/connect.c
1744 +++ b/fs/cifs/connect.c
1745 @@ -70,6 +70,7 @@ enum {
1746 /* Mount options that take no arguments */
1747 Opt_user_xattr, Opt_nouser_xattr,
1748 Opt_forceuid, Opt_noforceuid,
1749 + Opt_forcegid, Opt_noforcegid,
1750 Opt_noblocksend, Opt_noautotune,
1751 Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
1752 Opt_mapchars, Opt_nomapchars, Opt_sfu,
1753 @@ -121,6 +122,8 @@ static const match_table_t cifs_mount_option_tokens = {
1754 { Opt_nouser_xattr, "nouser_xattr" },
1755 { Opt_forceuid, "forceuid" },
1756 { Opt_noforceuid, "noforceuid" },
1757 + { Opt_forcegid, "forcegid" },
1758 + { Opt_noforcegid, "noforcegid" },
1759 { Opt_noblocksend, "noblocksend" },
1760 { Opt_noautotune, "noautotune" },
1761 { Opt_hard, "hard" },
1762 @@ -1287,6 +1290,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1763 case Opt_noforceuid:
1764 override_uid = 0;
1765 break;
1766 + case Opt_forcegid:
1767 + override_gid = 1;
1768 + break;
1769 + case Opt_noforcegid:
1770 + override_gid = 0;
1771 + break;
1772 case Opt_noblocksend:
1773 vol->noblocksnd = 1;
1774 break;
1775 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1776 index 55a654d..dcd08e4 100644
1777 --- a/fs/ext4/inode.c
1778 +++ b/fs/ext4/inode.c
1779 @@ -2386,6 +2386,16 @@ static int ext4_nonda_switch(struct super_block *sb)
1780 free_blocks = EXT4_C2B(sbi,
1781 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
1782 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
1783 + /*
1784 + * Start pushing delalloc when 1/2 of free blocks are dirty.
1785 + */
1786 + if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
1787 + !writeback_in_progress(sb->s_bdi) &&
1788 + down_read_trylock(&sb->s_umount)) {
1789 + writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
1790 + up_read(&sb->s_umount);
1791 + }
1792 +
1793 if (2 * free_blocks < 3 * dirty_blocks ||
1794 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
1795 /*
1796 @@ -2394,13 +2404,6 @@ static int ext4_nonda_switch(struct super_block *sb)
1797 */
1798 return 1;
1799 }
1800 - /*
1801 - * Even if we don't switch but are nearing capacity,
1802 - * start pushing delalloc when 1/2 of free blocks are dirty.
1803 - */
1804 - if (free_blocks < 2 * dirty_blocks)
1805 - writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
1806 -
1807 return 0;
1808 }
1809
1810 @@ -3889,6 +3892,7 @@ static int ext4_do_update_inode(handle_t *handle,
1811 struct ext4_inode_info *ei = EXT4_I(inode);
1812 struct buffer_head *bh = iloc->bh;
1813 int err = 0, rc, block;
1814 + int need_datasync = 0;
1815
1816 /* For fields not not tracking in the in-memory inode,
1817 * initialise them to zero for new inodes. */
1818 @@ -3937,7 +3941,10 @@ static int ext4_do_update_inode(handle_t *handle,
1819 raw_inode->i_file_acl_high =
1820 cpu_to_le16(ei->i_file_acl >> 32);
1821 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
1822 - ext4_isize_set(raw_inode, ei->i_disksize);
1823 + if (ei->i_disksize != ext4_isize(raw_inode)) {
1824 + ext4_isize_set(raw_inode, ei->i_disksize);
1825 + need_datasync = 1;
1826 + }
1827 if (ei->i_disksize > 0x7fffffffULL) {
1828 struct super_block *sb = inode->i_sb;
1829 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
1830 @@ -3988,7 +3995,7 @@ static int ext4_do_update_inode(handle_t *handle,
1831 err = rc;
1832 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
1833
1834 - ext4_update_inode_fsync_trans(handle, inode, 0);
1835 + ext4_update_inode_fsync_trans(handle, inode, need_datasync);
1836 out_brelse:
1837 brelse(bh);
1838 ext4_std_error(inode->i_sb, err);
1839 diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
1840 index c5826c6..e2016f3 100644
1841 --- a/fs/ext4/move_extent.c
1842 +++ b/fs/ext4/move_extent.c
1843 @@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
1844 }
1845
1846 /**
1847 - * mext_check_null_inode - NULL check for two inodes
1848 - *
1849 - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
1850 - */
1851 -static int
1852 -mext_check_null_inode(struct inode *inode1, struct inode *inode2,
1853 - const char *function, unsigned int line)
1854 -{
1855 - int ret = 0;
1856 -
1857 - if (inode1 == NULL) {
1858 - __ext4_error(inode2->i_sb, function, line,
1859 - "Both inodes should not be NULL: "
1860 - "inode1 NULL inode2 %lu", inode2->i_ino);
1861 - ret = -EIO;
1862 - } else if (inode2 == NULL) {
1863 - __ext4_error(inode1->i_sb, function, line,
1864 - "Both inodes should not be NULL: "
1865 - "inode1 %lu inode2 NULL", inode1->i_ino);
1866 - ret = -EIO;
1867 - }
1868 - return ret;
1869 -}
1870 -
1871 -/**
1872 * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
1873 *
1874 - * @orig_inode: original inode structure
1875 - * @donor_inode: donor inode structure
1876 - * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
1877 - * i_ino order.
1878 + * Acquire write lock of i_data_sem of the two inodes
1879 */
1880 static void
1881 -double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
1882 +double_down_write_data_sem(struct inode *first, struct inode *second)
1883 {
1884 - struct inode *first = orig_inode, *second = donor_inode;
1885 + if (first < second) {
1886 + down_write(&EXT4_I(first)->i_data_sem);
1887 + down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
1888 + } else {
1889 + down_write(&EXT4_I(second)->i_data_sem);
1890 + down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
1891
1892 - /*
1893 - * Use the inode number to provide the stable locking order instead
1894 - * of its address, because the C language doesn't guarantee you can
1895 - * compare pointers that don't come from the same array.
1896 - */
1897 - if (donor_inode->i_ino < orig_inode->i_ino) {
1898 - first = donor_inode;
1899 - second = orig_inode;
1900 }
1901 -
1902 - down_write(&EXT4_I(first)->i_data_sem);
1903 - down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
1904 }
1905
1906 /**
1907 @@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode,
1908 return -EINVAL;
1909 }
1910
1911 - /* Files should be in the same ext4 FS */
1912 - if (orig_inode->i_sb != donor_inode->i_sb) {
1913 - ext4_debug("ext4 move extent: The argument files "
1914 - "should be in same FS [ino:orig %lu, donor %lu]\n",
1915 - orig_inode->i_ino, donor_inode->i_ino);
1916 - return -EINVAL;
1917 - }
1918 -
1919 /* Ext4 move extent supports only extent based file */
1920 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
1921 ext4_debug("ext4 move extent: orig file is not extents "
1922 @@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode,
1923 * @inode1: the inode structure
1924 * @inode2: the inode structure
1925 *
1926 - * Lock two inodes' i_mutex by i_ino order.
1927 - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
1928 + * Lock two inodes' i_mutex
1929 */
1930 -static int
1931 +static void
1932 mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
1933 {
1934 - int ret = 0;
1935 -
1936 - BUG_ON(inode1 == NULL && inode2 == NULL);
1937 -
1938 - ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
1939 - if (ret < 0)
1940 - goto out;
1941 -
1942 - if (inode1 == inode2) {
1943 - mutex_lock(&inode1->i_mutex);
1944 - goto out;
1945 - }
1946 -
1947 - if (inode1->i_ino < inode2->i_ino) {
1948 + BUG_ON(inode1 == inode2);
1949 + if (inode1 < inode2) {
1950 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
1951 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
1952 } else {
1953 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
1954 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
1955 }
1956 -
1957 -out:
1958 - return ret;
1959 }
1960
1961 /**
1962 @@ -1109,28 +1051,13 @@ out:
1963 * @inode1: the inode that is released first
1964 * @inode2: the inode that is released second
1965 *
1966 - * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
1967 */
1968
1969 -static int
1970 +static void
1971 mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
1972 {
1973 - int ret = 0;
1974 -
1975 - BUG_ON(inode1 == NULL && inode2 == NULL);
1976 -
1977 - ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
1978 - if (ret < 0)
1979 - goto out;
1980 -
1981 - if (inode1)
1982 - mutex_unlock(&inode1->i_mutex);
1983 -
1984 - if (inode2 && inode2 != inode1)
1985 - mutex_unlock(&inode2->i_mutex);
1986 -
1987 -out:
1988 - return ret;
1989 + mutex_unlock(&inode1->i_mutex);
1990 + mutex_unlock(&inode2->i_mutex);
1991 }
1992
1993 /**
1994 @@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1995 ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
1996 ext4_lblk_t rest_blocks;
1997 pgoff_t orig_page_offset = 0, seq_end_page;
1998 - int ret1, ret2, depth, last_extent = 0;
1999 + int ret, depth, last_extent = 0;
2000 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
2001 int data_offset_in_page;
2002 int block_len_in_page;
2003 int uninit;
2004
2005 - /* orig and donor should be different file */
2006 - if (orig_inode->i_ino == donor_inode->i_ino) {
2007 + if (orig_inode->i_sb != donor_inode->i_sb) {
2008 + ext4_debug("ext4 move extent: The argument files "
2009 + "should be in same FS [ino:orig %lu, donor %lu]\n",
2010 + orig_inode->i_ino, donor_inode->i_ino);
2011 + return -EINVAL;
2012 + }
2013 +
2014 + /* orig and donor should be different inodes */
2015 + if (orig_inode == donor_inode) {
2016 ext4_debug("ext4 move extent: The argument files should not "
2017 - "be same file [ino:orig %lu, donor %lu]\n",
2018 + "be same inode [ino:orig %lu, donor %lu]\n",
2019 orig_inode->i_ino, donor_inode->i_ino);
2020 return -EINVAL;
2021 }
2022 @@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2023 orig_inode->i_ino, donor_inode->i_ino);
2024 return -EINVAL;
2025 }
2026 -
2027 + /* TODO: This is non obvious task to swap blocks for inodes with full
2028 + jornaling enabled */
2029 + if (ext4_should_journal_data(orig_inode) ||
2030 + ext4_should_journal_data(donor_inode)) {
2031 + return -EINVAL;
2032 + }
2033 /* Protect orig and donor inodes against a truncate */
2034 - ret1 = mext_inode_double_lock(orig_inode, donor_inode);
2035 - if (ret1 < 0)
2036 - return ret1;
2037 + mext_inode_double_lock(orig_inode, donor_inode);
2038
2039 /* Protect extent tree against block allocations via delalloc */
2040 double_down_write_data_sem(orig_inode, donor_inode);
2041 /* Check the filesystem environment whether move_extent can be done */
2042 - ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
2043 + ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
2044 donor_start, &len);
2045 - if (ret1)
2046 + if (ret)
2047 goto out;
2048
2049 file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
2050 @@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2051 if (file_end < block_end)
2052 len -= block_end - file_end;
2053
2054 - ret1 = get_ext_path(orig_inode, block_start, &orig_path);
2055 - if (ret1)
2056 + ret = get_ext_path(orig_inode, block_start, &orig_path);
2057 + if (ret)
2058 goto out;
2059
2060 /* Get path structure to check the hole */
2061 - ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
2062 - if (ret1)
2063 + ret = get_ext_path(orig_inode, block_start, &holecheck_path);
2064 + if (ret)
2065 goto out;
2066
2067 depth = ext_depth(orig_inode);
2068 @@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2069 last_extent = mext_next_extent(orig_inode,
2070 holecheck_path, &ext_cur);
2071 if (last_extent < 0) {
2072 - ret1 = last_extent;
2073 + ret = last_extent;
2074 goto out;
2075 }
2076 last_extent = mext_next_extent(orig_inode, orig_path,
2077 &ext_dummy);
2078 if (last_extent < 0) {
2079 - ret1 = last_extent;
2080 + ret = last_extent;
2081 goto out;
2082 }
2083 seq_start = le32_to_cpu(ext_cur->ee_block);
2084 @@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2085 if (le32_to_cpu(ext_cur->ee_block) > block_end) {
2086 ext4_debug("ext4 move extent: The specified range of file "
2087 "may be the hole\n");
2088 - ret1 = -EINVAL;
2089 + ret = -EINVAL;
2090 goto out;
2091 }
2092
2093 @@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2094 last_extent = mext_next_extent(orig_inode, holecheck_path,
2095 &ext_cur);
2096 if (last_extent < 0) {
2097 - ret1 = last_extent;
2098 + ret = last_extent;
2099 break;
2100 }
2101 add_blocks = ext4_ext_get_actual_len(ext_cur);
2102 @@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2103 orig_page_offset,
2104 data_offset_in_page,
2105 block_len_in_page, uninit,
2106 - &ret1);
2107 + &ret);
2108
2109 /* Count how many blocks we have exchanged */
2110 *moved_len += block_len_in_page;
2111 - if (ret1 < 0)
2112 + if (ret < 0)
2113 break;
2114 if (*moved_len > len) {
2115 EXT4_ERROR_INODE(orig_inode,
2116 "We replaced blocks too much! "
2117 "sum of replaced: %llu requested: %llu",
2118 *moved_len, len);
2119 - ret1 = -EIO;
2120 + ret = -EIO;
2121 break;
2122 }
2123
2124 @@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
2125 }
2126
2127 double_down_write_data_sem(orig_inode, donor_inode);
2128 - if (ret1 < 0)
2129 + if (ret < 0)
2130 break;
2131
2132 /* Decrease buffer counter */
2133 if (holecheck_path)
2134 ext4_ext_drop_refs(holecheck_path);
2135 - ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
2136 - if (ret1)
2137 + ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
2138 + if (ret)
2139 break;
2140 depth = holecheck_path->p_depth;
2141
2142 /* Decrease buffer counter */
2143 if (orig_path)
2144 ext4_ext_drop_refs(orig_path);
2145 - ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
2146 - if (ret1)
2147 + ret = get_ext_path(orig_inode, seq_start, &orig_path);
2148 + if (ret)
2149 break;
2150
2151 ext_cur = holecheck_path[depth].p_ext;
2152 @@ -1412,12 +1349,7 @@ out:
2153 kfree(holecheck_path);
2154 }
2155 double_up_write_data_sem(orig_inode, donor_inode);
2156 - ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
2157 -
2158 - if (ret1)
2159 - return ret1;
2160 - else if (ret2)
2161 - return ret2;
2162 + mext_inode_double_unlock(orig_inode, donor_inode);
2163
2164 - return 0;
2165 + return ret;
2166 }
2167 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2168 index 0a94cbb..ac76939 100644
2169 --- a/fs/ext4/namei.c
2170 +++ b/fs/ext4/namei.c
2171 @@ -1801,9 +1801,7 @@ retry:
2172 err = PTR_ERR(inode);
2173 if (!IS_ERR(inode)) {
2174 init_special_inode(inode, inode->i_mode, rdev);
2175 -#ifdef CONFIG_EXT4_FS_XATTR
2176 inode->i_op = &ext4_special_inode_operations;
2177 -#endif
2178 err = ext4_add_nondir(handle, dentry, inode);
2179 }
2180 ext4_journal_stop(handle);
2181 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2182 index 3407a62..231cacb 100644
2183 --- a/fs/ext4/resize.c
2184 +++ b/fs/ext4/resize.c
2185 @@ -200,8 +200,11 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
2186 * be a partial of a flex group.
2187 *
2188 * @sb: super block of fs to which the groups belongs
2189 + *
2190 + * Returns 0 on a successful allocation of the metadata blocks in the
2191 + * block group.
2192 */
2193 -static void ext4_alloc_group_tables(struct super_block *sb,
2194 +static int ext4_alloc_group_tables(struct super_block *sb,
2195 struct ext4_new_flex_group_data *flex_gd,
2196 int flexbg_size)
2197 {
2198 @@ -226,6 +229,8 @@ static void ext4_alloc_group_tables(struct super_block *sb,
2199 (last_group & ~(flexbg_size - 1))));
2200 next_group:
2201 group = group_data[0].group;
2202 + if (src_group >= group_data[0].group + flex_gd->count)
2203 + return -ENOSPC;
2204 start_blk = ext4_group_first_block_no(sb, src_group);
2205 last_blk = start_blk + group_data[src_group - group].blocks_count;
2206
2207 @@ -235,7 +240,6 @@ next_group:
2208
2209 start_blk += overhead;
2210
2211 - BUG_ON(src_group >= group_data[0].group + flex_gd->count);
2212 /* We collect contiguous blocks as much as possible. */
2213 src_group++;
2214 for (; src_group <= last_group; src_group++)
2215 @@ -300,6 +304,7 @@ next_group:
2216 group_data[i].free_blocks_count);
2217 }
2218 }
2219 + return 0;
2220 }
2221
2222 static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
2223 @@ -451,6 +456,9 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
2224 gdblocks = ext4_bg_num_gdb(sb, group);
2225 start = ext4_group_first_block_no(sb, group);
2226
2227 + if (!ext4_bg_has_super(sb, group))
2228 + goto handle_itb;
2229 +
2230 /* Copy all of the GDT blocks into the backup in this group */
2231 for (j = 0, block = start + 1; j < gdblocks; j++, block++) {
2232 struct buffer_head *gdb;
2233 @@ -493,6 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
2234 goto out;
2235 }
2236
2237 +handle_itb:
2238 /* Initialize group tables of the grop @group */
2239 if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
2240 goto handle_bb;
2241 @@ -1293,13 +1302,15 @@ exit_journal:
2242 err = err2;
2243
2244 if (!err) {
2245 - int i;
2246 + int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
2247 + int gdb_num_end = ((group + flex_gd->count - 1) /
2248 + EXT4_DESC_PER_BLOCK(sb));
2249 +
2250 update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
2251 sizeof(struct ext4_super_block));
2252 - for (i = 0; i < flex_gd->count; i++, group++) {
2253 + for (; gdb_num <= gdb_num_end; gdb_num++) {
2254 struct buffer_head *gdb_bh;
2255 - int gdb_num;
2256 - gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb);
2257 +
2258 gdb_bh = sbi->s_group_desc[gdb_num];
2259 update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
2260 gdb_bh->b_size);
2261 @@ -1676,7 +1687,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
2262 */
2263 while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
2264 flexbg_size)) {
2265 - ext4_alloc_group_tables(sb, flex_gd, flexbg_size);
2266 + if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
2267 + break;
2268 err = ext4_flex_group_add(sb, resize_inode, flex_gd);
2269 if (unlikely(err))
2270 break;
2271 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2272 index 12a278f..b1c28f1 100644
2273 --- a/fs/ext4/super.c
2274 +++ b/fs/ext4/super.c
2275 @@ -1692,7 +1692,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
2276
2277 static const char *token2str(int token)
2278 {
2279 - static const struct match_token *t;
2280 + const struct match_token *t;
2281
2282 for (t = tokens; t->token != Opt_err; t++)
2283 if (t->token == token && !strchr(t->pattern, '='))
2284 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
2285 index 539f36c..b35bd64 100644
2286 --- a/fs/fs-writeback.c
2287 +++ b/fs/fs-writeback.c
2288 @@ -68,6 +68,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
2289 {
2290 return test_bit(BDI_writeback_running, &bdi->state);
2291 }
2292 +EXPORT_SYMBOL(writeback_in_progress);
2293
2294 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
2295 {
2296 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2297 index 9956ac6..e5bfb11 100644
2298 --- a/fs/jbd2/journal.c
2299 +++ b/fs/jbd2/journal.c
2300 @@ -1317,6 +1317,11 @@ static void jbd2_mark_journal_empty(journal_t *journal)
2301
2302 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
2303 read_lock(&journal->j_state_lock);
2304 + /* Is it already empty? */
2305 + if (sb->s_start == 0) {
2306 + read_unlock(&journal->j_state_lock);
2307 + return;
2308 + }
2309 jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
2310 journal->j_tail_sequence);
2311
2312 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
2313 index 74d9be1..6bec5c0 100644
2314 --- a/fs/jffs2/wbuf.c
2315 +++ b/fs/jffs2/wbuf.c
2316 @@ -1043,10 +1043,10 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
2317 ops.datbuf = NULL;
2318
2319 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
2320 - if (ret || ops.oobretlen != ops.ooblen) {
2321 + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
2322 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
2323 jeb->offset, ops.ooblen, ops.oobretlen, ret);
2324 - if (!ret)
2325 + if (!ret || mtd_is_bitflip(ret))
2326 ret = -EIO;
2327 return ret;
2328 }
2329 @@ -1085,10 +1085,10 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
2330 ops.datbuf = NULL;
2331
2332 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
2333 - if (ret || ops.oobretlen != ops.ooblen) {
2334 + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
2335 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
2336 jeb->offset, ops.ooblen, ops.oobretlen, ret);
2337 - if (!ret)
2338 + if (!ret || mtd_is_bitflip(ret))
2339 ret = -EIO;
2340 return ret;
2341 }
2342 diff --git a/fs/proc/page.c b/fs/proc/page.c
2343 index 7fcd0d6..b8730d9 100644
2344 --- a/fs/proc/page.c
2345 +++ b/fs/proc/page.c
2346 @@ -115,7 +115,13 @@ u64 stable_page_flags(struct page *page)
2347 u |= 1 << KPF_COMPOUND_TAIL;
2348 if (PageHuge(page))
2349 u |= 1 << KPF_HUGE;
2350 - else if (PageTransCompound(page))
2351 + /*
2352 + * PageTransCompound can be true for non-huge compound pages (slab
2353 + * pages or pages allocated by drivers with __GFP_COMP) because it
2354 + * just checks PG_head/PG_tail, so we need to check PageLRU to make
2355 + * sure a given page is a thp, not a non-huge compound page.
2356 + */
2357 + else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
2358 u |= 1 << KPF_THP;
2359
2360 /*
2361 diff --git a/fs/udf/super.c b/fs/udf/super.c
2362 index e660ffd..4988a8a 100644
2363 --- a/fs/udf/super.c
2364 +++ b/fs/udf/super.c
2365 @@ -1287,6 +1287,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2366 udf_err(sb, "error loading logical volume descriptor: "
2367 "Partition table too long (%u > %lu)\n", table_len,
2368 sb->s_blocksize - sizeof(*lvd));
2369 + ret = 1;
2370 goto out_bh;
2371 }
2372
2373 @@ -1331,8 +1332,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2374 UDF_ID_SPARABLE,
2375 strlen(UDF_ID_SPARABLE))) {
2376 if (udf_load_sparable_map(sb, map,
2377 - (struct sparablePartitionMap *)gpm) < 0)
2378 + (struct sparablePartitionMap *)gpm) < 0) {
2379 + ret = 1;
2380 goto out_bh;
2381 + }
2382 } else if (!strncmp(upm2->partIdent.ident,
2383 UDF_ID_METADATA,
2384 strlen(UDF_ID_METADATA))) {
2385 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
2386 index 7c727a9..0abf1d4 100644
2387 --- a/include/linux/mempolicy.h
2388 +++ b/include/linux/mempolicy.h
2389 @@ -188,7 +188,7 @@ struct sp_node {
2390
2391 struct shared_policy {
2392 struct rb_root root;
2393 - spinlock_t lock;
2394 + struct mutex mutex;
2395 };
2396
2397 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
2398 diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
2399 index 22e61fd..28e493b 100644
2400 --- a/include/linux/xfrm.h
2401 +++ b/include/linux/xfrm.h
2402 @@ -84,6 +84,8 @@ struct xfrm_replay_state {
2403 __u32 bitmap;
2404 };
2405
2406 +#define XFRMA_REPLAY_ESN_MAX 4096
2407 +
2408 struct xfrm_replay_state_esn {
2409 unsigned int bmp_len;
2410 __u32 oseq;
2411 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
2412 index 96239e7..9f7e94b 100644
2413 --- a/include/net/xfrm.h
2414 +++ b/include/net/xfrm.h
2415 @@ -269,6 +269,9 @@ struct xfrm_replay {
2416 int (*check)(struct xfrm_state *x,
2417 struct sk_buff *skb,
2418 __be32 net_seq);
2419 + int (*recheck)(struct xfrm_state *x,
2420 + struct sk_buff *skb,
2421 + __be32 net_seq);
2422 void (*notify)(struct xfrm_state *x, int event);
2423 int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
2424 };
2425 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
2426 index 14f7070..5fc1570 100644
2427 --- a/kernel/cpuset.c
2428 +++ b/kernel/cpuset.c
2429 @@ -2065,6 +2065,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
2430 * (of no affect) on systems that are actively using CPU hotplug
2431 * but making no active use of cpusets.
2432 *
2433 + * The only exception to this is suspend/resume, where we don't
2434 + * modify cpusets at all.
2435 + *
2436 * This routine ensures that top_cpuset.cpus_allowed tracks
2437 * cpu_active_mask on each CPU hotplug (cpuhp) event.
2438 *
2439 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
2440 index d0c5baf..4eec66e 100644
2441 --- a/kernel/rcutree.c
2442 +++ b/kernel/rcutree.c
2443 @@ -295,7 +295,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
2444 static int
2445 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
2446 {
2447 - return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
2448 + return *rdp->nxttail[RCU_DONE_TAIL +
2449 + ACCESS_ONCE(rsp->completed) != rdp->completed] &&
2450 + !rcu_gp_in_progress(rsp);
2451 }
2452
2453 /*
2454 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2455 index 593087b..1d22981 100644
2456 --- a/kernel/sched/core.c
2457 +++ b/kernel/sched/core.c
2458 @@ -6937,34 +6937,66 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
2459 }
2460 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2461
2462 +static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
2463 +
2464 /*
2465 * Update cpusets according to cpu_active mask. If cpusets are
2466 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
2467 * around partition_sched_domains().
2468 + *
2469 + * If we come here as part of a suspend/resume, don't touch cpusets because we
2470 + * want to restore it back to its original state upon resume anyway.
2471 */
2472 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
2473 void *hcpu)
2474 {
2475 - switch (action & ~CPU_TASKS_FROZEN) {
2476 + switch (action) {
2477 + case CPU_ONLINE_FROZEN:
2478 + case CPU_DOWN_FAILED_FROZEN:
2479 +
2480 + /*
2481 + * num_cpus_frozen tracks how many CPUs are involved in suspend
2482 + * resume sequence. As long as this is not the last online
2483 + * operation in the resume sequence, just build a single sched
2484 + * domain, ignoring cpusets.
2485 + */
2486 + num_cpus_frozen--;
2487 + if (likely(num_cpus_frozen)) {
2488 + partition_sched_domains(1, NULL, NULL);
2489 + break;
2490 + }
2491 +
2492 + /*
2493 + * This is the last CPU online operation. So fall through and
2494 + * restore the original sched domains by considering the
2495 + * cpuset configurations.
2496 + */
2497 +
2498 case CPU_ONLINE:
2499 case CPU_DOWN_FAILED:
2500 cpuset_update_active_cpus();
2501 - return NOTIFY_OK;
2502 + break;
2503 default:
2504 return NOTIFY_DONE;
2505 }
2506 + return NOTIFY_OK;
2507 }
2508
2509 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
2510 void *hcpu)
2511 {
2512 - switch (action & ~CPU_TASKS_FROZEN) {
2513 + switch (action) {
2514 case CPU_DOWN_PREPARE:
2515 cpuset_update_active_cpus();
2516 - return NOTIFY_OK;
2517 + break;
2518 + case CPU_DOWN_PREPARE_FROZEN:
2519 + num_cpus_frozen++;
2520 + partition_sched_domains(1, NULL, NULL);
2521 + break;
2522 default:
2523 return NOTIFY_DONE;
2524 }
2525 + return NOTIFY_OK;
2526 }
2527
2528 void __init sched_init_smp(void)
2529 diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
2530 index 7b386e8..da5eb5b 100644
2531 --- a/kernel/sched/stop_task.c
2532 +++ b/kernel/sched/stop_task.c
2533 @@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
2534 {
2535 struct task_struct *stop = rq->stop;
2536
2537 - if (stop && stop->on_rq)
2538 + if (stop && stop->on_rq) {
2539 + stop->se.exec_start = rq->clock_task;
2540 return stop;
2541 + }
2542
2543 return NULL;
2544 }
2545 @@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
2546
2547 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
2548 {
2549 + struct task_struct *curr = rq->curr;
2550 + u64 delta_exec;
2551 +
2552 + delta_exec = rq->clock_task - curr->se.exec_start;
2553 + if (unlikely((s64)delta_exec < 0))
2554 + delta_exec = 0;
2555 +
2556 + schedstat_set(curr->se.statistics.exec_max,
2557 + max(curr->se.statistics.exec_max, delta_exec));
2558 +
2559 + curr->se.sum_exec_runtime += delta_exec;
2560 + account_group_exec_runtime(curr, delta_exec);
2561 +
2562 + curr->se.exec_start = rq->clock_task;
2563 + cpuacct_charge(curr, delta_exec);
2564 }
2565
2566 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
2567 @@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
2568
2569 static void set_curr_task_stop(struct rq *rq)
2570 {
2571 + struct task_struct *stop = rq->stop;
2572 +
2573 + stop->se.exec_start = rq->clock_task;
2574 }
2575
2576 static void switched_to_stop(struct rq *rq, struct task_struct *p)
2577 diff --git a/kernel/sys.c b/kernel/sys.c
2578 index e7006eb..898a84c 100644
2579 --- a/kernel/sys.c
2580 +++ b/kernel/sys.c
2581 @@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
2582 void kernel_restart(char *cmd)
2583 {
2584 kernel_restart_prepare(cmd);
2585 + disable_nonboot_cpus();
2586 if (!cmd)
2587 printk(KERN_EMERG "Restarting system.\n");
2588 else
2589 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2590 index 7584322..56f793d 100644
2591 --- a/kernel/workqueue.c
2592 +++ b/kernel/workqueue.c
2593 @@ -1864,7 +1864,9 @@ __acquires(&gcwq->lock)
2594
2595 spin_unlock_irq(&gcwq->lock);
2596
2597 + smp_wmb(); /* paired with test_and_set_bit(PENDING) */
2598 work_clear_pending(work);
2599 +
2600 lock_map_acquire_read(&cwq->wq->lockdep_map);
2601 lock_map_acquire(&lockdep_map);
2602 trace_workqueue_execute_start(work);
2603 diff --git a/lib/gcd.c b/lib/gcd.c
2604 index cce4f3c..3657f12 100644
2605 --- a/lib/gcd.c
2606 +++ b/lib/gcd.c
2607 @@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
2608
2609 if (a < b)
2610 swap(a, b);
2611 +
2612 + if (!b)
2613 + return a;
2614 while ((r = a % b) != 0) {
2615 a = b;
2616 b = r;
2617 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2618 index a799df5..c384e09 100644
2619 --- a/mm/hugetlb.c
2620 +++ b/mm/hugetlb.c
2621 @@ -2431,7 +2431,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2622 * from page cache lookup which is in HPAGE_SIZE units.
2623 */
2624 address = address & huge_page_mask(h);
2625 - pgoff = vma_hugecache_offset(h, vma, address);
2626 + pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2627 + vma->vm_pgoff;
2628 mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
2629
2630 /*
2631 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
2632 index 9afcbad..d1e4bef 100644
2633 --- a/mm/mempolicy.c
2634 +++ b/mm/mempolicy.c
2635 @@ -607,6 +607,42 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
2636 return first;
2637 }
2638
2639 +/*
2640 + * Apply policy to a single VMA
2641 + * This must be called with the mmap_sem held for writing.
2642 + */
2643 +static int vma_replace_policy(struct vm_area_struct *vma,
2644 + struct mempolicy *pol)
2645 +{
2646 + int err;
2647 + struct mempolicy *old;
2648 + struct mempolicy *new;
2649 +
2650 + pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
2651 + vma->vm_start, vma->vm_end, vma->vm_pgoff,
2652 + vma->vm_ops, vma->vm_file,
2653 + vma->vm_ops ? vma->vm_ops->set_policy : NULL);
2654 +
2655 + new = mpol_dup(pol);
2656 + if (IS_ERR(new))
2657 + return PTR_ERR(new);
2658 +
2659 + if (vma->vm_ops && vma->vm_ops->set_policy) {
2660 + err = vma->vm_ops->set_policy(vma, new);
2661 + if (err)
2662 + goto err_out;
2663 + }
2664 +
2665 + old = vma->vm_policy;
2666 + vma->vm_policy = new; /* protected by mmap_sem */
2667 + mpol_put(old);
2668 +
2669 + return 0;
2670 + err_out:
2671 + mpol_put(new);
2672 + return err;
2673 +}
2674 +
2675 /* Step 2: apply policy to a range and do splits. */
2676 static int mbind_range(struct mm_struct *mm, unsigned long start,
2677 unsigned long end, struct mempolicy *new_pol)
2678 @@ -655,23 +691,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
2679 if (err)
2680 goto out;
2681 }
2682 -
2683 - /*
2684 - * Apply policy to a single VMA. The reference counting of
2685 - * policy for vma_policy linkages has already been handled by
2686 - * vma_merge and split_vma as necessary. If this is a shared
2687 - * policy then ->set_policy will increment the reference count
2688 - * for an sp node.
2689 - */
2690 - pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
2691 - vma->vm_start, vma->vm_end, vma->vm_pgoff,
2692 - vma->vm_ops, vma->vm_file,
2693 - vma->vm_ops ? vma->vm_ops->set_policy : NULL);
2694 - if (vma->vm_ops && vma->vm_ops->set_policy) {
2695 - err = vma->vm_ops->set_policy(vma, new_pol);
2696 - if (err)
2697 - goto out;
2698 - }
2699 + err = vma_replace_policy(vma, new_pol);
2700 + if (err)
2701 + goto out;
2702 }
2703
2704 out:
2705 @@ -1510,8 +1532,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
2706 addr);
2707 if (vpol)
2708 pol = vpol;
2709 - } else if (vma->vm_policy)
2710 + } else if (vma->vm_policy) {
2711 pol = vma->vm_policy;
2712 +
2713 + /*
2714 + * shmem_alloc_page() passes MPOL_F_SHARED policy with
2715 + * a pseudo vma whose vma->vm_ops=NULL. Take a reference
2716 + * count on these policies which will be dropped by
2717 + * mpol_cond_put() later
2718 + */
2719 + if (mpol_needs_cond_ref(pol))
2720 + mpol_get(pol);
2721 + }
2722 }
2723 if (!pol)
2724 pol = &default_policy;
2725 @@ -2035,7 +2067,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2726 */
2727
2728 /* lookup first element intersecting start-end */
2729 -/* Caller holds sp->lock */
2730 +/* Caller holds sp->mutex */
2731 static struct sp_node *
2732 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2733 {
2734 @@ -2099,36 +2131,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2735
2736 if (!sp->root.rb_node)
2737 return NULL;
2738 - spin_lock(&sp->lock);
2739 + mutex_lock(&sp->mutex);
2740 sn = sp_lookup(sp, idx, idx+1);
2741 if (sn) {
2742 mpol_get(sn->policy);
2743 pol = sn->policy;
2744 }
2745 - spin_unlock(&sp->lock);
2746 + mutex_unlock(&sp->mutex);
2747 return pol;
2748 }
2749
2750 +static void sp_free(struct sp_node *n)
2751 +{
2752 + mpol_put(n->policy);
2753 + kmem_cache_free(sn_cache, n);
2754 +}
2755 +
2756 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2757 {
2758 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2759 rb_erase(&n->nd, &sp->root);
2760 - mpol_put(n->policy);
2761 - kmem_cache_free(sn_cache, n);
2762 + sp_free(n);
2763 }
2764
2765 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2766 struct mempolicy *pol)
2767 {
2768 - struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2769 + struct sp_node *n;
2770 + struct mempolicy *newpol;
2771
2772 + n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2773 if (!n)
2774 return NULL;
2775 +
2776 + newpol = mpol_dup(pol);
2777 + if (IS_ERR(newpol)) {
2778 + kmem_cache_free(sn_cache, n);
2779 + return NULL;
2780 + }
2781 + newpol->flags |= MPOL_F_SHARED;
2782 +
2783 n->start = start;
2784 n->end = end;
2785 - mpol_get(pol);
2786 - pol->flags |= MPOL_F_SHARED; /* for unref */
2787 - n->policy = pol;
2788 + n->policy = newpol;
2789 +
2790 return n;
2791 }
2792
2793 @@ -2136,10 +2182,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2794 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2795 unsigned long end, struct sp_node *new)
2796 {
2797 - struct sp_node *n, *new2 = NULL;
2798 + struct sp_node *n;
2799 + int ret = 0;
2800
2801 -restart:
2802 - spin_lock(&sp->lock);
2803 + mutex_lock(&sp->mutex);
2804 n = sp_lookup(sp, start, end);
2805 /* Take care of old policies in the same range. */
2806 while (n && n->start < end) {
2807 @@ -2152,16 +2198,14 @@ restart:
2808 } else {
2809 /* Old policy spanning whole new range. */
2810 if (n->end > end) {
2811 + struct sp_node *new2;
2812 + new2 = sp_alloc(end, n->end, n->policy);
2813 if (!new2) {
2814 - spin_unlock(&sp->lock);
2815 - new2 = sp_alloc(end, n->end, n->policy);
2816 - if (!new2)
2817 - return -ENOMEM;
2818 - goto restart;
2819 + ret = -ENOMEM;
2820 + goto out;
2821 }
2822 n->end = start;
2823 sp_insert(sp, new2);
2824 - new2 = NULL;
2825 break;
2826 } else
2827 n->end = start;
2828 @@ -2172,12 +2216,9 @@ restart:
2829 }
2830 if (new)
2831 sp_insert(sp, new);
2832 - spin_unlock(&sp->lock);
2833 - if (new2) {
2834 - mpol_put(new2->policy);
2835 - kmem_cache_free(sn_cache, new2);
2836 - }
2837 - return 0;
2838 +out:
2839 + mutex_unlock(&sp->mutex);
2840 + return ret;
2841 }
2842
2843 /**
2844 @@ -2195,7 +2236,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2845 int ret;
2846
2847 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2848 - spin_lock_init(&sp->lock);
2849 + mutex_init(&sp->mutex);
2850
2851 if (mpol) {
2852 struct vm_area_struct pvma;
2853 @@ -2249,7 +2290,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
2854 }
2855 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2856 if (err && new)
2857 - kmem_cache_free(sn_cache, new);
2858 + sp_free(new);
2859 return err;
2860 }
2861
2862 @@ -2261,16 +2302,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
2863
2864 if (!p->root.rb_node)
2865 return;
2866 - spin_lock(&p->lock);
2867 + mutex_lock(&p->mutex);
2868 next = rb_first(&p->root);
2869 while (next) {
2870 n = rb_entry(next, struct sp_node, nd);
2871 next = rb_next(&n->nd);
2872 - rb_erase(&n->nd, &p->root);
2873 - mpol_put(n->policy);
2874 - kmem_cache_free(sn_cache, n);
2875 + sp_delete(p, n);
2876 }
2877 - spin_unlock(&p->lock);
2878 + mutex_unlock(&p->mutex);
2879 }
2880
2881 /* assumes fs == KERNEL_DS */
2882 diff --git a/mm/slab.c b/mm/slab.c
2883 index e901a36..da2bb68 100644
2884 --- a/mm/slab.c
2885 +++ b/mm/slab.c
2886 @@ -1685,9 +1685,6 @@ void __init kmem_cache_init_late(void)
2887
2888 g_cpucache_up = LATE;
2889
2890 - /* Annotate slab for lockdep -- annotate the malloc caches */
2891 - init_lock_keys();
2892 -
2893 /* 6) resize the head arrays to their final sizes */
2894 mutex_lock(&cache_chain_mutex);
2895 list_for_each_entry(cachep, &cache_chain, next)
2896 @@ -1695,6 +1692,9 @@ void __init kmem_cache_init_late(void)
2897 BUG();
2898 mutex_unlock(&cache_chain_mutex);
2899
2900 + /* Annotate slab for lockdep -- annotate the malloc caches */
2901 + init_lock_keys();
2902 +
2903 /* Done! */
2904 g_cpucache_up = FULL;
2905
2906 diff --git a/mm/truncate.c b/mm/truncate.c
2907 index 61a183b..4224627 100644
2908 --- a/mm/truncate.c
2909 +++ b/mm/truncate.c
2910 @@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
2911 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
2912 return 0;
2913
2914 + clear_page_mlock(page);
2915 +
2916 spin_lock_irq(&mapping->tree_lock);
2917 if (PageDirty(page))
2918 goto failed;
2919
2920 - clear_page_mlock(page);
2921 BUG_ON(page_has_private(page));
2922 __delete_from_page_cache(page);
2923 spin_unlock_irq(&mapping->tree_lock);
2924 diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
2925 index 4d39d80..f364630 100644
2926 --- a/net/8021q/vlan_core.c
2927 +++ b/net/8021q/vlan_core.c
2928 @@ -106,7 +106,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
2929 return NULL;
2930 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
2931 skb->mac_header += VLAN_HLEN;
2932 - skb_reset_mac_len(skb);
2933 return skb;
2934 }
2935
2936 @@ -140,6 +139,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
2937
2938 skb_reset_network_header(skb);
2939 skb_reset_transport_header(skb);
2940 + skb_reset_mac_len(skb);
2941 +
2942 return skb;
2943
2944 err_free:
2945 diff --git a/net/core/dev.c b/net/core/dev.c
2946 index 3fd9cae..086bc2e 100644
2947 --- a/net/core/dev.c
2948 +++ b/net/core/dev.c
2949 @@ -2121,7 +2121,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2950 static netdev_features_t harmonize_features(struct sk_buff *skb,
2951 __be16 protocol, netdev_features_t features)
2952 {
2953 - if (!can_checksum_protocol(features, protocol)) {
2954 + if (skb->ip_summed != CHECKSUM_NONE &&
2955 + !can_checksum_protocol(features, protocol)) {
2956 features &= ~NETIF_F_ALL_CSUM;
2957 features &= ~NETIF_F_SG;
2958 } else if (illegal_highdma(skb->dev, skb)) {
2959 @@ -2617,15 +2618,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
2960 if (!skb_flow_dissect(skb, &keys))
2961 return;
2962
2963 - if (keys.ports) {
2964 - if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2965 - swap(keys.port16[0], keys.port16[1]);
2966 + if (keys.ports)
2967 skb->l4_rxhash = 1;
2968 - }
2969
2970 /* get a consistent hash (same value on both flow directions) */
2971 - if ((__force u32)keys.dst < (__force u32)keys.src)
2972 + if (((__force u32)keys.dst < (__force u32)keys.src) ||
2973 + (((__force u32)keys.dst == (__force u32)keys.src) &&
2974 + ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2975 swap(keys.dst, keys.src);
2976 + swap(keys.port16[0], keys.port16[1]);
2977 + }
2978
2979 hash = jhash_3words((__force u32)keys.dst,
2980 (__force u32)keys.src,
2981 diff --git a/net/core/sock.c b/net/core/sock.c
2982 index d3e0a52..4b469e3 100644
2983 --- a/net/core/sock.c
2984 +++ b/net/core/sock.c
2985 @@ -644,7 +644,8 @@ set_rcvbuf:
2986
2987 case SO_KEEPALIVE:
2988 #ifdef CONFIG_INET
2989 - if (sk->sk_protocol == IPPROTO_TCP)
2990 + if (sk->sk_protocol == IPPROTO_TCP &&
2991 + sk->sk_type == SOCK_STREAM)
2992 tcp_set_keepalive(sk, valbool);
2993 #endif
2994 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
2995 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2996 index bbd604c..2fe0dc2 100644
2997 --- a/net/ipv4/raw.c
2998 +++ b/net/ipv4/raw.c
2999 @@ -131,18 +131,20 @@ found:
3000 * 0 - deliver
3001 * 1 - block
3002 */
3003 -static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
3004 +static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
3005 {
3006 - int type;
3007 + struct icmphdr _hdr;
3008 + const struct icmphdr *hdr;
3009
3010 - if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
3011 + hdr = skb_header_pointer(skb, skb_transport_offset(skb),
3012 + sizeof(_hdr), &_hdr);
3013 + if (!hdr)
3014 return 1;
3015
3016 - type = icmp_hdr(skb)->type;
3017 - if (type < 32) {
3018 + if (hdr->type < 32) {
3019 __u32 data = raw_sk(sk)->filter.data;
3020
3021 - return ((1 << type) & data) != 0;
3022 + return ((1U << hdr->type) & data) != 0;
3023 }
3024
3025 /* Do not block unknown ICMP types */
3026 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
3027 index 367bdaf..8fbe2e2 100644
3028 --- a/net/ipv4/tcp.c
3029 +++ b/net/ipv4/tcp.c
3030 @@ -1594,8 +1594,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
3031 }
3032
3033 #ifdef CONFIG_NET_DMA
3034 - if (tp->ucopy.dma_chan)
3035 - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
3036 + if (tp->ucopy.dma_chan) {
3037 + if (tp->rcv_wnd == 0 &&
3038 + !skb_queue_empty(&sk->sk_async_wait_queue)) {
3039 + tcp_service_net_dma(sk, true);
3040 + tcp_cleanup_rbuf(sk, copied);
3041 + } else
3042 + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
3043 + }
3044 #endif
3045 if (copied >= target) {
3046 /* Do not sleep, just process backlog. */
3047 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3048 index 2c69eca..5ec6069 100644
3049 --- a/net/ipv6/addrconf.c
3050 +++ b/net/ipv6/addrconf.c
3051 @@ -793,10 +793,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
3052 struct in6_addr prefix;
3053 struct rt6_info *rt;
3054 struct net *net = dev_net(ifp->idev->dev);
3055 + struct flowi6 fl6 = {};
3056 +
3057 ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
3058 - rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
3059 + fl6.flowi6_oif = ifp->idev->dev->ifindex;
3060 + fl6.daddr = prefix;
3061 + rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
3062 + RT6_LOOKUP_F_IFACE);
3063
3064 - if (rt && addrconf_is_prefix_route(rt)) {
3065 + if (rt != net->ipv6.ip6_null_entry &&
3066 + addrconf_is_prefix_route(rt)) {
3067 if (onlink == 0) {
3068 ip6_del_rt(rt);
3069 rt = NULL;
3070 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
3071 index 92bb9cb..c3a007d 100644
3072 --- a/net/ipv6/ip6_fib.c
3073 +++ b/net/ipv6/ip6_fib.c
3074 @@ -818,6 +818,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
3075 offsetof(struct rt6_info, rt6i_src),
3076 allow_create, replace_required);
3077
3078 + if (IS_ERR(sn)) {
3079 + err = PTR_ERR(sn);
3080 + sn = NULL;
3081 + }
3082 if (!sn) {
3083 /* If it is failed, discard just allocated
3084 root, and then (in st_failure) stale node
3085 diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
3086 index 7e1e0fb..740c919 100644
3087 --- a/net/ipv6/mip6.c
3088 +++ b/net/ipv6/mip6.c
3089 @@ -84,28 +84,30 @@ static int mip6_mh_len(int type)
3090
3091 static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
3092 {
3093 - struct ip6_mh *mh;
3094 + struct ip6_mh _hdr;
3095 + const struct ip6_mh *mh;
3096
3097 - if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
3098 - !pskb_may_pull(skb, (skb_transport_offset(skb) +
3099 - ((skb_transport_header(skb)[1] + 1) << 3))))
3100 + mh = skb_header_pointer(skb, skb_transport_offset(skb),
3101 + sizeof(_hdr), &_hdr);
3102 + if (!mh)
3103 return -1;
3104
3105 - mh = (struct ip6_mh *)skb_transport_header(skb);
3106 + if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
3107 + return -1;
3108
3109 if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
3110 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
3111 mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
3112 - mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
3113 - skb_network_header(skb)));
3114 + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
3115 + skb_network_header_len(skb));
3116 return -1;
3117 }
3118
3119 if (mh->ip6mh_proto != IPPROTO_NONE) {
3120 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
3121 mh->ip6mh_proto);
3122 - mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
3123 - skb_network_header(skb)));
3124 + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
3125 + skb_network_header_len(skb));
3126 return -1;
3127 }
3128
3129 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
3130 index 5bddea7..3ee2870 100644
3131 --- a/net/ipv6/raw.c
3132 +++ b/net/ipv6/raw.c
3133 @@ -107,21 +107,20 @@ found:
3134 * 0 - deliver
3135 * 1 - block
3136 */
3137 -static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
3138 +static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
3139 {
3140 - struct icmp6hdr *icmph;
3141 - struct raw6_sock *rp = raw6_sk(sk);
3142 -
3143 - if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
3144 - __u32 *data = &rp->filter.data[0];
3145 - int bit_nr;
3146 + struct icmp6hdr *_hdr;
3147 + const struct icmp6hdr *hdr;
3148
3149 - icmph = (struct icmp6hdr *) skb->data;
3150 - bit_nr = icmph->icmp6_type;
3151 + hdr = skb_header_pointer(skb, skb_transport_offset(skb),
3152 + sizeof(_hdr), &_hdr);
3153 + if (hdr) {
3154 + const __u32 *data = &raw6_sk(sk)->filter.data[0];
3155 + unsigned int type = hdr->icmp6_type;
3156
3157 - return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
3158 + return (data[type >> 5] & (1U << (type & 31))) != 0;
3159 }
3160 - return 0;
3161 + return 1;
3162 }
3163
3164 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
3165 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3166 index c4920ca..2796b37 100644
3167 --- a/net/ipv6/route.c
3168 +++ b/net/ipv6/route.c
3169 @@ -1485,17 +1485,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
3170 struct fib6_table *table;
3171 struct net *net = dev_net(rt->dst.dev);
3172
3173 - if (rt == net->ipv6.ip6_null_entry)
3174 - return -ENOENT;
3175 + if (rt == net->ipv6.ip6_null_entry) {
3176 + err = -ENOENT;
3177 + goto out;
3178 + }
3179
3180 table = rt->rt6i_table;
3181 write_lock_bh(&table->tb6_lock);
3182 -
3183 err = fib6_del(rt, info);
3184 - dst_release(&rt->dst);
3185 -
3186 write_unlock_bh(&table->tb6_lock);
3187
3188 +out:
3189 + dst_release(&rt->dst);
3190 return err;
3191 }
3192
3193 diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
3194 index 7446038..5c82907 100644
3195 --- a/net/l2tp/l2tp_eth.c
3196 +++ b/net/l2tp/l2tp_eth.c
3197 @@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
3198 printk("\n");
3199 }
3200
3201 - if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
3202 + if (!pskb_may_pull(skb, ETH_HLEN))
3203 goto error;
3204
3205 secpath_reset(skb);
3206 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
3207 index 06592d8..1b9024e 100644
3208 --- a/net/netrom/af_netrom.c
3209 +++ b/net/netrom/af_netrom.c
3210 @@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
3211 msg->msg_flags |= MSG_TRUNC;
3212 }
3213
3214 - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
3215 + er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
3216 + if (er < 0) {
3217 + skb_free_datagram(sk, skb);
3218 + release_sock(sk);
3219 + return er;
3220 + }
3221
3222 if (sax != NULL) {
3223 sax->sax25_family = AF_NETROM;
3224 diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
3225 index 24d94c0..599f67a 100644
3226 --- a/net/sched/sch_cbq.c
3227 +++ b/net/sched/sch_cbq.c
3228 @@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
3229 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
3230 cl = defmap[TC_PRIO_BESTEFFORT];
3231
3232 - if (cl == NULL || cl->level >= head->level)
3233 + if (cl == NULL)
3234 goto fallback;
3235 }
3236 -
3237 + if (cl->level >= head->level)
3238 + goto fallback;
3239 #ifdef CONFIG_NET_CLS_ACT
3240 switch (result) {
3241 case TC_ACT_QUEUED:
3242 diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
3243 index e68cb44..cdd474a 100644
3244 --- a/net/sched/sch_qfq.c
3245 +++ b/net/sched/sch_qfq.c
3246 @@ -830,7 +830,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
3247 if (mask) {
3248 struct qfq_group *next = qfq_ffs(q, mask);
3249 if (qfq_gt(roundedF, next->F)) {
3250 - cl->S = next->F;
3251 + if (qfq_gt(limit, next->F))
3252 + cl->S = next->F;
3253 + else /* preserve timestamp correctness */
3254 + cl->S = limit;
3255 return;
3256 }
3257 }
3258 diff --git a/net/sctp/output.c b/net/sctp/output.c
3259 index 8fc4dcd..32ba8d0 100644
3260 --- a/net/sctp/output.c
3261 +++ b/net/sctp/output.c
3262 @@ -334,6 +334,25 @@ finish:
3263 return retval;
3264 }
3265
3266 +static void sctp_packet_release_owner(struct sk_buff *skb)
3267 +{
3268 + sk_free(skb->sk);
3269 +}
3270 +
3271 +static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
3272 +{
3273 + skb_orphan(skb);
3274 + skb->sk = sk;
3275 + skb->destructor = sctp_packet_release_owner;
3276 +
3277 + /*
3278 + * The data chunks have already been accounted for in sctp_sendmsg(),
3279 + * therefore only reserve a single byte to keep socket around until
3280 + * the packet has been transmitted.
3281 + */
3282 + atomic_inc(&sk->sk_wmem_alloc);
3283 +}
3284 +
3285 /* All packets are sent to the network through this function from
3286 * sctp_outq_tail().
3287 *
3288 @@ -375,7 +394,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
3289 /* Set the owning socket so that we know where to get the
3290 * destination IP address.
3291 */
3292 - skb_set_owner_w(nskb, sk);
3293 + sctp_packet_set_owner_w(nskb, sk);
3294
3295 if (!sctp_transport_dst_check(tp)) {
3296 sctp_transport_route(tp, NULL, sctp_sk(sk));
3297 diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
3298 index 54a0dc2..ab2bb42 100644
3299 --- a/net/xfrm/xfrm_input.c
3300 +++ b/net/xfrm/xfrm_input.c
3301 @@ -212,7 +212,7 @@ resume:
3302 /* only the first xfrm gets the encap type */
3303 encap_type = 0;
3304
3305 - if (async && x->repl->check(x, skb, seq)) {
3306 + if (async && x->repl->recheck(x, skb, seq)) {
3307 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
3308 goto drop_unlock;
3309 }
3310 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3311 index a15d2a0..71c80c7 100644
3312 --- a/net/xfrm/xfrm_policy.c
3313 +++ b/net/xfrm/xfrm_policy.c
3314 @@ -1761,7 +1761,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
3315
3316 if (!afinfo) {
3317 dst_release(dst_orig);
3318 - ret = ERR_PTR(-EINVAL);
3319 + return ERR_PTR(-EINVAL);
3320 } else {
3321 ret = afinfo->blackhole_route(net, dst_orig);
3322 }
3323 diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
3324 index 2f6d11d..3efb07d 100644
3325 --- a/net/xfrm/xfrm_replay.c
3326 +++ b/net/xfrm/xfrm_replay.c
3327 @@ -420,6 +420,18 @@ err:
3328 return -EINVAL;
3329 }
3330
3331 +static int xfrm_replay_recheck_esn(struct xfrm_state *x,
3332 + struct sk_buff *skb, __be32 net_seq)
3333 +{
3334 + if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
3335 + htonl(xfrm_replay_seqhi(x, net_seq)))) {
3336 + x->stats.replay_window++;
3337 + return -EINVAL;
3338 + }
3339 +
3340 + return xfrm_replay_check_esn(x, skb, net_seq);
3341 +}
3342 +
3343 static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
3344 {
3345 unsigned int bitnr, nr, i;
3346 @@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
3347 static struct xfrm_replay xfrm_replay_legacy = {
3348 .advance = xfrm_replay_advance,
3349 .check = xfrm_replay_check,
3350 + .recheck = xfrm_replay_check,
3351 .notify = xfrm_replay_notify,
3352 .overflow = xfrm_replay_overflow,
3353 };
3354 @@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
3355 static struct xfrm_replay xfrm_replay_bmp = {
3356 .advance = xfrm_replay_advance_bmp,
3357 .check = xfrm_replay_check_bmp,
3358 + .recheck = xfrm_replay_check_bmp,
3359 .notify = xfrm_replay_notify_bmp,
3360 .overflow = xfrm_replay_overflow_bmp,
3361 };
3362 @@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
3363 static struct xfrm_replay xfrm_replay_esn = {
3364 .advance = xfrm_replay_advance_esn,
3365 .check = xfrm_replay_check_esn,
3366 + .recheck = xfrm_replay_recheck_esn,
3367 .notify = xfrm_replay_notify_bmp,
3368 .overflow = xfrm_replay_overflow_esn,
3369 };
3370 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
3371 index 7128dde..c8b903d 100644
3372 --- a/net/xfrm/xfrm_user.c
3373 +++ b/net/xfrm/xfrm_user.c
3374 @@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
3375 struct nlattr **attrs)
3376 {
3377 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
3378 + struct xfrm_replay_state_esn *rs;
3379
3380 - if ((p->flags & XFRM_STATE_ESN) && !rt)
3381 - return -EINVAL;
3382 + if (p->flags & XFRM_STATE_ESN) {
3383 + if (!rt)
3384 + return -EINVAL;
3385 +
3386 + rs = nla_data(rt);
3387 +
3388 + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
3389 + return -EINVAL;
3390 +
3391 + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
3392 + nla_len(rt) != sizeof(*rs))
3393 + return -EINVAL;
3394 + }
3395
3396 if (!rt)
3397 return 0;
3398 @@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
3399 struct nlattr *rp)
3400 {
3401 struct xfrm_replay_state_esn *up;
3402 + int ulen;
3403
3404 if (!replay_esn || !rp)
3405 return 0;
3406
3407 up = nla_data(rp);
3408 + ulen = xfrm_replay_state_esn_len(up);
3409
3410 - if (xfrm_replay_state_esn_len(replay_esn) !=
3411 - xfrm_replay_state_esn_len(up))
3412 + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
3413 return -EINVAL;
3414
3415 return 0;
3416 @@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
3417 struct nlattr *rta)
3418 {
3419 struct xfrm_replay_state_esn *p, *pp, *up;
3420 + int klen, ulen;
3421
3422 if (!rta)
3423 return 0;
3424
3425 up = nla_data(rta);
3426 + klen = xfrm_replay_state_esn_len(up);
3427 + ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
3428
3429 - p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
3430 + p = kzalloc(klen, GFP_KERNEL);
3431 if (!p)
3432 return -ENOMEM;
3433
3434 - pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
3435 + pp = kzalloc(klen, GFP_KERNEL);
3436 if (!pp) {
3437 kfree(p);
3438 return -ENOMEM;
3439 }
3440
3441 + memcpy(p, up, ulen);
3442 + memcpy(pp, up, ulen);
3443 +
3444 *replay_esn = p;
3445 *preplay_esn = pp;
3446
3447 @@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
3448 * somehow made shareable and move it to xfrm_state.c - JHS
3449 *
3450 */
3451 -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
3452 +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
3453 + int update_esn)
3454 {
3455 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
3456 - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
3457 + struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
3458 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
3459 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
3460 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
3461 @@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
3462 goto error;
3463
3464 /* override default values from above */
3465 - xfrm_update_ae_params(x, attrs);
3466 + xfrm_update_ae_params(x, attrs, 0);
3467
3468 return x;
3469
3470 @@ -689,6 +709,7 @@ out:
3471
3472 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
3473 {
3474 + memset(p, 0, sizeof(*p));
3475 memcpy(&p->id, &x->id, sizeof(p->id));
3476 memcpy(&p->sel, &x->sel, sizeof(p->sel));
3477 memcpy(&p->lft, &x->lft, sizeof(p->lft));
3478 @@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
3479 return -EMSGSIZE;
3480
3481 algo = nla_data(nla);
3482 - strcpy(algo->alg_name, auth->alg_name);
3483 + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
3484 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
3485 algo->alg_key_len = auth->alg_key_len;
3486
3487 @@ -862,6 +883,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
3488 {
3489 struct xfrm_dump_info info;
3490 struct sk_buff *skb;
3491 + int err;
3492
3493 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3494 if (!skb)
3495 @@ -872,9 +894,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
3496 info.nlmsg_seq = seq;
3497 info.nlmsg_flags = 0;
3498
3499 - if (dump_one_state(x, 0, &info)) {
3500 + err = dump_one_state(x, 0, &info);
3501 + if (err) {
3502 kfree_skb(skb);
3503 - return NULL;
3504 + return ERR_PTR(err);
3505 }
3506
3507 return skb;
3508 @@ -1297,6 +1320,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
3509
3510 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
3511 {
3512 + memset(p, 0, sizeof(*p));
3513 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
3514 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
3515 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
3516 @@ -1401,6 +1425,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
3517 struct xfrm_user_tmpl *up = &vec[i];
3518 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
3519
3520 + memset(up, 0, sizeof(*up));
3521 memcpy(&up->id, &kp->id, sizeof(up->id));
3522 up->family = kp->encap_family;
3523 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
3524 @@ -1529,6 +1554,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
3525 {
3526 struct xfrm_dump_info info;
3527 struct sk_buff *skb;
3528 + int err;
3529
3530 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3531 if (!skb)
3532 @@ -1539,9 +1565,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
3533 info.nlmsg_seq = seq;
3534 info.nlmsg_flags = 0;
3535
3536 - if (dump_one_policy(xp, dir, 0, &info) < 0) {
3537 + err = dump_one_policy(xp, dir, 0, &info);
3538 + if (err) {
3539 kfree_skb(skb);
3540 - return NULL;
3541 + return ERR_PTR(err);
3542 }
3543
3544 return skb;
3545 @@ -1794,7 +1821,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
3546 goto out;
3547
3548 spin_lock_bh(&x->lock);
3549 - xfrm_update_ae_params(x, attrs);
3550 + xfrm_update_ae_params(x, attrs, 1);
3551 spin_unlock_bh(&x->lock);
3552
3553 c.event = nlh->nlmsg_type;
3554 diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
3555 index 6a3ee98..978416d 100644
3556 --- a/scripts/Kbuild.include
3557 +++ b/scripts/Kbuild.include
3558 @@ -98,24 +98,24 @@ try-run = $(shell set -e; \
3559 # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
3560
3561 as-option = $(call try-run,\
3562 - $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
3563 + $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
3564
3565 # as-instr
3566 # Usage: cflags-y += $(call as-instr,instr,option1,option2)
3567
3568 as-instr = $(call try-run,\
3569 - printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
3570 + printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
3571
3572 # cc-option
3573 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
3574
3575 cc-option = $(call try-run,\
3576 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
3577 + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
3578
3579 # cc-option-yn
3580 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
3581 cc-option-yn = $(call try-run,\
3582 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
3583 + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
3584
3585 # cc-option-align
3586 # Prefix align with either -falign or -malign
3587 @@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
3588 # cc-disable-warning
3589 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
3590 cc-disable-warning = $(call try-run,\
3591 - $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
3592 + $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
3593
3594 # cc-version
3595 # Usage gcc-ver := $(call cc-version)
3596 @@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
3597 # cc-ldoption
3598 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
3599 cc-ldoption = $(call try-run,\
3600 - $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
3601 + $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
3602
3603 # ld-option
3604 # Usage: LDFLAGS += $(call ld-option, -X)
3605 @@ -209,7 +209,7 @@ endif
3606 # >$< substitution to preserve $ when reloading .cmd file
3607 # note: when using inline perl scripts [perl -e '...$$t=1;...']
3608 # in $(cmd_xxx) double $$ your perl vars
3609 -make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
3610 +make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
3611
3612 # Find any prerequisites that is newer than target or that does not exist.
3613 # PHONY targets skipped in both cases.
3614 diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
3615 index debecb5..7f2126d 100644
3616 --- a/scripts/gcc-version.sh
3617 +++ b/scripts/gcc-version.sh
3618 @@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
3619 exit 1
3620 fi
3621
3622 -MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
3623 -MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
3624 +MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
3625 +MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
3626 if [ "x$with_patchlevel" != "x" ] ; then
3627 - PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
3628 + PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
3629 printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
3630 else
3631 printf "%02d%02d\\n" $MAJOR $MINOR
3632 diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
3633 index 29493dc..12dbd0b 100644
3634 --- a/scripts/gcc-x86_32-has-stack-protector.sh
3635 +++ b/scripts/gcc-x86_32-has-stack-protector.sh
3636 @@ -1,6 +1,6 @@
3637 #!/bin/sh
3638
3639 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3640 +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3641 if [ "$?" -eq "0" ] ; then
3642 echo y
3643 else
3644 diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
3645 index afaec61..973e8c1 100644
3646 --- a/scripts/gcc-x86_64-has-stack-protector.sh
3647 +++ b/scripts/gcc-x86_64-has-stack-protector.sh
3648 @@ -1,6 +1,6 @@
3649 #!/bin/sh
3650
3651 -echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3652 +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
3653 if [ "$?" -eq "0" ] ; then
3654 echo y
3655 else
3656 diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
3657 index fa59cbf..854d9c7 100755
3658 --- a/scripts/kconfig/check.sh
3659 +++ b/scripts/kconfig/check.sh
3660 @@ -1,6 +1,6 @@
3661 #!/bin/sh
3662 # Needed for systems without gettext
3663 -$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
3664 +$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
3665 #include <libintl.h>
3666 int main()
3667 {
3668 diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
3669 index 82cc3a8..50df490 100644
3670 --- a/scripts/kconfig/lxdialog/check-lxdialog.sh
3671 +++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
3672 @@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15
3673
3674 # Check if we can link to ncurses
3675 check() {
3676 - $cc -xc - -o $tmp 2>/dev/null <<'EOF'
3677 + $cc -x c - -o $tmp 2>/dev/null <<'EOF'
3678 #include CURSES_LOC
3679 main() {}
3680 EOF
3681 diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
3682 index bccf07dd..3346f42 100644
3683 --- a/scripts/kconfig/streamline_config.pl
3684 +++ b/scripts/kconfig/streamline_config.pl
3685 @@ -463,6 +463,8 @@ while(<CIN>) {
3686 if (defined($configs{$1})) {
3687 if ($localyesconfig) {
3688 $setconfigs{$1} = 'y';
3689 + print "$1=y\n";
3690 + next;
3691 } else {
3692 $setconfigs{$1} = $2;
3693 }
3694 diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
3695 index ad079b6..bdc963e 100644
3696 --- a/sound/drivers/aloop.c
3697 +++ b/sound/drivers/aloop.c
3698 @@ -119,6 +119,7 @@ struct loopback_pcm {
3699 unsigned int period_size_frac;
3700 unsigned long last_jiffies;
3701 struct timer_list timer;
3702 + spinlock_t timer_lock;
3703 };
3704
3705 static struct platform_device *devices[SNDRV_CARDS];
3706 @@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
3707 unsigned long tick;
3708 unsigned int rate_shift = get_rate_shift(dpcm);
3709
3710 + spin_lock(&dpcm->timer_lock);
3711 if (rate_shift != dpcm->pcm_rate_shift) {
3712 dpcm->pcm_rate_shift = rate_shift;
3713 dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
3714 @@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
3715 tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
3716 dpcm->timer.expires = jiffies + tick;
3717 add_timer(&dpcm->timer);
3718 + spin_unlock(&dpcm->timer_lock);
3719 }
3720
3721 static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
3722 {
3723 + spin_lock(&dpcm->timer_lock);
3724 del_timer(&dpcm->timer);
3725 dpcm->timer.expires = 0;
3726 + spin_unlock(&dpcm->timer_lock);
3727 }
3728
3729 #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
3730 @@ -659,6 +664,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
3731 dpcm->substream = substream;
3732 setup_timer(&dpcm->timer, loopback_timer_function,
3733 (unsigned long)dpcm);
3734 + spin_lock_init(&dpcm->timer_lock);
3735
3736 cable = loopback->cables[substream->number][dev];
3737 if (!cable) {
3738 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3739 index 52e7a45..e7cb4bd 100644
3740 --- a/sound/pci/hda/patch_realtek.c
3741 +++ b/sound/pci/hda/patch_realtek.c
3742 @@ -6307,6 +6307,12 @@ static int patch_alc269(struct hda_codec *codec)
3743 if (err < 0)
3744 goto error;
3745
3746 + alc_pick_fixup(codec, alc269_fixup_models,
3747 + alc269_fixup_tbl, alc269_fixups);
3748 + alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
3749 +
3750 + alc_auto_parse_customize_define(codec);
3751 +
3752 if (codec->vendor_id == 0x10ec0269) {
3753 spec->codec_variant = ALC269_TYPE_ALC269VA;
3754 switch (alc_get_coef0(codec) & 0x00f0) {
3755 @@ -6331,12 +6337,6 @@ static int patch_alc269(struct hda_codec *codec)
3756 alc269_fill_coef(codec);
3757 }
3758
3759 - alc_pick_fixup(codec, alc269_fixup_models,
3760 - alc269_fixup_tbl, alc269_fixups);
3761 - alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
3762 -
3763 - alc_auto_parse_customize_define(codec);
3764 -
3765 /* automatic parse from the BIOS config */
3766 err = alc269_parse_auto_config(codec);
3767 if (err < 0)
3768 diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
3769 index b9567bc..757a52a 100644
3770 --- a/sound/soc/codecs/wm9712.c
3771 +++ b/sound/soc/codecs/wm9712.c
3772 @@ -146,7 +146,7 @@ SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
3773 SOC_SINGLE("Bass Volume", AC97_MASTER_TONE, 8, 15, 1),
3774 SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),
3775
3776 -SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
3777 +SOC_SINGLE("Capture Switch", AC97_REC_GAIN, 15, 1, 1),
3778 SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
3779 SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
3780 SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
3781 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3782 index ab23869..8a818a4 100644
3783 --- a/sound/usb/mixer.c
3784 +++ b/sound/usb/mixer.c
3785 @@ -1247,6 +1247,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
3786 /* disable non-functional volume control */
3787 master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
3788 break;
3789 + case USB_ID(0x1130, 0xf211):
3790 + snd_printk(KERN_INFO
3791 + "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
3792 + /* disable non-functional volume control */
3793 + channels = 0;
3794 + break;
3795 +
3796 }
3797 if (channels > 0)
3798 first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
3799 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
3800 index d89ab4c..63128cd 100644
3801 --- a/sound/usb/quirks-table.h
3802 +++ b/sound/usb/quirks-table.h
3803 @@ -2751,6 +2751,59 @@ YAMAHA_DEVICE(0x7010, "UB99"),
3804 }
3805 },
3806
3807 +/* Microsoft XboxLive Headset/Xbox Communicator */
3808 +{
3809 + USB_DEVICE(0x045e, 0x0283),
3810 + .bInterfaceClass = USB_CLASS_PER_INTERFACE,
3811 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
3812 + .vendor_name = "Microsoft",
3813 + .product_name = "XboxLive Headset/Xbox Communicator",
3814 + .ifnum = QUIRK_ANY_INTERFACE,
3815 + .type = QUIRK_COMPOSITE,
3816 + .data = &(const struct snd_usb_audio_quirk[]) {
3817 + {
3818 + /* playback */
3819 + .ifnum = 0,
3820 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
3821 + .data = &(const struct audioformat) {
3822 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
3823 + .channels = 1,
3824 + .iface = 0,
3825 + .altsetting = 0,
3826 + .altset_idx = 0,
3827 + .attributes = 0,
3828 + .endpoint = 0x04,
3829 + .ep_attr = 0x05,
3830 + .rates = SNDRV_PCM_RATE_CONTINUOUS,
3831 + .rate_min = 22050,
3832 + .rate_max = 22050
3833 + }
3834 + },
3835 + {
3836 + /* capture */
3837 + .ifnum = 1,
3838 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
3839 + .data = &(const struct audioformat) {
3840 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
3841 + .channels = 1,
3842 + .iface = 1,
3843 + .altsetting = 0,
3844 + .altset_idx = 0,
3845 + .attributes = 0,
3846 + .endpoint = 0x85,
3847 + .ep_attr = 0x05,
3848 + .rates = SNDRV_PCM_RATE_CONTINUOUS,
3849 + .rate_min = 16000,
3850 + .rate_max = 16000
3851 + }
3852 + },
3853 + {
3854 + .ifnum = -1
3855 + }
3856 + }
3857 + }
3858 +},
3859 +
3860 {
3861 /*
3862 * Some USB MIDI devices don't have an audio control interface,
3863 diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
3864 index f759f4f..fd2f922 100644
3865 --- a/tools/lguest/lguest.c
3866 +++ b/tools/lguest/lguest.c
3867 @@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type)
3868 dev->feature_len = 0;
3869 dev->num_vq = 0;
3870 dev->running = false;
3871 + dev->next = NULL;
3872
3873 /*
3874 * Append to device list. Prepending to a single-linked list is
3875 diff --git a/tools/perf/Makefile b/tools/perf/Makefile
3876 index 92271d3..c3dd3d4 100644
3877 --- a/tools/perf/Makefile
3878 +++ b/tools/perf/Makefile
3879 @@ -70,7 +70,7 @@ ifeq ($(ARCH),x86_64)
3880 ARCH := x86
3881 IS_X86_64 := 0
3882 ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
3883 - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
3884 + IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
3885 endif
3886 ifeq (${IS_X86_64}, 1)
3887 RAW_ARCH := x86_64
3888 diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
3889 index a93e06c..cf397bd 100644
3890 --- a/tools/power/cpupower/Makefile
3891 +++ b/tools/power/cpupower/Makefile
3892 @@ -111,7 +111,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo;
3893 export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
3894
3895 # check if compiler option is supported
3896 -cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
3897 +cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
3898
3899 # use '-Os' optimization if available, else use -O2
3900 OPTIMIZATION := $(call cc-supports,-Os,-O2)

  ViewVC Help
Powered by ViewVC 1.1.20