Contents of /genpatches-2.6/tags/2.6.32-15/1012_linux-2.6.32.13.patch

Revision 1735
Wed Aug 4 11:25:09 2010 UTC by mpagano
File size: 97640 byte(s)
2.6.32-15 release
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index 345c399..5f6aa11 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -241,7 +241,7 @@ and is between 256 and 4096 characters. It is defined in the file
6
7 acpi_sleep= [HW,ACPI] Sleep options
8 Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
9 - old_ordering, s4_nonvs }
10 + old_ordering, s4_nonvs, sci_force_enable }
11 See Documentation/power/video.txt for information on
12 s3_bios and s3_mode.
13 s3_beep is for debugging; it makes the PC's speaker beep
14 @@ -254,6 +254,9 @@ and is between 256 and 4096 characters. It is defined in the file
15 of _PTS is used by default).
16 s4_nonvs prevents the kernel from saving/restoring the
17 ACPI NVS memory during hibernation.
18 + sci_force_enable causes the kernel to set SCI_EN directly
19 + on resume from S1/S3 (which is against the ACPI spec,
20 + but some broken systems don't work without it).
21
22 acpi_use_timer_override [HW,ACPI]
23 Use timer override. For some broken Nvidia NF5 boards
24 diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
25 index 811743c..5f2ba8d 100644
26 --- a/arch/arm/mach-pxa/include/mach/colibri.h
27 +++ b/arch/arm/mach-pxa/include/mach/colibri.h
28 @@ -2,6 +2,7 @@
29 #define _COLIBRI_H_
30
31 #include <net/ax88796.h>
32 +#include <mach/mfp.h>
33
34 /*
35 * common settings for all modules
36 diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
37 index 7950ef4..743385d 100644
38 --- a/arch/mips/include/asm/mach-sibyte/war.h
39 +++ b/arch/mips/include/asm/mach-sibyte/war.h
40 @@ -16,7 +16,11 @@
41 #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
42 defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
43
44 -#define BCM1250_M3_WAR 1
45 +#ifndef __ASSEMBLY__
46 +extern int sb1250_m3_workaround_needed(void);
47 +#endif
48 +
49 +#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
50 #define SIBYTE_1956_WAR 1
51
52 #else
53 diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
54 index 0444da1..92da315 100644
55 --- a/arch/mips/sibyte/sb1250/setup.c
56 +++ b/arch/mips/sibyte/sb1250/setup.c
57 @@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
58 return ret;
59 }
60
61 +int sb1250_m3_workaround_needed(void)
62 +{
63 + switch (soc_type) {
64 + case K_SYS_SOC_TYPE_BCM1250:
65 + case K_SYS_SOC_TYPE_BCM1250_ALT:
66 + case K_SYS_SOC_TYPE_BCM1250_ALT2:
67 + case K_SYS_SOC_TYPE_BCM1125:
68 + case K_SYS_SOC_TYPE_BCM1125H:
69 + return soc_pass < K_SYS_REVISION_BCM1250_C0;
70 +
71 + default:
72 + return 0;
73 + }
74 +}
75 +
76 static int __init setup_bcm112x(void)
77 {
78 int ret = 0;
79 diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
80 index f7064ab..9e74bfe 100644
81 --- a/arch/parisc/kernel/pci.c
82 +++ b/arch/parisc/kernel/pci.c
83 @@ -18,7 +18,6 @@
84
85 #include <asm/io.h>
86 #include <asm/system.h>
87 -#include <asm/cache.h> /* for L1_CACHE_BYTES */
88 #include <asm/superio.h>
89
90 #define DEBUG_RESOURCES 0
91 @@ -123,6 +122,10 @@ static int __init pcibios_init(void)
92 } else {
93 printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
94 }
95 +
96 + /* Set the CLS for PCI as early as possible. */
97 + pci_cache_line_size = pci_dfl_cache_line_size;
98 +
99 return 0;
100 }
101
102 @@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
103 ** upper byte is PCI_LATENCY_TIMER.
104 */
105 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
106 - (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
107 + (0x80 << 8) | pci_cache_line_size);
108 }
109
110
111 diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
112 index dc93e95..45f4e61 100644
113 --- a/arch/powerpc/mm/fsl_booke_mmu.c
114 +++ b/arch/powerpc/mm/fsl_booke_mmu.c
115 @@ -131,15 +131,10 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys,
116 TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
117 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
118
119 -#ifndef CONFIG_KGDB /* want user access for breakpoints */
120 if (flags & _PAGE_USER) {
121 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
122 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
123 }
124 -#else
125 - TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
126 - TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
127 -#endif
128
129 tlbcam_addrs[index].start = virt;
130 tlbcam_addrs[index].limit = virt + size - 1;
131 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
132 index 4fdb669..fbc161d 100644
133 --- a/arch/x86/Kconfig
134 +++ b/arch/x86/Kconfig
135 @@ -622,7 +622,7 @@ config GART_IOMMU
136 bool "GART IOMMU support" if EMBEDDED
137 default y
138 select SWIOTLB
139 - depends on X86_64 && PCI
140 + depends on X86_64 && PCI && K8_NB
141 ---help---
142 Support for full DMA access of devices with 32bit memory access only
143 on systems with more than 3GB. This is usually needed for USB,
144 @@ -1236,6 +1236,11 @@ config ARCH_MEMORY_PROBE
145 def_bool X86_64
146 depends on MEMORY_HOTPLUG
147
148 +config ILLEGAL_POINTER_VALUE
149 + hex
150 + default 0 if X86_32
151 + default 0xdead000000000000 if X86_64
152 +
153 source "mm/Kconfig"
154
155 config HIGHPTE
156 @@ -2022,7 +2027,7 @@ endif # X86_32
157
158 config K8_NB
159 def_bool y
160 - depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
161 + depends on CPU_SUP_AMD && PCI
162
163 source "drivers/pcmcia/Kconfig"
164
165 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
166 index ca93638..8b85734 100644
167 --- a/arch/x86/kernel/acpi/sleep.c
168 +++ b/arch/x86/kernel/acpi/sleep.c
169 @@ -162,6 +162,8 @@ static int __init acpi_sleep_setup(char *str)
170 #endif
171 if (strncmp(str, "old_ordering", 12) == 0)
172 acpi_old_suspend_ordering();
173 + if (strncmp(str, "sci_force_enable", 16) == 0)
174 + acpi_set_sci_en_on_resume();
175 str = strchr(str, ',');
176 if (str != NULL)
177 str += strspn(str, ", \t");
178 diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
179 index ab1cd30..5e92606 100644
180 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
181 +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
182 @@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
183 powernow_table[i].index = index;
184
185 /* Frequency may be rounded for these */
186 - if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
187 + if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
188 + || boot_cpu_data.x86 == 0x11) {
189 powernow_table[i].frequency =
190 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
191 } else
192 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
193 index 2a94890..2f12d6d 100644
194 --- a/arch/x86/kernel/cpu/intel.c
195 +++ b/arch/x86/kernel/cpu/intel.c
196 @@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
197 (c->x86 == 0x6 && c->x86_model >= 0x0e))
198 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
199
200 + /*
201 + * Atom erratum AAE44/AAF40/AAG38/AAH41:
202 + *
203 + * A race condition between speculative fetches and invalidating
204 + * a large page. This is worked around in microcode, but we
205 + * need the microcode to have already been loaded... so if it is
206 + * not, recommend a BIOS update and disable large pages.
207 + */
208 + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
209 + u32 ucode, junk;
210 +
211 + wrmsr(MSR_IA32_UCODE_REV, 0, 0);
212 + sync_core();
213 + rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
214 +
215 + if (ucode < 0x20e) {
216 + printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
217 + clear_cpu_cap(c, X86_FEATURE_PSE);
218 + }
219 + }
220 +
221 #ifdef CONFIG_X86_64
222 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
223 #else
224 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
225 index be2d432..b25b229 100644
226 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
227 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
228 @@ -647,18 +647,19 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
229 {
230 struct _cpuid4_info *this_leaf, *sibling_leaf;
231 unsigned long num_threads_sharing;
232 - int index_msb, i;
233 + int index_msb, i, sibling;
234 struct cpuinfo_x86 *c = &cpu_data(cpu);
235
236 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
237 - struct cpuinfo_x86 *d;
238 - for_each_online_cpu(i) {
239 + for_each_cpu(i, c->llc_shared_map) {
240 if (!per_cpu(cpuid4_info, i))
241 continue;
242 - d = &cpu_data(i);
243 this_leaf = CPUID4_INFO_IDX(i, index);
244 - cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
245 - d->llc_shared_map);
246 + for_each_cpu(sibling, c->llc_shared_map) {
247 + if (!cpu_online(sibling))
248 + continue;
249 + set_bit(sibling, this_leaf->shared_cpu_map);
250 + }
251 }
252 return;
253 }
254 diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
255 index cbc4332..9b89546 100644
256 --- a/arch/x86/kernel/k8.c
257 +++ b/arch/x86/kernel/k8.c
258 @@ -121,3 +121,17 @@ void k8_flush_garts(void)
259 }
260 EXPORT_SYMBOL_GPL(k8_flush_garts);
261
262 +static __init int init_k8_nbs(void)
263 +{
264 + int err = 0;
265 +
266 + err = cache_k8_northbridges();
267 +
268 + if (err < 0)
269 + printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
270 +
271 + return err;
272 +}
273 +
274 +/* This has to go after the PCI subsystem */
275 +fs_initcall(init_k8_nbs);
276 diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
277 index c245b6a..1c76691 100644
278 --- a/arch/x86/kernel/pci-gart_64.c
279 +++ b/arch/x86/kernel/pci-gart_64.c
280 @@ -720,7 +720,7 @@ void __init gart_iommu_init(void)
281 unsigned long scratch;
282 long i;
283
284 - if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
285 + if (num_k8_northbridges == 0)
286 return;
287
288 #ifndef CONFIG_AGP_AMD64
289 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
290 index 6eabe90..868fdb4 100644
291 --- a/arch/x86/kernel/process_64.c
292 +++ b/arch/x86/kernel/process_64.c
293 @@ -295,11 +295,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
294
295 set_tsk_thread_flag(p, TIF_FORK);
296
297 - p->thread.fs = me->thread.fs;
298 - p->thread.gs = me->thread.gs;
299 -
300 savesegment(gs, p->thread.gsindex);
301 + p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
302 savesegment(fs, p->thread.fsindex);
303 + p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
304 savesegment(es, p->thread.es);
305 savesegment(ds, p->thread.ds);
306
307 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
308 index 389fc55..2782509 100644
309 --- a/arch/x86/kvm/x86.c
310 +++ b/arch/x86/kvm/x86.c
311 @@ -4155,18 +4155,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
312 return kvm_seg.selector;
313 }
314
315 -static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
316 - u16 selector,
317 - struct kvm_segment *kvm_seg)
318 -{
319 - struct desc_struct seg_desc;
320 -
321 - if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
322 - return 1;
323 - seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
324 - return 0;
325 -}
326 -
327 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
328 {
329 struct kvm_segment segvar = {
330 diff --git a/block/blk-timeout.c b/block/blk-timeout.c
331 index 1ba7e0a..4f0c06c 100644
332 --- a/block/blk-timeout.c
333 +++ b/block/blk-timeout.c
334 @@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
335 struct request_queue *q = (struct request_queue *) data;
336 unsigned long flags, next = 0;
337 struct request *rq, *tmp;
338 + int next_set = 0;
339
340 spin_lock_irqsave(q->queue_lock, flags);
341
342 @@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
343 if (blk_mark_rq_complete(rq))
344 continue;
345 blk_rq_timed_out(rq);
346 - } else if (!next || time_after(next, rq->deadline))
347 + } else if (!next_set || time_after(next, rq->deadline)) {
348 next = rq->deadline;
349 + next_set = 1;
350 + }
351 }
352
353 - /*
354 - * next can never be 0 here with the list non-empty, since we always
355 - * bump ->deadline to 1 so we can detect if the timer was ever added
356 - * or not. See comment in blk_add_timer()
357 - */
358 - if (next)
359 + if (next_set)
360 mod_timer(&q->timeout, round_jiffies_up(next));
361
362 spin_unlock_irqrestore(q->queue_lock, flags);
363 diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
364 index 943f2ab..ce038d8 100644
365 --- a/crypto/async_tx/async_raid6_recov.c
366 +++ b/crypto/async_tx/async_raid6_recov.c
367 @@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
368 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
369 struct page **blocks, struct async_submit_ctl *submit)
370 {
371 + void *scribble = submit->scribble;
372 int non_zero_srcs, i;
373
374 BUG_ON(faila == failb);
375 @@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
376
377 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
378
379 - /* we need to preserve the contents of 'blocks' for the async
380 - * case, so punt to synchronous if a scribble buffer is not available
381 + /* if a dma resource is not available or a scribble buffer is not
382 + * available punt to the synchronous path. In the 'dma not
383 + * available' case be sure to use the scribble buffer to
384 + * preserve the content of 'blocks' as the caller intended.
385 */
386 - if (!submit->scribble) {
387 - void **ptrs = (void **) blocks;
388 + if (!async_dma_find_channel(DMA_PQ) || !scribble) {
389 + void **ptrs = scribble ? scribble : (void **) blocks;
390
391 async_tx_quiesce(&submit->depend_tx);
392 for (i = 0; i < disks; i++)
393 @@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
394
395 pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
396
397 - /* we need to preserve the contents of 'blocks' for the async
398 - * case, so punt to synchronous if a scribble buffer is not available
399 + /* if a dma resource is not available or a scribble buffer is not
400 + * available punt to the synchronous path. In the 'dma not
401 + * available' case be sure to use the scribble buffer to
402 + * preserve the content of 'blocks' as the caller intended.
403 */
404 - if (!scribble) {
405 - void **ptrs = (void **) blocks;
406 + if (!async_dma_find_channel(DMA_PQ) || !scribble) {
407 + void **ptrs = scribble ? scribble : (void **) blocks;
408
409 async_tx_quiesce(&submit->depend_tx);
410 for (i = 0; i < disks; i++)
411 diff --git a/drivers/Makefile b/drivers/Makefile
412 index 6ee53c7..8b0b948 100644
413 --- a/drivers/Makefile
414 +++ b/drivers/Makefile
415 @@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/
416 obj-$(CONFIG_PNP) += pnp/
417 obj-$(CONFIG_ARM_AMBA) += amba/
418
419 +obj-$(CONFIG_VIRTIO) += virtio/
420 obj-$(CONFIG_XEN) += xen/
421
422 # regulators early, since some subsystems rely on them to initialize
423 @@ -106,7 +107,6 @@ obj-$(CONFIG_HID) += hid/
424 obj-$(CONFIG_PPC_PS3) += ps3/
425 obj-$(CONFIG_OF) += of/
426 obj-$(CONFIG_SSB) += ssb/
427 -obj-$(CONFIG_VIRTIO) += virtio/
428 obj-$(CONFIG_VLYNQ) += vlynq/
429 obj-$(CONFIG_STAGING) += staging/
430 obj-y += platform/
431 diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
432 index 2ef7030..c216062 100644
433 --- a/drivers/acpi/power_meter.c
434 +++ b/drivers/acpi/power_meter.c
435 @@ -34,7 +34,7 @@
436 #define ACPI_POWER_METER_NAME "power_meter"
437 ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
438 #define ACPI_POWER_METER_DEVICE_NAME "Power Meter"
439 -#define ACPI_POWER_METER_CLASS "power_meter_resource"
440 +#define ACPI_POWER_METER_CLASS "pwr_meter_resource"
441
442 #define NUM_SENSORS 17
443
444 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
445 index 5f2c379..7c85265 100644
446 --- a/drivers/acpi/sleep.c
447 +++ b/drivers/acpi/sleep.c
448 @@ -81,6 +81,23 @@ static int acpi_sleep_prepare(u32 acpi_state)
449 #ifdef CONFIG_ACPI_SLEEP
450 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
451 /*
452 + * According to the ACPI specification the BIOS should make sure that ACPI is
453 + * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
454 + * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
455 + * on such systems during resume. Unfortunately that doesn't help in
456 + * particularly pathological cases in which SCI_EN has to be set directly on
457 + * resume, although the specification states very clearly that this flag is
458 + * owned by the hardware. The set_sci_en_on_resume variable will be set in such
459 + * cases.
460 + */
461 +static bool set_sci_en_on_resume;
462 +
463 +void __init acpi_set_sci_en_on_resume(void)
464 +{
465 + set_sci_en_on_resume = true;
466 +}
467 +
468 +/*
469 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
470 * user to request that behavior by using the 'acpi_old_suspend_ordering'
471 * kernel command line option that causes the following variable to be set.
472 @@ -170,18 +187,6 @@ static void acpi_pm_end(void)
473 #endif /* CONFIG_ACPI_SLEEP */
474
475 #ifdef CONFIG_SUSPEND
476 -/*
477 - * According to the ACPI specification the BIOS should make sure that ACPI is
478 - * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
479 - * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
480 - * on such systems during resume. Unfortunately that doesn't help in
481 - * particularly pathological cases in which SCI_EN has to be set directly on
482 - * resume, although the specification states very clearly that this flag is
483 - * owned by the hardware. The set_sci_en_on_resume variable will be set in such
484 - * cases.
485 - */
486 -static bool set_sci_en_on_resume;
487 -
488 extern void do_suspend_lowlevel(void);
489
490 static u32 acpi_suspend_states[] = {
491 @@ -445,6 +450,126 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
492 },
493 },
494 {
495 + .callback = init_set_sci_en_on_resume,
496 + .ident = "Lenovo ThinkPad T410",
497 + .matches = {
498 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
499 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
500 + },
501 + },
502 + {
503 + .callback = init_set_sci_en_on_resume,
504 + .ident = "Lenovo ThinkPad T510",
505 + .matches = {
506 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
507 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
508 + },
509 + },
510 + {
511 + .callback = init_set_sci_en_on_resume,
512 + .ident = "Lenovo ThinkPad W510",
513 + .matches = {
514 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
515 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
516 + },
517 + },
518 + {
519 + .callback = init_set_sci_en_on_resume,
520 + .ident = "Lenovo ThinkPad X201",
521 + .matches = {
522 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
523 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
524 + },
525 + },
526 + {
527 + .callback = init_set_sci_en_on_resume,
528 + .ident = "Lenovo ThinkPad X201",
529 + .matches = {
530 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
531 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
532 + },
533 + },
534 + {
535 + .callback = init_set_sci_en_on_resume,
536 + .ident = "Lenovo ThinkPad T410",
537 + .matches = {
538 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
539 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
540 + },
541 + },
542 + {
543 + .callback = init_set_sci_en_on_resume,
544 + .ident = "Lenovo ThinkPad T510",
545 + .matches = {
546 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
547 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
548 + },
549 + },
550 + {
551 + .callback = init_set_sci_en_on_resume,
552 + .ident = "Lenovo ThinkPad W510",
553 + .matches = {
554 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
555 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
556 + },
557 + },
558 + {
559 + .callback = init_set_sci_en_on_resume,
560 + .ident = "Lenovo ThinkPad X201",
561 + .matches = {
562 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
563 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
564 + },
565 + },
566 + {
567 + .callback = init_set_sci_en_on_resume,
568 + .ident = "Lenovo ThinkPad X201",
569 + .matches = {
570 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
571 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
572 + },
573 + },
574 + {
575 + .callback = init_set_sci_en_on_resume,
576 + .ident = "Lenovo ThinkPad T410",
577 + .matches = {
578 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
579 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
580 + },
581 + },
582 + {
583 + .callback = init_set_sci_en_on_resume,
584 + .ident = "Lenovo ThinkPad T510",
585 + .matches = {
586 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
587 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
588 + },
589 + },
590 + {
591 + .callback = init_set_sci_en_on_resume,
592 + .ident = "Lenovo ThinkPad W510",
593 + .matches = {
594 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
595 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
596 + },
597 + },
598 + {
599 + .callback = init_set_sci_en_on_resume,
600 + .ident = "Lenovo ThinkPad X201",
601 + .matches = {
602 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
603 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
604 + },
605 + },
606 + {
607 + .callback = init_set_sci_en_on_resume,
608 + .ident = "Lenovo ThinkPad X201",
609 + .matches = {
610 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
611 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
612 + },
613 + },
614 + {
615 .callback = init_old_suspend_ordering,
616 .ident = "Panasonic CF51-2L",
617 .matches = {
618 @@ -453,6 +578,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
619 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
620 },
621 },
622 + {
623 + .callback = init_set_sci_en_on_resume,
624 + .ident = "Dell Studio 1558",
625 + .matches = {
626 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
627 + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
628 + },
629 + },
630 + {
631 + .callback = init_set_sci_en_on_resume,
632 + .ident = "Dell Studio 1557",
633 + .matches = {
634 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
635 + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
636 + },
637 + },
638 + {
639 + .callback = init_set_sci_en_on_resume,
640 + .ident = "Dell Studio 1555",
641 + .matches = {
642 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
643 + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
644 + },
645 + },
646 {},
647 };
648 #endif /* CONFIG_SUSPEND */
649 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
650 index 7d8d3c3..e30b9e7 100644
651 --- a/drivers/ata/libata-eh.c
652 +++ b/drivers/ata/libata-eh.c
653 @@ -870,6 +870,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
654 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
655 {
656 struct ata_port *ap = qc->ap;
657 + struct request_queue *q = qc->scsicmd->device->request_queue;
658 + unsigned long flags;
659
660 WARN_ON(!ap->ops->error_handler);
661
662 @@ -881,7 +883,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
663 * Note that ATA_QCFLAG_FAILED is unconditionally set after
664 * this function completes.
665 */
666 + spin_lock_irqsave(q->queue_lock, flags);
667 blk_abort_request(qc->scsicmd->request);
668 + spin_unlock_irqrestore(q->queue_lock, flags);
669 }
670
671 /**
672 @@ -1615,6 +1619,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
673 }
674
675 /* okay, this error is ours */
676 + memset(&tf, 0, sizeof(tf));
677 rc = ata_eh_read_log_10h(dev, &tag, &tf);
678 if (rc) {
679 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
680 diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
681 index ccb1fa8..70d56b6 100644
682 --- a/drivers/char/agp/Kconfig
683 +++ b/drivers/char/agp/Kconfig
684 @@ -57,7 +57,7 @@ config AGP_AMD
685
686 config AGP_AMD64
687 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
688 - depends on AGP && X86
689 + depends on AGP && X86 && K8_NB
690 default y if GART_IOMMU
691 help
692 This option gives you AGP support for the GLX component of
693 diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
694 index 73655ae..f8e57c6 100644
695 --- a/drivers/cpuidle/governors/menu.c
696 +++ b/drivers/cpuidle/governors/menu.c
697 @@ -101,7 +101,6 @@ struct menu_device {
698
699 unsigned int expected_us;
700 u64 predicted_us;
701 - unsigned int measured_us;
702 unsigned int exit_us;
703 unsigned int bucket;
704 u64 correction_factor[BUCKETS];
705 @@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_device *dev)
706 int i;
707 int multiplier;
708
709 - data->last_state_idx = 0;
710 - data->exit_us = 0;
711 -
712 if (data->needs_update) {
713 menu_update(dev);
714 data->needs_update = 0;
715 }
716
717 + data->last_state_idx = 0;
718 + data->exit_us = 0;
719 +
720 /* Special case when user has set very strict latency requirement */
721 if (unlikely(latency_req == 0))
722 return 0;
723 @@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev)
724 new_factor = data->correction_factor[data->bucket]
725 * (DECAY - 1) / DECAY;
726
727 - if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
728 + if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
729 new_factor += RESOLUTION * measured_us / data->expected_us;
730 else
731 /*
732 diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
733 index ac2aea8..1999807 100644
734 --- a/drivers/edac/edac_mce_amd.c
735 +++ b/drivers/edac/edac_mce_amd.c
736 @@ -295,7 +295,6 @@ wrong_ls_mce:
737 void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
738 {
739 u32 ec = ERROR_CODE(regs->nbsl);
740 - u32 xec = EXT_ERROR_CODE(regs->nbsl);
741
742 if (!handle_errors)
743 return;
744 @@ -319,7 +318,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
745 pr_cont("\n");
746 }
747
748 - pr_emerg("%s.\n", EXT_ERR_MSG(xec));
749 + pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
750
751 if (BUS_ERROR(ec) && nb_bus_decoder)
752 nb_bus_decoder(node_id, regs);
753 @@ -382,7 +381,7 @@ static void amd_decode_mce(struct mce *m)
754 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
755
756 /* do the two bits[14:13] together */
757 - ecc = m->status & (3ULL << 45);
758 + ecc = (m->status >> 45) & 0x3;
759 if (ecc)
760 pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
761
762 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
763 index 1e9c66a..aa8a4e9 100644
764 --- a/drivers/gpu/drm/i915/i915_gem.c
765 +++ b/drivers/gpu/drm/i915/i915_gem.c
766 @@ -2334,6 +2334,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
767 pitch_val = obj_priv->stride / tile_width;
768 pitch_val = ffs(pitch_val) - 1;
769
770 + if (obj_priv->tiling_mode == I915_TILING_Y &&
771 + HAS_128_BYTE_Y_TILING(dev))
772 + WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
773 + else
774 + WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
775 +
776 val = obj_priv->gtt_offset;
777 if (obj_priv->tiling_mode == I915_TILING_Y)
778 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
779 diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
780 index 200e398..fb2811c 100644
781 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
782 +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
783 @@ -353,21 +353,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
784 * reg, so dont bother to check the size */
785 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
786 return false;
787 - } else if (IS_I9XX(dev)) {
788 - uint32_t pitch_val = ffs(stride / tile_width) - 1;
789 -
790 - /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
791 - * instead of 4 (2KB) on 945s.
792 - */
793 - if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
794 - size > (I830_FENCE_MAX_SIZE_VAL << 20))
795 + } else if (IS_I9XX(dev) || IS_I8XX(dev)) {
796 + if (stride > 8192)
797 return false;
798 - } else {
799 - uint32_t pitch_val = ffs(stride / tile_width) - 1;
800
801 - if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
802 - size > (I830_FENCE_MAX_SIZE_VAL << 19))
803 - return false;
804 + if (IS_I9XX(dev)) {
805 + if (size > I830_FENCE_MAX_SIZE_VAL << 20)
806 + return false;
807 + } else {
808 + if (size > I830_FENCE_MAX_SIZE_VAL << 19)
809 + return false;
810 + }
811 }
812
813 /* 965+ just needs multiples of tile width */
814 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
815 index cc9b49a..73e7ec0 100644
816 --- a/drivers/gpu/drm/i915/i915_reg.h
817 +++ b/drivers/gpu/drm/i915/i915_reg.h
818 @@ -214,7 +214,7 @@
819 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
820 #define I830_FENCE_PITCH_SHIFT 4
821 #define I830_FENCE_REG_VALID (1<<0)
822 -#define I915_FENCE_MAX_PITCH_VAL 0x10
823 +#define I915_FENCE_MAX_PITCH_VAL 4
824 #define I830_FENCE_MAX_PITCH_VAL 6
825 #define I830_FENCE_MAX_SIZE_VAL (1<<8)
826
827 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
828 index 3bf7b0a..8066db7 100644
829 --- a/drivers/i2c/i2c-core.c
830 +++ b/drivers/i2c/i2c-core.c
831 @@ -1202,14 +1202,24 @@ static int i2c_detect_address(struct i2c_client *temp_client, int kind,
832
833 /* Make sure there is something at this address, unless forced */
834 if (kind < 0) {
835 - if (i2c_smbus_xfer(adapter, addr, 0, 0, 0,
836 - I2C_SMBUS_QUICK, NULL) < 0)
837 - return 0;
838 -
839 - /* prevent 24RF08 corruption */
840 - if ((addr & ~0x0f) == 0x50)
841 - i2c_smbus_xfer(adapter, addr, 0, 0, 0,
842 - I2C_SMBUS_QUICK, NULL);
843 + if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
844 + /* Special probe for FSC hwmon chips */
845 + union i2c_smbus_data dummy;
846 +
847 + if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
848 + I2C_SMBUS_BYTE_DATA, &dummy) < 0)
849 + return 0;
850 + } else {
851 + if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
852 + I2C_SMBUS_QUICK, NULL) < 0)
853 + return 0;
854 +
855 + /* Prevent 24RF08 corruption */
856 + if ((addr & ~0x0f) == 0x50)
857 + i2c_smbus_xfer(adapter, addr, 0,
858 + I2C_SMBUS_WRITE, 0,
859 + I2C_SMBUS_QUICK, NULL);
860 + }
861 }
862
863 /* Finally call the custom detection function */
864 diff --git a/drivers/md/md.c b/drivers/md/md.c
865 index 08f7471..f2e719d 100644
866 --- a/drivers/md/md.c
867 +++ b/drivers/md/md.c
868 @@ -2011,12 +2011,18 @@ repeat:
869 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
870 /* .. if the array isn't clean, an 'even' event must also go
871 * to spares. */
872 - if ((mddev->events&1)==0)
873 + if ((mddev->events&1)==0) {
874 nospares = 0;
875 + sync_req = 2; /* force a second update to get the
876 + * even/odd in sync */
877 + }
878 } else {
879 /* otherwise an 'odd' event must go to spares */
880 - if ((mddev->events&1))
881 + if ((mddev->events&1)) {
882 nospares = 0;
883 + sync_req = 2; /* force a second update to get the
884 + * even/odd in sync */
885 + }
886 }
887 }
888
889 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
890 index 431b9b2..2394973 100644
891 --- a/drivers/md/raid5.c
892 +++ b/drivers/md/raid5.c
893 @@ -1526,7 +1526,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
894
895 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
896 atomic_inc(&rdev->read_errors);
897 - if (conf->mddev->degraded)
898 + if (conf->mddev->degraded >= conf->max_degraded)
899 printk_rl(KERN_WARNING
900 "raid5:%s: read error not correctable "
901 "(sector %llu on %s).\n",
902 @@ -1649,8 +1649,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
903 int previous, int *dd_idx,
904 struct stripe_head *sh)
905 {
906 - long stripe;
907 - unsigned long chunk_number;
908 + sector_t stripe, stripe2;
909 + sector_t chunk_number;
910 unsigned int chunk_offset;
911 int pd_idx, qd_idx;
912 int ddf_layout = 0;
913 @@ -1670,18 +1670,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
914 */
915 chunk_offset = sector_div(r_sector, sectors_per_chunk);
916 chunk_number = r_sector;
917 - BUG_ON(r_sector != chunk_number);
918
919 /*
920 * Compute the stripe number
921 */
922 - stripe = chunk_number / data_disks;
923 -
924 - /*
925 - * Compute the data disk and parity disk indexes inside the stripe
926 - */
927 - *dd_idx = chunk_number % data_disks;
928 -
929 + stripe = chunk_number;
930 + *dd_idx = sector_div(stripe, data_disks);
931 + stripe2 = stripe;
932 /*
933 * Select the parity disk based on the user selected algorithm.
934 */
935 @@ -1693,21 +1688,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
936 case 5:
937 switch (algorithm) {
938 case ALGORITHM_LEFT_ASYMMETRIC:
939 - pd_idx = data_disks - stripe % raid_disks;
940 + pd_idx = data_disks - sector_div(stripe2, raid_disks);
941 if (*dd_idx >= pd_idx)
942 (*dd_idx)++;
943 break;
944 case ALGORITHM_RIGHT_ASYMMETRIC:
945 - pd_idx = stripe % raid_disks;
946 + pd_idx = sector_div(stripe2, raid_disks);
947 if (*dd_idx >= pd_idx)
948 (*dd_idx)++;
949 break;
950 case ALGORITHM_LEFT_SYMMETRIC:
951 - pd_idx = data_disks - stripe % raid_disks;
952 + pd_idx = data_disks - sector_div(stripe2, raid_disks);
953 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
954 break;
955 case ALGORITHM_RIGHT_SYMMETRIC:
956 - pd_idx = stripe % raid_disks;
957 + pd_idx = sector_div(stripe2, raid_disks);
958 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
959 break;
960 case ALGORITHM_PARITY_0:
961 @@ -1727,7 +1722,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
962
963 switch (algorithm) {
964 case ALGORITHM_LEFT_ASYMMETRIC:
965 - pd_idx = raid_disks - 1 - (stripe % raid_disks);
966 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
967 qd_idx = pd_idx + 1;
968 if (pd_idx == raid_disks-1) {
969 (*dd_idx)++; /* Q D D D P */
970 @@ -1736,7 +1731,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
971 (*dd_idx) += 2; /* D D P Q D */
972 break;
973 case ALGORITHM_RIGHT_ASYMMETRIC:
974 - pd_idx = stripe % raid_disks;
975 + pd_idx = sector_div(stripe2, raid_disks);
976 qd_idx = pd_idx + 1;
977 if (pd_idx == raid_disks-1) {
978 (*dd_idx)++; /* Q D D D P */
979 @@ -1745,12 +1740,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
980 (*dd_idx) += 2; /* D D P Q D */
981 break;
982 case ALGORITHM_LEFT_SYMMETRIC:
983 - pd_idx = raid_disks - 1 - (stripe % raid_disks);
984 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
985 qd_idx = (pd_idx + 1) % raid_disks;
986 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
987 break;
988 case ALGORITHM_RIGHT_SYMMETRIC:
989 - pd_idx = stripe % raid_disks;
990 + pd_idx = sector_div(stripe2, raid_disks);
991 qd_idx = (pd_idx + 1) % raid_disks;
992 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
993 break;
994 @@ -1769,7 +1764,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
995 /* Exactly the same as RIGHT_ASYMMETRIC, but or
996 * of blocks for computing Q is different.
997 */
998 - pd_idx = stripe % raid_disks;
999 + pd_idx = sector_div(stripe2, raid_disks);
1000 qd_idx = pd_idx + 1;
1001 if (pd_idx == raid_disks-1) {
1002 (*dd_idx)++; /* Q D D D P */
1003 @@ -1784,7 +1779,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1004 * D D D P Q rather than
1005 * Q D D D P
1006 */
1007 - pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1008 + stripe2 += 1;
1009 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1010 qd_idx = pd_idx + 1;
1011 if (pd_idx == raid_disks-1) {
1012 (*dd_idx)++; /* Q D D D P */
1013 @@ -1796,7 +1792,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1014
1015 case ALGORITHM_ROTATING_N_CONTINUE:
1016 /* Same as left_symmetric but Q is before P */
1017 - pd_idx = raid_disks - 1 - (stripe % raid_disks);
1018 + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1019 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1020 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1021 ddf_layout = 1;
1022 @@ -1804,27 +1800,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1023
1024 case ALGORITHM_LEFT_ASYMMETRIC_6:
1025 /* RAID5 left_asymmetric, with Q on last device */
1026 - pd_idx = data_disks - stripe % (raid_disks-1);
1027 + pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1028 if (*dd_idx >= pd_idx)
1029 (*dd_idx)++;
1030 qd_idx = raid_disks - 1;
1031 break;
1032
1033 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1034 - pd_idx = stripe % (raid_disks-1);
1035 + pd_idx = sector_div(stripe2, raid_disks-1);
1036 if (*dd_idx >= pd_idx)
1037 (*dd_idx)++;
1038 qd_idx = raid_disks - 1;
1039 break;
1040
1041 case ALGORITHM_LEFT_SYMMETRIC_6:
1042 - pd_idx = data_disks - stripe % (raid_disks-1);
1043 + pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1044 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1045 qd_idx = raid_disks - 1;
1046 break;
1047
1048 case ALGORITHM_RIGHT_SYMMETRIC_6:
1049 - pd_idx = stripe % (raid_disks-1);
1050 + pd_idx = sector_div(stripe2, raid_disks-1);
1051 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1052 qd_idx = raid_disks - 1;
1053 break;
1054 @@ -1869,14 +1865,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1055 : conf->algorithm;
1056 sector_t stripe;
1057 int chunk_offset;
1058 - int chunk_number, dummy1, dd_idx = i;
1059 + sector_t chunk_number;
1060 + int dummy1, dd_idx = i;
1061 sector_t r_sector;
1062 struct stripe_head sh2;
1063
1064
1065 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1066 stripe = new_sector;
1067 - BUG_ON(new_sector != stripe);
1068
1069 if (i == sh->pd_idx)
1070 return 0;
1071 @@ -1969,7 +1965,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1072 }
1073
1074 chunk_number = stripe * data_disks + i;
1075 - r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1076 + r_sector = chunk_number * sectors_per_chunk + chunk_offset;
1077
1078 check = raid5_compute_sector(conf, r_sector,
1079 previous, &dummy1, &sh2);
1080 diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
1081 index e48380c..95a463c 100644
1082 --- a/drivers/media/dvb/ttpci/budget.c
1083 +++ b/drivers/media/dvb/ttpci/budget.c
1084 @@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget)
1085 &budget->i2c_adap,
1086 &tt1600_isl6423_config);
1087
1088 - } else {
1089 - dvb_frontend_detach(budget->dvb_frontend);
1090 - budget->dvb_frontend = NULL;
1091 }
1092 }
1093 break;
1094 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
1095 index 08cddb6..a9aa957 100644
1096 --- a/drivers/net/bnx2.c
1097 +++ b/drivers/net/bnx2.c
1098 @@ -4752,8 +4752,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
1099 rc = bnx2_alloc_bad_rbuf(bp);
1100 }
1101
1102 - if (bp->flags & BNX2_FLAG_USING_MSIX)
1103 + if (bp->flags & BNX2_FLAG_USING_MSIX) {
1104 bnx2_setup_msix_tbl(bp);
1105 + /* Prevent MSIX table reads and write from timing out */
1106 + REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
1107 + BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
1108 + }
1109
1110 return rc;
1111 }
1112 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1113 index ab75323..211b587 100644
1114 --- a/drivers/net/r8169.c
1115 +++ b/drivers/net/r8169.c
1116 @@ -2832,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
1117 spin_lock_irq(&tp->lock);
1118
1119 RTL_W8(Cfg9346, Cfg9346_Unlock);
1120 - RTL_W32(MAC0, low);
1121 +
1122 RTL_W32(MAC4, high);
1123 + RTL_R32(MAC4);
1124 +
1125 + RTL_W32(MAC0, low);
1126 + RTL_R32(MAC0);
1127 +
1128 RTL_W8(Cfg9346, Cfg9346_Lock);
1129
1130 spin_unlock_irq(&tp->lock);
1131 @@ -4316,7 +4321,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
1132
1133 tp->cur_tx += frags + 1;
1134
1135 - smp_wmb();
1136 + wmb();
1137
1138 RTL_W8(TxPoll, NPQ); /* set polling bit */
1139
1140 @@ -4676,7 +4681,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
1141 * until it does.
1142 */
1143 tp->intr_mask = 0xffff;
1144 - smp_wmb();
1145 + wmb();
1146 RTL_W16(IntrMask, tp->intr_event);
1147 }
1148
1149 @@ -4814,8 +4819,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
1150 mc_filter[1] = swab32(data);
1151 }
1152
1153 - RTL_W32(MAR0 + 0, mc_filter[0]);
1154 RTL_W32(MAR0 + 4, mc_filter[1]);
1155 + RTL_W32(MAR0 + 0, mc_filter[0]);
1156
1157 RTL_W32(RxConfig, tmp);
1158
1159 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1160 index dcc1c23..fd6622c 100644
1161 --- a/drivers/net/tg3.c
1162 +++ b/drivers/net/tg3.c
1163 @@ -8168,6 +8168,7 @@ static int tg3_test_msi(struct tg3 *tp)
1164 pci_disable_msi(tp->pdev);
1165
1166 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
1167 + tp->napi[0].irq_vec = tp->pdev->irq;
1168
1169 err = tg3_request_irq(tp, 0);
1170 if (err)
1171 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
1172 index a2b30a1..9a6eede 100644
1173 --- a/drivers/net/usb/dm9601.c
1174 +++ b/drivers/net/usb/dm9601.c
1175 @@ -238,7 +238,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
1176 goto out;
1177
1178 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
1179 - dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14);
1180 + dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
1181
1182 for (i = 0; i < DM_TIMEOUT; i++) {
1183 u8 tmp;
1184 diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
1185 index 1b73733..7bafa83 100644
1186 --- a/drivers/net/wireless/p54/p54pci.c
1187 +++ b/drivers/net/wireless/p54/p54pci.c
1188 @@ -205,6 +205,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
1189 i %= ring_limit;
1190 continue;
1191 }
1192 +
1193 + if (unlikely(len > priv->common.rx_mtu)) {
1194 + if (net_ratelimit())
1195 + dev_err(&priv->pdev->dev, "rx'd frame size "
1196 + "exceeds length threshold.\n");
1197 +
1198 + len = priv->common.rx_mtu;
1199 + }
1200 skb_put(skb, len);
1201
1202 if (p54_rx(dev, skb)) {
1203 @@ -237,7 +245,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
1204 u32 idx, i;
1205
1206 i = (*index) % ring_limit;
1207 - (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
1208 + (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
1209 idx %= ring_limit;
1210
1211 while (i != idx) {
1212 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
1213 index 8742640..b3c4fbd 100644
1214 --- a/drivers/net/wireless/p54/p54usb.c
1215 +++ b/drivers/net/wireless/p54/p54usb.c
1216 @@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
1217 /* Version 1 devices (pci chip + net2280) */
1218 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
1219 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
1220 + {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
1221 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
1222 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
1223 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
1224 diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
1225 index b6dda2b..9d147de 100644
1226 --- a/drivers/net/wireless/p54/txrx.c
1227 +++ b/drivers/net/wireless/p54/txrx.c
1228 @@ -186,7 +186,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
1229 struct ieee80211_tx_queue_stats *queue;
1230 unsigned long flags;
1231
1232 - if (WARN_ON(p54_queue > P54_QUEUE_NUM))
1233 + if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
1234 return -EINVAL;
1235
1236 queue = &priv->tx_stats[p54_queue];
1237 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1238 index bd667d2..595d03a 100644
1239 --- a/drivers/pci/pci.c
1240 +++ b/drivers/pci/pci.c
1241 @@ -601,7 +601,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
1242 */
1243 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
1244 {
1245 - return state > PCI_D0 ?
1246 + return state >= PCI_D0 ?
1247 pci_platform_power_transition(dev, state) : -EINVAL;
1248 }
1249 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
1250 @@ -638,10 +638,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1251 */
1252 return 0;
1253
1254 - /* Check if we're already there */
1255 - if (dev->current_state == state)
1256 - return 0;
1257 -
1258 __pci_start_power_transition(dev, state);
1259
1260 /* This device is quirked not to be put into D3, so
1261 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1262 index c7a6a89..aab4a39 100644
1263 --- a/drivers/scsi/libiscsi.c
1264 +++ b/drivers/scsi/libiscsi.c
1265 @@ -384,12 +384,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
1266
1267 WARN_ON(hdrlength >= 256);
1268 hdr->hlength = hdrlength & 0xFF;
1269 + hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
1270
1271 if (session->tt->init_task && session->tt->init_task(task))
1272 return -EIO;
1273
1274 task->state = ISCSI_TASK_RUNNING;
1275 - hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
1276 session->cmdsn++;
1277
1278 conn->scsicmd_pdus_cnt++;
1279 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
1280 index e155011..816ab97 100644
1281 --- a/drivers/scsi/libsas/sas_ata.c
1282 +++ b/drivers/scsi/libsas/sas_ata.c
1283 @@ -394,11 +394,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
1284 void sas_ata_task_abort(struct sas_task *task)
1285 {
1286 struct ata_queued_cmd *qc = task->uldd_task;
1287 + struct request_queue *q = qc->scsicmd->device->request_queue;
1288 struct completion *waiting;
1289 + unsigned long flags;
1290
1291 /* Bounce SCSI-initiated commands to the SCSI EH */
1292 if (qc->scsicmd) {
1293 + spin_lock_irqsave(q->queue_lock, flags);
1294 blk_abort_request(qc->scsicmd->request);
1295 + spin_unlock_irqrestore(q->queue_lock, flags);
1296 scsi_schedule_eh(qc->scsicmd->device->host);
1297 return;
1298 }
1299 diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
1300 index 1c558d3..39fb9aa 100644
1301 --- a/drivers/scsi/libsas/sas_scsi_host.c
1302 +++ b/drivers/scsi/libsas/sas_scsi_host.c
1303 @@ -1025,6 +1025,8 @@ int __sas_task_abort(struct sas_task *task)
1304 void sas_task_abort(struct sas_task *task)
1305 {
1306 struct scsi_cmnd *sc = task->uldd_task;
1307 + struct request_queue *q = sc->device->request_queue;
1308 + unsigned long flags;
1309
1310 /* Escape for libsas internal commands */
1311 if (!sc) {
1312 @@ -1039,7 +1041,9 @@ void sas_task_abort(struct sas_task *task)
1313 return;
1314 }
1315
1316 + spin_lock_irqsave(q->queue_lock, flags);
1317 blk_abort_request(sc->request);
1318 + spin_unlock_irqrestore(q->queue_lock, flags);
1319 scsi_schedule_eh(sc->device->host);
1320 }
1321
1322 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
1323 index b79481e..799bd75 100644
1324 --- a/drivers/scsi/qla2xxx/qla_isr.c
1325 +++ b/drivers/scsi/qla2xxx/qla_isr.c
1326 @@ -1347,16 +1347,22 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1327
1328 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1329 if (IS_FWI2_CAPABLE(ha)) {
1330 - sense_len = le32_to_cpu(sts24->sense_len);
1331 - rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1332 - resid_len = le32_to_cpu(sts24->rsp_residual_count);
1333 - fw_resid_len = le32_to_cpu(sts24->residual_len);
1334 + if (scsi_status & SS_SENSE_LEN_VALID)
1335 + sense_len = le32_to_cpu(sts24->sense_len);
1336 + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1337 + rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1338 + if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1339 + resid_len = le32_to_cpu(sts24->rsp_residual_count);
1340 + if (comp_status == CS_DATA_UNDERRUN)
1341 + fw_resid_len = le32_to_cpu(sts24->residual_len);
1342 rsp_info = sts24->data;
1343 sense_data = sts24->data;
1344 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1345 } else {
1346 - sense_len = le16_to_cpu(sts->req_sense_length);
1347 - rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1348 + if (scsi_status & SS_SENSE_LEN_VALID)
1349 + sense_len = le16_to_cpu(sts->req_sense_length);
1350 + if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1351 + rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1352 resid_len = le32_to_cpu(sts->residual_length);
1353 rsp_info = sts->rsp_info;
1354 sense_data = sts->req_sense_data;
1355 @@ -1443,38 +1449,62 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1356 break;
1357
1358 case CS_DATA_UNDERRUN:
1359 - resid = resid_len;
1360 + DEBUG2(printk(KERN_INFO
1361 + "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
1362 + "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
1363 + vha->host_no, cp->device->id, cp->device->lun, comp_status,
1364 + scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
1365 + cp->underflow));
1366 +
1367 /* Use F/W calculated residual length. */
1368 - if (IS_FWI2_CAPABLE(ha)) {
1369 - if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1370 - lscsi_status = 0;
1371 - } else if (resid != fw_resid_len) {
1372 - scsi_status &= ~SS_RESIDUAL_UNDER;
1373 - lscsi_status = 0;
1374 + resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1375 + scsi_set_resid(cp, resid);
1376 + if (scsi_status & SS_RESIDUAL_UNDER) {
1377 + if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1378 + DEBUG2(printk(
1379 + "scsi(%ld:%d:%d:%d) Dropped frame(s) "
1380 + "detected (%x of %x bytes)...residual "
1381 + "length mismatch...retrying command.\n",
1382 + vha->host_no, cp->device->channel,
1383 + cp->device->id, cp->device->lun, resid,
1384 + scsi_bufflen(cp)));
1385 +
1386 + cp->result = DID_ERROR << 16 | lscsi_status;
1387 + break;
1388 }
1389 - resid = fw_resid_len;
1390 - }
1391
1392 - if (scsi_status & SS_RESIDUAL_UNDER) {
1393 - scsi_set_resid(cp, resid);
1394 - } else {
1395 - DEBUG2(printk(KERN_INFO
1396 - "scsi(%ld:%d:%d) UNDERRUN status detected "
1397 - "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1398 - "os_underflow=0x%x\n", vha->host_no,
1399 - cp->device->id, cp->device->lun, comp_status,
1400 - scsi_status, resid_len, resid, cp->cmnd[0],
1401 - cp->underflow));
1402 + if (!lscsi_status &&
1403 + ((unsigned)(scsi_bufflen(cp) - resid) <
1404 + cp->underflow)) {
1405 + qla_printk(KERN_INFO, ha,
1406 + "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1407 + "detected (%x of %x bytes)...returning "
1408 + "error status.\n", vha->host_no,
1409 + cp->device->channel, cp->device->id,
1410 + cp->device->lun, resid, scsi_bufflen(cp));
1411
1412 + cp->result = DID_ERROR << 16;
1413 + break;
1414 + }
1415 + } else if (!lscsi_status) {
1416 + DEBUG2(printk(
1417 + "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
1418 + "(%x of %x bytes)...firmware reported underrun..."
1419 + "retrying command.\n", vha->host_no,
1420 + cp->device->channel, cp->device->id,
1421 + cp->device->lun, resid, scsi_bufflen(cp)));
1422 +
1423 + cp->result = DID_ERROR << 16;
1424 + break;
1425 }
1426
1427 + cp->result = DID_OK << 16 | lscsi_status;
1428 +
1429 /*
1430 * Check to see if SCSI Status is non zero. If so report SCSI
1431 * Status.
1432 */
1433 if (lscsi_status != 0) {
1434 - cp->result = DID_OK << 16 | lscsi_status;
1435 -
1436 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1437 DEBUG2(printk(KERN_INFO
1438 "scsi(%ld): QUEUE FULL status detected "
1439 @@ -1501,42 +1531,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1440 break;
1441
1442 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1443 - } else {
1444 - /*
1445 - * If RISC reports underrun and target does not report
1446 - * it then we must have a lost frame, so tell upper
1447 - * layer to retry it by reporting an error.
1448 - */
1449 - if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1450 - DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1451 - "frame(s) detected (%x of %x bytes)..."
1452 - "retrying command.\n",
1453 - vha->host_no, cp->device->channel,
1454 - cp->device->id, cp->device->lun, resid,
1455 - scsi_bufflen(cp)));
1456 -
1457 - scsi_set_resid(cp, resid);
1458 - cp->result = DID_ERROR << 16;
1459 - break;
1460 - }
1461 -
1462 - /* Handle mid-layer underflow */
1463 - if ((unsigned)(scsi_bufflen(cp) - resid) <
1464 - cp->underflow) {
1465 - qla_printk(KERN_INFO, ha,
1466 - "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1467 - "detected (%x of %x bytes)...returning "
1468 - "error status.\n", vha->host_no,
1469 - cp->device->channel, cp->device->id,
1470 - cp->device->lun, resid,
1471 - scsi_bufflen(cp));
1472 -
1473 - cp->result = DID_ERROR << 16;
1474 - break;
1475 - }
1476 -
1477 - /* Everybody online, looking good... */
1478 - cp->result = DID_OK << 16;
1479 }
1480 break;
1481
1482 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
1483 index c4103be..bc3e363 100644
1484 --- a/drivers/scsi/scsi_debug.c
1485 +++ b/drivers/scsi/scsi_debug.c
1486 @@ -914,7 +914,8 @@ static int resp_start_stop(struct scsi_cmnd * scp,
1487 static sector_t get_sdebug_capacity(void)
1488 {
1489 if (scsi_debug_virtual_gb > 0)
1490 - return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
1491 + return (sector_t)scsi_debug_virtual_gb *
1492 + (1073741824 / scsi_debug_sector_size);
1493 else
1494 return sdebug_store_sectors;
1495 }
1496 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
1497 index 1b0060b..573921d 100644
1498 --- a/drivers/scsi/scsi_error.c
1499 +++ b/drivers/scsi/scsi_error.c
1500 @@ -301,7 +301,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
1501 if (scmd->device->allow_restart &&
1502 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
1503 return FAILED;
1504 - return SUCCESS;
1505 +
1506 + if (blk_barrier_rq(scmd->request))
1507 + /*
1508 + * barrier requests should always retry on UA
1509 + * otherwise block will get a spurious error
1510 + */
1511 + return NEEDS_RETRY;
1512 + else
1513 + /*
1514 + * for normal (non barrier) commands, pass the
1515 + * UA upwards for a determination in the
1516 + * completion functions
1517 + */
1518 + return SUCCESS;
1519
1520 /* these three are not supported */
1521 case COPY_ABORTED:
1522 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1523 index bc9a881..41d712e 100644
1524 --- a/drivers/scsi/scsi_lib.c
1525 +++ b/drivers/scsi/scsi_lib.c
1526 @@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1527 * we already took a copy of the original into rq->errors which
1528 * is what gets returned to the user
1529 */
1530 - if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
1531 - if (!(req->cmd_flags & REQ_QUIET))
1532 + if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
1533 + /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
1534 + * print since caller wants ATA registers. Only occurs on
1535 + * SCSI ATA PASS_THROUGH commands when CK_COND=1
1536 + */
1537 + if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
1538 + ;
1539 + else if (!(req->cmd_flags & REQ_QUIET))
1540 scsi_print_sense("", cmd);
1541 result = 0;
1542 /* BLOCK_PC may have set error */
1543 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1544 index 9093c72..7694a95 100644
1545 --- a/drivers/scsi/sd.c
1546 +++ b/drivers/scsi/sd.c
1547 @@ -971,6 +971,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
1548 {
1549 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1550 rq->timeout = SD_TIMEOUT;
1551 + rq->retries = SD_MAX_RETRIES;
1552 rq->cmd[0] = SYNCHRONIZE_CACHE;
1553 rq->cmd_len = 10;
1554 }
1555 diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
1556 index deac67e..48ead15 100644
1557 --- a/drivers/serial/8250_pnp.c
1558 +++ b/drivers/serial/8250_pnp.c
1559 @@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
1560 { "FUJ02E6", 0 },
1561 /* Fujitsu Wacom 2FGT Tablet PC device */
1562 { "FUJ02E7", 0 },
1563 + /* Fujitsu Wacom 1FGT Tablet PC device */
1564 + { "FUJ02E9", 0 },
1565 /*
1566 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
1567 * disguise)
1568 diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
1569 index c2809f2..b12237f 100644
1570 --- a/drivers/staging/hv/Hv.c
1571 +++ b/drivers/staging/hv/Hv.c
1572 @@ -306,9 +306,9 @@ void HvCleanup(void)
1573 DPRINT_ENTER(VMBUS);
1574
1575 if (gHvContext.SignalEventBuffer) {
1576 + kfree(gHvContext.SignalEventBuffer);
1577 gHvContext.SignalEventBuffer = NULL;
1578 gHvContext.SignalEventParam = NULL;
1579 - kfree(gHvContext.SignalEventBuffer);
1580 }
1581
1582 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
1583 diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/RndisFilter.c
1584 index 26d7997..f05f4e1 100644
1585 --- a/drivers/staging/hv/RndisFilter.c
1586 +++ b/drivers/staging/hv/RndisFilter.c
1587 @@ -756,6 +756,7 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
1588
1589 ret = RndisFilterSetPacketFilter(Device,
1590 NDIS_PACKET_TYPE_BROADCAST |
1591 + NDIS_PACKET_TYPE_ALL_MULTICAST |
1592 NDIS_PACKET_TYPE_DIRECTED);
1593 if (ret == 0)
1594 Device->State = RNDIS_DEV_DATAINITIALIZED;
1595 diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
1596 index 0d7459e..4c3c8bc 100644
1597 --- a/drivers/staging/hv/netvsc_drv.c
1598 +++ b/drivers/staging/hv/netvsc_drv.c
1599 @@ -413,8 +413,7 @@ static int netvsc_probe(struct device *device)
1600 if (!net_drv_obj->Base.OnDeviceAdd)
1601 return -1;
1602
1603 - net = alloc_netdev(sizeof(struct net_device_context), "seth%d",
1604 - ether_setup);
1605 + net = alloc_etherdev(sizeof(struct net_device_context));
1606 if (!net)
1607 return -1;
1608
1609 diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
1610 index 6da1021..a2566f1 100644
1611 --- a/drivers/staging/usbip/usbip_event.c
1612 +++ b/drivers/staging/usbip/usbip_event.c
1613 @@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *ud)
1614 {
1615 struct usbip_task *eh = &ud->eh;
1616
1617 + if (eh->thread == current)
1618 + return; /* do not wait for myself */
1619 +
1620 wait_for_completion(&eh->thread_done);
1621 usbip_dbg_eh("usbip_eh has finished\n");
1622 }
1623 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
1624 index 5d80d5e..34fc7bb 100644
1625 --- a/drivers/usb/core/driver.c
1626 +++ b/drivers/usb/core/driver.c
1627 @@ -1175,9 +1175,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1628 udev->state == USB_STATE_SUSPENDED)
1629 goto done;
1630
1631 - udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1632 -
1633 if (msg.event & PM_EVENT_AUTO) {
1634 + udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1635 status = autosuspend_check(udev, 0);
1636 if (status < 0)
1637 goto done;
1638 @@ -1742,6 +1741,34 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
1639 return status;
1640 }
1641
1642 +static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
1643 +{
1644 + int w, i;
1645 + struct usb_interface *intf;
1646 +
1647 + /* Remote wakeup is needed only when we actually go to sleep.
1648 + * For things like FREEZE and QUIESCE, if the device is already
1649 + * autosuspended then its current wakeup setting is okay.
1650 + */
1651 + if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
1652 + udev->do_remote_wakeup = 0;
1653 + return;
1654 + }
1655 +
1656 + /* If remote wakeup is permitted, see whether any interface drivers
1657 + * actually want it.
1658 + */
1659 + w = 0;
1660 + if (device_may_wakeup(&udev->dev) && udev->actconfig) {
1661 + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1662 + intf = udev->actconfig->interface[i];
1663 + w |= intf->needs_remote_wakeup;
1664 + }
1665 + }
1666 +
1667 + udev->do_remote_wakeup = w;
1668 +}
1669 +
1670 int usb_suspend(struct device *dev, pm_message_t msg)
1671 {
1672 struct usb_device *udev;
1673 @@ -1761,6 +1788,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
1674 }
1675
1676 udev->skip_sys_resume = 0;
1677 + choose_wakeup(udev, msg);
1678 return usb_external_suspend_device(udev, msg);
1679 }
1680
1681 diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
1682 index 05e6d31..1a78cd1 100644
1683 --- a/drivers/usb/core/generic.c
1684 +++ b/drivers/usb/core/generic.c
1685 @@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev)
1686 * than a vendor-specific driver. */
1687 else if (udev->descriptor.bDeviceClass !=
1688 USB_CLASS_VENDOR_SPEC &&
1689 - (!desc || desc->bInterfaceClass !=
1690 + (desc && desc->bInterfaceClass !=
1691 USB_CLASS_VENDOR_SPEC)) {
1692 best = c;
1693 break;
1694 diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
1695 index 97b40ce..4a6366a 100644
1696 --- a/drivers/usb/core/inode.c
1697 +++ b/drivers/usb/core/inode.c
1698 @@ -515,13 +515,13 @@ static int fs_create_by_name (const char *name, mode_t mode,
1699 *dentry = NULL;
1700 mutex_lock(&parent->d_inode->i_mutex);
1701 *dentry = lookup_one_len(name, parent, strlen(name));
1702 - if (!IS_ERR(dentry)) {
1703 + if (!IS_ERR(*dentry)) {
1704 if ((mode & S_IFMT) == S_IFDIR)
1705 error = usbfs_mkdir (parent->d_inode, *dentry, mode);
1706 else
1707 error = usbfs_create (parent->d_inode, *dentry, mode);
1708 } else
1709 - error = PTR_ERR(dentry);
1710 + error = PTR_ERR(*dentry);
1711 mutex_unlock(&parent->d_inode->i_mutex);
1712
1713 return error;
1714 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
1715 index 35bf518..5aeabd8 100644
1716 --- a/drivers/usb/host/ehci-hcd.c
1717 +++ b/drivers/usb/host/ehci-hcd.c
1718 @@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
1719 */
1720 ehci->periodic_size = DEFAULT_I_TDPS;
1721 INIT_LIST_HEAD(&ehci->cached_itd_list);
1722 + INIT_LIST_HEAD(&ehci->cached_sitd_list);
1723 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
1724 return retval;
1725
1726 diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
1727 index aeda96e..1f3f01e 100644
1728 --- a/drivers/usb/host/ehci-mem.c
1729 +++ b/drivers/usb/host/ehci-mem.c
1730 @@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
1731
1732 static void ehci_mem_cleanup (struct ehci_hcd *ehci)
1733 {
1734 - free_cached_itd_list(ehci);
1735 + free_cached_lists(ehci);
1736 if (ehci->async)
1737 qh_put (ehci->async);
1738 ehci->async = NULL;
1739 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
1740 index 5cc3f48..6746a8a 100644
1741 --- a/drivers/usb/host/ehci-sched.c
1742 +++ b/drivers/usb/host/ehci-sched.c
1743 @@ -2127,13 +2127,27 @@ sitd_complete (
1744 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1745 }
1746 iso_stream_put (ehci, stream);
1747 - /* OK to recycle this SITD now that its completion callback ran. */
1748 +
1749 done:
1750 sitd->urb = NULL;
1751 - sitd->stream = NULL;
1752 - list_move(&sitd->sitd_list, &stream->free_list);
1753 - iso_stream_put(ehci, stream);
1754 -
1755 + if (ehci->clock_frame != sitd->frame) {
1756 + /* OK to recycle this SITD now. */
1757 + sitd->stream = NULL;
1758 + list_move(&sitd->sitd_list, &stream->free_list);
1759 + iso_stream_put(ehci, stream);
1760 + } else {
1761 + /* HW might remember this SITD, so we can't recycle it yet.
1762 + * Move it to a safe place until a new frame starts.
1763 + */
1764 + list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
1765 + if (stream->refcount == 2) {
1766 + /* If iso_stream_put() were called here, stream
1767 + * would be freed. Instead, just prevent reuse.
1768 + */
1769 + stream->ep->hcpriv = NULL;
1770 + stream->ep = NULL;
1771 + }
1772 + }
1773 return retval;
1774 }
1775
1776 @@ -2199,9 +2213,10 @@ done:
1777
1778 /*-------------------------------------------------------------------------*/
1779
1780 -static void free_cached_itd_list(struct ehci_hcd *ehci)
1781 +static void free_cached_lists(struct ehci_hcd *ehci)
1782 {
1783 struct ehci_itd *itd, *n;
1784 + struct ehci_sitd *sitd, *sn;
1785
1786 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
1787 struct ehci_iso_stream *stream = itd->stream;
1788 @@ -2209,6 +2224,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
1789 list_move(&itd->itd_list, &stream->free_list);
1790 iso_stream_put(ehci, stream);
1791 }
1792 +
1793 + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
1794 + struct ehci_iso_stream *stream = sitd->stream;
1795 + sitd->stream = NULL;
1796 + list_move(&sitd->sitd_list, &stream->free_list);
1797 + iso_stream_put(ehci, stream);
1798 + }
1799 }
1800
1801 /*-------------------------------------------------------------------------*/
1802 @@ -2235,7 +2257,7 @@ scan_periodic (struct ehci_hcd *ehci)
1803 clock_frame = -1;
1804 }
1805 if (ehci->clock_frame != clock_frame) {
1806 - free_cached_itd_list(ehci);
1807 + free_cached_lists(ehci);
1808 ehci->clock_frame = clock_frame;
1809 }
1810 clock %= mod;
1811 @@ -2398,7 +2420,7 @@ restart:
1812 clock = now;
1813 clock_frame = clock >> 3;
1814 if (ehci->clock_frame != clock_frame) {
1815 - free_cached_itd_list(ehci);
1816 + free_cached_lists(ehci);
1817 ehci->clock_frame = clock_frame;
1818 }
1819 } else {
1820 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
1821 index b1dce96..556c0b4 100644
1822 --- a/drivers/usb/host/ehci.h
1823 +++ b/drivers/usb/host/ehci.h
1824 @@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
1825 int next_uframe; /* scan periodic, start here */
1826 unsigned periodic_sched; /* periodic activity count */
1827
1828 - /* list of itds completed while clock_frame was still active */
1829 + /* list of itds & sitds completed while clock_frame was still active */
1830 struct list_head cached_itd_list;
1831 + struct list_head cached_sitd_list;
1832 unsigned clock_frame;
1833
1834 /* per root hub port */
1835 @@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
1836 clear_bit (action, &ehci->actions);
1837 }
1838
1839 -static void free_cached_itd_list(struct ehci_hcd *ehci);
1840 +static void free_cached_lists(struct ehci_hcd *ehci);
1841
1842 /*-------------------------------------------------------------------------*/
1843
1844 diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
1845 index 32bbce9..65cac8c 100644
1846 --- a/drivers/usb/host/ohci-hub.c
1847 +++ b/drivers/usb/host/ohci-hub.c
1848 @@ -697,7 +697,7 @@ static int ohci_hub_control (
1849 u16 wLength
1850 ) {
1851 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
1852 - int ports = hcd_to_bus (hcd)->root_hub->maxchild;
1853 + int ports = ohci->num_ports;
1854 u32 temp;
1855 int retval = 0;
1856
1857 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1858 index b8fd270..dd71f02 100644
1859 --- a/drivers/usb/host/xhci-mem.c
1860 +++ b/drivers/usb/host/xhci-mem.c
1861 @@ -496,6 +496,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1862 return EP_INTERVAL(interval);
1863 }
1864
1865 +/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
1866 + * High speed endpoint descriptors can define "the number of additional
1867 + * transaction opportunities per microframe", but that goes in the Max Burst
1868 + * endpoint context field.
1869 + */
1870 +static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
1871 + struct usb_host_endpoint *ep)
1872 +{
1873 + if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
1874 + return 0;
1875 + return ep->ss_ep_comp->desc.bmAttributes;
1876 +}
1877 +
1878 static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
1879 struct usb_host_endpoint *ep)
1880 {
1881 @@ -526,6 +539,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
1882 return type;
1883 }
1884
1885 +/* Return the maximum endpoint service interval time (ESIT) payload.
1886 + * Basically, this is the maxpacket size, multiplied by the burst size
1887 + * and mult size.
1888 + */
1889 +static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
1890 + struct usb_device *udev,
1891 + struct usb_host_endpoint *ep)
1892 +{
1893 + int max_burst;
1894 + int max_packet;
1895 +
1896 + /* Only applies for interrupt or isochronous endpoints */
1897 + if (usb_endpoint_xfer_control(&ep->desc) ||
1898 + usb_endpoint_xfer_bulk(&ep->desc))
1899 + return 0;
1900 +
1901 + if (udev->speed == USB_SPEED_SUPER) {
1902 + if (ep->ss_ep_comp)
1903 + return ep->ss_ep_comp->desc.wBytesPerInterval;
1904 + xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
1905 + /* Assume no bursts, no multiple opportunities to send. */
1906 + return ep->desc.wMaxPacketSize;
1907 + }
1908 +
1909 + max_packet = ep->desc.wMaxPacketSize & 0x3ff;
1910 + max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
1911 + /* A 0 in max burst means 1 transfer per ESIT */
1912 + return max_packet * (max_burst + 1);
1913 +}
1914 +
1915 int xhci_endpoint_init(struct xhci_hcd *xhci,
1916 struct xhci_virt_device *virt_dev,
1917 struct usb_device *udev,
1918 @@ -537,6 +580,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1919 struct xhci_ring *ep_ring;
1920 unsigned int max_packet;
1921 unsigned int max_burst;
1922 + u32 max_esit_payload;
1923
1924 ep_index = xhci_get_endpoint_index(&ep->desc);
1925 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1926 @@ -550,6 +594,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1927 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
1928
1929 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
1930 + ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
1931
1932 /* FIXME dig Mult and streams info out of ep companion desc */
1933
1934 @@ -595,6 +640,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1935 default:
1936 BUG();
1937 }
1938 + max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
1939 + ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
1940 +
1941 + /*
1942 + * XXX no idea how to calculate the average TRB buffer length for bulk
1943 + * endpoints, as the driver gives us no clue how big each scatter gather
1944 + * list entry (or buffer) is going to be.
1945 + *
1946 + * For isochronous and interrupt endpoints, we set it to the max
1947 + * available, until we have new API in the USB core to allow drivers to
1948 + * declare how much bandwidth they actually need.
1949 + *
1950 + * Normally, it would be calculated by taking the total of the buffer
1951 + * lengths in the TD and then dividing by the number of TRBs in a TD,
1952 + * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
1953 + * use Event Data TRBs, and we don't chain in a link TRB on short
1954 + * transfers, we're basically dividing by 1.
1955 + */
1956 + ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
1957 +
1958 /* FIXME Debug endpoint context */
1959 return 0;
1960 }
1961 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1962 index 4b254b6..db821e9 100644
1963 --- a/drivers/usb/host/xhci.h
1964 +++ b/drivers/usb/host/xhci.h
1965 @@ -609,6 +609,10 @@ struct xhci_ep_ctx {
1966 #define MAX_PACKET_MASK (0xffff << 16)
1967 #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
1968
1969 +/* tx_info bitmasks */
1970 +#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
1971 +#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
1972 +
1973
1974 /**
1975 * struct xhci_input_control_context
1976 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1977 index 3689077..54f8494 100644
1978 --- a/drivers/usb/serial/sierra.c
1979 +++ b/drivers/usb/serial/sierra.c
1980 @@ -195,6 +195,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
1981 static struct usb_device_id id_table [] = {
1982 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
1983 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
1984 + { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
1985 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
1986
1987 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
1988 diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
1989 index 1ed3d55..17726a0 100644
1990 --- a/drivers/w1/slaves/w1_therm.c
1991 +++ b/drivers/w1/slaves/w1_therm.c
1992 @@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = {
1993
1994 static inline int w1_DS18B20_convert_temp(u8 rom[9])
1995 {
1996 - int t = ((s16)rom[1] << 8) | rom[0];
1997 - t = t*1000/16;
1998 - return t;
1999 + s16 t = le16_to_cpup((__le16 *)rom);
2000 + return t*1000/16;
2001 }
2002
2003 static inline int w1_DS18S20_convert_temp(u8 rom[9])
2004 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2005 index 9333dc9..9e21653 100644
2006 --- a/fs/ext4/extents.c
2007 +++ b/fs/ext4/extents.c
2008 @@ -3711,7 +3711,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2009 __u64 start, __u64 len)
2010 {
2011 ext4_lblk_t start_blk;
2012 - ext4_lblk_t len_blks;
2013 int error = 0;
2014
2015 /* fallback to generic here if not in extents fmt */
2016 @@ -3725,8 +3724,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2017 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
2018 error = ext4_xattr_fiemap(inode, fieinfo);
2019 } else {
2020 + ext4_lblk_t len_blks;
2021 + __u64 last_blk;
2022 +
2023 start_blk = start >> inode->i_sb->s_blocksize_bits;
2024 - len_blks = len >> inode->i_sb->s_blocksize_bits;
2025 + last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
2026 + if (last_blk >= EXT_MAX_BLOCK)
2027 + last_blk = EXT_MAX_BLOCK-1;
2028 + len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
2029
2030 /*
2031 * Walk the extent tree gathering extent information.
2032 diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
2033 index 7f24a0b..1aba003 100644
2034 --- a/fs/jfs/resize.c
2035 +++ b/fs/jfs/resize.c
2036 @@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2037 struct inode *iplist[1];
2038 struct jfs_superblock *j_sb, *j_sb2;
2039 uint old_agsize;
2040 + int agsizechanged = 0;
2041 struct buffer_head *bh, *bh2;
2042
2043 /* If the volume hasn't grown, get out now */
2044 @@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2045 */
2046 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
2047 goto error_out;
2048 +
2049 + agsizechanged |= (bmp->db_agsize != old_agsize);
2050 +
2051 /*
2052 * the map now has extended to cover additional nblocks:
2053 * dn_mapsize = oldMapsize + nblocks;
2054 @@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
2055 * will correctly identify the new ag);
2056 */
2057 /* if new AG size the same as old AG size, done! */
2058 - if (bmp->db_agsize != old_agsize) {
2059 + if (agsizechanged) {
2060 if ((rc = diExtendFS(ipimap, ipbmap)))
2061 goto error_out;
2062
2063 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
2064 index 69d6a46..127ed5c 100644
2065 --- a/fs/nfs/client.c
2066 +++ b/fs/nfs/client.c
2067 @@ -965,6 +965,8 @@ out_error:
2068 static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
2069 {
2070 target->flags = source->flags;
2071 + target->rsize = source->rsize;
2072 + target->wsize = source->wsize;
2073 target->acregmin = source->acregmin;
2074 target->acregmax = source->acregmax;
2075 target->acdirmin = source->acdirmin;
2076 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2077 index dff7f0d..a87cbd8 100644
2078 --- a/fs/nfs/dir.c
2079 +++ b/fs/nfs/dir.c
2080 @@ -837,6 +837,8 @@ out_zap_parent:
2081 /* If we have submounts, don't unhash ! */
2082 if (have_submounts(dentry))
2083 goto out_valid;
2084 + if (dentry->d_flags & DCACHE_DISCONNECTED)
2085 + goto out_valid;
2086 shrink_dcache_parent(dentry);
2087 }
2088 d_drop(dentry);
2089 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
2090 index c598ab9..12f62ff 100644
2091 --- a/fs/nfsd/nfs4xdr.c
2092 +++ b/fs/nfsd/nfs4xdr.c
2093 @@ -168,10 +168,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
2094 argp->p = page_address(argp->pagelist[0]);
2095 argp->pagelist++;
2096 if (argp->pagelen < PAGE_SIZE) {
2097 - argp->end = p + (argp->pagelen>>2);
2098 + argp->end = argp->p + (argp->pagelen>>2);
2099 argp->pagelen = 0;
2100 } else {
2101 - argp->end = p + (PAGE_SIZE>>2);
2102 + argp->end = argp->p + (PAGE_SIZE>>2);
2103 argp->pagelen -= PAGE_SIZE;
2104 }
2105 memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
2106 @@ -1433,10 +1433,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
2107 argp->p = page_address(argp->pagelist[0]);
2108 argp->pagelist++;
2109 if (argp->pagelen < PAGE_SIZE) {
2110 - argp->end = p + (argp->pagelen>>2);
2111 + argp->end = argp->p + (argp->pagelen>>2);
2112 argp->pagelen = 0;
2113 } else {
2114 - argp->end = p + (PAGE_SIZE>>2);
2115 + argp->end = argp->p + (PAGE_SIZE>>2);
2116 argp->pagelen -= PAGE_SIZE;
2117 }
2118 }
2119 diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
2120 index d43d34a..5a253ba 100644
2121 --- a/fs/ocfs2/buffer_head_io.c
2122 +++ b/fs/ocfs2/buffer_head_io.c
2123 @@ -407,6 +407,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
2124 struct buffer_head *bh)
2125 {
2126 int ret = 0;
2127 + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
2128
2129 mlog_entry_void();
2130
2131 @@ -426,6 +427,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
2132
2133 get_bh(bh); /* for end_buffer_write_sync() */
2134 bh->b_end_io = end_buffer_write_sync;
2135 + ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
2136 submit_bh(WRITE, bh);
2137
2138 wait_on_buffer(bh);
2139 diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
2140 index 02bf178..18bc101 100644
2141 --- a/fs/ocfs2/dlm/dlmfs.c
2142 +++ b/fs/ocfs2/dlm/dlmfs.c
2143 @@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
2144 if ((count + *ppos) > i_size_read(inode))
2145 readlen = i_size_read(inode) - *ppos;
2146 else
2147 - readlen = count - *ppos;
2148 + readlen = count;
2149
2150 lvb_buf = kmalloc(readlen, GFP_NOFS);
2151 if (!lvb_buf)
2152 diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
2153 index 0297fb8..4c827d8 100644
2154 --- a/fs/ocfs2/inode.c
2155 +++ b/fs/ocfs2/inode.c
2156 @@ -559,6 +559,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
2157 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2158 if (IS_ERR(handle)) {
2159 status = PTR_ERR(handle);
2160 + handle = NULL;
2161 mlog_errno(status);
2162 goto out;
2163 }
2164 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2165 index 3a0df7a..03a1ab8 100644
2166 --- a/fs/ocfs2/refcounttree.c
2167 +++ b/fs/ocfs2/refcounttree.c
2168 @@ -3995,6 +3995,9 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
2169 di->i_attr = s_di->i_attr;
2170
2171 if (preserve) {
2172 + t_inode->i_uid = s_inode->i_uid;
2173 + t_inode->i_gid = s_inode->i_gid;
2174 + t_inode->i_mode = s_inode->i_mode;
2175 di->i_uid = s_di->i_uid;
2176 di->i_gid = s_di->i_gid;
2177 di->i_mode = s_di->i_mode;
2178 diff --git a/fs/proc/base.c b/fs/proc/base.c
2179 index 13b0378..a1bb0f6 100644
2180 --- a/fs/proc/base.c
2181 +++ b/fs/proc/base.c
2182 @@ -2844,7 +2844,7 @@ out_no_task:
2183 */
2184 static const struct pid_entry tid_base_stuff[] = {
2185 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2186 - DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
2187 + DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2188 REG("environ", S_IRUSR, proc_environ_operations),
2189 INF("auxv", S_IRUSR, proc_pid_auxv),
2190 ONE("status", S_IRUGO, proc_pid_status),
2191 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
2192 index 6d2668f..d42c30c 100644
2193 --- a/fs/reiserfs/dir.c
2194 +++ b/fs/reiserfs/dir.c
2195 @@ -45,8 +45,6 @@ static inline bool is_privroot_deh(struct dentry *dir,
2196 struct reiserfs_de_head *deh)
2197 {
2198 struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
2199 - if (reiserfs_expose_privroot(dir->d_sb))
2200 - return 0;
2201 return (dir == dir->d_parent && privroot->d_inode &&
2202 deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
2203 }
2204 diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
2205 index 6925b83..cc1caa2 100644
2206 --- a/fs/reiserfs/xattr.c
2207 +++ b/fs/reiserfs/xattr.c
2208 @@ -536,7 +536,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
2209 if (!err && new_size < i_size_read(dentry->d_inode)) {
2210 struct iattr newattrs = {
2211 .ia_ctime = current_fs_time(inode->i_sb),
2212 - .ia_size = buffer_size,
2213 + .ia_size = new_size,
2214 .ia_valid = ATTR_SIZE | ATTR_CTIME,
2215 };
2216 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
2217 @@ -952,21 +952,13 @@ int reiserfs_permission(struct inode *inode, int mask)
2218 return generic_permission(inode, mask, NULL);
2219 }
2220
2221 -/* This will catch lookups from the fs root to .reiserfs_priv */
2222 -static int
2223 -xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
2224 +static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
2225 {
2226 - struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
2227 - if (container_of(q1, struct dentry, d_name) == priv_root)
2228 - return -ENOENT;
2229 - if (q1->len == name->len &&
2230 - !memcmp(q1->name, name->name, name->len))
2231 - return 0;
2232 - return 1;
2233 + return -EPERM;
2234 }
2235
2236 static const struct dentry_operations xattr_lookup_poison_ops = {
2237 - .d_compare = xattr_lookup_poison,
2238 + .d_revalidate = xattr_hide_revalidate,
2239 };
2240
2241 int reiserfs_lookup_privroot(struct super_block *s)
2242 @@ -980,8 +972,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
2243 strlen(PRIVROOT_NAME));
2244 if (!IS_ERR(dentry)) {
2245 REISERFS_SB(s)->priv_root = dentry;
2246 - if (!reiserfs_expose_privroot(s))
2247 - s->s_root->d_op = &xattr_lookup_poison_ops;
2248 + dentry->d_op = &xattr_lookup_poison_ops;
2249 if (dentry->d_inode)
2250 dentry->d_inode->i_flags |= S_PRIVATE;
2251 } else
2252 diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
2253 index aae1249..d95bfa2 100644
2254 --- a/fs/xfs/linux-2.6/xfs_super.c
2255 +++ b/fs/xfs/linux-2.6/xfs_super.c
2256 @@ -1164,6 +1164,7 @@ xfs_fs_put_super(
2257
2258 xfs_unmountfs(mp);
2259 xfs_freesb(mp);
2260 + xfs_inode_shrinker_unregister(mp);
2261 xfs_icsb_destroy_counters(mp);
2262 xfs_close_devices(mp);
2263 xfs_dmops_put(mp);
2264 @@ -1555,6 +1556,8 @@ xfs_fs_fill_super(
2265 if (error)
2266 goto fail_vnrele;
2267
2268 + xfs_inode_shrinker_register(mp);
2269 +
2270 kfree(mtpt);
2271
2272 xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
2273 @@ -1894,6 +1897,7 @@ init_xfs_fs(void)
2274 goto out_cleanup_procfs;
2275
2276 vfs_initquota();
2277 + xfs_inode_shrinker_init();
2278
2279 error = register_filesystem(&xfs_fs_type);
2280 if (error)
2281 @@ -1923,6 +1927,7 @@ exit_xfs_fs(void)
2282 {
2283 vfs_exitquota();
2284 unregister_filesystem(&xfs_fs_type);
2285 + xfs_inode_shrinker_destroy();
2286 xfs_sysctl_unregister();
2287 xfs_cleanup_procfs();
2288 xfs_buf_terminate();
2289 diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
2290 index c1b7154..c82683a 100644
2291 --- a/fs/xfs/linux-2.6/xfs_sync.c
2292 +++ b/fs/xfs/linux-2.6/xfs_sync.c
2293 @@ -94,7 +94,8 @@ xfs_inode_ag_walk(
2294 struct xfs_perag *pag, int flags),
2295 int flags,
2296 int tag,
2297 - int exclusive)
2298 + int exclusive,
2299 + int *nr_to_scan)
2300 {
2301 struct xfs_perag *pag = &mp->m_perag[ag];
2302 uint32_t first_index;
2303 @@ -134,7 +135,7 @@ restart:
2304 if (error == EFSCORRUPTED)
2305 break;
2306
2307 - } while (1);
2308 + } while ((*nr_to_scan)--);
2309
2310 if (skipped) {
2311 delay(1);
2312 @@ -152,23 +153,30 @@ xfs_inode_ag_iterator(
2313 struct xfs_perag *pag, int flags),
2314 int flags,
2315 int tag,
2316 - int exclusive)
2317 + int exclusive,
2318 + int *nr_to_scan)
2319 {
2320 int error = 0;
2321 int last_error = 0;
2322 xfs_agnumber_t ag;
2323 + int nr;
2324
2325 + nr = nr_to_scan ? *nr_to_scan : INT_MAX;
2326 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
2327 if (!mp->m_perag[ag].pag_ici_init)
2328 continue;
2329 error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
2330 - exclusive);
2331 + exclusive, &nr);
2332 if (error) {
2333 last_error = error;
2334 if (error == EFSCORRUPTED)
2335 break;
2336 }
2337 + if (nr <= 0)
2338 + break;
2339 }
2340 + if (nr_to_scan)
2341 + *nr_to_scan = nr;
2342 return XFS_ERROR(last_error);
2343 }
2344
2345 @@ -288,7 +296,7 @@ xfs_sync_data(
2346 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
2347
2348 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
2349 - XFS_ICI_NO_TAG, 0);
2350 + XFS_ICI_NO_TAG, 0, NULL);
2351 if (error)
2352 return XFS_ERROR(error);
2353
2354 @@ -310,7 +318,7 @@ xfs_sync_attr(
2355 ASSERT((flags & ~SYNC_WAIT) == 0);
2356
2357 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
2358 - XFS_ICI_NO_TAG, 0);
2359 + XFS_ICI_NO_TAG, 0, NULL);
2360 }
2361
2362 STATIC int
2363 @@ -678,6 +686,7 @@ __xfs_inode_set_reclaim_tag(
2364 radix_tree_tag_set(&pag->pag_ici_root,
2365 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
2366 XFS_ICI_RECLAIM_TAG);
2367 + pag->pag_ici_reclaimable++;
2368 }
2369
2370 /*
2371 @@ -709,6 +718,7 @@ __xfs_inode_clear_reclaim_tag(
2372 {
2373 radix_tree_tag_clear(&pag->pag_ici_root,
2374 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
2375 + pag->pag_ici_reclaimable--;
2376 }
2377
2378 STATIC int
2379 @@ -769,5 +779,88 @@ xfs_reclaim_inodes(
2380 int mode)
2381 {
2382 return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
2383 - XFS_ICI_RECLAIM_TAG, 1);
2384 + XFS_ICI_RECLAIM_TAG, 1, NULL);
2385 +}
2386 +
2387 +/*
2388 + * Shrinker infrastructure.
2389 + *
2390 + * This is all far more complex than it needs to be. It adds a global list of
2391 + * mounts because the shrinkers can only call a global context. We need to make
2392 + * the shrinkers pass a context to avoid the need for global state.
2393 + */
2394 +static LIST_HEAD(xfs_mount_list);
2395 +static struct rw_semaphore xfs_mount_list_lock;
2396 +
2397 +static int
2398 +xfs_reclaim_inode_shrink(
2399 + int nr_to_scan,
2400 + gfp_t gfp_mask)
2401 +{
2402 + struct xfs_mount *mp;
2403 + xfs_agnumber_t ag;
2404 + int reclaimable = 0;
2405 +
2406 + if (nr_to_scan) {
2407 + if (!(gfp_mask & __GFP_FS))
2408 + return -1;
2409 +
2410 + down_read(&xfs_mount_list_lock);
2411 + list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
2412 + xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
2413 + XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
2414 + if (nr_to_scan <= 0)
2415 + break;
2416 + }
2417 + up_read(&xfs_mount_list_lock);
2418 + }
2419 +
2420 + down_read(&xfs_mount_list_lock);
2421 + list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
2422 + for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
2423 +
2424 + if (!mp->m_perag[ag].pag_ici_init)
2425 + continue;
2426 + reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
2427 + }
2428 + }
2429 + up_read(&xfs_mount_list_lock);
2430 + return reclaimable;
2431 +}
2432 +
2433 +static struct shrinker xfs_inode_shrinker = {
2434 + .shrink = xfs_reclaim_inode_shrink,
2435 + .seeks = DEFAULT_SEEKS,
2436 +};
2437 +
2438 +void __init
2439 +xfs_inode_shrinker_init(void)
2440 +{
2441 + init_rwsem(&xfs_mount_list_lock);
2442 + register_shrinker(&xfs_inode_shrinker);
2443 +}
2444 +
2445 +void
2446 +xfs_inode_shrinker_destroy(void)
2447 +{
2448 + ASSERT(list_empty(&xfs_mount_list));
2449 + unregister_shrinker(&xfs_inode_shrinker);
2450 +}
2451 +
2452 +void
2453 +xfs_inode_shrinker_register(
2454 + struct xfs_mount *mp)
2455 +{
2456 + down_write(&xfs_mount_list_lock);
2457 + list_add_tail(&mp->m_mplist, &xfs_mount_list);
2458 + up_write(&xfs_mount_list_lock);
2459 +}
2460 +
2461 +void
2462 +xfs_inode_shrinker_unregister(
2463 + struct xfs_mount *mp)
2464 +{
2465 + down_write(&xfs_mount_list_lock);
2466 + list_del(&mp->m_mplist);
2467 + up_write(&xfs_mount_list_lock);
2468 }
2469 diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
2470 index ea932b4..0b28c13 100644
2471 --- a/fs/xfs/linux-2.6/xfs_sync.h
2472 +++ b/fs/xfs/linux-2.6/xfs_sync.h
2473 @@ -54,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
2474 int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
2475 int xfs_inode_ag_iterator(struct xfs_mount *mp,
2476 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
2477 - int flags, int tag, int write_lock);
2478 + int flags, int tag, int write_lock, int *nr_to_scan);
2479 +
2480 +void xfs_inode_shrinker_init(void);
2481 +void xfs_inode_shrinker_destroy(void);
2482 +void xfs_inode_shrinker_register(struct xfs_mount *mp);
2483 +void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
2484
2485 #endif
2486 diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
2487 index f99cfa4..60fe358 100644
2488 --- a/fs/xfs/quota/xfs_qm_syscalls.c
2489 +++ b/fs/xfs/quota/xfs_qm_syscalls.c
2490 @@ -893,7 +893,8 @@ xfs_qm_dqrele_all_inodes(
2491 uint flags)
2492 {
2493 ASSERT(mp->m_quotainfo);
2494 - xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
2495 + xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
2496 + XFS_ICI_NO_TAG, 0, NULL);
2497 }
2498
2499 /*------------------------------------------------------------------------*/
2500 diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
2501 index a5d54bf..381fba7 100644
2502 --- a/fs/xfs/xfs_ag.h
2503 +++ b/fs/xfs/xfs_ag.h
2504 @@ -215,6 +215,7 @@ typedef struct xfs_perag
2505 int pag_ici_init; /* incore inode cache initialised */
2506 rwlock_t pag_ici_lock; /* incore inode lock */
2507 struct radix_tree_root pag_ici_root; /* incore inode cache root */
2508 + int pag_ici_reclaimable; /* reclaimable inodes */
2509 #endif
2510 } xfs_perag_t;
2511
2512 diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
2513 index 1e6094f..08fdb6d 100644
2514 --- a/fs/xfs/xfs_mount.h
2515 +++ b/fs/xfs/xfs_mount.h
2516 @@ -243,6 +243,7 @@ typedef struct xfs_mount {
2517 wait_queue_head_t m_wait_single_sync_task;
2518 __int64_t m_update_flags; /* sb flags we need to update
2519 on the next remount,rw */
2520 + struct list_head m_mplist; /* inode shrinker mount list */
2521 } xfs_mount_t;
2522
2523 /*
2524 diff --git a/include/linux/acpi.h b/include/linux/acpi.h
2525 index c010b94..07432a1 100644
2526 --- a/include/linux/acpi.h
2527 +++ b/include/linux/acpi.h
2528 @@ -251,6 +251,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
2529 void __init acpi_no_s4_hw_signature(void);
2530 void __init acpi_old_suspend_ordering(void);
2531 void __init acpi_s4_no_nvs(void);
2532 +void __init acpi_set_sci_en_on_resume(void);
2533 #endif /* CONFIG_PM_SLEEP */
2534
2535 struct acpi_osc_context {
2536 diff --git a/include/linux/ata.h b/include/linux/ata.h
2537 index 4fb3573..8938796 100644
2538 --- a/include/linux/ata.h
2539 +++ b/include/linux/ata.h
2540 @@ -1000,8 +1000,8 @@ static inline int ata_ok(u8 status)
2541
2542 static inline int lba_28_ok(u64 block, u32 n_block)
2543 {
2544 - /* check the ending block number */
2545 - return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
2546 + /* check the ending block number: must be LESS THAN 0x0fffffff */
2547 + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
2548 }
2549
2550 static inline int lba_48_ok(u64 block, u32 n_block)
2551 diff --git a/include/linux/poison.h b/include/linux/poison.h
2552 index 7fc194a..34066ff 100644
2553 --- a/include/linux/poison.h
2554 +++ b/include/linux/poison.h
2555 @@ -2,13 +2,25 @@
2556 #define _LINUX_POISON_H
2557
2558 /********** include/linux/list.h **********/
2559 +
2560 +/*
2561 + * Architectures might want to move the poison pointer offset
2562 + * into some well-recognized area such as 0xdead000000000000,
2563 + * that is also not mappable by user-space exploits:
2564 + */
2565 +#ifdef CONFIG_ILLEGAL_POINTER_VALUE
2566 +# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
2567 +#else
2568 +# define POISON_POINTER_DELTA 0
2569 +#endif
2570 +
2571 /*
2572 * These are non-NULL pointers that will result in page faults
2573 * under normal circumstances, used to verify that nobody uses
2574 * non-initialized list entries.
2575 */
2576 -#define LIST_POISON1 ((void *) 0x00100100)
2577 -#define LIST_POISON2 ((void *) 0x00200200)
2578 +#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
2579 +#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
2580
2581 /********** include/linux/timer.h **********/
2582 /*
2583 @@ -36,6 +48,15 @@
2584 #define POISON_FREE 0x6b /* for use-after-free poisoning */
2585 #define POISON_END 0xa5 /* end-byte of poisoning */
2586
2587 +/********** mm/hugetlb.c **********/
2588 +/*
2589 + * Private mappings of hugetlb pages use this poisoned value for
2590 + * page->mapping. The core VM should not be doing anything with this mapping
2591 + * but futex requires the existence of some page->mapping value even though it
2592 + * is unused if PAGE_MAPPING_ANON is set.
2593 + */
2594 +#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
2595 +
2596 /********** arch/$ARCH/mm/init.c **********/
2597 #define POISON_FREE_INITMEM 0xcc
2598
2599 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
2600 index 93515c6..6ba163f 100644
2601 --- a/include/linux/syscalls.h
2602 +++ b/include/linux/syscalls.h
2603 @@ -153,7 +153,8 @@ static void prof_sysexit_disable_##sname(void) \
2604 #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
2605
2606 #define SYSCALL_TRACE_ENTER_EVENT(sname) \
2607 - static struct ftrace_event_call event_enter_##sname; \
2608 + static struct ftrace_event_call \
2609 + __attribute__((__aligned__(4))) event_enter_##sname; \
2610 struct trace_event enter_syscall_print_##sname = { \
2611 .trace = print_syscall_enter, \
2612 }; \
2613 @@ -189,7 +190,8 @@ static void prof_sysexit_disable_##sname(void) \
2614 }
2615
2616 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
2617 - static struct ftrace_event_call event_exit_##sname; \
2618 + static struct ftrace_event_call \
2619 + __attribute__((__aligned__(4))) event_exit_##sname; \
2620 struct trace_event exit_syscall_print_##sname = { \
2621 .trace = print_syscall_exit, \
2622 }; \
2623 diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
2624 index dacb8ef..4b6a4a3 100644
2625 --- a/include/trace/ftrace.h
2626 +++ b/include/trace/ftrace.h
2627 @@ -43,7 +43,8 @@
2628 tstruct \
2629 char __data[0]; \
2630 }; \
2631 - static struct ftrace_event_call event_##name
2632 + static struct ftrace_event_call \
2633 + __attribute__((__aligned__(4))) event_##name
2634
2635 #undef __cpparg
2636 #define __cpparg(arg...) arg
2637 diff --git a/init/initramfs.c b/init/initramfs.c
2638 index 4c00edc..1fd59b8 100644
2639 --- a/init/initramfs.c
2640 +++ b/init/initramfs.c
2641 @@ -455,7 +455,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
2642 compress_name);
2643 message = msg_buf;
2644 }
2645 - }
2646 + } else
2647 + error("junk in compressed archive");
2648 if (state != Reset)
2649 error("junk in compressed archive");
2650 this_header = saved_offset + my_inptr;
2651 diff --git a/kernel/cred.c b/kernel/cred.c
2652 index 1ed8ca1..099f5e6 100644
2653 --- a/kernel/cred.c
2654 +++ b/kernel/cred.c
2655 @@ -786,8 +786,6 @@ bool creds_are_invalid(const struct cred *cred)
2656 {
2657 if (cred->magic != CRED_MAGIC)
2658 return true;
2659 - if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
2660 - return true;
2661 #ifdef CONFIG_SECURITY_SELINUX
2662 if (selinux_is_enabled()) {
2663 if ((unsigned long) cred->security < PAGE_SIZE)
2664 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
2665 index 447e8db..72df1eb 100644
2666 --- a/kernel/perf_event.c
2667 +++ b/kernel/perf_event.c
2668 @@ -4609,7 +4609,7 @@ err_fput_free_put_context:
2669
2670 err_free_put_context:
2671 if (err < 0)
2672 - kfree(event);
2673 + free_event(event);
2674
2675 err_put_context:
2676 if (err < 0)
2677 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
2678 index 405cb85..374d4ee 100644
2679 --- a/kernel/trace/trace.h
2680 +++ b/kernel/trace/trace.h
2681 @@ -746,7 +746,8 @@ extern const char *__stop___trace_bprintk_fmt[];
2682
2683 #undef FTRACE_ENTRY
2684 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
2685 - extern struct ftrace_event_call event_##call;
2686 + extern struct ftrace_event_call \
2687 + __attribute__((__aligned__(4))) event_##call;
2688 #undef FTRACE_ENTRY_DUP
2689 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
2690 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
2691 diff --git a/lib/flex_array.c b/lib/flex_array.c
2692 index 66eef2e..41b1804 100644
2693 --- a/lib/flex_array.c
2694 +++ b/lib/flex_array.c
2695 @@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
2696 ret->element_size = element_size;
2697 ret->total_nr_elements = total;
2698 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
2699 - memset(ret->parts[0], FLEX_ARRAY_FREE,
2700 + memset(&ret->parts[0], FLEX_ARRAY_FREE,
2701 FLEX_ARRAY_BASE_BYTES_LEFT);
2702 return ret;
2703 }
2704 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2705 index 5d7601b..220c22a 100644
2706 --- a/mm/hugetlb.c
2707 +++ b/mm/hugetlb.c
2708 @@ -545,6 +545,7 @@ static void free_huge_page(struct page *page)
2709
2710 mapping = (struct address_space *) page_private(page);
2711 set_page_private(page, 0);
2712 + page->mapping = NULL;
2713 BUG_ON(page_count(page));
2714 INIT_LIST_HEAD(&page->lru);
2715
2716 @@ -2095,8 +2096,10 @@ retry:
2717 spin_lock(&inode->i_lock);
2718 inode->i_blocks += blocks_per_huge_page(h);
2719 spin_unlock(&inode->i_lock);
2720 - } else
2721 + } else {
2722 lock_page(page);
2723 + page->mapping = HUGETLB_POISON;
2724 + }
2725 }
2726
2727 /*
2728 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2729 index 66035bf..ba9a0aa 100644
2730 --- a/mm/memcontrol.c
2731 +++ b/mm/memcontrol.c
2732 @@ -2008,12 +2008,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2733 }
2734 unlock_page_cgroup(pc);
2735
2736 + *ptr = mem;
2737 if (mem) {
2738 - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
2739 + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
2740 page);
2741 css_put(&mem->css);
2742 }
2743 - *ptr = mem;
2744 return ret;
2745 }
2746
2747 diff --git a/net/dccp/probe.c b/net/dccp/probe.c
2748 index 37731da..4875998 100644
2749 --- a/net/dccp/probe.c
2750 +++ b/net/dccp/probe.c
2751 @@ -164,7 +164,8 @@ static __init int dccpprobe_init(void)
2752 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
2753 goto err0;
2754
2755 - ret = register_jprobe(&dccp_send_probe);
2756 + ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
2757 + "dccp");
2758 if (ret)
2759 goto err1;
2760
2761 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
2762 index 9144ef0..2e08921 100644
2763 --- a/net/mac80211/agg-tx.c
2764 +++ b/net/mac80211/agg-tx.c
2765 @@ -181,7 +181,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
2766 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
2767 HT_ADDBA_REQUESTED_MSK) {
2768 spin_unlock_bh(&sta->lock);
2769 - *state = HT_AGG_STATE_IDLE;
2770 #ifdef CONFIG_MAC80211_HT_DEBUG
2771 printk(KERN_DEBUG "timer expired on tid %d but we are not "
2772 "(or no longer) expecting addBA response there",
2773 diff --git a/security/inode.c b/security/inode.c
2774 index f7496c6..3d78d69 100644
2775 --- a/security/inode.c
2776 +++ b/security/inode.c
2777 @@ -168,13 +168,13 @@ static int create_by_name(const char *name, mode_t mode,
2778
2779 mutex_lock(&parent->d_inode->i_mutex);
2780 *dentry = lookup_one_len(name, parent, strlen(name));
2781 - if (!IS_ERR(dentry)) {
2782 + if (!IS_ERR(*dentry)) {
2783 if ((mode & S_IFMT) == S_IFDIR)
2784 error = mkdir(parent->d_inode, *dentry, mode);
2785 else
2786 error = create(parent->d_inode, *dentry, mode);
2787 } else
2788 - error = PTR_ERR(dentry);
2789 + error = PTR_ERR(*dentry);
2790 mutex_unlock(&parent->d_inode->i_mutex);
2791
2792 return error;
2793 diff --git a/security/keys/request_key.c b/security/keys/request_key.c
2794 index 03fe63e..9ac7bfd 100644
2795 --- a/security/keys/request_key.c
2796 +++ b/security/keys/request_key.c
2797 @@ -336,8 +336,10 @@ static int construct_alloc_key(struct key_type *type,
2798
2799 key_already_present:
2800 mutex_unlock(&key_construction_mutex);
2801 - if (dest_keyring)
2802 + if (dest_keyring) {
2803 + __key_link(dest_keyring, key_ref_to_ptr(key_ref));
2804 up_write(&dest_keyring->sem);
2805 + }
2806 mutex_unlock(&user->cons_lock);
2807 key_put(key);
2808 *_key = key = key_ref_to_ptr(key_ref);
2809 @@ -428,6 +430,11 @@ struct key *request_key_and_link(struct key_type *type,
2810
2811 if (!IS_ERR(key_ref)) {
2812 key = key_ref_to_ptr(key_ref);
2813 + if (dest_keyring) {
2814 + construct_get_dest_keyring(&dest_keyring);
2815 + key_link(dest_keyring, key);
2816 + key_put(dest_keyring);
2817 + }
2818 } else if (PTR_ERR(key_ref) != -EAGAIN) {
2819 key = ERR_CAST(key_ref);
2820 } else {
2821 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2822 index eb998c2..cfb11c0 100644
2823 --- a/sound/pci/hda/hda_intel.c
2824 +++ b/sound/pci/hda/hda_intel.c
2825 @@ -2236,6 +2236,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2826 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
2827 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
2828 SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
2829 + SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
2830 SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
2831 {}
2832 };
2833 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2834 index 79afb46..3d2e8da 100644
2835 --- a/sound/pci/hda/patch_conexant.c
2836 +++ b/sound/pci/hda/patch_conexant.c
2837 @@ -1175,9 +1175,11 @@ static int patch_cxt5045(struct hda_codec *codec)
2838
2839 switch (codec->subsystem_id >> 16) {
2840 case 0x103c:
2841 - /* HP laptop has a really bad sound over 0dB on NID 0x17.
2842 - * Fix max PCM level to 0 dB
2843 - * (originall it has 0x2b steps with 0dB offset 0x14)
2844 + case 0x1631:
2845 + case 0x1734:
2846 + /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
2847 + * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
2848 + * (originally it has 0x2b steps with 0dB offset 0x14)
2849 */
2850 snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
2851 (0x14 << AC_AMPCAP_OFFSET_SHIFT) |
2852 @@ -2348,6 +2350,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2853 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
2854 CXT5066_DELL_LAPTOP),
2855 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
2856 + SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
2857 + SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
2858 {}
2859 };
2860
2861 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2862 index 3d1ad71..a9bdccc 100644
2863 --- a/sound/pci/hda/patch_realtek.c
2864 +++ b/sound/pci/hda/patch_realtek.c
2865 @@ -3971,7 +3971,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
2866 SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
2867 SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
2868 SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
2869 - SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
2870 + SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
2871 SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
2872 SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
2873 SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
2874 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2875 index 86de305..a519a72 100644
2876 --- a/sound/pci/hda/patch_sigmatel.c
2877 +++ b/sound/pci/hda/patch_sigmatel.c
2878 @@ -1592,6 +1592,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
2879 "Dell Studio 1555", STAC_DELL_M6_DMIC),
2880 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
2881 "Dell Studio 1557", STAC_DELL_M6_DMIC),
2882 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
2883 + "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
2884 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
2885 + "Dell Studio 1558", STAC_DELL_M6_BOTH),
2886 {} /* terminator */
2887 };
2888
2889 @@ -1712,6 +1716,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
2890 "HP HDX", STAC_HP_HDX), /* HDX16 */
2891 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
2892 "HP dv6", STAC_HP_DV5),
2893 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
2894 + "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
2895 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
2896 "HP", STAC_HP_DV5),
2897 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
2898 diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
2899 index 75283fb..c2311f8 100644
2900 --- a/sound/pci/maestro3.c
2901 +++ b/sound/pci/maestro3.c
2902 @@ -849,6 +849,7 @@ struct snd_m3 {
2903 struct snd_kcontrol *master_switch;
2904 struct snd_kcontrol *master_volume;
2905 struct tasklet_struct hwvol_tq;
2906 + unsigned int in_suspend;
2907
2908 #ifdef CONFIG_PM
2909 u16 *suspend_mem;
2910 @@ -884,6 +885,7 @@ static struct pci_device_id snd_m3_ids[] = {
2911 MODULE_DEVICE_TABLE(pci, snd_m3_ids);
2912
2913 static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
2914 + SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
2915 SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
2916 SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
2917 SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
2918 @@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
2919 outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
2920 outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
2921
2922 + /* Ignore spurious HV interrupts during suspend / resume, this avoids
2923 + mistaking them for a mute button press. */
2924 + if (chip->in_suspend)
2925 + return;
2926 +
2927 if (!chip->master_switch || !chip->master_volume)
2928 return;
2929
2930 @@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
2931 if (chip->suspend_mem == NULL)
2932 return 0;
2933
2934 + chip->in_suspend = 1;
2935 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
2936 snd_pcm_suspend_all(chip->pcm);
2937 snd_ac97_suspend(chip->ac97);
2938 @@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci)
2939 snd_m3_hv_init(chip);
2940
2941 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
2942 + chip->in_suspend = 0;
2943 return 0;
2944 }
2945 #endif /* CONFIG_PM */
