
Contents of /genpatches-2.6/tags/2.6.32-47/1024_linux-2.6.32.25.patch



Revision 2037
Wed Dec 28 14:38:55 2011 UTC by psomas
File size: 65919 bytes
2.6.32-47 release
1 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
2 index c38afdb..0a3cf9e 100644
3 --- a/arch/powerpc/kernel/head_64.S
4 +++ b/arch/powerpc/kernel/head_64.S
5 @@ -563,15 +563,21 @@ __secondary_start:
6 /* Set thread priority to MEDIUM */
7 HMT_MEDIUM
8
9 - /* Do early setup for that CPU (stab, slb, hash table pointer) */
10 - bl .early_setup_secondary
11 -
12 /* Initialize the kernel stack. Just a repeat for iSeries. */
13 LOAD_REG_ADDR(r3, current_set)
14 sldi r28,r24,3 /* get current_set[cpu#] */
15 - ldx r1,r3,r28
16 - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
17 - std r1,PACAKSAVE(r13)
18 + ldx r14,r3,r28
19 + addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
20 + std r14,PACAKSAVE(r13)
21 +
22 + /* Do early setup for that CPU (stab, slb, hash table pointer) */
23 + bl .early_setup_secondary
24 +
25 + /*
26 + * setup the new stack pointer, but *don't* use this until
27 + * translation is on.
28 + */
29 + mr r1, r14
30
31 /* Clear backchain so we get nice backtraces */
32 li r7,0
33 diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
34 index 635d16d..9fcf26c 100644
35 --- a/arch/um/drivers/ubd_kern.c
36 +++ b/arch/um/drivers/ubd_kern.c
37 @@ -160,6 +160,7 @@ struct ubd {
38 struct scatterlist sg[MAX_SG];
39 struct request *request;
40 int start_sg, end_sg;
41 + sector_t rq_pos;
42 };
43
44 #define DEFAULT_COW { \
45 @@ -184,6 +185,7 @@ struct ubd {
46 .request = NULL, \
47 .start_sg = 0, \
48 .end_sg = 0, \
49 + .rq_pos = 0, \
50 }
51
52 /* Protected by ubd_lock */
53 @@ -1222,7 +1224,6 @@ static void do_ubd_request(struct request_queue *q)
54 {
55 struct io_thread_req *io_req;
56 struct request *req;
57 - sector_t sector;
58 int n;
59
60 while(1){
61 @@ -1233,12 +1234,12 @@ static void do_ubd_request(struct request_queue *q)
62 return;
63
64 dev->request = req;
65 + dev->rq_pos = blk_rq_pos(req);
66 dev->start_sg = 0;
67 dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
68 }
69
70 req = dev->request;
71 - sector = blk_rq_pos(req);
72 while(dev->start_sg < dev->end_sg){
73 struct scatterlist *sg = &dev->sg[dev->start_sg];
74
75 @@ -1250,10 +1251,9 @@ static void do_ubd_request(struct request_queue *q)
76 return;
77 }
78 prepare_request(req, io_req,
79 - (unsigned long long)sector << 9,
80 + (unsigned long long)dev->rq_pos << 9,
81 sg->offset, sg->length, sg_page(sg));
82
83 - sector += sg->length >> 9;
84 n = os_write_file(thread_fd, &io_req,
85 sizeof(struct io_thread_req *));
86 if(n != sizeof(struct io_thread_req *)){
87 @@ -1266,6 +1266,7 @@ static void do_ubd_request(struct request_queue *q)
88 return;
89 }
90
91 + dev->rq_pos += sg->length >> 9;
92 dev->start_sg++;
93 }
94 dev->end_sg = 0;
95 diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
96 index 2a2cc7a..7beb491 100644
97 --- a/arch/x86/include/asm/amd_iommu_types.h
98 +++ b/arch/x86/include/asm/amd_iommu_types.h
99 @@ -305,6 +305,9 @@ struct amd_iommu {
100 /* capabilities of that IOMMU read from ACPI */
101 u32 cap;
102
103 + /* flags read from acpi table */
104 + u8 acpi_flags;
105 +
106 /*
107 * Capability pointer. There could be more than one IOMMU per PCI
108 * device function if there are more than one AMD IOMMU capability
109 @@ -348,6 +351,15 @@ struct amd_iommu {
110
111 /* default dma_ops domain for that IOMMU */
112 struct dma_ops_domain *default_dom;
113 +
114 + /*
115 + * This array is required to work around a potential BIOS bug.
116 + * The BIOS may miss to restore parts of the PCI configuration
117 + * space when the system resumes from S3. The result is that the
118 + * IOMMU does not execute commands anymore which leads to system
119 + * failure.
120 + */
121 + u32 cache_cfg[4];
122 };
123
124 /*
125 @@ -469,4 +481,10 @@ static inline void amd_iommu_stats_init(void) { }
126 /* some function prototypes */
127 extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
128
129 +static inline bool is_rd890_iommu(struct pci_dev *pdev)
130 +{
131 + return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
132 + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
133 +}
134 +
135 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
136 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
137 index d8e5d0c..d1911ab 100644
138 --- a/arch/x86/kernel/Makefile
139 +++ b/arch/x86/kernel/Makefile
140 @@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
141 CFLAGS_REMOVE_tsc.o = -pg
142 CFLAGS_REMOVE_rtc.o = -pg
143 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
144 +CFLAGS_REMOVE_pvclock.o = -pg
145 +CFLAGS_REMOVE_kvmclock.o = -pg
146 CFLAGS_REMOVE_ftrace.o = -pg
147 CFLAGS_REMOVE_early_printk.o = -pg
148 endif
149 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
150 index f0fa7a1..7cd33f7 100644
151 --- a/arch/x86/kernel/amd_iommu.c
152 +++ b/arch/x86/kernel/amd_iommu.c
153 @@ -1688,6 +1688,7 @@ static void __unmap_single(struct amd_iommu *iommu,
154 size_t size,
155 int dir)
156 {
157 + dma_addr_t flush_addr;
158 dma_addr_t i, start;
159 unsigned int pages;
160
161 @@ -1695,6 +1696,7 @@ static void __unmap_single(struct amd_iommu *iommu,
162 (dma_addr + size > dma_dom->aperture_size))
163 return;
164
165 + flush_addr = dma_addr;
166 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
167 dma_addr &= PAGE_MASK;
168 start = dma_addr;
169 @@ -1709,7 +1711,7 @@ static void __unmap_single(struct amd_iommu *iommu,
170 dma_ops_free_addresses(dma_dom, dma_addr, pages);
171
172 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
173 - iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
174 + iommu_flush_pages(iommu, dma_dom->domain.id, flush_addr, size);
175 dma_dom->need_flush = false;
176 }
177 }
178 diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
179 index 3925adf..400be99 100644
180 --- a/arch/x86/kernel/amd_iommu_init.c
181 +++ b/arch/x86/kernel/amd_iommu_init.c
182 @@ -622,6 +622,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
183 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
184 MMIO_GET_LD(range));
185 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
186 +
187 + if (is_rd890_iommu(iommu->dev)) {
188 + pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
189 + pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
190 + pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
191 + pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
192 + }
193 }
194
195 /*
196 @@ -639,29 +646,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
197 struct ivhd_entry *e;
198
199 /*
200 - * First set the recommended feature enable bits from ACPI
201 - * into the IOMMU control registers
202 + * First save the recommended feature enable bits from ACPI
203 */
204 - h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
205 - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
206 - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
207 -
208 - h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
209 - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
210 - iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
211 -
212 - h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
213 - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
214 - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
215 -
216 - h->flags & IVHD_FLAG_ISOC_EN_MASK ?
217 - iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
218 - iommu_feature_disable(iommu, CONTROL_ISOC_EN);
219 -
220 - /*
221 - * make IOMMU memory accesses cache coherent
222 - */
223 - iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
224 + iommu->acpi_flags = h->flags;
225
226 /*
227 * Done. Now parse the device entries
228 @@ -1089,6 +1076,40 @@ static void init_device_table(void)
229 }
230 }
231
232 +static void iommu_init_flags(struct amd_iommu *iommu)
233 +{
234 + iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
235 + iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
236 + iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
237 +
238 + iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
239 + iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
240 + iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
241 +
242 + iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
243 + iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
244 + iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
245 +
246 + iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
247 + iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
248 + iommu_feature_disable(iommu, CONTROL_ISOC_EN);
249 +
250 + /*
251 + * make IOMMU memory accesses cache coherent
252 + */
253 + iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
254 +}
255 +
256 +static void iommu_apply_quirks(struct amd_iommu *iommu)
257 +{
258 + if (is_rd890_iommu(iommu->dev)) {
259 + pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
260 + pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
261 + pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
262 + pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
263 + }
264 +}
265 +
266 /*
267 * This function finally enables all IOMMUs found in the system after
268 * they have been initialized
269 @@ -1099,6 +1120,8 @@ static void enable_iommus(void)
270
271 for_each_iommu(iommu) {
272 iommu_disable(iommu);
273 + iommu_apply_quirks(iommu);
274 + iommu_init_flags(iommu);
275 iommu_set_device_table(iommu);
276 iommu_enable_command_buffer(iommu);
277 iommu_enable_event_buffer(iommu);
278 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
279 index 0da6495..420e43e 100644
280 --- a/arch/x86/kernel/apic/io_apic.c
281 +++ b/arch/x86/kernel/apic/io_apic.c
282 @@ -332,14 +332,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
283
284 old_cfg = old_desc->chip_data;
285
286 - memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
287 + cfg->vector = old_cfg->vector;
288 + cfg->move_in_progress = old_cfg->move_in_progress;
289 + cpumask_copy(cfg->domain, old_cfg->domain);
290 + cpumask_copy(cfg->old_domain, old_cfg->old_domain);
291
292 init_copy_irq_2_pin(old_cfg, cfg, node);
293 }
294
295 -static void free_irq_cfg(struct irq_cfg *old_cfg)
296 +static void free_irq_cfg(struct irq_cfg *cfg)
297 {
298 - kfree(old_cfg);
299 + free_cpumask_var(cfg->domain);
300 + free_cpumask_var(cfg->old_domain);
301 + kfree(cfg);
302 }
303
304 void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
305 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
306 index cc25c2b..4e34d10 100644
307 --- a/arch/x86/kernel/cpu/common.c
308 +++ b/arch/x86/kernel/cpu/common.c
309 @@ -540,7 +540,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
310 }
311 }
312
313 -static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
314 +void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
315 {
316 u32 tfms, xlvl;
317 u32 ebx;
318 @@ -579,6 +579,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
319 if (c->extended_cpuid_level >= 0x80000007)
320 c->x86_power = cpuid_edx(0x80000007);
321
322 + init_scattered_cpuid_features(c);
323 }
324
325 static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
326 @@ -727,7 +728,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
327
328 get_model_name(c); /* Default name */
329
330 - init_scattered_cpuid_features(c);
331 detect_nopl(c);
332 }
333
334 diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
335 index 6de9a90..eb19c08 100644
336 --- a/arch/x86/kernel/cpu/cpu.h
337 +++ b/arch/x86/kernel/cpu/cpu.h
338 @@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
339 *const __x86_cpu_dev_end[];
340
341 extern void display_cacheinfo(struct cpuinfo_x86 *c);
342 +extern void get_cpu_cap(struct cpuinfo_x86 *c);
343
344 #endif
345 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
346 index 2f12d6d..6a77cca 100644
347 --- a/arch/x86/kernel/cpu/intel.c
348 +++ b/arch/x86/kernel/cpu/intel.c
349 @@ -40,6 +40,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
350 misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
351 wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
352 c->cpuid_level = cpuid_eax(0);
353 + get_cpu_cap(c);
354 }
355 }
356
357 diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
358 index 83a3d1f..8387792 100644
359 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
360 +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
361 @@ -140,6 +140,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
362 address = (low & MASK_BLKPTR_LO) >> 21;
363 if (!address)
364 break;
365 +
366 address += MCG_XBLK_ADDR;
367 } else
368 ++address;
369 @@ -147,12 +148,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
370 if (rdmsr_safe(address, &low, &high))
371 break;
372
373 - if (!(high & MASK_VALID_HI)) {
374 - if (block)
375 - continue;
376 - else
377 - break;
378 - }
379 + if (!(high & MASK_VALID_HI))
380 + continue;
381
382 if (!(high & MASK_CNTP_HI) ||
383 (high & MASK_LOCKED_HI))
384 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
385 index 19528ef..c771e1a 100644
386 --- a/arch/x86/kernel/hpet.c
387 +++ b/arch/x86/kernel/hpet.c
388 @@ -497,7 +497,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
389 {
390 unsigned int irq;
391
392 - irq = create_irq();
393 + irq = create_irq_nr(0, -1);
394 if (!irq)
395 return -EINVAL;
396
397 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
398 index 238526b..ca6b336 100644
399 --- a/arch/x86/oprofile/nmi_int.c
400 +++ b/arch/x86/oprofile/nmi_int.c
401 @@ -624,6 +624,7 @@ static int __init ppro_init(char **cpu_type)
402 case 0x0f:
403 case 0x16:
404 case 0x17:
405 + case 0x1d:
406 *cpu_type = "i386/core_2";
407 break;
408 case 0x1a:
409 diff --git a/block/bsg.c b/block/bsg.c
410 index 0676301..7154a7a 100644
411 --- a/block/bsg.c
412 +++ b/block/bsg.c
413 @@ -424,7 +424,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
414 /*
415 * fill in all the output members
416 */
417 - hdr->device_status = status_byte(rq->errors);
418 + hdr->device_status = rq->errors & 0xff;
419 hdr->transport_status = host_byte(rq->errors);
420 hdr->driver_status = driver_byte(rq->errors);
421 hdr->info = 0;
422 diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
423 index 81e64f4..d46e256 100644
424 --- a/drivers/acpi/acpica/aclocal.h
425 +++ b/drivers/acpi/acpica/aclocal.h
426 @@ -846,6 +846,7 @@ struct acpi_bit_register_info {
427 ACPI_BITMASK_POWER_BUTTON_STATUS | \
428 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
429 ACPI_BITMASK_RT_CLOCK_STATUS | \
430 + ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
431 ACPI_BITMASK_WAKE_STATUS)
432
433 #define ACPI_BITMASK_TIMER_ENABLE 0x0001
434 diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
435 index 2815df6..ab645bb 100644
436 --- a/drivers/acpi/blacklist.c
437 +++ b/drivers/acpi/blacklist.c
438 @@ -218,6 +218,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
439 },
440 },
441 {
442 + .callback = dmi_disable_osi_vista,
443 + .ident = "Toshiba Satellite L355",
444 + .matches = {
445 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
446 + DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
447 + },
448 + },
449 + {
450 .callback = dmi_disable_osi_win7,
451 .ident = "ASUS K50IJ",
452 .matches = {
453 @@ -225,6 +233,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
454 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
455 },
456 },
457 + {
458 + .callback = dmi_disable_osi_vista,
459 + .ident = "Toshiba P305D",
460 + .matches = {
461 + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
462 + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
463 + },
464 + },
465
466 /*
467 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
468 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
469 index ec742a4..7102474 100644
470 --- a/drivers/acpi/processor_core.c
471 +++ b/drivers/acpi/processor_core.c
472 @@ -134,12 +134,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
473
474 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
475 {
476 - set_no_mwait, "IFL91 board", {
477 - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
478 - DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
479 - DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
480 - DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
481 - {
482 set_no_mwait, "Extensa 5220", {
483 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
484 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
485 diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
486 index 466ab10..f2b44d5 100644
487 --- a/drivers/dma/mv_xor.c
488 +++ b/drivers/dma/mv_xor.c
489 @@ -161,7 +161,7 @@ static int mv_is_err_intr(u32 intr_cause)
490
491 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
492 {
493 - u32 val = (1 << (1 + (chan->idx * 16)));
494 + u32 val = ~(1 << (chan->idx * 16));
495 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
496 __raw_writel(val, XOR_INTR_CAUSE(chan));
497 }
498 diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
499 index f7346a9..62a5ce5 100644
500 --- a/drivers/i2c/busses/i2c-pca-isa.c
501 +++ b/drivers/i2c/busses/i2c-pca-isa.c
502 @@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg)
503
504 static int pca_isa_waitforcompletion(void *pd)
505 {
506 - long ret = ~0;
507 unsigned long timeout;
508 + long ret;
509
510 if (irq > -1) {
511 ret = wait_event_timeout(pca_wait,
512 @@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd)
513 } else {
514 /* Do polling */
515 timeout = jiffies + pca_isa_ops.timeout;
516 - while (((pca_isa_readbyte(pd, I2C_PCA_CON)
517 - & I2C_PCA_CON_SI) == 0)
518 - && (ret = time_before(jiffies, timeout)))
519 + do {
520 + ret = time_before(jiffies, timeout);
521 + if (pca_isa_readbyte(pd, I2C_PCA_CON)
522 + & I2C_PCA_CON_SI)
523 + break;
524 udelay(100);
525 + } while (ret);
526 }
527 +
528 return ret > 0;
529 }
530
531 diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
532 index 5b2213d..1d8c208 100644
533 --- a/drivers/i2c/busses/i2c-pca-platform.c
534 +++ b/drivers/i2c/busses/i2c-pca-platform.c
535 @@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val)
536 static int i2c_pca_pf_waitforcompletion(void *pd)
537 {
538 struct i2c_pca_pf_data *i2c = pd;
539 - long ret = ~0;
540 unsigned long timeout;
541 + long ret;
542
543 if (i2c->irq) {
544 ret = wait_event_timeout(i2c->wait,
545 @@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
546 } else {
547 /* Do polling */
548 timeout = jiffies + i2c->adap.timeout;
549 - while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
550 - & I2C_PCA_CON_SI) == 0)
551 - && (ret = time_before(jiffies, timeout)))
552 + do {
553 + ret = time_before(jiffies, timeout);
554 + if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
555 + & I2C_PCA_CON_SI)
556 + break;
557 udelay(100);
558 + } while (ret);
559 }
560
561 return ret > 0;
562 diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
563 index 66b4135..675fc04 100644
564 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
565 +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
566 @@ -486,7 +486,8 @@ static int send_connect(struct iwch_ep *ep)
567 V_MSS_IDX(mtu_idx) |
568 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
569 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
570 - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
571 + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
572 + V_CONG_CONTROL_FLAVOR(cong_flavor);
573 skb->priority = CPL_PRIORITY_SETUP;
574 set_arp_failure_handler(skb, act_open_req_arp_failure);
575
576 @@ -1303,7 +1304,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
577 V_MSS_IDX(mtu_idx) |
578 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
579 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
580 - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
581 + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
582 + V_CONG_CONTROL_FLAVOR(cong_flavor);
583
584 rpl = cplhdr(skb);
585 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
586 diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
587 index b1bd6dd..93c60e0 100644
588 --- a/drivers/input/joydev.c
589 +++ b/drivers/input/joydev.c
590 @@ -481,6 +481,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
591
592 memcpy(joydev->abspam, abspam, len);
593
594 + for (i = 0; i < joydev->nabs; i++)
595 + joydev->absmap[joydev->abspam[i]] = i;
596 +
597 out:
598 kfree(abspam);
599 return retval;
600 diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
601 index 319c459..dd30b9d 100644
602 --- a/drivers/media/video/cx231xx/cx231xx-cards.c
603 +++ b/drivers/media/video/cx231xx/cx231xx-cards.c
604 @@ -225,14 +225,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
605 dev->board.name, dev->model);
606
607 /* set the direction for GPIO pins */
608 - cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
609 - cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
610 - cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
611 + if (dev->board.tuner_gpio) {
612 + cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
613 + cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1);
614 + cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1);
615
616 - /* request some modules if any required */
617 + /* request some modules if any required */
618
619 - /* reset the Tuner */
620 - cx231xx_gpio_set(dev, dev->board.tuner_gpio);
621 + /* reset the Tuner */
622 + cx231xx_gpio_set(dev, dev->board.tuner_gpio);
623 + }
624
625 /* set the mode to Analog mode initially */
626 cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
627 diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
628 index f87757f..09d4223 100644
629 --- a/drivers/media/video/saa7134/saa7134-core.c
630 +++ b/drivers/media/video/saa7134/saa7134-core.c
631 @@ -420,19 +420,6 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
632 ctrl |= SAA7134_MAIN_CTRL_TE5;
633 irq |= SAA7134_IRQ1_INTE_RA2_1 |
634 SAA7134_IRQ1_INTE_RA2_0;
635 -
636 - /* dma: setup channel 5 (= TS) */
637 -
638 - saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
639 - saa_writeb(SAA7134_TS_DMA1,
640 - ((dev->ts.nr_packets - 1) >> 8) & 0xff);
641 - /* TSNOPIT=0, TSCOLAP=0 */
642 - saa_writeb(SAA7134_TS_DMA2,
643 - (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
644 - saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
645 - saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
646 - SAA7134_RS_CONTROL_ME |
647 - (dev->ts.pt_ts.dma >> 12));
648 }
649
650 /* set task conditions + field handling */
651 diff --git a/drivers/media/video/saa7134/saa7134-ts.c b/drivers/media/video/saa7134/saa7134-ts.c
652 index 03488ba..b9817d7 100644
653 --- a/drivers/media/video/saa7134/saa7134-ts.c
654 +++ b/drivers/media/video/saa7134/saa7134-ts.c
655 @@ -250,6 +250,19 @@ int saa7134_ts_start(struct saa7134_dev *dev)
656
657 BUG_ON(dev->ts_started);
658
659 + /* dma: setup channel 5 (= TS) */
660 + saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
661 + saa_writeb(SAA7134_TS_DMA1,
662 + ((dev->ts.nr_packets - 1) >> 8) & 0xff);
663 + /* TSNOPIT=0, TSCOLAP=0 */
664 + saa_writeb(SAA7134_TS_DMA2,
665 + (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
666 + saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
667 + saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
668 + SAA7134_RS_CONTROL_ME |
669 + (dev->ts.pt_ts.dma >> 12));
670 +
671 + /* reset hardware TS buffers */
672 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
673 saa_writeb(SAA7134_TS_SERIAL1, 0x03);
674 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
675 diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
676 index 997975d..64076ff 100644
677 --- a/drivers/media/video/v4l2-compat-ioctl32.c
678 +++ b/drivers/media/video/v4l2-compat-ioctl32.c
679 @@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u
680 struct video_code32 {
681 char loadwhat[16]; /* name or tag of file being passed */
682 compat_int_t datasize;
683 - unsigned char *data;
684 + compat_uptr_t data;
685 };
686
687 -static int get_microcode32(struct video_code *kp, struct video_code32 __user *up)
688 +static struct video_code __user *get_microcode32(struct video_code32 *kp)
689 {
690 - if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) ||
691 - copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) ||
692 - get_user(kp->datasize, &up->datasize) ||
693 - copy_from_user(kp->data, up->data, up->datasize))
694 - return -EFAULT;
695 - return 0;
696 + struct video_code __user *up;
697 +
698 + up = compat_alloc_user_space(sizeof(*up));
699 +
700 + /*
701 + * NOTE! We don't actually care if these fail. If the
702 + * user address is invalid, the native ioctl will do
703 + * the error handling for us
704 + */
705 + (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat));
706 + (void) put_user(kp->datasize, &up->datasize);
707 + (void) put_user(compat_ptr(kp->data), &up->data);
708 + return up;
709 }
710
711 #define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32)
712 @@ -741,7 +748,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
713 struct video_tuner vt;
714 struct video_buffer vb;
715 struct video_window vw;
716 - struct video_code vc;
717 + struct video_code32 vc;
718 struct video_audio va;
719 #endif
720 struct v4l2_format v2f;
721 @@ -820,8 +827,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
722 break;
723
724 case VIDIOCSMICROCODE:
725 - err = get_microcode32(&karg.vc, up);
726 - compatible_arg = 0;
727 + /* Copy the 32-bit "video_code32" to kernel space */
728 + if (copy_from_user(&karg.vc, up, sizeof(karg.vc)))
729 + return -EFAULT;
730 + /* Convert the 32-bit version to a 64-bit version in user space */
731 + up = get_microcode32(&karg.vc);
732 break;
733
734 case VIDIOCSFREQ:
735 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
736 index 676cd0c..14c5480 100644
737 --- a/drivers/mmc/host/sdhci-s3c.c
738 +++ b/drivers/mmc/host/sdhci-s3c.c
739 @@ -379,8 +379,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
740 sdhci_remove_host(host, 1);
741
742 for (ptr = 0; ptr < 3; ptr++) {
743 - clk_disable(sc->clk_bus[ptr]);
744 - clk_put(sc->clk_bus[ptr]);
745 + if (sc->clk_bus[ptr]) {
746 + clk_disable(sc->clk_bus[ptr]);
747 + clk_put(sc->clk_bus[ptr]);
748 + }
749 }
750 clk_disable(sc->clk_io);
751 clk_put(sc->clk_io);
752 diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
753 index 00569dc..403bfb6 100644
754 --- a/drivers/net/atlx/atl1.c
755 +++ b/drivers/net/atlx/atl1.c
756 @@ -2856,10 +2856,11 @@ static int atl1_resume(struct pci_dev *pdev)
757 pci_enable_wake(pdev, PCI_D3cold, 0);
758
759 atl1_reset_hw(&adapter->hw);
760 - adapter->cmb.cmb->int_stats = 0;
761
762 - if (netif_running(netdev))
763 + if (netif_running(netdev)) {
764 + adapter->cmb.cmb->int_stats = 0;
765 atl1_up(adapter);
766 + }
767 netif_device_attach(netdev);
768
769 return 0;
770 diff --git a/drivers/net/b44.c b/drivers/net/b44.c
771 index 4869adb..137cb03 100644
772 --- a/drivers/net/b44.c
773 +++ b/drivers/net/b44.c
774 @@ -2175,8 +2175,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
775 dev->irq = sdev->irq;
776 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
777
778 - netif_carrier_off(dev);
779 -
780 err = ssb_bus_powerup(sdev->bus, 0);
781 if (err) {
782 dev_err(sdev->dev,
783 @@ -2216,6 +2214,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
784 goto err_out_powerdown;
785 }
786
787 + netif_carrier_off(dev);
788 +
789 ssb_set_drvdata(sdev, dev);
790
791 /* Chip reset provides power to the b44 MAC & PCI cores, which
792 diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
793 index 8a09043..0f3ae46 100644
794 --- a/drivers/net/netxen/netxen_nic_init.c
795 +++ b/drivers/net/netxen/netxen_nic_init.c
796 @@ -1199,7 +1199,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
797 if (pkt_offset)
798 skb_pull(skb, pkt_offset);
799
800 - skb->truesize = skb->len + sizeof(struct sk_buff);
801 skb->protocol = eth_type_trans(skb, netdev);
802
803 napi_gro_receive(&sds_ring->napi, skb);
804 @@ -1261,8 +1260,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
805
806 skb_put(skb, lro_length + data_offset);
807
808 - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
809 -
810 skb_pull(skb, l2_hdr_offset);
811 skb->protocol = eth_type_trans(skb, netdev);
812
813 diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
814 index 8b14c6e..9ee9f01 100644
815 --- a/drivers/net/r6040.c
816 +++ b/drivers/net/r6040.c
817 @@ -135,7 +135,7 @@
818 #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
819 #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
820 #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
821 -#define MCAST_MAX 4 /* Max number multicast addresses to filter */
822 +#define MCAST_MAX 3 /* Max number multicast addresses to filter */
823
824 /* Descriptor status */
825 #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
826 @@ -985,9 +985,6 @@ static void r6040_multicast_list(struct net_device *dev)
827 crc >>= 26;
828 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
829 }
830 - /* Write the index of the hash table */
831 - for (i = 0; i < 4; i++)
832 - iowrite16(hash_table[i] << 14, ioaddr + MCR1);
833 /* Fill the MAC hash tables with their values */
834 iowrite16(hash_table[0], ioaddr + MAR0);
835 iowrite16(hash_table[1], ioaddr + MAR1);
836 @@ -995,6 +992,7 @@ static void r6040_multicast_list(struct net_device *dev)
837 iowrite16(hash_table[3], ioaddr + MAR3);
838 }
839 /* Multicast Address 1~4 case */
840 + dmi = dev->mc_list;
841 for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
842 adrp = (u16 *)dmi->dmi_addr;
843 iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
844 @@ -1003,9 +1001,9 @@ static void r6040_multicast_list(struct net_device *dev)
845 dmi = dmi->next;
846 }
847 for (i = dev->mc_count; i < MCAST_MAX; i++) {
848 - iowrite16(0xffff, ioaddr + MID_0L + 8*i);
849 - iowrite16(0xffff, ioaddr + MID_0M + 8*i);
850 - iowrite16(0xffff, ioaddr + MID_0H + 8*i);
851 + iowrite16(0xffff, ioaddr + MID_1L + 8*i);
852 + iowrite16(0xffff, ioaddr + MID_1M + 8*i);
853 + iowrite16(0xffff, ioaddr + MID_1H + 8*i);
854 }
855 }
856
857 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
858 index 62e784a..7dd2132 100644
859 --- a/drivers/net/r8169.c
860 +++ b/drivers/net/r8169.c
861 @@ -3999,7 +3999,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
862 static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
863 struct net_device *dev,
864 struct RxDesc *desc, int rx_buf_sz,
865 - unsigned int align)
866 + unsigned int align, gfp_t gfp)
867 {
868 struct sk_buff *skb;
869 dma_addr_t mapping;
870 @@ -4007,7 +4007,7 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
871
872 pad = align ? align : NET_IP_ALIGN;
873
874 - skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
875 + skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
876 if (!skb)
877 goto err_out;
878
879 @@ -4038,7 +4038,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
880 }
881
882 static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
883 - u32 start, u32 end)
884 + u32 start, u32 end, gfp_t gfp)
885 {
886 u32 cur;
887
888 @@ -4053,7 +4053,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
889
890 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
891 tp->RxDescArray + i,
892 - tp->rx_buf_sz, tp->align);
893 + tp->rx_buf_sz, tp->align, gfp);
894 if (!skb)
895 break;
896
897 @@ -4081,7 +4081,7 @@ static int rtl8169_init_ring(struct net_device *dev)
898 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
899 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
900
901 - if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
902 + if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
903 goto err_out;
904
905 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
906 @@ -4584,7 +4584,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
907 count = cur_rx - tp->cur_rx;
908 tp->cur_rx = cur_rx;
909
910 - delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
911 + delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
912 if (!delta && count && netif_msg_intr(tp))
913 printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
914 tp->dirty_rx += delta;
915 diff --git a/drivers/net/skge.c b/drivers/net/skge.c
916 index 8f54143..5b07e00 100644
917 --- a/drivers/net/skge.c
918 +++ b/drivers/net/skge.c
919 @@ -40,6 +40,7 @@
920 #include <linux/sched.h>
921 #include <linux/seq_file.h>
922 #include <linux/mii.h>
923 +#include <linux/dmi.h>
924 #include <asm/irq.h>
925
926 #include "skge.h"
927 @@ -3890,6 +3891,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
928 dev->name, dev->dev_addr);
929 }
930
931 +static int only_32bit_dma;
932 +
933 static int __devinit skge_probe(struct pci_dev *pdev,
934 const struct pci_device_id *ent)
935 {
936 @@ -3911,7 +3914,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
937
938 pci_set_master(pdev);
939
940 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
941 + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
942 using_dac = 1;
943 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
944 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
945 @@ -4168,8 +4171,21 @@ static struct pci_driver skge_driver = {
946 .shutdown = skge_shutdown,
947 };
948
949 +static struct dmi_system_id skge_32bit_dma_boards[] = {
950 + {
951 + .ident = "Gigabyte nForce boards",
952 + .matches = {
953 + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
954 + DMI_MATCH(DMI_BOARD_NAME, "nForce"),
955 + },
956 + },
957 + {}
958 +};
959 +
960 static int __init skge_init_module(void)
961 {
962 + if (dmi_check_system(skge_32bit_dma_boards))
963 + only_32bit_dma = 1;
964 skge_debug_init();
965 return pci_register_driver(&skge_driver);
966 }
967 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
968 index bf6bd67..4633fc2 100644
969 --- a/drivers/pci/quirks.c
970 +++ b/drivers/pci/quirks.c
971 @@ -155,6 +155,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
972 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
973
974 /*
975 + * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
976 + * for some HT machines to use C4 w/o hanging.
977 + */
978 +static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
979 +{
980 + u32 pmbase;
981 + u16 pm1a;
982 +
983 + pci_read_config_dword(dev, 0x40, &pmbase);
984 + pmbase = pmbase & 0xff80;
985 + pm1a = inw(pmbase);
986 +
987 + if (pm1a & 0x10) {
988 + dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
989 + outw(0x10, pmbase);
990 + }
991 +}
992 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
993 +
994 +/*
995 * Chipsets where PCI->PCI transfers vanish or hang
996 */
997 static void __devinit quirk_nopcipci(struct pci_dev *dev)
998 diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
999 index 222ee07..8164ba5 100644
1000 --- a/drivers/usb/core/file.c
1001 +++ b/drivers/usb/core/file.c
1002 @@ -159,9 +159,9 @@ void usb_major_cleanup(void)
1003 int usb_register_dev(struct usb_interface *intf,
1004 struct usb_class_driver *class_driver)
1005 {
1006 - int retval = -EINVAL;
1007 + int retval;
1008 int minor_base = class_driver->minor_base;
1009 - int minor = 0;
1010 + int minor;
1011 char name[20];
1012 char *temp;
1013
1014 @@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
1015 */
1016 minor_base = 0;
1017 #endif
1018 - intf->minor = -1;
1019 -
1020 - dbg ("looking for a minor, starting at %d", minor_base);
1021
1022 if (class_driver->fops == NULL)
1023 - goto exit;
1024 + return -EINVAL;
1025 + if (intf->minor >= 0)
1026 + return -EADDRINUSE;
1027 +
1028 + retval = init_usb_class();
1029 + if (retval)
1030 + return retval;
1031 +
1032 + dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
1033
1034 down_write(&minor_rwsem);
1035 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
1036 @@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
1037 continue;
1038
1039 usb_minors[minor] = class_driver->fops;
1040 -
1041 - retval = 0;
1042 + intf->minor = minor;
1043 break;
1044 }
1045 up_write(&minor_rwsem);
1046 -
1047 - if (retval)
1048 - goto exit;
1049 -
1050 - retval = init_usb_class();
1051 - if (retval)
1052 - goto exit;
1053 -
1054 - intf->minor = minor;
1055 + if (intf->minor < 0)
1056 + return -EXFULL;
1057
1058 /* create a usb class device for this usb interface */
1059 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
1060 @@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
1061 "%s", temp);
1062 if (IS_ERR(intf->usb_dev)) {
1063 down_write(&minor_rwsem);
1064 - usb_minors[intf->minor] = NULL;
1065 + usb_minors[minor] = NULL;
1066 + intf->minor = -1;
1067 up_write(&minor_rwsem);
1068 retval = PTR_ERR(intf->usb_dev);
1069 }
1070 -exit:
1071 return retval;
1072 }
1073 EXPORT_SYMBOL_GPL(usb_register_dev);
1074 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1075 index 980a8d2..1ca6545 100644
1076 --- a/drivers/usb/core/message.c
1077 +++ b/drivers/usb/core/message.c
1078 @@ -1792,6 +1792,7 @@ free_interfaces:
1079 intf->dev.groups = usb_interface_groups;
1080 intf->dev.dma_mask = dev->dev.dma_mask;
1081 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1082 + intf->minor = -1;
1083 device_initialize(&intf->dev);
1084 mark_quiesced(intf);
1085 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1086 diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
1087 index 74073f9..c6f5ee4 100644
1088 --- a/drivers/usb/musb/musb_gadget.c
1089 +++ b/drivers/usb/musb/musb_gadget.c
1090 @@ -577,11 +577,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
1091 {
1092 const u8 epnum = req->epnum;
1093 struct usb_request *request = &req->request;
1094 - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
1095 + struct musb_ep *musb_ep;
1096 void __iomem *epio = musb->endpoints[epnum].regs;
1097 unsigned fifo_count = 0;
1098 - u16 len = musb_ep->packet_sz;
1099 + u16 len;
1100 u16 csr = musb_readw(epio, MUSB_RXCSR);
1101 + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
1102 +
1103 + if (hw_ep->is_shared_fifo)
1104 + musb_ep = &hw_ep->ep_in;
1105 + else
1106 + musb_ep = &hw_ep->ep_out;
1107 +
1108 + len = musb_ep->packet_sz;
1109
1110 /* We shouldn't get here while DMA is active, but we do... */
1111 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
1112 @@ -749,9 +757,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
1113 u16 csr;
1114 struct usb_request *request;
1115 void __iomem *mbase = musb->mregs;
1116 - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
1117 + struct musb_ep *musb_ep;
1118 void __iomem *epio = musb->endpoints[epnum].regs;
1119 struct dma_channel *dma;
1120 + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
1121 +
1122 + if (hw_ep->is_shared_fifo)
1123 + musb_ep = &hw_ep->ep_in;
1124 + else
1125 + musb_ep = &hw_ep->ep_out;
1126
1127 musb_ep_select(mbase, epnum);
1128
1129 @@ -1074,7 +1088,7 @@ struct free_record {
1130 /*
1131 * Context: controller locked, IRQs blocked.
1132 */
1133 -static void musb_ep_restart(struct musb *musb, struct musb_request *req)
1134 +void musb_ep_restart(struct musb *musb, struct musb_request *req)
1135 {
1136 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1137 req->tx ? "TX/IN" : "RX/OUT",
1138 diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
1139 index 59502da..76711f2 100644
1140 --- a/drivers/usb/musb/musb_gadget.h
1141 +++ b/drivers/usb/musb/musb_gadget.h
1142 @@ -105,4 +105,6 @@ extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
1143
1144 extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
1145
1146 +extern void musb_ep_restart(struct musb *, struct musb_request *);
1147 +
1148 #endif /* __MUSB_GADGET_H */
1149 diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
1150 index 067e5a9..53c0444 100644
1151 --- a/drivers/usb/musb/musb_gadget_ep0.c
1152 +++ b/drivers/usb/musb/musb_gadget_ep0.c
1153 @@ -369,6 +369,7 @@ stall:
1154 ctrlrequest->wIndex & 0x0f;
1155 struct musb_ep *musb_ep;
1156 struct musb_hw_ep *ep;
1157 + struct musb_request *request;
1158 void __iomem *regs;
1159 int is_in;
1160 u16 csr;
1161 @@ -411,6 +412,14 @@ stall:
1162 csr);
1163 }
1164
1165 + /* Maybe start the first request in the queue */
1166 + request = to_musb_request(
1167 + next_request(musb_ep));
1168 + if (!musb_ep->busy && request) {
1169 + DBG(3, "restarting the request\n");
1170 + musb_ep_restart(musb, request);
1171 + }
1172 +
1173 /* select ep0 again */
1174 musb_ep_select(mbase, 0);
1175 handled = 1;
1176 diff --git a/fs/exec.c b/fs/exec.c
1177 index 56da15f..a0410eb 100644
1178 --- a/fs/exec.c
1179 +++ b/fs/exec.c
1180 @@ -376,6 +376,9 @@ static int count(char __user * __user * argv, int max)
1181 argv++;
1182 if (i++ >= max)
1183 return -E2BIG;
1184 +
1185 + if (fatal_signal_pending(current))
1186 + return -ERESTARTNOHAND;
1187 cond_resched();
1188 }
1189 }
1190 @@ -419,6 +422,12 @@ static int copy_strings(int argc, char __user * __user * argv,
1191 while (len > 0) {
1192 int offset, bytes_to_copy;
1193
1194 + if (fatal_signal_pending(current)) {
1195 + ret = -ERESTARTNOHAND;
1196 + goto out;
1197 + }
1198 + cond_resched();
1199 +
1200 offset = pos % PAGE_SIZE;
1201 if (offset == 0)
1202 offset = PAGE_SIZE;
1203 @@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
1204 #else
1205 stack_top = arch_align_stack(stack_top);
1206 stack_top = PAGE_ALIGN(stack_top);
1207 +
1208 + if (unlikely(stack_top < mmap_min_addr) ||
1209 + unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
1210 + return -ENOMEM;
1211 +
1212 stack_shift = vma->vm_end - stack_top;
1213
1214 bprm->p -= stack_shift;
1215 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
1216 index e342103..91e656f 100644
1217 --- a/fs/ocfs2/symlink.c
1218 +++ b/fs/ocfs2/symlink.c
1219 @@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
1220 }
1221
1222 /* Fast symlinks can't be large */
1223 - len = strlen(target);
1224 + len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
1225 link = kzalloc(len + 1, GFP_NOFS);
1226 if (!link) {
1227 status = -ENOMEM;
1228 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1229 index 899145d..e10ef04 100644
1230 --- a/fs/proc/task_mmu.c
1231 +++ b/fs/proc/task_mmu.c
1232 @@ -220,7 +220,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
1233 /* We don't show the stack guard page in /proc/maps */
1234 start = vma->vm_start;
1235 if (vma->vm_flags & VM_GROWSDOWN)
1236 - start += PAGE_SIZE;
1237 + if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
1238 + start += PAGE_SIZE;
1239
1240 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
1241 start,
1242 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
1243 index 0cbdccc..3d016e9 100644
1244 --- a/include/drm/drm_pciids.h
1245 +++ b/include/drm/drm_pciids.h
1246 @@ -85,7 +85,6 @@
1247 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
1248 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
1249 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
1250 - {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
1251 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
1252 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
1253 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
1254 @@ -103,6 +102,7 @@
1255 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1256 {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1257 {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1258 + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
1259 {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
1260 {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
1261 {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
1262 diff --git a/include/linux/mm.h b/include/linux/mm.h
1263 index a8d25e4..11e5be6 100644
1264 --- a/include/linux/mm.h
1265 +++ b/include/linux/mm.h
1266 @@ -841,6 +841,12 @@ int set_page_dirty(struct page *page);
1267 int set_page_dirty_lock(struct page *page);
1268 int clear_page_dirty_for_io(struct page *page);
1269
1270 +/* Is the vma a continuation of the stack vma above it? */
1271 +static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
1272 +{
1273 + return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1274 +}
1275 +
1276 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1277 unsigned long old_addr, struct vm_area_struct *new_vma,
1278 unsigned long new_addr, unsigned long len);
1279 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
1280 index 67325bf..07ed684 100644
1281 --- a/include/linux/pci_ids.h
1282 +++ b/include/linux/pci_ids.h
1283 @@ -393,6 +393,9 @@
1284 #define PCI_DEVICE_ID_VLSI_82C147 0x0105
1285 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
1286
1287 +/* AMD RD890 Chipset */
1288 +#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
1289 +
1290 #define PCI_VENDOR_ID_ADL 0x1005
1291 #define PCI_DEVICE_ID_ADL_2301 0x2301
1292
1293 diff --git a/include/linux/socket.h b/include/linux/socket.h
1294 index 3273a0c..9464cfb 100644
1295 --- a/include/linux/socket.h
1296 +++ b/include/linux/socket.h
1297 @@ -304,7 +304,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
1298 int offset,
1299 unsigned int len, __wsum *csump);
1300
1301 -extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
1302 +extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
1303 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
1304 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
1305 int offset, int len);
1306 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
1307 index 931a4d9..a6e9d00 100644
1308 --- a/kernel/hrtimer.c
1309 +++ b/kernel/hrtimer.c
1310 @@ -920,6 +920,7 @@ static inline int
1311 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
1312 {
1313 if (hrtimer_is_queued(timer)) {
1314 + unsigned long state;
1315 int reprogram;
1316
1317 /*
1318 @@ -933,8 +934,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
1319 debug_deactivate(timer);
1320 timer_stats_hrtimer_clear_start_info(timer);
1321 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
1322 - __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
1323 - reprogram);
1324 + /*
1325 + * We must preserve the CALLBACK state flag here,
1326 + * otherwise we could move the timer base in
1327 + * switch_hrtimer_base.
1328 + */
1329 + state = timer->state & HRTIMER_STATE_CALLBACK;
1330 + __remove_hrtimer(timer, base, state, reprogram);
1331 return 1;
1332 }
1333 return 0;
1334 @@ -1221,6 +1227,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
1335 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
1336 enqueue_hrtimer(timer, base);
1337 }
1338 +
1339 + WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
1340 +
1341 timer->state &= ~HRTIMER_STATE_CALLBACK;
1342 }
1343
1344 diff --git a/kernel/sched.c b/kernel/sched.c
1345 index 3480822..a675fd6 100644
1346 --- a/kernel/sched.c
1347 +++ b/kernel/sched.c
1348 @@ -7752,10 +7752,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1349 unsigned long flags;
1350 struct rq *rq;
1351
1352 - switch (action) {
1353 + switch (action & ~CPU_TASKS_FROZEN) {
1354
1355 case CPU_UP_PREPARE:
1356 - case CPU_UP_PREPARE_FROZEN:
1357 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
1358 if (IS_ERR(p))
1359 return NOTIFY_BAD;
1360 @@ -7770,7 +7769,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1361 break;
1362
1363 case CPU_ONLINE:
1364 - case CPU_ONLINE_FROZEN:
1365 /* Strictly unnecessary, as first user will wake it. */
1366 wake_up_process(cpu_rq(cpu)->migration_thread);
1367
1368 @@ -7787,7 +7785,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1369
1370 #ifdef CONFIG_HOTPLUG_CPU
1371 case CPU_UP_CANCELED:
1372 - case CPU_UP_CANCELED_FROZEN:
1373 if (!cpu_rq(cpu)->migration_thread)
1374 break;
1375 /* Unbind it from offline cpu so it can run. Fall thru. */
1376 @@ -7812,7 +7809,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1377 break;
1378
1379 case CPU_DEAD:
1380 - case CPU_DEAD_FROZEN:
1381 migrate_live_tasks(cpu);
1382 rq = cpu_rq(cpu);
1383 /* Idle task back to normal (off runqueue, low prio) */
1384 @@ -7846,7 +7842,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1385 break;
1386
1387 case CPU_DYING:
1388 - case CPU_DYING_FROZEN:
1389 /* Update our root-domain */
1390 rq = cpu_rq(cpu);
1391 spin_lock_irqsave(&rq->lock, flags);
1392 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1393 index c88b21c..e749a05 100644
1394 --- a/kernel/trace/ring_buffer.c
1395 +++ b/kernel/trace/ring_buffer.c
1396 @@ -389,7 +389,7 @@ static inline int test_time_stamp(u64 delta)
1397 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
1398
1399 /* Max number of timestamps that can fit on a page */
1400 -#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
1401 +#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
1402
1403 int ring_buffer_print_page_header(struct trace_seq *s)
1404 {
1405 diff --git a/mm/mlock.c b/mm/mlock.c
1406 index 380ea89..2d846cf 100644
1407 --- a/mm/mlock.c
1408 +++ b/mm/mlock.c
1409 @@ -138,12 +138,6 @@ void munlock_vma_page(struct page *page)
1410 }
1411 }
1412
1413 -/* Is the vma a continuation of the stack vma above it? */
1414 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
1415 -{
1416 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1417 -}
1418 -
1419 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
1420 {
1421 return (vma->vm_flags & VM_GROWSDOWN) &&
1422 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
1423 index 5aef51e..450862e 100644
1424 --- a/net/core/ethtool.c
1425 +++ b/net/core/ethtool.c
1426 @@ -311,7 +311,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1427 if (regs.len > reglen)
1428 regs.len = reglen;
1429
1430 - regbuf = kmalloc(reglen, GFP_USER);
1431 + regbuf = kzalloc(reglen, GFP_USER);
1432 if (!regbuf)
1433 return -ENOMEM;
1434
1435 diff --git a/net/core/iovec.c b/net/core/iovec.c
1436 index 16ad45d..8cee101 100644
1437 --- a/net/core/iovec.c
1438 +++ b/net/core/iovec.c
1439 @@ -36,9 +36,10 @@
1440 * in any case.
1441 */
1442
1443 -int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
1444 +long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
1445 {
1446 - int size, err, ct;
1447 + int size, ct;
1448 + long err;
1449
1450 if (m->msg_namelen) {
1451 if (mode == VERIFY_READ) {
1452 diff --git a/net/core/stream.c b/net/core/stream.c
1453 index a37debf..e48c85f 100644
1454 --- a/net/core/stream.c
1455 +++ b/net/core/stream.c
1456 @@ -140,10 +140,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
1457
1458 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1459 sk->sk_write_pending++;
1460 - sk_wait_event(sk, &current_timeo, !sk->sk_err &&
1461 - !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1462 - sk_stream_memory_free(sk) &&
1463 - vm_wait);
1464 + sk_wait_event(sk, &current_timeo, sk->sk_err ||
1465 + (sk->sk_shutdown & SEND_SHUTDOWN) ||
1466 + (sk_stream_memory_free(sk) &&
1467 + !vm_wait));
1468 sk->sk_write_pending--;
1469
1470 if (vm_wait) {
1471 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1472 index 4d50daa..2ef9026 100644
1473 --- a/net/ipv4/ip_output.c
1474 +++ b/net/ipv4/ip_output.c
1475 @@ -476,9 +476,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1476 * we can switch to copy when see the first bad fragment.
1477 */
1478 if (skb_has_frags(skb)) {
1479 - struct sk_buff *frag;
1480 + struct sk_buff *frag, *frag2;
1481 int first_len = skb_pagelen(skb);
1482 - int truesizes = 0;
1483
1484 if (first_len - hlen > mtu ||
1485 ((first_len - hlen) & 7) ||
1486 @@ -491,18 +490,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1487 if (frag->len > mtu ||
1488 ((frag->len & 7) && frag->next) ||
1489 skb_headroom(frag) < hlen)
1490 - goto slow_path;
1491 + goto slow_path_clean;
1492
1493 /* Partially cloned skb? */
1494 if (skb_shared(frag))
1495 - goto slow_path;
1496 + goto slow_path_clean;
1497
1498 BUG_ON(frag->sk);
1499 if (skb->sk) {
1500 frag->sk = skb->sk;
1501 frag->destructor = sock_wfree;
1502 }
1503 - truesizes += frag->truesize;
1504 + skb->truesize -= frag->truesize;
1505 }
1506
1507 /* Everything is OK. Generate! */
1508 @@ -512,7 +511,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1509 frag = skb_shinfo(skb)->frag_list;
1510 skb_frag_list_init(skb);
1511 skb->data_len = first_len - skb_headlen(skb);
1512 - skb->truesize -= truesizes;
1513 skb->len = first_len;
1514 iph->tot_len = htons(first_len);
1515 iph->frag_off = htons(IP_MF);
1516 @@ -564,6 +562,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1517 }
1518 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
1519 return err;
1520 +
1521 +slow_path_clean:
1522 + skb_walk_frags(skb, frag2) {
1523 + if (frag2 == frag)
1524 + break;
1525 + frag2->sk = NULL;
1526 + frag2->destructor = NULL;
1527 + skb->truesize += frag2->truesize;
1528 + }
1529 }
1530
1531 slow_path:
1532 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1533 index 5b1050a..6c8f6c9 100644
1534 --- a/net/ipv4/route.c
1535 +++ b/net/ipv4/route.c
1536 @@ -2712,6 +2712,11 @@ slow_output:
1537
1538 EXPORT_SYMBOL_GPL(__ip_route_output_key);
1539
1540 +static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
1541 +{
1542 + return NULL;
1543 +}
1544 +
1545 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
1546 {
1547 }
1548 @@ -2720,7 +2725,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
1549 .family = AF_INET,
1550 .protocol = cpu_to_be16(ETH_P_IP),
1551 .destroy = ipv4_dst_destroy,
1552 - .check = ipv4_dst_check,
1553 + .check = ipv4_blackhole_dst_check,
1554 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
1555 .entries = ATOMIC_INIT(0),
1556 };
1557 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1558 index 108fad0..4678308 100644
1559 --- a/net/ipv4/tcp.c
1560 +++ b/net/ipv4/tcp.c
1561 @@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
1562 */
1563
1564 mask = 0;
1565 - if (sk->sk_err)
1566 - mask = POLLERR;
1567
1568 /*
1569 * POLLHUP is certainly not done right. But poll() doesn't
1570 @@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
1571 if (tp->urg_data & TCP_URG_VALID)
1572 mask |= POLLPRI;
1573 }
1574 + /* This barrier is coupled with smp_wmb() in tcp_reset() */
1575 + smp_rmb();
1576 + if (sk->sk_err)
1577 + mask |= POLLERR;
1578 +
1579 return mask;
1580 }
1581
1582 @@ -935,7 +938,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
1583 goto out_err;
1584
1585 while (--iovlen >= 0) {
1586 - int seglen = iov->iov_len;
1587 + size_t seglen = iov->iov_len;
1588 unsigned char __user *from = iov->iov_base;
1589
1590 iov++;
1591 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1592 index 2433bcd..ce1ce82 100644
1593 --- a/net/ipv4/tcp_input.c
1594 +++ b/net/ipv4/tcp_input.c
1595 @@ -3969,6 +3969,8 @@ static void tcp_reset(struct sock *sk)
1596 default:
1597 sk->sk_err = ECONNRESET;
1598 }
1599 + /* This barrier is coupled with smp_rmb() in tcp_poll() */
1600 + smp_wmb();
1601
1602 if (!sock_flag(sk, SOCK_DEAD))
1603 sk->sk_error_report(sk);
1604 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
1605 index 74fb2eb..e3a9a65 100644
1606 --- a/net/ipv4/xfrm4_policy.c
1607 +++ b/net/ipv4/xfrm4_policy.c
1608 @@ -71,7 +71,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
1609 if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
1610 xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
1611 xdst->u.rt.fl.fl4_src == fl->fl4_src &&
1612 - xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
1613 + !((xdst->u.rt.fl.fl4_tos ^ fl->fl4_tos) & IPTOS_RT_MASK) &&
1614 xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
1615 dst_clone(dst);
1616 break;
1617 @@ -83,7 +83,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
1618
1619 static int xfrm4_get_tos(struct flowi *fl)
1620 {
1621 - return fl->fl4_tos;
1622 + return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
1623 }
1624
1625 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1626 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1627 index cd48801..eca3ef7 100644
1628 --- a/net/ipv6/ip6_output.c
1629 +++ b/net/ipv6/ip6_output.c
1630 @@ -643,7 +643,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1631
1632 if (skb_has_frags(skb)) {
1633 int first_len = skb_pagelen(skb);
1634 - int truesizes = 0;
1635 + struct sk_buff *frag2;
1636
1637 if (first_len - hlen > mtu ||
1638 ((first_len - hlen) & 7) ||
1639 @@ -655,18 +655,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1640 if (frag->len > mtu ||
1641 ((frag->len & 7) && frag->next) ||
1642 skb_headroom(frag) < hlen)
1643 - goto slow_path;
1644 + goto slow_path_clean;
1645
1646 /* Partially cloned skb? */
1647 if (skb_shared(frag))
1648 - goto slow_path;
1649 + goto slow_path_clean;
1650
1651 BUG_ON(frag->sk);
1652 if (skb->sk) {
1653 frag->sk = skb->sk;
1654 frag->destructor = sock_wfree;
1655 - truesizes += frag->truesize;
1656 }
1657 + skb->truesize -= frag->truesize;
1658 }
1659
1660 err = 0;
1661 @@ -697,7 +697,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1662
1663 first_len = skb_pagelen(skb);
1664 skb->data_len = first_len - skb_headlen(skb);
1665 - skb->truesize -= truesizes;
1666 skb->len = first_len;
1667 ipv6_hdr(skb)->payload_len = htons(first_len -
1668 sizeof(struct ipv6hdr));
1669 @@ -760,6 +759,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
1670 IPSTATS_MIB_FRAGFAILS);
1671 dst_release(&rt->u.dst);
1672 return err;
1673 +
1674 +slow_path_clean:
1675 + skb_walk_frags(skb, frag2) {
1676 + if (frag2 == frag)
1677 + break;
1678 + frag2->sk = NULL;
1679 + frag2->destructor = NULL;
1680 + skb->truesize += frag2->truesize;
1681 + }
1682 }
1683
1684 slow_path:
1685 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1686 index d6fe764..e307517 100644
1687 --- a/net/ipv6/route.c
1688 +++ b/net/ipv6/route.c
1689 @@ -1561,14 +1561,13 @@ out:
1690 * i.e. Path MTU discovery
1691 */
1692
1693 -void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1694 - struct net_device *dev, u32 pmtu)
1695 +static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
1696 + struct net *net, u32 pmtu, int ifindex)
1697 {
1698 struct rt6_info *rt, *nrt;
1699 - struct net *net = dev_net(dev);
1700 int allfrag = 0;
1701
1702 - rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1703 + rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1704 if (rt == NULL)
1705 return;
1706
1707 @@ -1636,6 +1635,27 @@ out:
1708 dst_release(&rt->u.dst);
1709 }
1710
1711 +void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1712 + struct net_device *dev, u32 pmtu)
1713 +{
1714 + struct net *net = dev_net(dev);
1715 +
1716 + /*
1717 + * RFC 1981 states that a node "MUST reduce the size of the packets it
1718 + * is sending along the path" that caused the Packet Too Big message.
1719 + * Since it's not possible in the general case to determine which
1720 + * interface was used to send the original packet, we update the MTU
1721 + * on the interface that will be used to send future packets. We also
1722 + * update the MTU on the interface that received the Packet Too Big in
1723 + * case the original packet was forced out that interface with
1724 + * SO_BINDTODEVICE or similar. This is the next best thing to the
1725 + * correct behaviour, which would be to update the MTU on all
1726 + * interfaces.
1727 + */
1728 + rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1729 + rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1730 +}
1731 +
1732 /*
1733 * Misc support functions
1734 */
1735 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
1736 index f60c0c2..519ff9d 100644
1737 --- a/net/phonet/af_phonet.c
1738 +++ b/net/phonet/af_phonet.c
1739 @@ -67,6 +67,8 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
1740 struct phonet_protocol *pnp;
1741 int err;
1742
1743 + if (!net_eq(net, &init_net))
1744 + return -EAFNOSUPPORT;
1745 if (!capable(CAP_SYS_ADMIN))
1746 return -EPERM;
1747
1748 @@ -353,6 +355,8 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
1749 struct sockaddr_pn sa;
1750 u16 len;
1751
1752 + if (!net_eq(net, &init_net))
1753 + goto out;
1754 /* check we have at least a full Phonet header */
1755 if (!pskb_pull(skb, sizeof(struct phonethdr)))
1756 goto out;
1757 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
1758 index 5f32d21..9cdd35e 100644
1759 --- a/net/phonet/pep.c
1760 +++ b/net/phonet/pep.c
1761 @@ -224,12 +224,13 @@ static void pipe_grant_credits(struct sock *sk)
1762 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
1763 {
1764 struct pep_sock *pn = pep_sk(sk);
1765 - struct pnpipehdr *hdr = pnp_hdr(skb);
1766 + struct pnpipehdr *hdr;
1767 int wake = 0;
1768
1769 if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
1770 return -EINVAL;
1771
1772 + hdr = pnp_hdr(skb);
1773 if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
1774 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
1775 (unsigned)hdr->data[0]);
1776 diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
1777 index 5f42f30..5a2275c 100644
1778 --- a/net/phonet/pn_dev.c
1779 +++ b/net/phonet/pn_dev.c
1780 @@ -246,7 +246,11 @@ static struct notifier_block phonet_device_notifier = {
1781 /* Per-namespace Phonet devices handling */
1782 static int phonet_init_net(struct net *net)
1783 {
1784 - struct phonet_net *pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
1785 + struct phonet_net *pnn;
1786 +
1787 + if (!net_eq(net, &init_net))
1788 + return 0;
1789 + pnn = kmalloc(sizeof(*pnn), GFP_KERNEL);
1790 if (!pnn)
1791 return -ENOMEM;
1792
1793 @@ -263,9 +267,13 @@ static int phonet_init_net(struct net *net)
1794
1795 static void phonet_exit_net(struct net *net)
1796 {
1797 - struct phonet_net *pnn = net_generic(net, phonet_net_id);
1798 + struct phonet_net *pnn;
1799 struct net_device *dev;
1800
1801 + if (!net_eq(net, &init_net))
1802 + return;
1803 + pnn = net_generic(net, phonet_net_id);
1804 +
1805 rtnl_lock();
1806 for_each_netdev(net, dev)
1807 phonet_device_destroy(dev);
1808 diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
1809 index d21fd35..7acab1e 100644
1810 --- a/net/phonet/pn_netlink.c
1811 +++ b/net/phonet/pn_netlink.c
1812 @@ -68,6 +68,8 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
1813 int err;
1814 u8 pnaddr;
1815
1816 + if (!net_eq(net, &init_net))
1817 + return -EOPNOTSUPP;
1818 if (!capable(CAP_SYS_ADMIN))
1819 return -EPERM;
1820
1821 @@ -124,12 +126,16 @@ nla_put_failure:
1822
1823 static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1824 {
1825 + struct net *net = sock_net(skb->sk);
1826 struct phonet_device_list *pndevs;
1827 struct phonet_device *pnd;
1828 int dev_idx = 0, dev_start_idx = cb->args[0];
1829 int addr_idx = 0, addr_start_idx = cb->args[1];
1830
1831 - pndevs = phonet_device_list(sock_net(skb->sk));
1832 + if (!net_eq(net, &init_net))
1833 + goto skip;
1834 +
1835 + pndevs = phonet_device_list(net);
1836 spin_lock_bh(&pndevs->lock);
1837 list_for_each_entry(pnd, &pndevs->list, list) {
1838 u8 addr;
1839 @@ -154,6 +160,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1840
1841 out:
1842 spin_unlock_bh(&pndevs->lock);
1843 +skip:
1844 cb->args[0] = dev_idx;
1845 cb->args[1] = addr_idx;
1846
1847 diff --git a/net/rds/page.c b/net/rds/page.c
1848 index 3679012..b442a48 100644
1849 --- a/net/rds/page.c
1850 +++ b/net/rds/page.c
1851 @@ -56,30 +56,17 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
1852 unsigned long ret;
1853 void *addr;
1854
1855 - if (to_user)
1856 + addr = kmap(page);
1857 + if (to_user) {
1858 rds_stats_add(s_copy_to_user, bytes);
1859 - else
1860 + ret = copy_to_user(ptr, addr + offset, bytes);
1861 + } else {
1862 rds_stats_add(s_copy_from_user, bytes);
1863 -
1864 - addr = kmap_atomic(page, KM_USER0);
1865 - if (to_user)
1866 - ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
1867 - else
1868 - ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
1869 - kunmap_atomic(addr, KM_USER0);
1870 -
1871 - if (ret) {
1872 - addr = kmap(page);
1873 - if (to_user)
1874 - ret = copy_to_user(ptr, addr + offset, bytes);
1875 - else
1876 - ret = copy_from_user(addr + offset, ptr, bytes);
1877 - kunmap(page);
1878 - if (ret)
1879 - return -EFAULT;
1880 + ret = copy_from_user(addr + offset, ptr, bytes);
1881 }
1882 + kunmap(page);
1883
1884 - return 0;
1885 + return ret ? -EFAULT : 0;
1886 }
1887 EXPORT_SYMBOL_GPL(rds_page_copy_user);
1888
1889 diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
1890 index 502cce7..7d188bc 100644
1891 --- a/net/rose/af_rose.c
1892 +++ b/net/rose/af_rose.c
1893 @@ -677,7 +677,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1894 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
1895 return -EINVAL;
1896
1897 - if (addr->srose_ndigis > ROSE_MAX_DIGIS)
1898 + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
1899 return -EINVAL;
1900
1901 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
1902 @@ -737,7 +737,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
1903 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
1904 return -EINVAL;
1905
1906 - if (addr->srose_ndigis > ROSE_MAX_DIGIS)
1907 + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
1908 return -EINVAL;
1909
1910 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
1911 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
1912 index fddcf9c..a2e4c60 100644
1913 --- a/net/wireless/wext.c
1914 +++ b/net/wireless/wext.c
1915 @@ -1029,7 +1029,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
1916 } else if (!iwp->pointer)
1917 return -EFAULT;
1918
1919 - extra = kmalloc(extra_size, GFP_KERNEL);
1920 + extra = kzalloc(extra_size, GFP_KERNEL);
1921 if (!extra)
1922 return -ENOMEM;
1923
1924 diff --git a/sound/core/control.c b/sound/core/control.c
1925 index a8b7fab..7834a54 100644
1926 --- a/sound/core/control.c
1927 +++ b/sound/core/control.c
1928 @@ -31,6 +31,7 @@
1929
1930 /* max number of user-defined controls */
1931 #define MAX_USER_CONTROLS 32
1932 +#define MAX_CONTROL_COUNT 1028
1933
1934 struct snd_kctl_ioctl {
1935 struct list_head list; /* list of all ioctls */
1936 @@ -190,6 +191,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control,
1937
1938 if (snd_BUG_ON(!control || !control->count))
1939 return NULL;
1940 +
1941 + if (control->count > MAX_CONTROL_COUNT)
1942 + return NULL;
1943 +
1944 kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL);
1945 if (kctl == NULL) {
1946 snd_printk(KERN_ERR "Cannot allocate control instance\n");
1947 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
1948 index 70d6f25..e4c12a1 100644
1949 --- a/sound/core/rawmidi.c
1950 +++ b/sound/core/rawmidi.c
1951 @@ -530,13 +530,15 @@ static int snd_rawmidi_release(struct inode *inode, struct file *file)
1952 {
1953 struct snd_rawmidi_file *rfile;
1954 struct snd_rawmidi *rmidi;
1955 + struct module *module;
1956
1957 rfile = file->private_data;
1958 rmidi = rfile->rmidi;
1959 rawmidi_release_priv(rfile);
1960 kfree(rfile);
1961 + module = rmidi->card->module;
1962 snd_card_file_remove(rmidi->card, file);
1963 - module_put(rmidi->card->module);
1964 + module_put(module);
1965 return 0;
1966 }
1967
1968 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
1969 index bd0794e..3736bc4 100644
1970 --- a/sound/pci/hda/patch_analog.c
1971 +++ b/sound/pci/hda/patch_analog.c
1972 @@ -3510,6 +3510,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
1973 /* Lenovo Thinkpad T61/X61 */
1974 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
1975 SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
1976 + SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
1977 {}
1978 };
1979
1980 diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
1981 index 72db4c3..6811433 100644
1982 --- a/sound/pci/oxygen/oxygen.c
1983 +++ b/sound/pci/oxygen/oxygen.c
1984 @@ -393,6 +393,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
1985 chip->model.suspend = claro_suspend;
1986 chip->model.resume = claro_resume;
1987 chip->model.set_adc_params = set_ak5385_params;
1988 + chip->model.device_config = PLAYBACK_0_TO_I2S |
1989 + PLAYBACK_1_TO_SPDIF |
1990 + CAPTURE_0_FROM_I2S_2 |
1991 + CAPTURE_1_FROM_SPDIF;
1992 break;
1993 }
1994 if (id->driver_data == MODEL_MERIDIAN ||
1995 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
1996 index 7bb827c..401518c 100644
1997 --- a/sound/pci/rme9652/hdsp.c
1998 +++ b/sound/pci/rme9652/hdsp.c
1999 @@ -4610,6 +4610,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
2000 if (err < 0)
2001 return err;
2002
2003 + memset(&info, 0, sizeof(info));
2004 spin_lock_irqsave(&hdsp->lock, flags);
2005 info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
2006 info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
2007 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
2008 index 0dce331..ec2125c 100644
2009 --- a/sound/pci/rme9652/hdspm.c
2010 +++ b/sound/pci/rme9652/hdspm.c
2011 @@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
2012
2013 case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
2014
2015 + memset(&info, 0, sizeof(info));
2016 spin_lock_irq(&hdspm->lock);
2017 info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
2018 info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
