
Contents of /genpatches-2.6/trunk/2.6.12-pre/1010_linux-2.6.11.11.patch



Revision 4 - Sat Jun 11 23:20:41 2005 UTC by dsd
File size: 18324 byte(s)
Log message: Create 2.6.12 branch
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 11
-EXTRAVERSION = .10
+EXTRAVERSION = .11
NAME=Woozy Beaver

# *DOCUMENTATION*
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -401,6 +401,8 @@ static void iommu_bus_setup_pSeriesLP(st
struct device_node *dn, *pdn;
unsigned int *dma_window = NULL;

+ DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
+
dn = pci_bus_to_OF_node(bus);

/* Find nearest ibm,dma-window, walking up the device tree */
@@ -455,6 +457,56 @@ static void iommu_dev_setup_pSeries(stru
}
}

+static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
+{
+ struct device_node *pdn, *dn;
+ struct iommu_table *tbl;
+ int *dma_window = NULL;
+
+ DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name);
+
+ /* dev setup for LPAR is a little tricky, since the device tree might
+ * contain the dma-window properties per-device and not neccesarily
+ * for the bus. So we need to search upwards in the tree until we
+ * either hit a dma-window property, OR find a parent with a table
+ * already allocated.
+ */
+ dn = pci_device_to_OF_node(dev);
+
+ for (pdn = dn; pdn && !pdn->iommu_table; pdn = pdn->parent) {
+ dma_window = (unsigned int *)get_property(pdn, "ibm,dma-window", NULL);
+ if (dma_window)
+ break;
+ }
+
+ /* Check for parent == NULL so we don't try to setup the empty EADS
+ * slots on POWER4 machines.
+ */
+ if (dma_window == NULL || pdn->parent == NULL) {
+ /* Fall back to regular (non-LPAR) dev setup */
+ DBG("No dma window for device, falling back to regular setup\n");
+ iommu_dev_setup_pSeries(dev);
+ return;
+ } else {
+ DBG("Found DMA window, allocating table\n");
+ }
+
+ if (!pdn->iommu_table) {
+ /* iommu_table_setparms_lpar needs bussubno. */
+ pdn->bussubno = pdn->phb->bus->number;
+
+ tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
+ GFP_KERNEL);
+
+ iommu_table_setparms_lpar(pdn->phb, pdn, tbl, dma_window);
+
+ pdn->iommu_table = iommu_init_table(tbl);
+ }
+
+ if (pdn != dn)
+ dn->iommu_table = pdn->iommu_table;
+}
+
static void iommu_bus_setup_null(struct pci_bus *b) { }
static void iommu_dev_setup_null(struct pci_dev *d) { }

@@ -479,13 +531,14 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_free = tce_free_pSeriesLP;
}
ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
+ ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
+ ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
}

- ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;

pci_iommu_init();
}
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -129,13 +129,13 @@ static int putreg(struct task_struct *ch
value &= 0xffff;
return 0;
case offsetof(struct user_regs_struct,fs_base):
- if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
- return -EIO;
+ if (value >= TASK_SIZE)
+ return -EIO;
child->thread.fs = value;
return 0;
case offsetof(struct user_regs_struct,gs_base):
- if (!((value >> 48) == 0 || (value >> 48) == 0xffff))
- return -EIO;
+ if (value >= TASK_SIZE)
+ return -EIO;
child->thread.gs = value;
return 0;
case offsetof(struct user_regs_struct, eflags):
@@ -149,6 +149,11 @@ static int putreg(struct task_struct *ch
return -EIO;
value &= 0xffff;
break;
+ case offsetof(struct user_regs_struct, rip):
+ /* Check if the new RIP address is canonical */
+ if (value >= TASK_SIZE)
+ return -EIO;
+ break;
}
put_stack_long(child, regno - sizeof(struct pt_regs), value);
return 0;
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -236,6 +236,8 @@ static noinline void pgtable_bad(unsigne

/*
* Handle a fault on the vmalloc or module mapping area
+ *
+ * This assumes no large pages in there.
*/
static int vmalloc_fault(unsigned long address)
{
@@ -274,7 +276,10 @@ static int vmalloc_fault(unsigned long a
if (!pte_present(*pte_ref))
return -1;
pte = pte_offset_kernel(pmd, address);
- if (!pte_present(*pte) || pte_page(*pte) != pte_page(*pte_ref))
+ /* Don't use pte_page here, because the mappings can point
+ outside mem_map, and the NUMA hash lookup cannot handle
+ that. */
+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
BUG();
__flush_tlb_all();
return 0;
@@ -348,7 +353,9 @@ asmlinkage void do_page_fault(struct pt_
* protection error (error_code & 1) == 0.
*/
if (unlikely(address >= TASK_SIZE)) {
- if (!(error_code & 5)) {
+ if (!(error_code & 5) &&
+ ((address >= VMALLOC_START && address < VMALLOC_END) ||
+ (address >= MODULES_VADDR && address < MODULES_END))) {
if (vmalloc_fault(address) < 0)
goto bad_area_nosemaphore;
return;
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -266,7 +266,7 @@ void iounmap(volatile void __iomem *addr
if ((p->flags >> 20) &&
p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
/* p->size includes the guard page, but cpa doesn't like that */
- change_page_attr(virt_to_page(__va(p->phys_addr)),
+ change_page_attr_addr((unsigned long)(__va(p->phys_addr)),
(p->size - PAGE_SIZE) >> PAGE_SHIFT,
PAGE_KERNEL);
global_flush_tlb();
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -133,6 +133,8 @@ static ide_startstop_t __ide_do_rw_disk(
if (hwif->no_lba48_dma && lba48 && dma) {
if (block + rq->nr_sectors > 1ULL << 28)
dma = 0;
+ else
+ lba48 = 0;
}

if (!dma) {
@@ -146,7 +148,7 @@ static ide_startstop_t __ide_do_rw_disk(
/* FIXME: SELECT_MASK(drive, 0) ? */

if (drive->select.b.lba) {
- if (drive->addressing == 1) {
+ if (lba48) {
task_ioreg_t tasklets[10];

pr_debug("%s: LBA=0x%012llx\n", drive->name, block);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1581,7 +1581,8 @@ vortex_up(struct net_device *dev)

if (VORTEX_PCI(vp)) {
pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */
- pci_restore_state(VORTEX_PCI(vp));
+ if (vp->pm_state_valid)
+ pci_restore_state(VORTEX_PCI(vp));
pci_enable_device(VORTEX_PCI(vp));
}

@@ -2741,6 +2742,7 @@ vortex_down(struct net_device *dev, int
outl(0, ioaddr + DownListPtr);

if (final_down && VORTEX_PCI(vp)) {
+ vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
acpi_set_WOL(dev);
}
@@ -3243,9 +3245,10 @@ static void acpi_set_WOL(struct net_devi
outw(RxEnable, ioaddr + EL3_CMD);

pci_enable_wake(VORTEX_PCI(vp), 0, 1);
+
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}
- /* Change the power state to D3; RxEnable doesn't take effect. */
- pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
}


diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -386,6 +386,7 @@ struct visor_private {
int bytes_in;
int bytes_out;
int outstanding_urbs;
+ int throttled;
};

/* number of outstanding urbs to prevent userspace DoS from happening */
@@ -415,6 +416,7 @@ static int visor_open (struct usb_serial
priv->bytes_in = 0;
priv->bytes_out = 0;
priv->outstanding_urbs = 0;
+ priv->throttled = 0;
spin_unlock_irqrestore(&priv->lock, flags);

/*
@@ -602,6 +604,7 @@ static void visor_read_bulk_callback (st
struct tty_struct *tty;
unsigned long flags;
int i;
+ int throttled;
int result;

dbg("%s - port %d", __FUNCTION__, port->number);
@@ -627,18 +630,21 @@ static void visor_read_bulk_callback (st
}
spin_lock_irqsave(&priv->lock, flags);
priv->bytes_in += urb->actual_length;
+ throttled = priv->throttled;
spin_unlock_irqrestore(&priv->lock, flags);

- /* Continue trying to always read */
- usb_fill_bulk_urb (port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- visor_read_bulk_callback, port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (result)
- dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ /* Continue trying to always read if we should */
+ if (!throttled) {
+ usb_fill_bulk_urb (port->read_urb, port->serial->dev,
+ usb_rcvbulkpipe(port->serial->dev,
+ port->bulk_in_endpointAddress),
+ port->read_urb->transfer_buffer,
+ port->read_urb->transfer_buffer_length,
+ visor_read_bulk_callback, port);
+ result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ }
return;
}

@@ -683,16 +689,26 @@ exit:

static void visor_throttle (struct usb_serial_port *port)
{
+ struct visor_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
dbg("%s - port %d", __FUNCTION__, port->number);
- usb_kill_urb(port->read_urb);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->throttled = 1;
+ spin_unlock_irqrestore(&priv->lock, flags);
}


static void visor_unthrottle (struct usb_serial_port *port)
{
+ struct visor_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
int result;

dbg("%s - port %d", __FUNCTION__, port->number);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->throttled = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);

port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -438,13 +438,21 @@ static void matroxfb_1bpp_imageblit(WPMI
} else if (step == 1) {
/* Special case for 1..8bit widths */
while (height--) {
- mga_writel(mmio, 0, *chardata);
+#if defined(__BIG_ENDIAN)
+ fb_writel((*chardata) << 24, mmio.vaddr);
+#else
+ fb_writel(*chardata, mmio.vaddr);
+#endif
chardata++;
}
} else if (step == 2) {
/* Special case for 9..15bit widths */
while (height--) {
- mga_writel(mmio, 0, *(u_int16_t*)chardata);
+#if defined(__BIG_ENDIAN)
+ fb_writel((*(u_int16_t*)chardata) << 16, mmio.vaddr);
+#else
+ fb_writel(*(u_int16_t*)chardata, mmio.vaddr);
+#endif
chardata += 2;
}
} else {
@@ -454,7 +462,7 @@ static void matroxfb_1bpp_imageblit(WPMI

for (i = 0; i < step; i += 4) {
/* Hope that there are at least three readable bytes beyond the end of bitmap */
- mga_writel(mmio, 0, get_unaligned((u_int32_t*)(chardata + i)));
+ fb_writel(get_unaligned((u_int32_t*)(chardata + i)),mmio.vaddr);
}
chardata += step;
}
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -170,14 +170,14 @@ static inline void mga_memcpy_toio(vaddr

if ((unsigned long)src & 3) {
while (len >= 4) {
- writel(get_unaligned((u32 *)src), addr);
+ fb_writel(get_unaligned((u32 *)src), addr);
addr++;
len -= 4;
src += 4;
}
} else {
while (len >= 4) {
- writel(*(u32 *)src, addr);
+ fb_writel(*(u32 *)src, addr);
addr++;
len -= 4;
src += 4;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -268,7 +268,8 @@ void ext3_discard_reservation(struct ino

if (!rsv_is_empty(&rsv->rsv_window)) {
spin_lock(rsv_lock);
- rsv_window_remove(inode->i_sb, rsv);
+ if (!rsv_is_empty(&rsv->rsv_window))
+ rsv_window_remove(inode->i_sb, rsv);
spin_unlock(rsv_lock);
}
}
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -160,9 +160,9 @@ static inline void clear_in_cr4 (unsigne


/*
- * User space process size. 47bits.
+ * User space process size. 47bits minus one guard page.
*/
-#define TASK_SIZE (0x800000000000UL)
+#define TASK_SIZE (0x800000000000UL - 4096)

/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
diff --git a/include/linux/err.h b/include/linux/err.h
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -13,6 +13,8 @@
* This should be a per-architecture thing, to allow different
* error and pointer decisions.
*/
+#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)
+
static inline void *ERR_PTR(long error)
{
return (void *) error;
@@ -25,7 +27,7 @@ static inline long PTR_ERR(const void *p

static inline long IS_ERR(const void *ptr)
{
- return unlikely((unsigned long)ptr > (unsigned long)-1000L);
+ return IS_ERR_VALUE((unsigned long)ptr);
}

#endif /* _LINUX_ERR_H */
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1315,37 +1315,40 @@ unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- if (flags & MAP_FIXED) {
- unsigned long ret;
+ unsigned long ret;

- if (addr > TASK_SIZE - len)
- return -ENOMEM;
- if (addr & ~PAGE_MASK)
- return -EINVAL;
- if (file && is_file_hugepages(file)) {
- /*
- * Check if the given range is hugepage aligned, and
- * can be made suitable for hugepages.
- */
- ret = prepare_hugepage_range(addr, len);
- } else {
- /*
- * Ensure that a normal request is not falling in a
- * reserved hugepage range. For some archs like IA-64,
- * there is a separate region for hugepages.
- */
- ret = is_hugepage_only_range(addr, len);
- }
- if (ret)
- return -EINVAL;
- return addr;
- }
+ if (!(flags & MAP_FIXED)) {
+ unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

- if (file && file->f_op && file->f_op->get_unmapped_area)
- return file->f_op->get_unmapped_area(file, addr, len,
- pgoff, flags);
+ get_area = current->mm->get_unmapped_area;
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+ addr = get_area(file, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ }

- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (file && is_file_hugepages(file)) {
+ /*
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
+ */
+ ret = prepare_hugepage_range(addr, len);
+ } else {
+ /*
+ * Ensure that a normal request is not falling in a
+ * reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_hugepage_only_range(addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
}

EXPORT_SYMBOL(get_unmapped_area);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -179,9 +179,10 @@ unsigned int ebt_do_table (unsigned int
struct ebt_chainstack *cs;
struct ebt_entries *chaininfo;
char *base;
- struct ebt_table_info *private = table->private;
+ struct ebt_table_info *private;

read_lock_bh(&table->lock);
+ private = table->private;
cb_base = COUNTER_BASE(private->counters, private->nentries,
smp_processor_id());
if (private->chainstack)
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -727,7 +727,8 @@ int rose_rt_ioctl(unsigned int cmd, void
}
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
-
+ if (rose_route.ndigis > 8) /* No more than 8 digipeats */
+ return -EINVAL;
err = rose_add_node(&rose_route, dev);
dev_put(dev);
return err;
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -3276,7 +3276,7 @@ static void snd_usb_audio_disconnect(str
}
usb_chip[chip->index] = NULL;
up(&register_mutex);
- snd_card_free_in_thread(card);
+ snd_card_free(card);
} else {
up(&register_mutex);
}
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -1,6 +1,11 @@
/*
* usbusy2y.c - ALSA USB US-428 Driver
*
+2005-04-14 Karsten Wiese
+ Version 0.8.7.2:
+ Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom.
+ Tested ok with kernel 2.6.12-rc2.
+
2004-12-14 Karsten Wiese
Version 0.8.7.1:
snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open.
@@ -143,7 +148,7 @@


MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");
-MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.1");
+MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604), "NAME_ALLCAPS"(0x8001)(0x8005)(0x8007) }}");

@@ -430,8 +435,6 @@ static void usX2Y_usb_disconnect(struct
if (ptr) {
usX2Ydev_t* usX2Y = usX2Y((snd_card_t*)ptr);
struct list_head* p;
- if (usX2Y->chip_status == USX2Y_STAT_CHIP_HUP) // on 2.6.1 kernel snd_usbmidi_disconnect()
- return; // calls us back. better leave :-) .
usX2Y->chip.shutdown = 1;
usX2Y->chip_status = USX2Y_STAT_CHIP_HUP;
usX2Y_unlinkSeq(&usX2Y->AS04);
@@ -443,7 +446,7 @@ static void usX2Y_usb_disconnect(struct
}
if (usX2Y->us428ctls_sharedmem)
wake_up(&usX2Y->us428ctls_wait_queue_head);
- snd_card_free_in_thread((snd_card_t*)ptr);
+ snd_card_free((snd_card_t*)ptr);
}
}

