/[linux-patches]/genpatches-2.6/tags/3.2-6/2400_kcopy-patch-for-infiniband-driver.patch
Gentoo

Contents of /genpatches-2.6/tags/3.2-6/2400_kcopy-patch-for-infiniband-driver.patch

Parent Directory | Revision Log


Revision 2076 - (show annotations) (download)
Thu Feb 2 11:46:07 2012 UTC (2 years, 6 months ago) by mpagano
File size: 20669 byte(s)
3.2-6 release
1 From 3b2e8091390000a15bdceccb57a6e3956a751134 Mon Sep 17 00:00:00 2001
2 From: Alexey Shvetsov <alexxy@gentoo.org>
3 Date: Tue, 17 Jan 2012 21:08:49 +0400
4 Subject: [PATCH] [kcopy] Add kcopy driver
5
6 Add kcopy driver from qlogic to implement zero copy for infiniband psm
7 userspace driver
8
9 Signed-off-by: Alexey Shvetsov <alexxy@gentoo.org>
10 ---
11 drivers/char/Kconfig | 2 +
12 drivers/char/Makefile | 2 +
13 drivers/char/kcopy/Kconfig | 17 ++
14 drivers/char/kcopy/Makefile | 4 +
15 drivers/char/kcopy/kcopy.c | 646 +++++++++++++++++++++++++++++++++++++++++++
16 5 files changed, 671 insertions(+), 0 deletions(-)
17 create mode 100644 drivers/char/kcopy/Kconfig
18 create mode 100644 drivers/char/kcopy/Makefile
19 create mode 100644 drivers/char/kcopy/kcopy.c
20
21 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
22 index 4364303..f206545 100644
23 --- a/drivers/char/Kconfig
24 +++ b/drivers/char/Kconfig
25 @@ -6,6 +6,8 @@ menu "Character devices"
26
27 source "drivers/tty/Kconfig"
28
29 +source "drivers/char/kcopy/Kconfig"
30 +
31 config DEVKMEM
32 bool "/dev/kmem virtual device support"
33 default y
34 --- a/drivers/char/Makefile 2012-02-01 10:18:43.568153662 -0500
35 +++ b/drivers/char/Makefile 2012-02-01 10:19:40.726839554 -0500
36 @@ -65,3 +65,4 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
37 js-rtc-y = rtc.o
38
39 obj-$(CONFIG_TILE_SROM) += tile-srom.o
40 +obj-$(CONFIG_KCOPY) += kcopy/
41 diff --git a/drivers/char/kcopy/Kconfig b/drivers/char/kcopy/Kconfig
42 new file mode 100644
43 index 0000000..453ae52
44 --- /dev/null
45 +++ b/drivers/char/kcopy/Kconfig
46 @@ -0,0 +1,17 @@
47 +#
48 +# KCopy character device configuration
49 +#
50 +
51 +menu "KCopy"
52 +
53 +config KCOPY
54 + tristate "Memory-to-memory copies using kernel assist"
55 + default m
56 + ---help---
57 + High-performance inter-process memory copies. Can often save a
58 + memory copy to shared memory in the application. Useful at least
59 + for MPI applications where the point-to-point nature of vmsplice
60 + and pipes can be a limiting factor in performance.
61 +
62 +endmenu
63 +
64 diff --git a/drivers/char/kcopy/Makefile b/drivers/char/kcopy/Makefile
65 new file mode 100644
66 index 0000000..9cb269b
67 --- /dev/null
68 +++ b/drivers/char/kcopy/Makefile
69 @@ -0,0 +1,4 @@
70 +#
71 +# Makefile for the kernel character device drivers.
72 +#
73 +obj-$(CONFIG_KCOPY) += kcopy.o
74 diff --git a/drivers/char/kcopy/kcopy.c b/drivers/char/kcopy/kcopy.c
75 new file mode 100644
76 index 0000000..a9f915c
77 --- /dev/null
78 +++ b/drivers/char/kcopy/kcopy.c
79 @@ -0,0 +1,646 @@
80 +#include <linux/module.h>
81 +#include <linux/fs.h>
82 +#include <linux/cdev.h>
83 +#include <linux/device.h>
84 +#include <linux/mutex.h>
85 +#include <linux/mman.h>
86 +#include <linux/highmem.h>
87 +#include <linux/spinlock.h>
88 +#include <linux/sched.h>
89 +#include <linux/rbtree.h>
90 +#include <linux/rcupdate.h>
91 +#include <linux/uaccess.h>
92 +#include <linux/slab.h>
93 +
94 +MODULE_LICENSE("GPL");
95 +MODULE_AUTHOR("Arthur Jones <arthur.jones@qlogic.com>");
96 +MODULE_DESCRIPTION("QLogic kcopy driver");
97 +
98 +#define KCOPY_ABI 1
99 +#define KCOPY_MAX_MINORS 64
100 +
101 +struct kcopy_device {
102 + struct cdev cdev;
103 + struct class *class;
104 + struct device *devp[KCOPY_MAX_MINORS];
105 + dev_t dev;
106 +
107 + struct kcopy_file *kf[KCOPY_MAX_MINORS];
108 + struct mutex open_lock;
109 +};
110 +
111 +static struct kcopy_device kcopy_dev;
112 +
113 +/* per file data / one of these is shared per minor */
114 +struct kcopy_file {
115 + int count;
116 +
117 + /* pid indexed */
118 + struct rb_root live_map_tree;
119 +
120 + struct mutex map_lock;
121 +};
122 +
123 +struct kcopy_map_entry {
124 + int count;
125 + struct task_struct *task;
126 + pid_t pid;
127 + struct kcopy_file *file; /* file backpointer */
128 +
129 + struct list_head list; /* free map list */
130 + struct rb_node node; /* live map tree */
131 +};
132 +
133 +#define KCOPY_GET_SYSCALL 1
134 +#define KCOPY_PUT_SYSCALL 2
135 +#define KCOPY_ABI_SYSCALL 3
136 +
137 +struct kcopy_syscall {
138 + __u32 tag;
139 + pid_t pid;
140 + __u64 n;
141 + __u64 src;
142 + __u64 dst;
143 +};
144 +
145 +static const void __user *kcopy_syscall_src(const struct kcopy_syscall *ks)
146 +{
147 + return (const void __user *) (unsigned long) ks->src;
148 +}
149 +
150 +static void __user *kcopy_syscall_dst(const struct kcopy_syscall *ks)
151 +{
152 + return (void __user *) (unsigned long) ks->dst;
153 +}
154 +
155 +static unsigned long kcopy_syscall_n(const struct kcopy_syscall *ks)
156 +{
157 + return (unsigned long) ks->n;
158 +}
159 +
160 +static struct kcopy_map_entry *kcopy_create_entry(struct kcopy_file *file)
161 +{
162 + struct kcopy_map_entry *kme =
163 + kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
164 +
165 + if (!kme)
166 + return NULL;
167 +
168 + kme->count = 1;
169 + kme->file = file;
170 + kme->task = current;
171 + kme->pid = current->tgid;
172 + INIT_LIST_HEAD(&kme->list);
173 +
174 + return kme;
175 +}
176 +
177 +static struct kcopy_map_entry *
178 +kcopy_lookup_pid(struct rb_root *root, pid_t pid)
179 +{
180 + struct rb_node *node = root->rb_node;
181 +
182 + while (node) {
183 + struct kcopy_map_entry *kme =
184 + container_of(node, struct kcopy_map_entry, node);
185 +
186 + if (pid < kme->pid)
187 + node = node->rb_left;
188 + else if (pid > kme->pid)
189 + node = node->rb_right;
190 + else
191 + return kme;
192 + }
193 +
194 + return NULL;
195 +}
196 +
197 +static int kcopy_insert(struct rb_root *root, struct kcopy_map_entry *kme)
198 +{
199 + struct rb_node **new = &(root->rb_node);
200 + struct rb_node *parent = NULL;
201 +
202 + while (*new) {
203 + struct kcopy_map_entry *tkme =
204 + container_of(*new, struct kcopy_map_entry, node);
205 +
206 + parent = *new;
207 + if (kme->pid < tkme->pid)
208 + new = &((*new)->rb_left);
209 + else if (kme->pid > tkme->pid)
210 + new = &((*new)->rb_right);
211 + else {
212 + printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
213 + return -EINVAL;
214 + }
215 + }
216 +
217 + rb_link_node(&kme->node, parent, new);
218 + rb_insert_color(&kme->node, root);
219 +
220 + return 0;
221 +}
222 +
223 +static int kcopy_open(struct inode *inode, struct file *filp)
224 +{
225 + int ret;
226 + const int minor = iminor(inode);
227 + struct kcopy_file *kf = NULL;
228 + struct kcopy_map_entry *kme;
229 + struct kcopy_map_entry *okme;
230 +
231 + if (minor < 0 || minor >= KCOPY_MAX_MINORS)
232 + return -ENODEV;
233 +
234 + mutex_lock(&kcopy_dev.open_lock);
235 +
236 + if (!kcopy_dev.kf[minor]) {
237 + kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
238 +
239 + if (!kf) {
240 + ret = -ENOMEM;
241 + goto bail;
242 + }
243 +
244 + kf->count = 1;
245 + kf->live_map_tree = RB_ROOT;
246 + mutex_init(&kf->map_lock);
247 + kcopy_dev.kf[minor] = kf;
248 + } else {
249 + if (filp->f_flags & O_EXCL) {
250 + ret = -EBUSY;
251 + goto bail;
252 + }
253 + kcopy_dev.kf[minor]->count++;
254 + }
255 +
256 + kme = kcopy_create_entry(kcopy_dev.kf[minor]);
257 + if (!kme) {
258 + ret = -ENOMEM;
259 + goto err_free_kf;
260 + }
261 +
262 + kf = kcopy_dev.kf[minor];
263 +
264 + mutex_lock(&kf->map_lock);
265 +
266 + okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
267 + if (okme) {
268 + /* pid already exists... */
269 + okme->count++;
270 + kfree(kme);
271 + kme = okme;
272 + } else
273 + ret = kcopy_insert(&kf->live_map_tree, kme);
274 +
275 + mutex_unlock(&kf->map_lock);
276 +
277 + filp->private_data = kme;
278 +
279 + ret = 0;
280 + goto bail;
281 +
282 +err_free_kf:
283 + if (kf) {
284 + kcopy_dev.kf[minor] = NULL;
285 + kfree(kf);
286 + }
287 +bail:
288 + mutex_unlock(&kcopy_dev.open_lock);
289 + return ret;
290 +}
291 +
292 +static int kcopy_flush(struct file *filp, fl_owner_t id)
293 +{
294 + struct kcopy_map_entry *kme = filp->private_data;
295 + struct kcopy_file *kf = kme->file;
296 +
297 + if (file_count(filp) == 1) {
298 + mutex_lock(&kf->map_lock);
299 + kme->count--;
300 +
301 + if (!kme->count) {
302 + rb_erase(&kme->node, &kf->live_map_tree);
303 + kfree(kme);
304 + }
305 + mutex_unlock(&kf->map_lock);
306 + }
307 +
308 + return 0;
309 +}
310 +
311 +static int kcopy_release(struct inode *inode, struct file *filp)
312 +{
313 + const int minor = iminor(inode);
314 +
315 + mutex_lock(&kcopy_dev.open_lock);
316 + kcopy_dev.kf[minor]->count--;
317 + if (!kcopy_dev.kf[minor]->count) {
318 + kfree(kcopy_dev.kf[minor]);
319 + kcopy_dev.kf[minor] = NULL;
320 + }
321 + mutex_unlock(&kcopy_dev.open_lock);
322 +
323 + return 0;
324 +}
325 +
326 +static void kcopy_put_pages(struct page **pages, int npages)
327 +{
328 + int j;
329 +
330 + for (j = 0; j < npages; j++)
331 + put_page(pages[j]);
332 +}
333 +
334 +static int kcopy_validate_task(struct task_struct *p)
335 +{
336 + return p && ((current_euid() == task_euid(p)) || (current_euid() == task_uid(p)));
337 +}
338 +
339 +static int kcopy_get_pages(struct kcopy_file *kf, pid_t pid,
340 + struct page **pages, void __user *addr,
341 + int write, size_t npages)
342 +{
343 + int err;
344 + struct mm_struct *mm;
345 + struct kcopy_map_entry *rkme;
346 +
347 + mutex_lock(&kf->map_lock);
348 +
349 + rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
350 + if (!rkme || !kcopy_validate_task(rkme->task)) {
351 + err = -EINVAL;
352 + goto bail_unlock;
353 + }
354 +
355 + mm = get_task_mm(rkme->task);
356 + if (unlikely(!mm)) {
357 + err = -ENOMEM;
358 + goto bail_unlock;
359 + }
360 +
361 + down_read(&mm->mmap_sem);
362 + err = get_user_pages(rkme->task, mm,
363 + (unsigned long) addr, npages, write, 0,
364 + pages, NULL);
365 +
366 + if (err < npages && err > 0) {
367 + kcopy_put_pages(pages, err);
368 + err = -ENOMEM;
369 + } else if (err == npages)
370 + err = 0;
371 +
372 + up_read(&mm->mmap_sem);
373 +
374 + mmput(mm);
375 +
376 +bail_unlock:
377 + mutex_unlock(&kf->map_lock);
378 +
379 + return err;
380 +}
381 +
382 +static unsigned long kcopy_copy_pages_from_user(void __user *src,
383 + struct page **dpages,
384 + unsigned doff,
385 + unsigned long n)
386 +{
387 + struct page *dpage = *dpages;
388 + char *daddr = kmap(dpage);
389 + int ret = 0;
390 +
391 + while (1) {
392 + const unsigned long nleft = PAGE_SIZE - doff;
393 + const unsigned long nc = (n < nleft) ? n : nleft;
394 +
395 + /* if (copy_from_user(daddr + doff, src, nc)) { */
396 + if (__copy_from_user_nocache(daddr + doff, src, nc)) {
397 + ret = -EFAULT;
398 + goto bail;
399 + }
400 +
401 + n -= nc;
402 + if (n == 0)
403 + break;
404 +
405 + doff += nc;
406 + doff &= ~PAGE_MASK;
407 + if (doff == 0) {
408 + kunmap(dpage);
409 + dpages++;
410 + dpage = *dpages;
411 + daddr = kmap(dpage);
412 + }
413 +
414 + src += nc;
415 + }
416 +
417 +bail:
418 + kunmap(dpage);
419 +
420 + return ret;
421 +}
422 +
423 +static unsigned long kcopy_copy_pages_to_user(void __user *dst,
424 + struct page **spages,
425 + unsigned soff,
426 + unsigned long n)
427 +{
428 + struct page *spage = *spages;
429 + const char *saddr = kmap(spage);
430 + int ret = 0;
431 +
432 + while (1) {
433 + const unsigned long nleft = PAGE_SIZE - soff;
434 + const unsigned long nc = (n < nleft) ? n : nleft;
435 +
436 + if (copy_to_user(dst, saddr + soff, nc)) {
437 + ret = -EFAULT;
438 + goto bail;
439 + }
440 +
441 + n -= nc;
442 + if (n == 0)
443 + break;
444 +
445 + soff += nc;
446 + soff &= ~PAGE_MASK;
447 + if (soff == 0) {
448 + kunmap(spage);
449 + spages++;
450 + spage = *spages;
451 + saddr = kmap(spage);
452 + }
453 +
454 + dst += nc;
455 + }
456 +
457 +bail:
458 + kunmap(spage);
459 +
460 + return ret;
461 +}
462 +
463 +static unsigned long kcopy_copy_to_user(void __user *dst,
464 + struct kcopy_file *kf, pid_t pid,
465 + void __user *src,
466 + unsigned long n)
467 +{
468 + struct page **pages;
469 + const int pages_len = PAGE_SIZE / sizeof(struct page *);
470 + int ret = 0;
471 +
472 + pages = (struct page **) __get_free_page(GFP_KERNEL);
473 + if (!pages) {
474 + ret = -ENOMEM;
475 + goto bail;
476 + }
477 +
478 + while (n) {
479 + const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
480 + const unsigned long spages_left =
481 + (soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
482 + const unsigned long spages_cp =
483 + min_t(unsigned long, spages_left, pages_len);
484 + const unsigned long sbytes =
485 + PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
486 + const unsigned long nbytes = min_t(unsigned long, sbytes, n);
487 +
488 + ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
489 + if (unlikely(ret))
490 + goto bail_free;
491 +
492 + ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
493 + kcopy_put_pages(pages, spages_cp);
494 + if (ret)
495 + goto bail_free;
496 + dst = (char *) dst + nbytes;
497 + src = (char *) src + nbytes;
498 +
499 + n -= nbytes;
500 + }
501 +
502 +bail_free:
503 + free_page((unsigned long) pages);
504 +bail:
505 + return ret;
506 +}
507 +
508 +static unsigned long kcopy_copy_from_user(const void __user *src,
509 + struct kcopy_file *kf, pid_t pid,
510 + void __user *dst,
511 + unsigned long n)
512 +{
513 + struct page **pages;
514 + const int pages_len = PAGE_SIZE / sizeof(struct page *);
515 + int ret = 0;
516 +
517 + pages = (struct page **) __get_free_page(GFP_KERNEL);
518 + if (!pages) {
519 + ret = -ENOMEM;
520 + goto bail;
521 + }
522 +
523 + while (n) {
524 + const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
525 + const unsigned long dpages_left =
526 + (doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
527 + const unsigned long dpages_cp =
528 + min_t(unsigned long, dpages_left, pages_len);
529 + const unsigned long dbytes =
530 + PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
531 + const unsigned long nbytes = min_t(unsigned long, dbytes, n);
532 +
533 + ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
534 + if (unlikely(ret))
535 + goto bail_free;
536 +
537 + ret = kcopy_copy_pages_from_user((void __user *) src,
538 + pages, doff, nbytes);
539 + kcopy_put_pages(pages, dpages_cp);
540 + if (ret)
541 + goto bail_free;
542 +
543 + dst = (char *) dst + nbytes;
544 + src = (char *) src + nbytes;
545 +
546 + n -= nbytes;
547 + }
548 +
549 +bail_free:
550 + free_page((unsigned long) pages);
551 +bail:
552 + return ret;
553 +}
554 +
555 +static int kcopy_do_get(struct kcopy_map_entry *kme, pid_t pid,
556 + const void __user *src, void __user *dst,
557 + unsigned long n)
558 +{
559 + struct kcopy_file *kf = kme->file;
560 + int ret = 0;
561 +
562 + if (n == 0) {
563 + ret = -EINVAL;
564 + goto bail;
565 + }
566 +
567 + ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
568 +
569 +bail:
570 + return ret;
571 +}
572 +
573 +static int kcopy_do_put(struct kcopy_map_entry *kme, const void __user *src,
574 + pid_t pid, void __user *dst,
575 + unsigned long n)
576 +{
577 + struct kcopy_file *kf = kme->file;
578 + int ret = 0;
579 +
580 + if (n == 0) {
581 + ret = -EINVAL;
582 + goto bail;
583 + }
584 +
585 + ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
586 +
587 +bail:
588 + return ret;
589 +}
590 +
591 +static int kcopy_do_abi(u32 __user *dst)
592 +{
593 + u32 val = KCOPY_ABI;
594 + int err;
595 +
596 + err = put_user(val, dst);
597 + if (err)
598 + return -EFAULT;
599 +
600 + return 0;
601 +}
602 +
603 +ssize_t kcopy_write(struct file *filp, const char __user *data, size_t cnt,
604 + loff_t *o)
605 +{
606 + struct kcopy_map_entry *kme = filp->private_data;
607 + struct kcopy_syscall ks;
608 + int err = 0;
609 + const void __user *src;
610 + void __user *dst;
611 + unsigned long n;
612 +
613 + if (cnt != sizeof(struct kcopy_syscall)) {
614 + err = -EINVAL;
615 + goto bail;
616 + }
617 +
618 + err = copy_from_user(&ks, data, cnt);
619 + if (unlikely(err))
620 + goto bail;
621 +
622 + src = kcopy_syscall_src(&ks);
623 + dst = kcopy_syscall_dst(&ks);
624 + n = kcopy_syscall_n(&ks);
625 + if (ks.tag == KCOPY_GET_SYSCALL)
626 + err = kcopy_do_get(kme, ks.pid, src, dst, n);
627 + else if (ks.tag == KCOPY_PUT_SYSCALL)
628 + err = kcopy_do_put(kme, src, ks.pid, dst, n);
629 + else if (ks.tag == KCOPY_ABI_SYSCALL)
630 + err = kcopy_do_abi(dst);
631 + else
632 + err = -EINVAL;
633 +
634 +bail:
635 + return err ? err : cnt;
636 +}
637 +
638 +static const struct file_operations kcopy_fops = {
639 + .owner = THIS_MODULE,
640 + .open = kcopy_open,
641 + .release = kcopy_release,
642 + .flush = kcopy_flush,
643 + .write = kcopy_write,
644 +};
645 +
646 +static int __init kcopy_init(void)
647 +{
648 + int ret;
649 + const char *name = "kcopy";
650 + int i;
651 + int ninit = 0;
652 +
653 + mutex_init(&kcopy_dev.open_lock);
654 +
655 + ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
656 + if (ret)
657 + goto bail;
658 +
659 + kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
660 +
661 + if (IS_ERR(kcopy_dev.class)) {
662 + ret = PTR_ERR(kcopy_dev.class);
663 + printk(KERN_ERR "kcopy: Could not create "
664 + "device class (err %d)\n", -ret);
665 + goto bail_chrdev;
666 + }
667 +
668 + cdev_init(&kcopy_dev.cdev, &kcopy_fops);
669 + ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
670 + if (ret < 0) {
671 + printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
672 + -ret);
673 + goto bail_class;
674 + }
675 +
676 + for (i = 0; i < KCOPY_MAX_MINORS; i++) {
677 + char devname[8];
678 + const int minor = MINOR(kcopy_dev.dev) + i;
679 + const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
680 +
681 + snprintf(devname, sizeof(devname), "kcopy%02d", i);
682 + kcopy_dev.devp[i] =
683 + device_create(kcopy_dev.class, NULL,
684 + dev, NULL, devname);
685 +
686 + if (IS_ERR(kcopy_dev.devp[i])) {
687 + ret = PTR_ERR(kcopy_dev.devp[i]);
688 + printk(KERN_ERR "kcopy: Could not create "
689 + "devp %d (err %d)\n", i, -ret);
690 + goto bail_cdev_add;
691 + }
692 +
693 + ninit++;
694 + }
695 +
696 + ret = 0;
697 + goto bail;
698 +
699 +bail_cdev_add:
700 + for (i = 0; i < ninit; i++)
701 + device_unregister(kcopy_dev.devp[i]);
702 +
703 + cdev_del(&kcopy_dev.cdev);
704 +bail_class:
705 + class_destroy(kcopy_dev.class);
706 +bail_chrdev:
707 + unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
708 +bail:
709 + return ret;
710 +}
711 +
712 +static void __exit kcopy_fini(void)
713 +{
714 + int i;
715 +
716 + for (i = 0; i < KCOPY_MAX_MINORS; i++)
717 + device_unregister(kcopy_dev.devp[i]);
718 +
719 + cdev_del(&kcopy_dev.cdev);
720 + class_destroy(kcopy_dev.class);
721 + unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
722 +}
723 +
724 +module_init(kcopy_init);
725 +module_exit(kcopy_fini);

  ViewVC Help
Powered by ViewVC 1.1.20