/[linux-patches]/genpatches-2.6/tags/3.4-10/2400_kcopy-patch-for-infiniband-driver.patch
Gentoo

Contents of /genpatches-2.6/tags/3.4-10/2400_kcopy-patch-for-infiniband-driver.patch

Parent Directory | Revision Log


Revision 2192 - (show annotations) (download)
Fri Aug 17 23:46:53 2012 UTC (6 years, 1 month ago) by mpagano
File size: 16135 byte(s)
3.4-10 release
1 From 1f52075d672a9bdd0069b3ea68be266ef5c229bd Mon Sep 17 00:00:00 2001
2 From: Alexey Shvetsov <alexxy@gentoo.org>
3 Date: Tue, 17 Jan 2012 21:08:49 +0400
4 Subject: [PATCH] [kcopy] Add kcopy driver
5
6 Add kcopy driver from qlogic to implement zero copy for infiniband psm
7 userspace driver
8
9 Signed-off-by: Alexey Shvetsov <alexxy@gentoo.org>
10 ---
11 drivers/char/Kconfig | 2 +
12 drivers/char/Makefile | 2 +
13 drivers/char/kcopy/Kconfig | 17 ++
14 drivers/char/kcopy/Makefile | 4 +
15 drivers/char/kcopy/kcopy.c | 646 +++++++++++++++++++++++++++++++++++++++++++
16 5 files changed, 671 insertions(+)
17 create mode 100644 drivers/char/kcopy/Kconfig
18 create mode 100644 drivers/char/kcopy/Makefile
19 create mode 100644 drivers/char/kcopy/kcopy.c
20
21 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
22 index ee94686..5b81449 100644
23 --- a/drivers/char/Kconfig
24 +++ b/drivers/char/Kconfig
25 @@ -6,6 +6,8 @@ menu "Character devices"
26
27 source "drivers/tty/Kconfig"
28
29 +source "drivers/char/kcopy/Kconfig"
30 +
31 config DEVKMEM
32 bool "/dev/kmem virtual device support"
33 default y
34 diff --git a/drivers/char/Makefile b/drivers/char/Makefile
35 index 0dc5d7c..be519d6 100644
36 --- a/drivers/char/Makefile
37 +++ b/drivers/char/Makefile
38 @@ -64,3 +64,5 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
39 js-rtc-y = rtc.o
40
41 obj-$(CONFIG_TILE_SROM) += tile-srom.o
42 +
43 +obj-$(CONFIG_KCOPY) += kcopy/
44 diff --git a/drivers/char/kcopy/Kconfig b/drivers/char/kcopy/Kconfig
45 new file mode 100644
46 index 0000000..453ae52
47 --- /dev/null
48 +++ b/drivers/char/kcopy/Kconfig
49 @@ -0,0 +1,17 @@
50 +#
51 +# KCopy character device configuration
52 +#
53 +
54 +menu "KCopy"
55 +
56 +config KCOPY
57 + tristate "Memory-to-memory copies using kernel assist"
58 + default m
59 + ---help---
60 + High-performance inter-process memory copies. Can often save a
61 + memory copy to shared memory in the application. Useful at least
62 + for MPI applications where the point-to-point nature of vmsplice
63 + and pipes can be a limiting factor in performance.
64 +
65 +endmenu
66 +
67 diff --git a/drivers/char/kcopy/Makefile b/drivers/char/kcopy/Makefile
68 new file mode 100644
69 index 0000000..9cb269b
70 --- /dev/null
71 +++ b/drivers/char/kcopy/Makefile
72 @@ -0,0 +1,4 @@
73 +#
74 +# Makefile for the kernel character device drivers.
75 +#
76 +obj-$(CONFIG_KCOPY) += kcopy.o
77 diff --git a/drivers/char/kcopy/kcopy.c b/drivers/char/kcopy/kcopy.c
78 new file mode 100644
79 index 0000000..a9f915c
80 --- /dev/null
81 +++ b/drivers/char/kcopy/kcopy.c
82 @@ -0,0 +1,646 @@
83 +#include <linux/module.h>
84 +#include <linux/fs.h>
85 +#include <linux/cdev.h>
86 +#include <linux/device.h>
87 +#include <linux/mutex.h>
88 +#include <linux/mman.h>
89 +#include <linux/highmem.h>
90 +#include <linux/spinlock.h>
91 +#include <linux/sched.h>
92 +#include <linux/rbtree.h>
93 +#include <linux/rcupdate.h>
94 +#include <linux/uaccess.h>
95 +#include <linux/slab.h>
96 +
97 +MODULE_LICENSE("GPL");
98 +MODULE_AUTHOR("Arthur Jones <arthur.jones@qlogic.com>");
99 +MODULE_DESCRIPTION("QLogic kcopy driver");
100 +
101 +#define KCOPY_ABI 1
102 +#define KCOPY_MAX_MINORS 64
103 +
104 +struct kcopy_device {
105 + struct cdev cdev;
106 + struct class *class;
107 + struct device *devp[KCOPY_MAX_MINORS];
108 + dev_t dev;
109 +
110 + struct kcopy_file *kf[KCOPY_MAX_MINORS];
111 + struct mutex open_lock;
112 +};
113 +
114 +static struct kcopy_device kcopy_dev;
115 +
116 +/* per file data / one of these is shared per minor */
117 +struct kcopy_file {
118 + int count;
119 +
120 + /* pid indexed */
121 + struct rb_root live_map_tree;
122 +
123 + struct mutex map_lock;
124 +};
125 +
126 +struct kcopy_map_entry {
127 + int count;
128 + struct task_struct *task;
129 + pid_t pid;
130 + struct kcopy_file *file; /* file backpointer */
131 +
132 + struct list_head list; /* free map list */
133 + struct rb_node node; /* live map tree */
134 +};
135 +
136 +#define KCOPY_GET_SYSCALL 1
137 +#define KCOPY_PUT_SYSCALL 2
138 +#define KCOPY_ABI_SYSCALL 3
139 +
140 +struct kcopy_syscall {
141 + __u32 tag;
142 + pid_t pid;
143 + __u64 n;
144 + __u64 src;
145 + __u64 dst;
146 +};
147 +
148 +static const void __user *kcopy_syscall_src(const struct kcopy_syscall *ks)
149 +{
150 + return (const void __user *) (unsigned long) ks->src;
151 +}
152 +
153 +static void __user *kcopy_syscall_dst(const struct kcopy_syscall *ks)
154 +{
155 + return (void __user *) (unsigned long) ks->dst;
156 +}
157 +
158 +static unsigned long kcopy_syscall_n(const struct kcopy_syscall *ks)
159 +{
160 + return (unsigned long) ks->n;
161 +}
162 +
163 +static struct kcopy_map_entry *kcopy_create_entry(struct kcopy_file *file)
164 +{
165 + struct kcopy_map_entry *kme =
166 + kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
167 +
168 + if (!kme)
169 + return NULL;
170 +
171 + kme->count = 1;
172 + kme->file = file;
173 + kme->task = current;
174 + kme->pid = current->tgid;
175 + INIT_LIST_HEAD(&kme->list);
176 +
177 + return kme;
178 +}
179 +
180 +static struct kcopy_map_entry *
181 +kcopy_lookup_pid(struct rb_root *root, pid_t pid)
182 +{
183 + struct rb_node *node = root->rb_node;
184 +
185 + while (node) {
186 + struct kcopy_map_entry *kme =
187 + container_of(node, struct kcopy_map_entry, node);
188 +
189 + if (pid < kme->pid)
190 + node = node->rb_left;
191 + else if (pid > kme->pid)
192 + node = node->rb_right;
193 + else
194 + return kme;
195 + }
196 +
197 + return NULL;
198 +}
199 +
200 +static int kcopy_insert(struct rb_root *root, struct kcopy_map_entry *kme)
201 +{
202 + struct rb_node **new = &(root->rb_node);
203 + struct rb_node *parent = NULL;
204 +
205 + while (*new) {
206 + struct kcopy_map_entry *tkme =
207 + container_of(*new, struct kcopy_map_entry, node);
208 +
209 + parent = *new;
210 + if (kme->pid < tkme->pid)
211 + new = &((*new)->rb_left);
212 + else if (kme->pid > tkme->pid)
213 + new = &((*new)->rb_right);
214 + else {
215 + printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
216 + return -EINVAL;
217 + }
218 + }
219 +
220 + rb_link_node(&kme->node, parent, new);
221 + rb_insert_color(&kme->node, root);
222 +
223 + return 0;
224 +}
225 +
226 +static int kcopy_open(struct inode *inode, struct file *filp)
227 +{
228 + int ret;
229 + const int minor = iminor(inode);
230 + struct kcopy_file *kf = NULL;
231 + struct kcopy_map_entry *kme;
232 + struct kcopy_map_entry *okme;
233 +
234 + if (minor < 0 || minor >= KCOPY_MAX_MINORS)
235 + return -ENODEV;
236 +
237 + mutex_lock(&kcopy_dev.open_lock);
238 +
239 + if (!kcopy_dev.kf[minor]) {
240 + kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
241 +
242 + if (!kf) {
243 + ret = -ENOMEM;
244 + goto bail;
245 + }
246 +
247 + kf->count = 1;
248 + kf->live_map_tree = RB_ROOT;
249 + mutex_init(&kf->map_lock);
250 + kcopy_dev.kf[minor] = kf;
251 + } else {
252 + if (filp->f_flags & O_EXCL) {
253 + ret = -EBUSY;
254 + goto bail;
255 + }
256 + kcopy_dev.kf[minor]->count++;
257 + }
258 +
259 + kme = kcopy_create_entry(kcopy_dev.kf[minor]);
260 + if (!kme) {
261 + ret = -ENOMEM;
262 + goto err_free_kf;
263 + }
264 +
265 + kf = kcopy_dev.kf[minor];
266 +
267 + mutex_lock(&kf->map_lock);
268 +
269 + okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
270 + if (okme) {
271 + /* pid already exists... */
272 + okme->count++;
273 + kfree(kme);
274 + kme = okme;
275 + } else
276 + ret = kcopy_insert(&kf->live_map_tree, kme);
277 +
278 + mutex_unlock(&kf->map_lock);
279 +
280 + filp->private_data = kme;
281 +
282 + ret = 0;
283 + goto bail;
284 +
285 +err_free_kf:
286 + if (kf) {
287 + kcopy_dev.kf[minor] = NULL;
288 + kfree(kf);
289 + }
290 +bail:
291 + mutex_unlock(&kcopy_dev.open_lock);
292 + return ret;
293 +}
294 +
295 +static int kcopy_flush(struct file *filp, fl_owner_t id)
296 +{
297 + struct kcopy_map_entry *kme = filp->private_data;
298 + struct kcopy_file *kf = kme->file;
299 +
300 + if (file_count(filp) == 1) {
301 + mutex_lock(&kf->map_lock);
302 + kme->count--;
303 +
304 + if (!kme->count) {
305 + rb_erase(&kme->node, &kf->live_map_tree);
306 + kfree(kme);
307 + }
308 + mutex_unlock(&kf->map_lock);
309 + }
310 +
311 + return 0;
312 +}
313 +
314 +static int kcopy_release(struct inode *inode, struct file *filp)
315 +{
316 + const int minor = iminor(inode);
317 +
318 + mutex_lock(&kcopy_dev.open_lock);
319 + kcopy_dev.kf[minor]->count--;
320 + if (!kcopy_dev.kf[minor]->count) {
321 + kfree(kcopy_dev.kf[minor]);
322 + kcopy_dev.kf[minor] = NULL;
323 + }
324 + mutex_unlock(&kcopy_dev.open_lock);
325 +
326 + return 0;
327 +}
328 +
329 +static void kcopy_put_pages(struct page **pages, int npages)
330 +{
331 + int j;
332 +
333 + for (j = 0; j < npages; j++)
334 + put_page(pages[j]);
335 +}
336 +
337 +static int kcopy_validate_task(struct task_struct *p)
338 +{
339 + return p && ((current_euid() == task_euid(p)) || (current_euid() == task_uid(p)));
340 +}
341 +
342 +static int kcopy_get_pages(struct kcopy_file *kf, pid_t pid,
343 + struct page **pages, void __user *addr,
344 + int write, size_t npages)
345 +{
346 + int err;
347 + struct mm_struct *mm;
348 + struct kcopy_map_entry *rkme;
349 +
350 + mutex_lock(&kf->map_lock);
351 +
352 + rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
353 + if (!rkme || !kcopy_validate_task(rkme->task)) {
354 + err = -EINVAL;
355 + goto bail_unlock;
356 + }
357 +
358 + mm = get_task_mm(rkme->task);
359 + if (unlikely(!mm)) {
360 + err = -ENOMEM;
361 + goto bail_unlock;
362 + }
363 +
364 + down_read(&mm->mmap_sem);
365 + err = get_user_pages(rkme->task, mm,
366 + (unsigned long) addr, npages, write, 0,
367 + pages, NULL);
368 +
369 + if (err < npages && err > 0) {
370 + kcopy_put_pages(pages, err);
371 + err = -ENOMEM;
372 + } else if (err == npages)
373 + err = 0;
374 +
375 + up_read(&mm->mmap_sem);
376 +
377 + mmput(mm);
378 +
379 +bail_unlock:
380 + mutex_unlock(&kf->map_lock);
381 +
382 + return err;
383 +}
384 +
385 +static unsigned long kcopy_copy_pages_from_user(void __user *src,
386 + struct page **dpages,
387 + unsigned doff,
388 + unsigned long n)
389 +{
390 + struct page *dpage = *dpages;
391 + char *daddr = kmap(dpage);
392 + int ret = 0;
393 +
394 + while (1) {
395 + const unsigned long nleft = PAGE_SIZE - doff;
396 + const unsigned long nc = (n < nleft) ? n : nleft;
397 +
398 + /* if (copy_from_user(daddr + doff, src, nc)) { */
399 + if (__copy_from_user_nocache(daddr + doff, src, nc)) {
400 + ret = -EFAULT;
401 + goto bail;
402 + }
403 +
404 + n -= nc;
405 + if (n == 0)
406 + break;
407 +
408 + doff += nc;
409 + doff &= ~PAGE_MASK;
410 + if (doff == 0) {
411 + kunmap(dpage);
412 + dpages++;
413 + dpage = *dpages;
414 + daddr = kmap(dpage);
415 + }
416 +
417 + src += nc;
418 + }
419 +
420 +bail:
421 + kunmap(dpage);
422 +
423 + return ret;
424 +}
425 +
426 +static unsigned long kcopy_copy_pages_to_user(void __user *dst,
427 + struct page **spages,
428 + unsigned soff,
429 + unsigned long n)
430 +{
431 + struct page *spage = *spages;
432 + const char *saddr = kmap(spage);
433 + int ret = 0;
434 +
435 + while (1) {
436 + const unsigned long nleft = PAGE_SIZE - soff;
437 + const unsigned long nc = (n < nleft) ? n : nleft;
438 +
439 + if (copy_to_user(dst, saddr + soff, nc)) {
440 + ret = -EFAULT;
441 + goto bail;
442 + }
443 +
444 + n -= nc;
445 + if (n == 0)
446 + break;
447 +
448 + soff += nc;
449 + soff &= ~PAGE_MASK;
450 + if (soff == 0) {
451 + kunmap(spage);
452 + spages++;
453 + spage = *spages;
454 + saddr = kmap(spage);
455 + }
456 +
457 + dst += nc;
458 + }
459 +
460 +bail:
461 + kunmap(spage);
462 +
463 + return ret;
464 +}
465 +
466 +static unsigned long kcopy_copy_to_user(void __user *dst,
467 + struct kcopy_file *kf, pid_t pid,
468 + void __user *src,
469 + unsigned long n)
470 +{
471 + struct page **pages;
472 + const int pages_len = PAGE_SIZE / sizeof(struct page *);
473 + int ret = 0;
474 +
475 + pages = (struct page **) __get_free_page(GFP_KERNEL);
476 + if (!pages) {
477 + ret = -ENOMEM;
478 + goto bail;
479 + }
480 +
481 + while (n) {
482 + const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
483 + const unsigned long spages_left =
484 + (soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
485 + const unsigned long spages_cp =
486 + min_t(unsigned long, spages_left, pages_len);
487 + const unsigned long sbytes =
488 + PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
489 + const unsigned long nbytes = min_t(unsigned long, sbytes, n);
490 +
491 + ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
492 + if (unlikely(ret))
493 + goto bail_free;
494 +
495 + ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
496 + kcopy_put_pages(pages, spages_cp);
497 + if (ret)
498 + goto bail_free;
499 + dst = (char *) dst + nbytes;
500 + src = (char *) src + nbytes;
501 +
502 + n -= nbytes;
503 + }
504 +
505 +bail_free:
506 + free_page((unsigned long) pages);
507 +bail:
508 + return ret;
509 +}
510 +
511 +static unsigned long kcopy_copy_from_user(const void __user *src,
512 + struct kcopy_file *kf, pid_t pid,
513 + void __user *dst,
514 + unsigned long n)
515 +{
516 + struct page **pages;
517 + const int pages_len = PAGE_SIZE / sizeof(struct page *);
518 + int ret = 0;
519 +
520 + pages = (struct page **) __get_free_page(GFP_KERNEL);
521 + if (!pages) {
522 + ret = -ENOMEM;
523 + goto bail;
524 + }
525 +
526 + while (n) {
527 + const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
528 + const unsigned long dpages_left =
529 + (doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
530 + const unsigned long dpages_cp =
531 + min_t(unsigned long, dpages_left, pages_len);
532 + const unsigned long dbytes =
533 + PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
534 + const unsigned long nbytes = min_t(unsigned long, dbytes, n);
535 +
536 + ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
537 + if (unlikely(ret))
538 + goto bail_free;
539 +
540 + ret = kcopy_copy_pages_from_user((void __user *) src,
541 + pages, doff, nbytes);
542 + kcopy_put_pages(pages, dpages_cp);
543 + if (ret)
544 + goto bail_free;
545 +
546 + dst = (char *) dst + nbytes;
547 + src = (char *) src + nbytes;
548 +
549 + n -= nbytes;
550 + }
551 +
552 +bail_free:
553 + free_page((unsigned long) pages);
554 +bail:
555 + return ret;
556 +}
557 +
558 +static int kcopy_do_get(struct kcopy_map_entry *kme, pid_t pid,
559 + const void __user *src, void __user *dst,
560 + unsigned long n)
561 +{
562 + struct kcopy_file *kf = kme->file;
563 + int ret = 0;
564 +
565 + if (n == 0) {
566 + ret = -EINVAL;
567 + goto bail;
568 + }
569 +
570 + ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
571 +
572 +bail:
573 + return ret;
574 +}
575 +
576 +static int kcopy_do_put(struct kcopy_map_entry *kme, const void __user *src,
577 + pid_t pid, void __user *dst,
578 + unsigned long n)
579 +{
580 + struct kcopy_file *kf = kme->file;
581 + int ret = 0;
582 +
583 + if (n == 0) {
584 + ret = -EINVAL;
585 + goto bail;
586 + }
587 +
588 + ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
589 +
590 +bail:
591 + return ret;
592 +}
593 +
594 +static int kcopy_do_abi(u32 __user *dst)
595 +{
596 + u32 val = KCOPY_ABI;
597 + int err;
598 +
599 + err = put_user(val, dst);
600 + if (err)
601 + return -EFAULT;
602 +
603 + return 0;
604 +}
605 +
606 +ssize_t kcopy_write(struct file *filp, const char __user *data, size_t cnt,
607 + loff_t *o)
608 +{
609 + struct kcopy_map_entry *kme = filp->private_data;
610 + struct kcopy_syscall ks;
611 + int err = 0;
612 + const void __user *src;
613 + void __user *dst;
614 + unsigned long n;
615 +
616 + if (cnt != sizeof(struct kcopy_syscall)) {
617 + err = -EINVAL;
618 + goto bail;
619 + }
620 +
621 + err = copy_from_user(&ks, data, cnt);
622 + if (unlikely(err))
623 + goto bail;
624 +
625 + src = kcopy_syscall_src(&ks);
626 + dst = kcopy_syscall_dst(&ks);
627 + n = kcopy_syscall_n(&ks);
628 + if (ks.tag == KCOPY_GET_SYSCALL)
629 + err = kcopy_do_get(kme, ks.pid, src, dst, n);
630 + else if (ks.tag == KCOPY_PUT_SYSCALL)
631 + err = kcopy_do_put(kme, src, ks.pid, dst, n);
632 + else if (ks.tag == KCOPY_ABI_SYSCALL)
633 + err = kcopy_do_abi(dst);
634 + else
635 + err = -EINVAL;
636 +
637 +bail:
638 + return err ? err : cnt;
639 +}
640 +
641 +static const struct file_operations kcopy_fops = {
642 + .owner = THIS_MODULE,
643 + .open = kcopy_open,
644 + .release = kcopy_release,
645 + .flush = kcopy_flush,
646 + .write = kcopy_write,
647 +};
648 +
649 +static int __init kcopy_init(void)
650 +{
651 + int ret;
652 + const char *name = "kcopy";
653 + int i;
654 + int ninit = 0;
655 +
656 + mutex_init(&kcopy_dev.open_lock);
657 +
658 + ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
659 + if (ret)
660 + goto bail;
661 +
662 + kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
663 +
664 + if (IS_ERR(kcopy_dev.class)) {
665 + ret = PTR_ERR(kcopy_dev.class);
666 + printk(KERN_ERR "kcopy: Could not create "
667 + "device class (err %d)\n", -ret);
668 + goto bail_chrdev;
669 + }
670 +
671 + cdev_init(&kcopy_dev.cdev, &kcopy_fops);
672 + ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
673 + if (ret < 0) {
674 + printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
675 + -ret);
676 + goto bail_class;
677 + }
678 +
679 + for (i = 0; i < KCOPY_MAX_MINORS; i++) {
680 + char devname[8];
681 + const int minor = MINOR(kcopy_dev.dev) + i;
682 + const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
683 +
684 + snprintf(devname, sizeof(devname), "kcopy%02d", i);
685 + kcopy_dev.devp[i] =
686 + device_create(kcopy_dev.class, NULL,
687 + dev, NULL, devname);
688 +
689 + if (IS_ERR(kcopy_dev.devp[i])) {
690 + ret = PTR_ERR(kcopy_dev.devp[i]);
691 + printk(KERN_ERR "kcopy: Could not create "
692 + "devp %d (err %d)\n", i, -ret);
693 + goto bail_cdev_add;
694 + }
695 +
696 + ninit++;
697 + }
698 +
699 + ret = 0;
700 + goto bail;
701 +
702 +bail_cdev_add:
703 + for (i = 0; i < ninit; i++)
704 + device_unregister(kcopy_dev.devp[i]);
705 +
706 + cdev_del(&kcopy_dev.cdev);
707 +bail_class:
708 + class_destroy(kcopy_dev.class);
709 +bail_chrdev:
710 + unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
711 +bail:
712 + return ret;
713 +}
714 +
715 +static void __exit kcopy_fini(void)
716 +{
717 + int i;
718 +
719 + for (i = 0; i < KCOPY_MAX_MINORS; i++)
720 + device_unregister(kcopy_dev.devp[i]);
721 +
722 + cdev_del(&kcopy_dev.cdev);
723 + class_destroy(kcopy_dev.class);
724 + unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
725 +}
726 +
727 +module_init(kcopy_init);
728 +module_exit(kcopy_fini);
729 --
730 1.7.10
731

  ViewVC Help
Powered by ViewVC 1.1.20