/[gentoo-x86]/sys-fs/zfs/files/zfs-0.6.0_rc9-range-lock-caller-allocate.patch

Revision 1.5
Wed Jul 11 13:20:31 2012 UTC by ryao
Branch: MAIN
CVS Tags: HEAD
Changes since 1.4: +0 -0 lines
FILE REMOVED
Linux 3.5 support; change LICENSE variable; remove a patch that had been mistakenly reintroduced in -r4 and caused regressions

(Portage version: 2.1.10.65/cvs/Linux x86_64)

commit e7deab3edf6940f13013ca147c91472577223923
Author: Richard Yao <ryao@cs.stonybrook.edu>
Date:   Mon Jun 25 14:41:30 2012 -0400

    Make callers responsible for memory allocation in zfs_range_lock()

    zfs_range_lock() is used in zvols, and previously it could deadlock due
    to an allocation using KM_SLEEP. We avoid this by moving responsibility
    for the memory allocation from zfs_range_lock() to the caller. This
    enables callers to use stack allocations, which are more efficient and
    avoid such deadlocks. The contexts in which stack allocations are done
    do not appear to be stack heavy, so we do not risk overflowing the
    stack.

    Signed-off-by: Richard Yao <ryao@cs.stonybrook.edu>

    Conflicts:

    	module/zfs/zvol.c

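The shape of the change, as a minimal standalone C sketch (the range_lock_t
type and the function names are illustrative stand-ins for this note, not
the actual ZFS declarations):

	#include <stdint.h>
	#include <stdlib.h>

	typedef struct range_lock {
		uint64_t r_off;
		uint64_t r_len;
	} range_lock_t;

	/* Before: the callee allocates, and the allocation may sleep. */
	range_lock_t *
	range_lock_alloc(uint64_t off, uint64_t len)
	{
		/* stands in for kmem_alloc(..., KM_SLEEP), the deadlock risk */
		range_lock_t *rl = malloc(sizeof (*rl));

		rl->r_off = off;
		rl->r_len = len;
		return (rl);
	}

	/* After: the caller supplies storage; no allocation in the lock path. */
	void
	range_lock_init(range_lock_t *rl, uint64_t off, uint64_t len)
	{
		rl->r_off = off;
		rl->r_len = len;
	}

	int
	main(void)
	{
		range_lock_t rl;	/* stack allocation, as in the patched callers */

		range_lock_init(&rl, 0, 4096);
		/* ... perform I/O under the lock, then tear it down ... */
		return (0);
	}

The hunks below apply exactly this conversion: rl_t *rl locals become rl_t rl
stack variables, zfs_range_lock() fills in caller-provided storage instead of
allocating its own, and zfs_range_unlock() no longer frees the structure.
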
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index 72d511b..c5dd0c2 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -973,12 +973,11 @@ ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
 }
 
 static rl_t *
-ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
+ztest_range_lock(rl_t *rl, ztest_ds_t *zd, uint64_t object, uint64_t offset,
     uint64_t size, rl_type_t type)
 {
 	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
 	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
-	rl_t *rl;
 
 	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
 	rl->rl_object = object;
@@ -1389,7 +1388,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
 	dmu_tx_t *tx;
 	dmu_buf_t *db;
 	arc_buf_t *abuf = NULL;
-	rl_t *rl;
+	rl_t rl;
 
 	if (byteswap)
 		byteswap_uint64_array(lr, sizeof (*lr));
@@ -1413,7 +1412,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
 		bt = NULL;
 
 	ztest_object_lock(zd, lr->lr_foid, RL_READER);
-	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
+	ztest_range_lock(&rl, zd, lr->lr_foid, offset, length, RL_WRITER);
 
 	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
 
@@ -1438,7 +1437,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
 		if (abuf != NULL)
 			dmu_return_arcbuf(abuf);
 		dmu_buf_rele(db, FTAG);
-		ztest_range_unlock(rl);
+		ztest_range_unlock(&rl);
 		ztest_object_unlock(zd, lr->lr_foid);
 		return (ENOSPC);
 	}
@@ -1495,7 +1494,7 @@ ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
 
 	dmu_tx_commit(tx);
 
-	ztest_range_unlock(rl);
+	ztest_range_unlock(&rl);
 	ztest_object_unlock(zd, lr->lr_foid);
 
 	return (0);
@@ -1507,13 +1506,13 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
 	objset_t *os = zd->zd_os;
 	dmu_tx_t *tx;
 	uint64_t txg;
-	rl_t *rl;
+	rl_t rl;
 
 	if (byteswap)
 		byteswap_uint64_array(lr, sizeof (*lr));
 
 	ztest_object_lock(zd, lr->lr_foid, RL_READER);
-	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
+	ztest_range_lock(&rl, zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
 	    RL_WRITER);
 
 	tx = dmu_tx_create(os);
@@ -1522,7 +1521,7 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
 
 	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
 	if (txg == 0) {
-		ztest_range_unlock(rl);
+		ztest_range_unlock(&rl);
 		ztest_object_unlock(zd, lr->lr_foid);
 		return (ENOSPC);
 	}
@@ -1534,7 +1533,7 @@ ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
 
 	dmu_tx_commit(tx);
 
-	ztest_range_unlock(rl);
+	ztest_range_unlock(&rl);
 	ztest_object_unlock(zd, lr->lr_foid);
 
 	return (0);
@@ -1670,6 +1669,8 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 	dmu_object_info_t doi;
 	dmu_buf_t *db;
 	zgd_t *zgd;
+	rl_t rl;
+
 	int error;
 
 	ztest_object_lock(zd, object, RL_READER);
@@ -1694,9 +1695,10 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
 	zgd->zgd_zilog = zd->zd_zilog;
 	zgd->zgd_private = zd;
+	zgd->zgd_rl = &rl;
 
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
+		ztest_range_lock(zgd->zgd_rl, zd, object, offset, size,
 		    RL_READER);
 
 		error = dmu_read(os, object, offset, size, buf,
@@ -1711,7 +1713,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 			offset = 0;
 		}
 
-		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
+		ztest_range_lock(zgd->zgd_rl, zd, object, offset, size,
 		    RL_READER);
 
 		error = dmu_buf_hold(os, object, offset, zgd, &db,
@@ -1953,12 +1955,12 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
 	objset_t *os = zd->zd_os;
 	dmu_tx_t *tx;
 	uint64_t txg;
-	rl_t *rl;
+	rl_t rl;
 
 	txg_wait_synced(dmu_objset_pool(os), 0);
 
 	ztest_object_lock(zd, object, RL_READER);
-	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
+	ztest_range_lock(&rl, zd, object, offset, size, RL_WRITER);
 
 	tx = dmu_tx_create(os);
 
@@ -1974,7 +1976,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
 		(void) dmu_free_long_range(os, object, offset, size);
 	}
 
-	ztest_range_unlock(rl);
+	ztest_range_unlock(&rl);
 	ztest_object_unlock(zd, object);
 }
 
diff --git a/include/sys/zfs_rlock.h b/include/sys/zfs_rlock.h
index da18b1f..85dc16a 100644
--- a/include/sys/zfs_rlock.h
+++ b/include/sys/zfs_rlock.h
@@ -63,7 +63,7 @@ typedef struct rl {
  * is converted to WRITER that specified to lock from the start of the
  * end of file. zfs_range_lock() returns the range lock structure.
  */
-rl_t *zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type);
+rl_t *zfs_range_lock(rl_t *rl, znode_t *zp, uint64_t off, uint64_t len, rl_type_t type);
 
 /*
  * Unlock range and destroy range lock structure.
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index f3ada17..eb81777 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -31,9 +31,9 @@
  * Interface
  * ---------
  * Defined in zfs_rlock.h but essentially:
- *	rl = zfs_range_lock(zp, off, len, lock_type);
- *	zfs_range_unlock(rl);
- *	zfs_range_reduce(rl, off, len);
+ *	zfs_range_lock(&rl, zp, off, len, lock_type);
+ *	zfs_range_unlock(&rl);
+ *	zfs_range_reduce(&rl, off, len);
  *
  * AVL tree
  * --------
@@ -420,13 +420,11 @@ got_lock:
  * previously locked as RL_WRITER).
  */
 rl_t *
-zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
+zfs_range_lock(rl_t *new, znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
 {
-	rl_t *new;
 
 	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
 
-	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
 	new->r_zp = zp;
 	new->r_off = off;
 	if (len + off < off)	/* overflow */
@@ -531,7 +529,6 @@ zfs_range_unlock_reader(znode_t *zp, rl_t *remove, list_t *free_list)
 		}
 
 		mutex_exit(&zp->z_range_lock);
-		kmem_free(remove, sizeof (rl_t));
 	}
 }
 
@@ -572,7 +569,9 @@ zfs_range_unlock(rl_t *rl)
 
 	while ((free_rl = list_head(&free_list)) != NULL) {
 		list_remove(&free_list, free_rl);
-		zfs_range_free(free_rl);
+		/* Freeing rl is the caller's responsibility */
+		if (free_rl != rl)
+			zfs_range_free(free_rl);
 	}
 
 	list_destroy(&free_list);
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 2da5fec..1ef5299 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -370,7 +370,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	objset_t *os;
 	ssize_t n, nbytes;
 	int error = 0;
-	rl_t *rl;
+	rl_t rl;
 #ifdef HAVE_UIO_ZEROCOPY
 	xuio_t *xuio = NULL;
 #endif /* HAVE_UIO_ZEROCOPY */
@@ -418,7 +418,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	/*
 	 * Lock the range against changes.
 	 */
-	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
+	zfs_range_lock(&rl, zp, uio->uio_loffset, uio->uio_resid, RL_READER);
 
 	/*
 	 * If we are reading past end-of-file we can skip
@@ -482,7 +482,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		n -= nbytes;
 	}
 out:
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	ZFS_ACCESSTIME_STAMP(zsb, zp);
 	zfs_inode_update(zp);
@@ -524,7 +524,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	zilog_t *zilog;
 	offset_t woff;
 	ssize_t n, nbytes;
-	rl_t *rl;
+	rl_t rl;
 	int max_blksz = zsb->z_max_blksz;
 	int error = 0;
 	arc_buf_t *abuf;
@@ -608,9 +608,9 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		 * Obtain an appending range lock to guarantee file append
 		 * semantics. We reset the write offset once we have the lock.
 		 */
-		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
-		woff = rl->r_off;
-		if (rl->r_len == UINT64_MAX) {
+		zfs_range_lock(&rl, zp, 0, n, RL_APPEND);
+		woff = rl.r_off;
+		if (rl.r_len == UINT64_MAX) {
 			/*
 			 * We overlocked the file because this write will cause
 			 * the file block size to increase.
@@ -625,11 +625,11 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		 * this write, then this range lock will lock the entire file
 		 * so that we can re-write the block safely.
 		 */
-		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
+		zfs_range_lock(&rl, zp, woff, n, RL_WRITER);
 	}
 
 	if (woff >= limit) {
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		ZFS_EXIT(zsb);
 		return (EFBIG);
 	}
@@ -719,7 +719,7 @@ again:
 	 * on the first iteration since zfs_range_reduce() will
 	 * shrink down r_len to the appropriate size.
 	 */
-	if (rl->r_len == UINT64_MAX) {
+	if (rl.r_len == UINT64_MAX) {
 		uint64_t new_blksz;
 
 		if (zp->z_blksz > max_blksz) {
@@ -729,7 +729,7 @@ again:
 			new_blksz = MIN(end_size, max_blksz);
 		}
 		zfs_grow_blocksize(zp, new_blksz, tx);
-		zfs_range_reduce(rl, woff, n);
+		zfs_range_reduce(&rl, woff, n);
 	}
 
 	/*
@@ -842,7 +842,7 @@ again:
 		uio_prefaultpages(MIN(n, max_blksz), uio);
 	}
 
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	/*
 	 * If we're in replay mode, or we made no progress, return error.
@@ -893,6 +893,7 @@ zfs_get_done(zgd_t *zgd, int error)
 	if (error == 0 && zgd->zgd_bp)
 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
 
+	kmem_free(zgd->zgd_rl, sizeof (rl_t));
 	kmem_free(zgd, sizeof (zgd_t));
 }
 
@@ -935,6 +936,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 	}
 
 	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
+	zgd->zgd_rl = (rl_t *)kmem_zalloc(sizeof (rl_t), KM_SLEEP);
 	zgd->zgd_zilog = zsb->z_log;
 	zgd->zgd_private = zp;
 
@@ -946,7 +948,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
+		zfs_range_lock(zgd->zgd_rl, zp, offset, size, RL_READER);
 		/* test for truncation needs to be done while range locked */
 		if (offset >= zp->z_size) {
 			error = ENOENT;
@@ -967,7 +969,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 			size = zp->z_blksz;
 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
 			offset -= blkoff;
-			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
+			zfs_range_lock(zgd->zgd_rl, zp, offset, size,
 			    RL_READER);
 			if (zp->z_blksz == size)
 				break;
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 3a6872f..e363839 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1158,20 +1158,20 @@ zfs_extend(znode_t *zp, uint64_t end)
 {
 	zfs_sb_t *zsb = ZTOZSB(zp);
 	dmu_tx_t *tx;
-	rl_t *rl;
+	rl_t rl;
 	uint64_t newblksz;
 	int error;
 
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
+	zfs_range_lock(&rl, zp, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end <= zp->z_size) {
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		return (0);
 	}
 top:
@@ -1202,7 +1202,7 @@ top:
 			goto top;
 		}
 		dmu_tx_abort(tx);
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		return (error);
 	}
 
@@ -1214,7 +1214,7 @@ top:
 	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
 	    &zp->z_size, sizeof (zp->z_size), tx));
 
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	dmu_tx_commit(tx);
 
@@ -1235,19 +1235,19 @@ static int
 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 {
 	zfs_sb_t *zsb = ZTOZSB(zp);
-	rl_t *rl;
+	rl_t rl;
 	int error;
 
 	/*
 	 * Lock the range being freed.
 	 */
-	rl = zfs_range_lock(zp, off, len, RL_WRITER);
+	zfs_range_lock(&rl, zp, off, len, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (off >= zp->z_size) {
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		return (0);
 	}
 
@@ -1256,7 +1256,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 
 	error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);
 
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	return (error);
 }
@@ -1275,7 +1275,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 {
 	zfs_sb_t *zsb = ZTOZSB(zp);
 	dmu_tx_t *tx;
-	rl_t *rl;
+	rl_t rl;
 	int error;
 	sa_bulk_attr_t bulk[2];
 	int count = 0;
@@ -1283,19 +1283,19 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);
+	zfs_range_lock(&rl, zp, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end >= zp->z_size) {
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		return (0);
 	}
 
 	error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
 	if (error) {
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		return (error);
 	}
 top:
@@ -1310,7 +1310,7 @@ top:
 		goto top;
 	}
 	dmu_tx_abort(tx);
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 	return (error);
 }
 
@@ -1327,7 +1327,7 @@ top:
 
 	dmu_tx_commit(tx);
 
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	return (0);
 }
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 125d58d..bbe53d9 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -537,7 +537,7 @@ zvol_write(void *arg)
 	uint64_t size = blk_rq_bytes(req);
 	int error = 0;
 	dmu_tx_t *tx;
-	rl_t *rl;
+	rl_t rl;
 
 	if (req->cmd_flags & VDEV_REQ_FLUSH)
 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
@@ -550,7 +550,7 @@ zvol_write(void *arg)
 		return;
 	}
 
-	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
+	zfs_range_lock(&rl, &zv->zv_znode, offset, size, RL_WRITER);
 
 	tx = dmu_tx_create(zv->zv_objset);
 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
@@ -559,7 +559,7 @@ zvol_write(void *arg)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		zfs_range_unlock(rl);
+		zfs_range_unlock(&rl);
 		blk_end_request(req, -error, size);
 		return;
 	}
@@ -570,7 +570,7 @@ zvol_write(void *arg)
 	    req->cmd_flags & VDEV_REQ_FUA);
 
 	dmu_tx_commit(tx);
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	if ((req->cmd_flags & VDEV_REQ_FUA) ||
 	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
@@ -589,7 +589,7 @@ zvol_discard(void *arg)
 	uint64_t offset = blk_rq_pos(req) << 9;
 	uint64_t size = blk_rq_bytes(req);
 	int error;
-	rl_t *rl;
+	rl_t rl;
 
 	if (offset + size > zv->zv_volsize) {
 		blk_end_request(req, -EIO, size);
@@ -601,7 +601,7 @@ zvol_discard(void *arg)
 		return;
 	}
 
-	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
+	zfs_range_lock(&rl, &zv->zv_znode, offset, size, RL_WRITER);
 
 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);
 
@@ -609,7 +609,7 @@ zvol_discard(void *arg)
 	 * TODO: maybe we should add the operation to the log.
 	 */
 
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	blk_end_request(req, -error, size);
 }
@@ -630,18 +630,18 @@ zvol_read(void *arg)
 	uint64_t offset = blk_rq_pos(req) << 9;
 	uint64_t size = blk_rq_bytes(req);
 	int error;
-	rl_t *rl;
+	rl_t rl;
 
 	if (size == 0) {
 		blk_end_request(req, 0, size);
 		return;
 	}
 
-	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
+	zfs_range_lock(&rl, &zv->zv_znode, offset, size, RL_READER);
 
 	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);
 
-	zfs_range_unlock(rl);
+	zfs_range_unlock(&rl);
 
 	/* convert checksum errors into IO errors */
 	if (error == ECKSUM)
@@ -744,6 +744,7 @@ zvol_get_done(zgd_t *zgd, int error)
 	if (error == 0 && zgd->zgd_bp)
 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
 
+	kmem_free(zgd->zgd_rl, sizeof (rl_t));
 	kmem_free(zgd, sizeof (zgd_t));
 }
 
@@ -766,7 +767,8 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 
 	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
 	zgd->zgd_zilog = zv->zv_zilog;
-	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
+	zgd->zgd_rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
+	zfs_range_lock(zgd->zgd_rl, &zv->zv_znode, offset, size, RL_READER);
 
 	/*
 	 * Write records come in two flavors: immediate and indirect.
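
The zvol_get_data()/zvol_get_done() hunks above are the asynchronous variant
of the same contract: when the lock cannot live on the stack, the caller
heap-allocates it next to the zgd and frees it in the completion callback.
A minimal sketch of that ownership pattern, again with illustrative names
rather than the real ZFS API:

	#include <stdint.h>
	#include <stdlib.h>

	typedef struct range_lock {
		uint64_t r_off;
		uint64_t r_len;
	} range_lock_t;

	typedef struct get_data_ctx {
		range_lock_t *rl;	/* caller-owned lock storage */
		/* ... other per-request state ... */
	} get_data_ctx_t;

	/* Issue path: allocate the lock storage along with the request. */
	static get_data_ctx_t *
	get_data_start(uint64_t off, uint64_t len)
	{
		get_data_ctx_t *ctx = calloc(1, sizeof (*ctx));

		ctx->rl = calloc(1, sizeof (*ctx->rl));	/* stands in for kmem_zalloc */
		ctx->rl->r_off = off;
		ctx->rl->r_len = len;
		return (ctx);
	}

	/* Completion path: the caller, not the unlock routine, frees the lock. */
	static void
	get_data_done(get_data_ctx_t *ctx)
	{
		free(ctx->rl);	/* mirrors kmem_free(zgd->zgd_rl, sizeof (rl_t)) */
		free(ctx);
	}

	int
	main(void)
	{
		get_data_ctx_t *ctx = get_data_start(0, 8192);

		/* ... the asynchronous I/O completes ... */
		get_data_done(ctx);
		return (0);
	}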
