Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/exportfs.h>
#include <linux/iomap.h>
#include <linux/slab.h>
#include <linux/pr.h>

#include <linux/nfsd/debug.h>

#include "blocklayoutxdr.h"
#include "pnfs.h"
#include "filecache.h"
#include "vfs.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PNFS

/*
 * Get an extent from the file system that starts at offset or below
 * and may be shorter than the requested length.
 */
static __be32
nfsd4_block_map_extent(struct inode *inode, const struct svc_fh *fhp,
                u64 offset, u64 length, u32 iomode, u64 minlength,
                struct pnfs_block_extent *bex)
{
        struct super_block *sb = inode->i_sb;
        struct iomap iomap;
        u32 device_generation = 0;
        int error;

        error = sb->s_export_op->map_blocks(inode, offset, length, &iomap,
                        iomode != IOMODE_READ, &device_generation);
        if (error) {
                if (error == -ENXIO)
                        return nfserr_layoutunavailable;
                return nfserrno(error);
        }

        switch (iomap.type) {
        case IOMAP_MAPPED:
                if (iomode == IOMODE_READ)
                        bex->es = PNFS_BLOCK_READ_DATA;
                else
                        bex->es = PNFS_BLOCK_READWRITE_DATA;
                bex->soff = iomap.addr;
                break;
        case IOMAP_UNWRITTEN:
                if (iomode & IOMODE_RW) {
                        /*
                         * Crack monkey special case from section 2.3.1.
                         */
                        if (minlength == 0) {
                                dprintk("pnfsd: no soup for you!\n");
                                return nfserr_layoutunavailable;
                        }

                        bex->es = PNFS_BLOCK_INVALID_DATA;
                        bex->soff = iomap.addr;
                        break;
                }
                fallthrough;
        case IOMAP_HOLE:
                if (iomode == IOMODE_READ) {
                        bex->es = PNFS_BLOCK_NONE_DATA;
                        break;
                }
                fallthrough;
        case IOMAP_DELALLOC:
        default:
                WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
                return nfserr_layoutunavailable;
        }

        error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
        if (error)
                return nfserrno(error);

        bex->foff = iomap.offset;
        bex->len = iomap.length;
        return nfs_ok;
}

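/*
 * Handle LAYOUTGET for block-like layouts: map the requested range into
 * as many extents as fit within the client's loga_maxcount (capped at
 * roughly a page worth of extents), then trim the returned segment to
 * the range that was actually mapped.
 */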
static __be32
nfsd4_block_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
                const struct svc_fh *fhp, struct nfsd4_layoutget *args)
{
        struct nfsd4_layout_seg *seg = &args->lg_seg;
        struct pnfs_block_layout *bl;
        struct pnfs_block_extent *first_bex, *last_bex;
        u64 offset = seg->offset, length = seg->length;
        u32 i, nr_extents_max, block_size = i_blocksize(inode);
        __be32 nfserr;

        if (locks_in_grace(SVC_NET(rqstp)))
                return nfserr_grace;

        nfserr = nfserr_layoutunavailable;
        if (seg->offset & (block_size - 1)) {
                dprintk("pnfsd: I/O misaligned\n");
                goto out_error;
        }

        /*
         * RFC 8881, section 3.3.17:
         * The layout4 data type defines a layout for a file.
         *
         * RFC 8881, section 18.43.3:
         * The loga_maxcount field specifies the maximum layout size
         * (in bytes) that the client can handle. If the size of the
         * layout structure exceeds the size specified by maxcount,
         * the metadata server will return the NFS4ERR_TOOSMALL error.
         */
        nfserr = nfserr_toosmall;
        if (args->lg_maxcount < PNFS_BLOCK_LAYOUT4_SIZE +
                                PNFS_BLOCK_EXTENT_SIZE)
                goto out_error;

        /*
         * Limit the maximum layout size to avoid allocating
         * a large buffer on the server for each layout request.
         */
        nr_extents_max = (min(args->lg_maxcount, PAGE_SIZE) -
                          PNFS_BLOCK_LAYOUT4_SIZE) / PNFS_BLOCK_EXTENT_SIZE;

        /*
         * Some clients barf on non-zero block numbers for NONE or INVALID
         * layouts, so make sure to zero the whole structure.
         */
        nfserr = nfserrno(-ENOMEM);
        bl = kzalloc_flex(*bl, extents, nr_extents_max);
        if (!bl)
                goto out_error;
        bl->nr_extents = nr_extents_max;
        args->lg_content = bl;

        for (i = 0; i < bl->nr_extents; i++) {
                struct pnfs_block_extent *bex = bl->extents + i;
                u64 bex_length;

                nfserr = nfsd4_block_map_extent(inode, fhp, offset, length,
                                seg->iomode, args->lg_minlength, bex);
                if (nfserr != nfs_ok)
                        goto out_error;

                bex_length = bex->len - (offset - bex->foff);
                if (bex_length >= length) {
                        bl->nr_extents = i + 1;
                        break;
                }

                offset = bex->foff + bex->len;
                length -= bex_length;
        }

        first_bex = bl->extents;
        last_bex = bl->extents + bl->nr_extents - 1;

        nfserr = nfserr_layoutunavailable;
        length = last_bex->foff + last_bex->len - seg->offset;
        if (length < args->lg_minlength) {
                dprintk("pnfsd: extent smaller than minlength\n");
                goto out_error;
        }

        seg->offset = first_bex->foff;
        seg->length = last_bex->foff - first_bex->foff + last_bex->len;
        return nfs_ok;

out_error:
        seg->length = 0;
        return nfserr;
}

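/*
 * Finish a LAYOUTCOMMIT: fold the client-supplied modification time and
 * (optional) new file size into an iattr and hand the committed extents
 * to the filesystem via ->commit_blocks. The iomaps buffer is consumed.
 */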
static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
                struct iomap *iomaps, int nr_iomaps)
{
        struct timespec64 mtime = inode_get_mtime(inode);
        struct iattr iattr = { .ia_valid = 0 };
        int error;

        if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
            timespec64_compare(&lcp->lc_mtime, &mtime) < 0)
                lcp->lc_mtime = current_time(inode);
        iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
        iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;

        if (lcp->lc_size_chg) {
                iattr.ia_valid |= ATTR_SIZE;
                iattr.ia_size = lcp->lc_newsize;
        }

        error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
                        nr_iomaps, &iattr);
        kfree(iomaps);
        return nfserrno(error);
}

#ifdef CONFIG_NFSD_BLOCKLAYOUT
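/*
 * Build a GETDEVICEINFO reply describing a single SIMPLE volume whose
 * signature is the filesystem UUID reported by ->get_uuid.
 */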
static int
nfsd4_block_get_device_info_simple(struct super_block *sb,
                struct nfsd4_getdeviceinfo *gdp)
{
        struct pnfs_block_deviceaddr *dev;
        struct pnfs_block_volume *b;

        dev = kzalloc_flex(*dev, volumes, 1);
        if (!dev)
                return -ENOMEM;
        gdp->gd_device = dev;

        dev->nr_volumes = 1;
        b = &dev->volumes[0];

        b->type = PNFS_BLOCK_VOLUME_SIMPLE;
        b->simple.sig_len = PNFS_BLOCK_UUID_LEN;
        return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len,
                        &b->simple.offset);
}

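/*
 * GETDEVICEINFO for plain block volumes. Layouts are only handed out for
 * whole devices, so reject partitions before describing the volume.
 */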
static __be32
nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
                struct svc_rqst *rqstp,
                struct nfs4_client *clp,
                struct nfsd4_getdeviceinfo *gdp)
{
        if (bdev_is_partition(sb->s_bdev))
                return nfserr_inval;
        return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp));
}

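/*
 * LAYOUTCOMMIT for block layouts: decode the opaque layoutupdate blob
 * into a list of iomaps and pass them to nfsd4_block_commit_blocks().
 */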
static __be32
nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
                struct nfsd4_layoutcommit *lcp)
{
        struct iomap *iomaps;
        int nr_iomaps;
        __be32 nfserr;

        rqstp->rq_arg = lcp->lc_up_layout;
        svcxdr_init_decode(rqstp);

        nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream,
                        &iomaps, &nr_iomaps, i_blocksize(inode));
        if (nfserr != nfs_ok)
                return nfserr;

        return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

const struct nfsd4_layout_ops bl_layout_ops = {
        /*
         * Pretend that we send notification to the client. This is a blatant
         * lie to force recent Linux clients to cache our device IDs.
         * We rarely ever change the device ID, so the harm of leaking deviceids
         * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
         * in this regard, but I filed errata 4119 for this a while ago, and
         * hopefully the Linux client will eventually start caching deviceids
         * without this again.
         */
        .notify_types           =
                        NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
        .proc_getdeviceinfo     = nfsd4_block_proc_getdeviceinfo,
        .encode_getdeviceinfo   = nfsd4_block_encode_getdeviceinfo,
        .proc_layoutget         = nfsd4_block_proc_layoutget,
        .encode_layoutget       = nfsd4_block_encode_layoutget,
        .proc_layoutcommit      = nfsd4_block_proc_layoutcommit,
};
#endif /* CONFIG_NFSD_BLOCKLAYOUT */

#ifdef CONFIG_NFSD_SCSILAYOUT

#define NFSD_MDS_PR_FENCED	XA_MARK_0

/*
 * Clear the fence flag if the device already has an entry. This occurs
 * when a client re-registers after a previous fence, allowing new
 * layouts for this device.
 *
 * Insert only on first registration. This bounds cl_dev_fences to the
 * count of devices this client has accessed, preventing unbounded growth.
 */
static inline int nfsd4_scsi_fence_insert(struct nfs4_client *clp,
                                          dev_t device)
{
        struct xarray *xa = &clp->cl_dev_fences;
        int ret;

        xa_lock(xa);
        ret = __xa_insert(xa, device, XA_ZERO_ENTRY, GFP_KERNEL);
        if (ret == -EBUSY) {
                __xa_clear_mark(xa, device, NFSD_MDS_PR_FENCED);
                ret = 0;
        }
        xa_unlock(xa);
        clp->cl_fence_retry_warn = false;
        return ret;
}

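/*
 * Atomically test and set the fence mark for @device. Returns true if
 * the device was already marked fenced for this client, in which case
 * the caller can skip issuing another preempt.
 */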
static inline bool nfsd4_scsi_fence_set(struct nfs4_client *clp, dev_t device)
{
        struct xarray *xa = &clp->cl_dev_fences;
        bool skip;

        xa_lock(xa);
        skip = xa_get_mark(xa, device, NFSD_MDS_PR_FENCED);
        if (!skip)
                __xa_set_mark(xa, device, NFSD_MDS_PR_FENCED);
        xa_unlock(xa);
        return skip;
}

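/*
 * Drop the fence mark again, used when the preempt could not have reached
 * the device and the fence attempt should be retried later.
 */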
static inline void nfsd4_scsi_fence_clear(struct nfs4_client *clp, dev_t device)
{
        xa_clear_mark(&clp->cl_dev_fences, device, NFSD_MDS_PR_FENCED);
}

#define NFSD_MDS_PR_KEY		0x0100000000000000ULL

/*
 * We use the client ID as a unique key for the reservations.
 * This allows us to easily fence a client when recalls fail.
 */
static u64 nfsd4_scsi_pr_key(struct nfs4_client *clp)
{
        return ((u64)clp->cl_clientid.cl_boot << 32) | clp->cl_clientid.cl_id;
}

static const u8 designator_types[] = {
        PS_DESIGNATOR_EUI64,
        PS_DESIGNATOR_NAA,
};

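/*
 * Ask the disk driver for a binary EUI-64 or NAA designator and use it
 * as the SCSI volume identifier. Returns -EINVAL if neither designator
 * type is available.
 */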
static int
nfsd4_block_get_unique_id(struct gendisk *disk, struct pnfs_block_volume *b)
{
        int ret, i;

        for (i = 0; i < ARRAY_SIZE(designator_types); i++) {
                u8 type = designator_types[i];

                ret = disk->fops->get_unique_id(disk, b->scsi.designator, type);
                if (ret > 0) {
                        b->scsi.code_set = PS_CODE_SET_BINARY;
                        b->scsi.designator_type = type;
                        b->scsi.designator_len = ret;
                        return 0;
                }
        }

        return -EINVAL;
}

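/*
 * Build a GETDEVICEINFO reply for a SCSI volume and set up fencing:
 * record the device in the client's fence xarray, register the MDS
 * reservation key, and take an exclusive-access (registrants only)
 * reservation so a misbehaving client can later be preempted.
 */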
static int
nfsd4_block_get_device_info_scsi(struct super_block *sb,
                struct nfs4_client *clp,
                struct nfsd4_getdeviceinfo *gdp)
{
        struct pnfs_block_deviceaddr *dev;
        struct pnfs_block_volume *b;
        const struct pr_ops *ops;
        int ret;

        dev = kzalloc_flex(*dev, volumes, 1);
        if (!dev)
                return -ENOMEM;
        gdp->gd_device = dev;

        dev->nr_volumes = 1;
        b = &dev->volumes[0];

        b->type = PNFS_BLOCK_VOLUME_SCSI;
        b->scsi.pr_key = nfsd4_scsi_pr_key(clp);

        ret = nfsd4_block_get_unique_id(sb->s_bdev->bd_disk, b);
        if (ret < 0)
                goto out_free_dev;

        ret = -EINVAL;
        ops = sb->s_bdev->bd_disk->fops->pr_ops;
        if (!ops) {
                pr_err("pNFS: device %s does not support PRs.\n",
                       sb->s_id);
                goto out_free_dev;
        }

        ret = nfsd4_scsi_fence_insert(clp, sb->s_bdev->bd_dev);
        if (ret < 0)
                goto out_free_dev;

        ret = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true);
        if (ret) {
                pr_err("pNFS: failed to register key for device %s.\n",
                       sb->s_id);
                goto out_free_dev;
        }

        ret = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY,
                        PR_EXCLUSIVE_ACCESS_REG_ONLY, 0);
        if (ret) {
                pr_err("pNFS: failed to reserve device %s.\n",
                       sb->s_id);
                goto out_free_dev;
        }

        return 0;

out_free_dev:
        kfree(dev);
        gdp->gd_device = NULL;
        return ret;
}

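/*
 * GETDEVICEINFO for SCSI volumes: as for plain block volumes, only whole
 * devices are supported, so partitions are rejected up front.
 */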
static __be32
nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
                struct svc_rqst *rqstp,
                struct nfs4_client *clp,
                struct nfsd4_getdeviceinfo *gdp)
{
        if (bdev_is_partition(sb->s_bdev))
                return nfserr_inval;
        return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}

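/*
 * LAYOUTCOMMIT for SCSI layouts: identical to the block-layout variant
 * except that the layoutupdate blob is decoded with the SCSI layout
 * XDR helper.
 */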
static __be32
nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
                struct nfsd4_layoutcommit *lcp)
{
        struct iomap *iomaps;
        int nr_iomaps;
        __be32 nfserr;

        rqstp->rq_arg = lcp->lc_up_layout;
        svcxdr_init_decode(rqstp);

        nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream,
                        &iomaps, &nr_iomaps, i_blocksize(inode));
        if (nfserr != nfs_ok)
                return nfserr;

        return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

/*
 * Perform the fence operation to prevent the client from accessing the
 * block device. If a fence operation is already in progress, wait for
 * it to complete before checking the NFSD_MDS_PR_FENCED flag. Once the
 * operation is complete, check the flag. If NFSD_MDS_PR_FENCED is set,
 * update the layout stateid by setting the ls_fenced flag to indicate
 * that the client has been fenced.
 *
 * The cl_fence_mutex ensures that the fence operation has been fully
 * completed, rather than just in progress, when returning from this
 * function.
 *
 * Return true if the client was fenced, otherwise return false.
 */
static bool
nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
        struct nfs4_client *clp = ls->ls_stid.sc_client;
        struct block_device *bdev = file->nf_file->f_path.mnt->mnt_sb->s_bdev;
        int status;
        bool ret;

        mutex_lock(&clp->cl_fence_mutex);
        if (nfsd4_scsi_fence_set(clp, bdev->bd_dev)) {
                mutex_unlock(&clp->cl_fence_mutex);
                return true;
        }

        status = bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
                        nfsd4_scsi_pr_key(clp),
                        PR_EXCLUSIVE_ACCESS_REG_ONLY, true);
        /*
         * Reset to allow retry only when the command could not have
         * reached the device. Negative status means a local error
         * (e.g., -ENOMEM) prevented the command from being sent.
         * PR_STS_PATH_FAILED, PR_STS_PATH_FAST_FAILED, and
         * PR_STS_RETRY_PATH_FAILURE indicate transport path failures
         * before device delivery.
         *
         * For all other errors, the command may have reached the device
         * and the preempt may have succeeded. Avoid resetting, since
         * retrying a successful preempt returns PR_STS_IOERR or
         * PR_STS_RESERVATION_CONFLICT, which would cause an infinite
         * retry loop.
         */
        switch (status) {
        case 0:
        case PR_STS_IOERR:
        case PR_STS_RESERVATION_CONFLICT:
                ret = true;
                break;
        default:
                /* retry-able and other errors */
                ret = false;
                nfsd4_scsi_fence_clear(clp, bdev->bd_dev);
                break;
        }
        mutex_unlock(&clp->cl_fence_mutex);

        trace_nfsd_pnfs_fence(clp, bdev->bd_disk->disk_name, status);
        return ret;
}

const struct nfsd4_layout_ops scsi_layout_ops = {
        /*
         * Pretend that we send notification to the client. This is a blatant
         * lie to force recent Linux clients to cache our device IDs.
         * We rarely ever change the device ID, so the harm of leaking deviceids
         * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
         * in this regard, but I filed errata 4119 for this a while ago, and
         * hopefully the Linux client will eventually start caching deviceids
         * without this again.
         */
        .notify_types           =
                        NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
        .proc_getdeviceinfo     = nfsd4_scsi_proc_getdeviceinfo,
        .encode_getdeviceinfo   = nfsd4_block_encode_getdeviceinfo,
        .proc_layoutget         = nfsd4_block_proc_layoutget,
        .encode_layoutget       = nfsd4_block_encode_layoutget,
        .proc_layoutcommit      = nfsd4_scsi_proc_layoutcommit,
        .fence_client           = nfsd4_scsi_fence_client,
};
#endif /* CONFIG_NFSD_SCSILAYOUT */