// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/t10-pi.h>
#include <linux/crc64.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        memset(lim, 0, sizeof(*lim));
        lim->logical_block_size = SECTOR_SIZE;
        lim->physical_block_size = SECTOR_SIZE;
        lim->io_min = SECTOR_SIZE;
        lim->discard_granularity = SECTOR_SIZE;
        lim->dma_alignment = SECTOR_SIZE - 1;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_hw_wzeroes_unmap_sectors = UINT_MAX;
        lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
        lim->max_hw_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
        lim->atomic_write_hw_max = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
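
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * stacking driver resets its limits here so that the per-device min()/max()
 * stacking in blk_stack_limits() starts from the most permissive values:
 *
 *      struct queue_limits lim;
 *
 *      blk_set_stacking_limits(&lim);
 *      ... then fold in each component device, e.g. via
 *      queue_limits_stack_bdev(), and apply with queue_limits_set() ...
 */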

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
                          struct queue_limits *lim)
{
        u64 io_opt = lim->io_opt;

        /*
         * For read-ahead of large files to be effective, we need to read
         * ahead at least twice the optimal I/O size. For rotational devices
         * that do not report an optimal I/O size (e.g. ATA HDDs), use the
         * maximum I/O size to avoid falling back to the (rather inefficient)
         * small default read-ahead size.
         *
         * There is no hardware limitation for the read-ahead size and the
         * user might have increased the read-ahead size through sysfs, so
         * don't ever decrease it.
         */
        if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL))
                io_opt = (u64)lim->max_sectors << SECTOR_SHIFT;

        bdi->ra_pages = max3(bdi->ra_pages,
                             io_opt * 2 >> PAGE_SHIFT,
                             VM_READAHEAD_PAGES);
        bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
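
/*
 * Worked example (illustrative, assuming 4 KiB pages): a rotational disk
 * that reports io_opt = 0 and max_sectors = 2560 (1280 KiB) gets
 * io_opt = 2560 << 9 = 1310720 bytes, so ra_pages becomes at least
 * 1310720 * 2 >> 12 = 640 pages (2560 KiB), and io_pages = 2560 >> 3 = 320.
 */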

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
        if (!(lim->features & BLK_FEAT_ZONED)) {
                if (WARN_ON_ONCE(lim->max_open_zones) ||
                    WARN_ON_ONCE(lim->max_active_zones) ||
                    WARN_ON_ONCE(lim->zone_write_granularity) ||
                    WARN_ON_ONCE(lim->max_zone_append_sectors))
                        return -EINVAL;
                return 0;
        }

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
                return -EINVAL;

        /*
         * Given that active zones include open zones, the maximum number of
         * open zones cannot be larger than the maximum number of active zones.
         */
        if (lim->max_active_zones &&
            lim->max_open_zones > lim->max_active_zones)
                return -EINVAL;

        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;

        /*
         * The Zone Append size is limited by the maximum I/O size and the
         * zone size, given that it can't span zones.
         *
         * If no max_hw_zone_append_sectors limit is provided, the block layer
         * will emulate it; otherwise we're also bound by the hardware limit.
         */
        lim->max_zone_append_sectors =
                min_not_zero(lim->max_hw_zone_append_sectors,
                             min(lim->chunk_sectors, lim->max_hw_sectors));
        return 0;
}
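
/*
 * Numeric example (illustrative): with 256 MiB zones (chunk_sectors =
 * 524288) and max_hw_sectors = 2048 (1 MiB), a device that reports no
 * hardware Zone Append limit gets the emulated value
 * max_zone_append_sectors = min(524288, 2048) = 2048; if it instead reported
 * max_hw_zone_append_sectors = 1024, the result would be 1024.
 */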

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
        struct blk_integrity *bi = &lim->integrity;

        if (!bi->metadata_size) {
                if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
                    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
                        pr_warn("invalid PI settings.\n");
                        return -EINVAL;
                }
                bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
                return 0;
        }

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
                pr_warn("integrity support disabled.\n");
                return -EINVAL;
        }

        if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
            (bi->flags & BLK_INTEGRITY_REF_TAG)) {
                pr_warn("ref tag not supported without checksum.\n");
                return -EINVAL;
        }

        if (bi->pi_offset + bi->pi_tuple_size > bi->metadata_size) {
                pr_warn("pi_offset (%u) + pi_tuple_size (%u) exceeds metadata_size (%u)\n",
                        bi->pi_offset, bi->pi_tuple_size, bi->metadata_size);
                return -EINVAL;
        }

        switch (bi->csum_type) {
        case BLK_INTEGRITY_CSUM_NONE:
                if (bi->pi_tuple_size) {
                        pr_warn("pi_tuple_size must be 0 when checksum type is none\n");
                        return -EINVAL;
                }
                break;
        case BLK_INTEGRITY_CSUM_CRC:
        case BLK_INTEGRITY_CSUM_IP:
                if (bi->pi_tuple_size != sizeof(struct t10_pi_tuple)) {
                        pr_warn("pi_tuple_size mismatch for T10 PI: expected %zu, got %u\n",
                                sizeof(struct t10_pi_tuple),
                                bi->pi_tuple_size);
                        return -EINVAL;
                }
                break;
        case BLK_INTEGRITY_CSUM_CRC64:
                if (bi->pi_tuple_size != sizeof(struct crc64_pi_tuple)) {
                        pr_warn("pi_tuple_size mismatch for CRC64 PI: expected %zu, got %u\n",
                                sizeof(struct crc64_pi_tuple),
                                bi->pi_tuple_size);
                        return -EINVAL;
                }
                break;
        }

        if (!bi->interval_exp) {
                bi->interval_exp = ilog2(lim->logical_block_size);
        } else if (bi->interval_exp < SECTOR_SHIFT ||
                   bi->interval_exp > ilog2(lim->logical_block_size)) {
                pr_warn("invalid interval_exp %u\n", bi->interval_exp);
                return -EINVAL;
        }

        /*
         * Some I/O controllers cannot handle data intervals straddling
         * multiple bio_vecs. For those, enforce alignment so that such
         * intervals are never generated, and each buffer is aligned as
         * expected.
         */
        if (!(bi->flags & BLK_SPLIT_INTERVAL_CAPABLE) && bi->csum_type) {
                lim->dma_alignment = max(lim->dma_alignment,
                                         (1U << bi->interval_exp) - 1);
        }

        /*
         * The block layer automatically adds integrity data for bios that
         * don't already have it. Limit the I/O size so that a single maximum
         * size metadata segment can cover the integrity data for the entire
         * I/O.
         */
        lim->max_sectors = min(lim->max_sectors,
                               max_integrity_io_size(lim) >> SECTOR_SHIFT);

        return 0;
}

/*
 * Returns the maximum number of bytes which we can guarantee to fit in a bio.
 *
 * We require that an atomic write is submitted as an ITER_UBUF iov_iter (so a
 * single vector), so we assume that we can fit at least PAGE_SIZE in a
 * segment, apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
        unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
        unsigned int length;

        length = min(max_segments, 2) * lim->logical_block_size;
        if (max_segments > 2)
                length += (max_segments - 2) * PAGE_SIZE;

        return length;
}
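
/*
 * Worked example (illustrative, assuming 4 KiB pages): with max_segments =
 * 128 and a 512-byte logical block size, the guaranteed fit is
 * 2 * 512 + 126 * 4096 = 517120 bytes, i.e. two minimally-sized end segments
 * plus a full page for every middle segment.
 */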

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
        unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
                                      blk_queue_max_guaranteed_bio(lim));

        unit_limit = rounddown_pow_of_two(unit_limit);

        lim->atomic_write_max_sectors =
                min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
                    lim->max_hw_sectors);
        lim->atomic_write_unit_min =
                min(lim->atomic_write_hw_unit_min, unit_limit);
        lim->atomic_write_unit_max =
                min(lim->atomic_write_hw_unit_max, unit_limit);
        lim->atomic_write_boundary_sectors =
                lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

/*
 * Test whether any boundary is aligned with any chunk size. Stacked
 * devices store any stripe size in t->chunk_sectors.
 */
static bool blk_valid_atomic_writes_boundary(unsigned int chunk_sectors,
                                             unsigned int boundary_sectors)
{
        if (!chunk_sectors || !boundary_sectors)
                return true;

        if (boundary_sectors > chunk_sectors &&
            boundary_sectors % chunk_sectors)
                return false;

        if (chunk_sectors > boundary_sectors &&
            chunk_sectors % boundary_sectors)
                return false;

        return true;
}
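
/*
 * Numeric example (illustrative): chunk_sectors = 64 with boundary_sectors =
 * 16 is valid (64 % 16 == 0), as is boundary_sectors = 128 (128 % 64 == 0),
 * but chunk_sectors = 24 with boundary_sectors = 16 is rejected because
 * neither value divides the other.
 */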

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
        unsigned int boundary_sectors;
        unsigned int atomic_write_hw_max_sectors =
                lim->atomic_write_hw_max >> SECTOR_SHIFT;

        if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;

        /* UINT_MAX indicates stacked limits in initial state */
        if (lim->atomic_write_hw_max == UINT_MAX)
                goto unsupported;

        if (!lim->atomic_write_hw_max)
                goto unsupported;

        if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
                goto unsupported;

        if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
                goto unsupported;

        if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
                         lim->atomic_write_hw_unit_max))
                goto unsupported;

        if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
                         lim->atomic_write_hw_max))
                goto unsupported;

        if (WARN_ON_ONCE(lim->chunk_sectors &&
                         atomic_write_hw_max_sectors > lim->chunk_sectors))
                goto unsupported;

        boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

        if (boundary_sectors) {
                if (WARN_ON_ONCE(lim->atomic_write_hw_max >
                                 lim->atomic_write_hw_boundary))
                        goto unsupported;

                if (WARN_ON_ONCE(!blk_valid_atomic_writes_boundary(
                                lim->chunk_sectors, boundary_sectors)))
                        goto unsupported;

                /*
                 * The boundary size just needs to be a multiple of unit_max
                 * (and not necessarily a power-of-2), so the following check
                 * could be relaxed in the future.
                 * Furthermore, if needed, unit_max could even be reduced so
                 * that it is compliant with a !power-of-2 boundary.
                 */
                if (!is_power_of_2(boundary_sectors))
                        goto unsupported;
        }

        blk_atomic_writes_update_limits(lim);
        return;

unsupported:
        lim->atomic_write_max_sectors = 0;
        lim->atomic_write_boundary_sectors = 0;
        lim->atomic_write_unit_min = 0;
        lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
        unsigned int max_hw_sectors;
        unsigned int logical_block_sectors;
        unsigned long seg_size;
        int err;

        /*
         * Unless otherwise specified, default to 512 byte logical blocks and
         * a physical block size equal to the logical block size.
         */
        if (!lim->logical_block_size)
                lim->logical_block_size = SECTOR_SIZE;
        else if (blk_validate_block_size(lim->logical_block_size)) {
                pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
                return -EINVAL;
        }
        if (lim->physical_block_size < lim->logical_block_size) {
                lim->physical_block_size = lim->logical_block_size;
        } else if (!is_power_of_2(lim->physical_block_size)) {
                pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size);
                return -EINVAL;
        }

        /*
         * The minimum I/O size defaults to the physical block size unless
         * explicitly overridden.
         */
        if (lim->io_min < lim->physical_block_size)
                lim->io_min = lim->physical_block_size;

        /*
         * The optimal I/O size may not be aligned to the physical block size
         * (because it may be limited by DMA engines which have no clue about
         * the block size of the disks attached to them), so we round it down
         * here.
         */
        lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

        /*
         * max_hw_sectors has a somewhat weird default for historical reasons,
         * but drivers really should set their own instead of relying on this
         * value.
         *
         * The block layer relies on the fact that every driver can
         * handle at least a page worth of data per I/O, and needs the value
         * aligned to the logical block size.
         */
        if (!lim->max_hw_sectors)
                lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
                return -EINVAL;
        logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
        if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
                return -EINVAL;
        lim->max_hw_sectors = round_down(lim->max_hw_sectors,
                                         logical_block_sectors);

        /*
         * The actual max_sectors value is a complex beast and also takes the
         * max_dev_sectors value (set by SCSI ULPs) and a user configurable
         * value into account. The ->max_sectors value is always calculated
         * from these, so directly setting it won't have any effect.
         */
        max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                                      lim->max_dev_sectors);
        if (lim->max_user_sectors) {
                if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
                        return -EINVAL;
                lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
        } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
        } else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
        } else {
                lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
        }
        lim->max_sectors = round_down(lim->max_sectors,
                                      logical_block_sectors);

        /*
         * Arbitrary default for the maximum number of segments. Drivers
         * should not rely on this and should set their own.
         */
        if (!lim->max_segments)
                lim->max_segments = BLK_MAX_SEGMENTS;

        if (lim->max_hw_wzeroes_unmap_sectors &&
            lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
                return -EINVAL;
        lim->max_wzeroes_unmap_sectors = min(lim->max_hw_wzeroes_unmap_sectors,
                                             lim->max_user_wzeroes_unmap_sectors);

        lim->max_discard_sectors =
                min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

        /*
         * When discard is not supported, discard_granularity should be
         * reported as 0 to userspace.
         */
        if (lim->max_discard_sectors)
                lim->discard_granularity =
                        max(lim->discard_granularity, lim->physical_block_size);
        else
                lim->discard_granularity = 0;

        if (!lim->max_discard_segments)
                lim->max_discard_segments = 1;

        /*
         * By default there is no limit on the segment boundary alignment,
         * but if there is one it can't be smaller than the page size as
         * that would break all the normal I/O patterns.
         */
        if (!lim->seg_boundary_mask)
                lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
                return -EINVAL;

        /*
         * A stacking device may have both a virtual boundary and a max
         * segment size limit, so allow this setting now; long-term the two
         * might need to move out of the stacking limits, since we have
         * immutable bvecs and lower-layer bio splitting is supposed to handle
         * the two correctly.
         */
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else {
                /*
                 * The maximum segment size has an odd historic 64k default
                 * that drivers probably should override. Just like the I/O
                 * size we require drivers to at least handle a full page per
                 * segment.
                 */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
                if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
                        return -EINVAL;
        }

        /* Set up the max segment size for building new segments in the fast path */
        if (lim->seg_boundary_mask > lim->max_segment_size - 1)
                seg_size = lim->max_segment_size;
        else
                seg_size = lim->seg_boundary_mask + 1;
        lim->max_fast_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

        /*
         * We require drivers to at least do logical block aligned I/O, but
         * historically could not check for that due to the separate calls
         * to set the limits. Once the transition is finished the check
         * below should be narrowed down to check the logical block size.
         */
        if (!lim->dma_alignment)
                lim->dma_alignment = SECTOR_SIZE - 1;
        if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
                return -EINVAL;

        if (lim->alignment_offset) {
                lim->alignment_offset &= (lim->physical_block_size - 1);
                lim->flags &= ~BLK_FLAG_MISALIGNED;
        }

        if (!(lim->features & BLK_FEAT_WRITE_CACHE))
                lim->features &= ~BLK_FEAT_FUA;

        blk_validate_atomic_write_limits(lim);

        err = blk_validate_integrity_limits(lim);
        if (err)
                return err;
        return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which may be no limits at all, in which
 * case all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
        /*
         * Most defaults are set by capping the bounds in blk_validate_limits,
         * but these limits are special and need an explicit initialization to
         * the max value here.
         */
        lim->max_user_discard_sectors = UINT_MAX;
        lim->max_user_wzeroes_unmap_sectors = UINT_MAX;
        return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
                               struct queue_limits *lim)
{
        int error;

        lockdep_assert_held(&q->limits_lock);

        error = blk_validate_limits(lim);
        if (error)
                goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (q->crypto_profile && lim->integrity.tag_size) {
                pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
                error = -EINVAL;
                goto out_unlock;
        }
#endif

        q->limits = *lim;
        if (q->disk)
                blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
        mutex_unlock(&q->limits_lock);
        return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
                                      struct queue_limits *lim)
{
        unsigned int memflags;
        int ret;

        memflags = blk_mq_freeze_queue(q);
        ret = queue_limits_commit_update(q, lim);
        blk_mq_unfreeze_queue(q, memflags);

        return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
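
/*
 * Illustrative sketch (hypothetical driver code): the usual pattern is to
 * snapshot the current limits with queue_limits_start_update(), modify the
 * copy, and commit it back, letting the frozen variant handle the queue
 * freeze; the max_hw_sectors value below is made up:
 *
 *      struct queue_limits lim = queue_limits_start_update(q);
 *      int err;
 *
 *      lim.max_hw_sectors = 1024;
 *      err = queue_limits_commit_update_frozen(q, &lim);
 *      if (err)
 *              return err;
 */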

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
        mutex_lock(&q->limits_lock);
        return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
                                        sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}
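
/*
 * Worked example (illustrative): with physical_block_size = io_min = 4096
 * and alignment_offset = 0, a partition starting at sector 7 yields
 * alignment = (7 % 8) << 9 = 3584 bytes, so the reported offset is
 * (4096 + 0 - 3584) % 4096 = 512 bytes.
 */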

static unsigned int queue_limit_discard_alignment(
                const struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/* Check whether the second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
                                         struct queue_limits *b)
{
        /* We're not going to support different boundary sizes... yet */
        if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
                return false;

        /* Can't support this */
        if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
                return false;

        /* Or this */
        if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
                return false;

        t->atomic_write_hw_max = min(t->atomic_write_hw_max,
                                     b->atomic_write_hw_max);
        t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
                                          b->atomic_write_hw_unit_min);
        t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
                                          b->atomic_write_hw_unit_max);
        return true;
}

static void blk_stack_atomic_writes_chunk_sectors(struct queue_limits *t)
{
        unsigned int chunk_bytes;

        if (!t->chunk_sectors)
                return;

        /*
         * If chunk sectors is so large that its value in bytes overflows
         * UINT_MAX, then just shift it down so it definitely will fit.
         * We don't support atomic writes of such a large size anyway.
         */
        if (check_shl_overflow(t->chunk_sectors, SECTOR_SHIFT, &chunk_bytes))
                chunk_bytes = t->chunk_sectors;

        /*
         * Find values for the limits which work for the chunk size.
         * b->atomic_write_hw_unit_{min, max} may not be aligned with the
         * chunk size, as the chunk size is not restricted to a power-of-2.
         * So we need to find the highest power-of-2 which works for the
         * chunk size.
         * As an example scenario, we could have t->unit_max = 16 KB and a
         * chunk size of 24 KB. For this case, reduce t->unit_max to a value
         * aligned with both limits, i.e. 8 KB in this example.
         */
        t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
                                          max_pow_of_two_factor(chunk_bytes));

        t->atomic_write_hw_unit_min = min(t->atomic_write_hw_unit_min,
                                          t->atomic_write_hw_unit_max);
        t->atomic_write_hw_max = min(t->atomic_write_hw_max, chunk_bytes);
}
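
/*
 * Worked example (illustrative): a 24 KB stripe gives chunk_bytes = 24576 =
 * 2^13 * 3, so max_pow_of_two_factor(24576) = 8192. A prior unit_max of
 * 16384 is therefore reduced to 8192, the largest power-of-2 that divides
 * the chunk size, and atomic_write_hw_max is capped at 24576 bytes.
 */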

/* Check stacking of the first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
                                         struct queue_limits *b)
{
        if (!blk_valid_atomic_writes_boundary(t->chunk_sectors,
                        b->atomic_write_hw_boundary >> SECTOR_SHIFT))
                return false;

        t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
        t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
        t->atomic_write_hw_max = b->atomic_write_hw_max;
        t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
        return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
                                           struct queue_limits *b,
                                           sector_t start)
{
        if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;

        if (!b->atomic_write_hw_unit_min)
                goto unsupported;

        if (!blk_atomic_write_start_sect_aligned(start, b))
                goto unsupported;

        /* UINT_MAX indicates no stacking of bottom devices yet */
        if (t->atomic_write_hw_max == UINT_MAX) {
                if (!blk_stack_atomic_writes_head(t, b))
                        goto unsupported;
        } else {
                if (!blk_stack_atomic_writes_tail(t, b))
                        goto unsupported;
        }
        blk_stack_atomic_writes_chunk_sectors(t);
        return;

unsupported:
        t->atomic_write_hw_max = 0;
        t->atomic_write_hw_unit_max = 0;
        t->atomic_write_hw_unit_min = 0;
        t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment;
        int ret = 0;

        t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

        /*
         * Some features need to be supported both by the stacking driver and
         * all underlying devices. The stacking driver sets these flags before
         * stacking the limits, and this will clear the flags if any of the
         * underlying devices does not support it.
         */
        if (!(b->features & BLK_FEAT_NOWAIT))
                t->features &= ~BLK_FEAT_NOWAIT;
        if (!(b->features & BLK_FEAT_POLL))
                t->features &= ~BLK_FEAT_POLL;

        t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_user_sectors = min_not_zero(t->max_user_sectors,
                                           b->max_user_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->max_user_wzeroes_unmap_sectors =
                min(t->max_user_wzeroes_unmap_sectors,
                    b->max_user_wzeroes_unmap_sectors);
        t->max_hw_wzeroes_unmap_sectors =
                min(t->max_hw_wzeroes_unmap_sectors,
                    b->max_hw_wzeroes_unmap_sectors);

        t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
                                            b->max_hw_zone_append_sectors);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment. Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->flags |= BLK_FLAG_MISALIGNED;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
                t->chunk_sectors = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                                                   b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        if (!(t->features & BLK_FEAT_ZONED)) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        blk_stack_atomic_writes_limits(t, b, start);

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                             sector_t offset, const char *pfx)
{
        if (blk_stack_limits(t, bdev_limits(bdev),
                             get_start_sect(bdev) + offset))
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                          pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
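
/*
 * Illustrative sketch (hypothetical stacking-driver code): an MD- or
 * DM-style driver calls this once per member device and then applies the
 * combined limits; "member", "nr_members", and "data_offset" are made-up
 * names for this sketch:
 *
 *      struct queue_limits lim;
 *      int i;
 *
 *      blk_set_stacking_limits(&lim);
 *      for (i = 0; i < nr_members; i++)
 *              queue_limits_stack_bdev(&lim, member[i].bdev,
 *                                      member[i].data_offset,
 *                                      disk->disk_name);
 *      return queue_limits_set(disk->queue, &lim);
 */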

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 * a) @t does not have any integrity information stacked into it yet, or
 * b) the integrity profile in @b is identical to the one in @t.
 *
 * If @b can be stacked into @t, return %true. Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
                                  struct queue_limits *b)
{
        struct blk_integrity *ti = &t->integrity;
        struct blk_integrity *bi = &b->integrity;

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                return true;

        if (ti->flags & BLK_INTEGRITY_STACKED) {
                if (ti->metadata_size != bi->metadata_size)
                        goto incompatible;
                if (ti->interval_exp != bi->interval_exp)
                        goto incompatible;
                if (ti->tag_size != bi->tag_size)
                        goto incompatible;
                if (ti->csum_type != bi->csum_type)
                        goto incompatible;
                if (ti->pi_tuple_size != bi->pi_tuple_size)
                        goto incompatible;
                if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
                    (bi->flags & BLK_INTEGRITY_REF_TAG))
                        goto incompatible;
                if ((ti->flags & BLK_SPLIT_INTERVAL_CAPABLE) &&
                    !(bi->flags & BLK_SPLIT_INTERVAL_CAPABLE))
                        ti->flags &= ~BLK_SPLIT_INTERVAL_CAPABLE;
        } else {
                ti->flags = BLK_INTEGRITY_STACKED;
                ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
                             (bi->flags & BLK_INTEGRITY_REF_TAG) |
                             (bi->flags & BLK_SPLIT_INTERVAL_CAPABLE);
                ti->csum_type = bi->csum_type;
                ti->pi_tuple_size = bi->pi_tuple_size;
                ti->metadata_size = bi->metadata_size;
                ti->pi_offset = bi->pi_offset;
                ti->interval_exp = bi->interval_exp;
                ti->tag_size = bi->tag_size;
        }
        return true;

incompatible:
        memset(ti, 0, sizeof(*ti));
        return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.flags & BLK_FLAG_MISALIGNED)
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
                                                    bdev->bd_start_sect);
        return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
                                                     bdev->bd_start_sect);
        return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);