Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit 15bfba1ad77fad8e45a37aae54b3c813b33fe27c (521 lines, 17 kB)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8)	// read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock;	/* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))
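/*
 * Illustrative sketch, not part of the original header: the sector-based
 * fields of struct block_device convert to byte quantities via SECTOR_SHIFT.
 * The helper names below are hypothetical examples, not kernel API.
 */
static inline loff_t example_bdev_size_bytes(struct block_device *bdev)
{
	/* bd_nr_sectors counts 512-byte units, independent of logical block size */
	return (loff_t)bdev->bd_nr_sectors << SECTOR_SHIFT;
}

static inline sector_t example_bdev_last_sector(struct block_device *bdev)
{
	/* last sector covered by this partition, relative to the whole disk */
	return bdev->bd_start_sect + bdev->bd_nr_sectors - 1;
}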
/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is
 * offline or is being taken offline. This could help differentiate the case
 * where a device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)
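/*
 * Illustrative sketch, not part of the original header: a driver completion
 * path typically maps device-specific conditions onto the generic BLK_STS_*
 * codes above. example_complete_status() and its parameters are hypothetical.
 */
static inline blk_status_t example_complete_status(bool unsupported, bool no_space)
{
	if (unsupported)
		return BLK_STS_NOTSUPP;	/* command not supported by the device */
	if (no_space)
		return BLK_STS_NOSPC;	/* e.g. thin provisioning ran out of space */
	return BLK_STS_OK;		/* successful completion */
}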
/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL		((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *	%false - retrying failover path will not help
 *	%true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
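/*
 * Illustrative sketch, not part of the original header: a multipath-style
 * completion handler can use blk_path_error() to decide whether retrying the
 * I/O on another path makes sense. The helper name is hypothetical.
 */
static inline bool example_retry_on_other_path(blk_status_t error, int paths_left)
{
	/* target, medium, protection etc. errors will not improve on another path */
	if (!blk_path_error(error))
		return false;
	/* transport/timeout style errors are worth retrying while paths remain */
	return paths_left > 0;
}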
typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	u8			bi_write_stream;
	blk_status_t		bi_status;

	/*
	 * The bvec gap bit indicates the lowest set bit in any address offset
	 * between all bi_io_vecs. This field is initialized only after the bio
	 * is split to the hardware limits (see bio_split_io_at()). The value
	 * may be used to consider DMA optimization when performing that
	 * mapping. The value is compared to a power of two mask where the
	 * result depends on any bit set within the mask, so saving the lowest
	 * bit is sufficient to know if any segment gap collides with the mask.
	 */
	u8			bi_bvec_gap_bit;

	atomic_t		__bi_remaining;

	/* The actual vec list, preserved by bio_reset() */
	struct bio_vec		*bi_io_vec;
	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	/* Time that this bio was issued. */
	u64			issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	/*
	 * Number of elements in `bi_io_vec` that were allocated for this bio.
	 * Only used by the bio submitter to make `bio_add_page` fail once full
	 * and to free the `bi_io_vec` allocation. Must not be used in drivers
	 * and does not hold a useful value for cloned bios.
	 */
	unsigned short		bi_max_vecs;

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_set		*bi_pool;
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SIZE		UINT_MAX /* max value of bi_iter.bi_size */
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> SECTOR_SHIFT)

static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
{
	return (struct bio_vec *)(bio + 1);
}

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	/*
	 * This bio has completed bps throttling at the single tg granularity,
	 * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
	 * into the sq->queued of the upper tg, or is about to be dispatched,
	 * this flag needs to be cleared. Since blk-throttle and rq_qos are not
	 * on the same hierarchical level, reuse the value.
	 */
	BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};
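/*
 * Illustrative sketch, not part of the original header: bio_inline_vecs()
 * assumes the inline bio_vec array sits directly behind struct bio, so an
 * embedded allocation would be sized as below. example_bio_alloc_size() is a
 * hypothetical helper, not kernel API.
 */
static inline size_t example_bio_alloc_size(unsigned short nr_vecs)
{
	/* struct bio followed by nr_vecs inline struct bio_vec entries */
	return sizeof(struct bio) + nr_vecs * sizeof(struct bio_vec);
}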
typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/** @REQ_OP_READ: read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/** @REQ_OP_WRITE: write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/** @REQ_OP_FLUSH: flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/** @REQ_OP_DISCARD: discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/** @REQ_OP_SECURE_ERASE: securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/** @REQ_OP_ZONE_APPEND: write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/** @REQ_OP_WRITE_ZEROES: write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/** @REQ_OP_ZONE_OPEN: Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)11,
	/** @REQ_OP_ZONE_CLOSE: Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)13,
	/** @REQ_OP_ZONE_FINISH: Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)15,
	/** @REQ_OP_ZONE_RESET: reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)17,
	/** @REQ_OP_ZONE_RESET_ALL: reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)19,

	/* Driver private requests */
	/* private: */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META		(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO		(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED		(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE		(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP		(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV			(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE		(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC		(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP		(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
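/*
 * Illustrative sketch, not part of the original header: bi_opf carries one
 * REQ_OP_* value in the low REQ_OP_BITS bits and any number of REQ_* flags
 * above them, so an operation and its modifiers are simply OR'ed together.
 * The helper name is hypothetical.
 */
static inline blk_opf_t example_sync_fua_write_opf(void)
{
	/* a synchronous write that must reach stable media (forced unit access) */
	return REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
}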
enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */
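/*
 * Illustrative sketch, not part of the original header (and placed outside
 * its include guard): how a consumer might classify a bio using the helpers
 * above. The function names are hypothetical.
 */
static inline bool example_bio_is_sync_write(const struct bio *bio)
{
	/* direction comes from the low op bit, sync-ness from REQ_SYNC/FUA/PREFLUSH */
	return op_is_write(bio->bi_opf) && op_is_sync(bio->bi_opf);
}

static inline int example_bio_stat_group(const struct bio *bio)
{
	/* discards get their own bucket; otherwise split by transfer direction */
	return op_stat_group(bio_op(bio));
}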