Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 206 lines 6.5 kB view raw
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef VIO_H
#define VIO_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>

#include "completion.h"
#include "constants.h"
#include "types.h"
#include "vdo.h"

enum {
	/*
	 * The most vdo blocks a single vio can span: the byte capacity of a
	 * bio with BIO_MAX_VECS full pages, expressed in vdo blocks.
	 */
	MAX_BLOCKS_PER_VIO = (BIO_MAX_VECS << PAGE_SHIFT) / VDO_BLOCK_SIZE,
};

/* A vio which is managed by a vio_pool (see make_vio_pool() below). */
struct pooled_vio {
	/* The underlying vio */
	struct vio vio;
	/* The list entry for chaining pooled vios together */
	struct list_head list_entry;
	/* The context set by the pool */
	void *context;
	/* The list entry used by the pool */
	struct list_head pool_entry;
	/* The pool this vio is allocated from */
	struct vio_pool *pool;
};

/**
 * as_vio() - Convert a generic vdo_completion to a vio.
 * @completion: The completion to convert.
 *
 * Asserts that the completion is of type VIO_COMPLETION before converting.
 *
 * Return: The completion as a vio.
 */
static inline struct vio *as_vio(struct vdo_completion *completion)
{
	vdo_assert_completion_type(completion, VIO_COMPLETION);
	return container_of(completion, struct vio, completion);
}

/**
 * get_vio_bio_zone_thread_id() - Get the thread id of the bio zone in which a vio should submit
 *                                its I/O.
 * @vio: The vio.
 *
 * Return: The id of the bio zone thread the vio should use.
 */
static inline thread_id_t __must_check get_vio_bio_zone_thread_id(struct vio *vio)
{
	/* Each bio zone is serviced by a dedicated thread from the vdo's thread config. */
	return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
}

/* Return the physical block number targeted by a vio's bio (implemented in vio.c). */
physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);

/**
 * assert_vio_in_bio_zone() - Check that a vio is running on the correct thread for its bio zone.
 * @vio: The vio to check.
 */
static inline void assert_vio_in_bio_zone(struct vio *vio)
{
	thread_id_t expected = get_vio_bio_zone_thread_id(vio);
	thread_id_t thread_id = vdo_get_callback_thread_id();

	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
			    (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
			    expected);
}

/* Allocate and free the bios vdo uses for its metadata I/O. */
int vdo_create_bio(struct bio **bio_ptr);
void vdo_free_bio(struct bio *bio);
/* Initialize the fields of an already-allocated vio, including its bio and data buffer. */
int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
			    enum vio_priority priority, void *parent,
			    unsigned int block_count, char *data, struct vio *vio);
/* Allocate a metadata vio spanning block_count blocks; the result is stored in *vio_ptr. */
int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						 enum vio_priority priority,
						 void *parent, unsigned int block_count,
						 char *data, struct vio **vio_ptr);

/* Convenience wrapper: allocate a single-block metadata vio. */
static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
						   enum vio_priority priority,
						   void *parent, char *data,
						   struct vio **vio_ptr)
{
	return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
					       vio_ptr);
}

/* Release a vio's resources; free_vio() also frees the vio structure itself. */
void free_vio_components(struct vio *vio);
void free_vio(struct vio *vio);

/**
 * initialize_vio() - Initialize a vio.
 * @vio: The vio to initialize.
 * @bio: The bio this vio should use for its I/O.
 * @block_count: The size of this vio in vdo blocks.
 * @vio_type: The vio type.
 * @priority: The relative priority of the vio.
 * @vdo: The vdo for this vio.
 */
static inline void initialize_vio(struct vio *vio, struct bio *bio,
				  unsigned int block_count, enum vio_type vio_type,
				  enum vio_priority priority, struct vdo *vdo)
{
	/* data_vio's may not span multiple blocks */
	BUG_ON((vio_type == VIO_TYPE_DATA) && (block_count != 1));

	vio->bio = bio;
	vio->block_count = block_count;
	vio->type = vio_type;
	vio->priority = priority;
	/* The completion must be (re)initialized so the vio can be enqueued. */
	vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
}

/* Prepare a vio's bio for submission with the given end_io callback, op flags, and target pbn. */
void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
			    blk_opf_t bi_opf, physical_block_number_t pbn);

/* Reset a vio's bio for reuse; the _with_size variant also sets the I/O size explicitly. */
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
		  blk_opf_t bi_opf, physical_block_number_t pbn);
int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t callback,
			    blk_opf_t bi_opf, physical_block_number_t pbn);

/* Update error statistics for a vio, logging with a printf-style message. */
void update_vio_error_stats(struct vio *vio, const char *format, ...)
	__printf(2, 3);

/**
 * is_data_vio() - Check whether a vio is servicing an external data request.
 * @vio: The vio to check.
 */
static inline bool is_data_vio(struct vio *vio)
{
	return (vio->type == VIO_TYPE_DATA);
}

/**
 * get_metadata_priority() - Convert a vio's priority to a work item priority.
 * @vio: The vio.
 *
 * Return: The priority with which to submit the vio's bio.
 */
static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio)
{
	return ((vio->priority == VIO_PRIORITY_HIGH) ?
		BIO_Q_HIGH_PRIORITY :
		BIO_Q_METADATA_PRIORITY);
}

/**
 * continue_vio() - Enqueue a vio to run its next callback.
 * @vio: The vio to continue.
 * @result: The result of the current operation.
 */
static inline void continue_vio(struct vio *vio, int result)
{
	/* Record a failure in the completion, but never overwrite it with success. */
	if (unlikely(result != VDO_SUCCESS))
		vdo_set_completion_result(&vio->completion, result);

	vdo_enqueue_completion(&vio->completion, VDO_WORK_Q_DEFAULT_PRIORITY);
}

/* Account for a submitted or completed bio in the vdo's statistics. */
void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
void vdo_count_completed_bios(struct bio *bio);

/**
 * continue_vio_after_io() - Continue a vio now that its I/O has returned.
 * @vio: The vio to continue.
 * @callback: The next operation for this vio.
 * @thread: Which thread to run the next operation on.
 */
static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
					 thread_id_t thread)
{
	vdo_count_completed_bios(vio->bio);
	vdo_set_completion_callback(&vio->completion, callback, thread);
	/* Propagate any bio error (converted to a kernel errno) into the completion. */
	continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
}

/* Log and record an I/O error on a metadata vio. */
void vio_record_metadata_io_error(struct vio *vio);

/* A vio_pool is a collection of preallocated vios used to write arbitrary metadata blocks. */

/* Convert a vio back to its enclosing pooled_vio (valid only for pool-allocated vios). */
static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
{
	return container_of(vio, struct pooled_vio, vio);
}

struct vio_pool;

/*
 * Create a pool of pool_size vios, each spanning block_count blocks, whose I/O runs on
 * thread_id. The context pointer is stored in each pooled_vio for the pool's client.
 */
int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count,
			       thread_id_t thread_id, enum vio_type vio_type,
			       enum vio_priority priority, void *context,
			       struct vio_pool **pool_ptr);
void free_vio_pool(struct vio_pool *pool);
bool __must_check is_vio_pool_busy(struct vio_pool *pool);
/* The waiter's callback runs once a vio is available. */
void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
/* Return a vio to the pool recorded in its pooled_vio. */
void return_vio_to_pool(struct pooled_vio *vio);

#endif /* VIO_H */