#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	int (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int size;
	unsigned int bits_per_word;
	struct blk_align_bitmap *map;
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t lock;
		struct list_head dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long state;	/* BLK_MQ_S_* flags */
	struct delayed_work run_work;
	struct delayed_work delay_work;
	cpumask_var_t cpumask;
	int next_cpu;
	int next_cpu_batch;

	unsigned long flags;	/* BLK_MQ_F_* flags */

	struct request_queue *queue;
	struct blk_flush_queue *fq;

	void *driver_data;

	struct blk_mq_ctxmap ctx_map;

	unsigned int nr_ctx;
	struct blk_mq_ctx **ctxs;

	atomic_t wait_index;

	struct blk_mq_tags *tags;

	unsigned long queued;
	unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 10
	unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int numa_node;
	unsigned int queue_num;

	atomic_t nr_active;

	struct blk_mq_cpu_notifier cpu_notifier;
	struct kobject kobj;

	unsigned long poll_invoked;
	unsigned long poll_success;
};

struct blk_mq_tag_set {
	struct blk_mq_ops *ops;
	unsigned int nr_hw_queues;
	unsigned int queue_depth;	/* max hw supported */
	unsigned int reserved_tags;
	unsigned int cmd_size;		/* per-request extra data */
	int numa_node;
	unsigned int timeout;
	unsigned int flags;		/* BLK_MQ_F_* */
	void *driver_data;

	struct blk_mq_tags **tags;

	struct mutex tag_list_lock;
	struct list_head tag_list;
};
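
/*
 * Illustrative sketch (not part of this header's API): a driver typically
 * embeds a blk_mq_tag_set, fills it in, registers it, and then creates a
 * request queue from it.  All "my_drv" names below are hypothetical.
 *
 *	static struct blk_mq_tag_set my_drv_tag_set;
 *
 *	my_drv_tag_set.ops = &my_drv_mq_ops;
 *	my_drv_tag_set.nr_hw_queues = 1;
 *	my_drv_tag_set.queue_depth = 64;
 *	my_drv_tag_set.numa_node = NUMA_NO_NODE;
 *	my_drv_tag_set.cmd_size = sizeof(struct my_drv_cmd);
 *	my_drv_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(&my_drv_tag_set))
 *		goto out;
 *	q = blk_mq_init_queue(&my_drv_tag_set);
 *	if (IS_ERR(q))
 *		goto out_free_tag_set;
 */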

struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);


struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn *queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn *map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn *timeout;

	/*
	 * Called to poll for completion of a specific tag.
	 */
	poll_fn *poll;

	softirq_done_fn *complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn *init_hctx;
	exit_hctx_fn *exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver-specific data.
	 *
	 * A tag greater than or equal to queue_depth indicates that the
	 * flush request is being set up.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn *init_request;
	exit_request_fn *exit_request;
};
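
/*
 * Illustrative sketch (hypothetical "my_drv" names): a driver wires its
 * callbacks into a blk_mq_ops table and points tag_set->ops at it.
 * blk_mq_map_queue(), declared later in this header, is the stock
 * software-to-hardware context mapping used by most drivers:
 *
 *	static struct blk_mq_ops my_drv_mq_ops = {
 *		.queue_rq	= my_drv_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *		.complete	= my_drv_complete_rq,
 *		.init_request	= my_drv_init_request,
 *		.timeout	= my_drv_timeout,
 *	};
 */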

enum {
	BLK_MQ_RQ_QUEUE_OK = 0,		/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY = 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR = 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE = 1 << 0,
	BLK_MQ_F_TAG_SHARED = 1 << 1,
	BLK_MQ_F_SG_MERGE = 1 << 2,
	BLK_MQ_F_DEFER_ISSUE = 1 << 4,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED = 0,
	BLK_MQ_S_TAG_ACTIVE = 1,

	BLK_MQ_MAX_DEPTH = 10240,

	BLK_MQ_CPU_WORK_BATCH = 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
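
/*
 * Illustrative ->queue_rq() skeleton (hypothetical "my_drv" names) showing
 * how the BLK_MQ_RQ_QUEUE_* codes are meant to be used: BUSY asks the core
 * to requeue the request and retry later, ERROR ends it with an error, and
 * OK means it was issued to the hardware.
 *
 *	static int my_drv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *				   const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_drv_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 *
 *		blk_mq_start_request(bd->rq);
 *
 *		if (!my_drv_hw_slot_free(hctx->driver_data))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		if (my_drv_submit(cmd) < 0)
 *			return BLK_MQ_RQ_QUEUE_ERROR;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */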

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
	BLK_MQ_REQ_NOWAIT = (1 << 0),	/* return when out of requests */
	BLK_MQ_REQ_RESERVED = (1 << 1),	/* allocate from reserved pool */
};

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
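
/*
 * Illustrative use of blk_mq_alloc_request() (sketch): allocate a write
 * request from the reserved pool without sleeping, then release it.  On
 * failure an ERR_PTR() is returned rather than NULL.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE,
 *				  BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */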

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
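
/*
 * Sketch of how the unique-tag helpers compose: the hardware queue index
 * lives in the upper 16 bits and the per-queue tag in the lower 16 bits,
 * so both can be recovered from a single u32:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */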

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
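
/*
 * Common pattern (sketch, hypothetical driver code): when the hardware runs
 * out of resources, stop the hardware queue before returning BUSY from
 * ->queue_rq(), and restart stopped queues from the completion path once
 * resources free up again:
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	return BLK_MQ_RQ_QUEUE_BUSY;
 *
 *	// later, in the completion/interrupt handler:
 *	blk_mq_start_stopped_hw_queues(q, true);
 */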

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
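
/*
 * Sketch of the PDU layout contract: with tag_set->cmd_size set to
 * sizeof(struct my_drv_cmd) (hypothetical), each request is followed by
 * that many bytes of per-command driver data, so the two helpers above
 * are exact inverses:
 *
 *	struct my_drv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */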

#define queue_for_each_hw_ctx(q, hctx, i) \
	for ((i) = 0; (i) < (q)->nr_hw_queues && \
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i) \
	for ((i) = 0; (i) < (q)->nr_queues && \
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i) \
	for ((i) = 0; (i) < (hctx)->nr_ctx && \
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum) \
({ \
	struct blk_mq_ctx *__x; \
	unsigned int __ret = 0, __i; \
 \
	queue_for_each_ctx((q), __x, __i) \
		__ret += sum; \
	__ret; \
})
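
/*
 * Illustrative use of the iterators (sketch): count the software contexts
 * behind every hardware queue, e.g. from a debug path:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i, nr = 0;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		nr += hctx->nr_ctx;
 */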

#endif