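/*
 * blk-mq sysfs glue: exposes per-software-queue (blk_mq_ctx) and
 * per-hardware-queue (blk_mq_hw_ctx) statistics as read-only attributes
 * under the request queue's "mq" kobject (/sys/block/<disk>/mq/).
 */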
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                       ctx->rq_completed[0]);
}

static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                const int rq_len = 2 * sizeof(rq) + 2;

                /* if the output will be truncated */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* backspacing if it can't hold '\t...\n' */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
                unsigned long d = 1U << (i - 1);

                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
        }

        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show = blk_mq_sysfs_show,
        .store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show = blk_mq_hw_sysfs_show,
        .store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops = &blk_mq_sysfs_ops,
        .release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops = &blk_mq_sysfs_ops,
        .default_attrs = default_ctx_attrs,
        .release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops = &blk_mq_hw_sysfs_ops,
        .default_attrs = default_hw_ctx_attrs,
        .release = blk_mq_sysfs_release,
};

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

void blk_mq_unregister_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        blk_mq_disable_hotplug();

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&disk_to_dev(disk)->kobj);

        q->mq_sysfs_init_done = false;
        blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

int blk_mq_register_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                blk_mq_unregister_disk(disk);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

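/*
 * blk_mq_sysfs_unregister()/blk_mq_sysfs_register() drop and re-add only
 * the hctx/ctx directories; callers typically wrap a remap of software to
 * hardware queues with them. Both are no-ops until blk_mq_register_disk()
 * has completed (mq_sysfs_init_done).
 */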
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}