/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	if (__scm_alloc_rq_cluster(scmrq))
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
	scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_requeue_request(bdev->rq, scmrq->request[i]);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_end_request_all(scmrq->request[i], scmrq->error);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static int scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int ret;

	atomic_inc(&bdev->queued_reqs);
	if (!scmrq->aob->request.msb_count) {
		scm_request_requeue(scmrq);
		return -EINVAL;
	}

	ret = eadm_start_aob(scmrq->aob);
	if (ret) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
	return ret;
}

static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq = NULL;
	struct request *req;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			blk_end_request_all(req, -EIO);
			continue;
		}

		if (!scm_permit_request(bdev, req))
			goto out;

		if (!scmrq) {
			scmrq = scm_request_fetch();
			if (!scmrq) {
				SCM_LOG(5, "no request");
				goto out;
			}
			scm_request_init(bdev, scmrq);
		}
		scm_request_set(scmrq, req);

		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_set(scmrq, NULL);
			if (scmrq->aob->request.msb_count)
				goto out;

			scm_request_done(scmrq);
			return;
		}

		if (scm_need_cluster_request(scmrq)) {
			if (scmrq->aob->request.msb_count) {
				/* Start cluster requests separately. */
				scm_request_set(scmrq, NULL);
				if (scm_request_start(scmrq))
					return;
			} else {
				atomic_inc(&bdev->queued_reqs);
				blk_start_request(req);
				scm_initiate_cluster_request(scmrq);
			}
			scmrq = NULL;
			continue;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "aidaw alloc failed");
			scm_request_set(scmrq, NULL);
			goto out;
		}
		blk_start_request(req);

		if (scmrq->aob->request.msb_count < nr_requests_per_io)
			continue;

		if (scm_request_start(scmrq))
			return;

		scmrq = NULL;
	}
out:
	if (scmrq)
		scm_request_start(scmrq);
	else
		scm_ensure_queue_restart(bdev);
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return scm_cluster_size_valid();
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);