/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function to indicate that they do.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
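
/*
 * Usage sketch (illustrative addition, not part of the original file):
 * a driver for a device with a volatile write cache that implements a
 * cache flush command, but not FUA, would typically register the
 * drain+flush mode during queue setup:
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 *
 * A write-through device with nothing to flush would pass
 * QUEUE_ORDERED_DRAIN instead, and QUEUE_ORDERED_NONE declares that
 * the queue does not support barriers at all.
 */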

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
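
/*
 * Worked example (illustrative, assuming the QUEUE_ORDSEQ_* flags are
 * single bits assigned in sequence order in the matching blkdev.h:
 * STARTED, DRAIN, PREFLUSH, BAR, POSTFLUSH, DONE): once STARTED and
 * DRAIN are complete, q->ordseq == 0x3.  ffz() finds the first zero
 * bit, position 2, so blk_ordered_cur_seq() returns 1 << 2, i.e. the
 * PREFLUSH stage is current.  An ordseq of 0 means no ordered
 * sequence is in flight at all.
 */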

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}
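
/*
 * Editorial note (an assumption based on how the elevator assigns
 * REQ_ORDERED_COLOR): the color bit is toggled each time a barrier
 * request is queued, so fs requests that entered the queue before the
 * barrier share its color and sort as DRAIN (they must complete
 * first), while requests queued after it carry the opposite color and
 * sort as DONE, i.e. behind the whole barrier sequence.
 */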

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}
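
/*
 * Example (illustrative): completion is cumulative.  On a
 * QUEUE_ORDERED_DRAIN queue there are no flush stages, so
 * start_ordered() passes the PREFLUSH and POSTFLUSH bits together in a
 * single "skip" call; the DRAIN and BAR bits arrive later, from the
 * elevator's completion path and from bar_end_io() below.  Only when
 * every bit below QUEUE_ORDSEQ_DONE is set does the original barrier
 * request complete, carrying the first error recorded in q->orderr,
 * if any.
 */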

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
	rq->rq_disk = q->orig_bar_rq->rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * An empty barrier on a write-through device with
		 * ordered tag has no command to issue, and without any
		 * command to issue, ordering by tag can't be used.
		 * Drain instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue the ordered sequence.  As we stack the requests at the
	 * head, we need to queue them in reverse order.  Note that we
	 * rely on the fact that no fs request uses
	 * ELEVATOR_INSERT_FRONT, so no fs request can get in between
	 * the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_WRITE;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete the skipped sequences.  If the whole sequence is
	 * complete, return false to tell the elevator that this request
	 * is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
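
/*
 * Worked example (illustrative): for a non-empty barrier on a
 * QUEUE_ORDERED_DRAIN_FLUSH queue, the stanzas above front-insert the
 * post-flush, the bar proxy and the pre-flush in that order, leaving
 * the dispatch order
 *
 *	pre_flush_rq -> bar_rq -> post_flush_rq
 *
 * with *rqp pointing at the pre-flush request.  If requests are still
 * in flight, *rqp is instead set to NULL and dispatch stalls until the
 * queue has drained.
 */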

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
			       (rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
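
/*
 * Usage sketch (illustrative; modelled on the dispatch helper in blk.h
 * at this point in history, so treat the details as an assumption):
 * the queue's next-request path runs every head-of-queue request
 * through blk_do_ordered() before handing it to the driver, roughly:
 *
 *	while (!list_empty(&q->queue_head)) {
 *		rq = list_entry_rq(q->queue_head.next);
 *		if (blk_do_ordered(q, &rq))
 *			return rq;	(NULL here means: hold off)
 *	}
 *
 * A false return means the request is gone: either start_ordered()
 * completed the whole (skipped) sequence, or ordering was unsupported
 * and the request was already ended with -EOPNOTSUPP.
 */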

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	optional location to return the offset of a flush error
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the BLKDEV_IFL_WAIT flag is not passed, the caller only
 *    knows that the request was queued internally for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file), and issuing a flush
	 * against such a queue will panic.  Ensure there is a request
	 * function before issuing the barrier.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it.  For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
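
/*
 * Usage sketch (illustrative; BLKDEV_IFL_WAIT is assumed to be the
 * blkdev.h mask built from the BLKDEV_WAIT bit tested above): a
 * filesystem forcing its journal commit to stable storage would issue
 * a synchronous flush like
 *
 *	ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
 *	if (ret == -EOPNOTSUPP)
 *		ret = 0;
 *
 * treating -EOPNOTSUPP as success, since a device that rejects
 * barriers has no volatile cache to flush.
 */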