/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

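/*
 * Two emergency pools: pages usable for ordinary bouncing (page_pool)
 * and pages from ZONE_DMA for ISA devices (isa_page_pool). Being
 * mempools, they guarantee forward progress under memory pressure.
 */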
static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
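/*
 * Set up the shared page pool at boot. With highmem and no memory
 * hotplug, skip it when no highmem pages actually exist; otherwise
 * keep the pool, since highmem could appear later (or a filesystem
 * may need the stable-pages bounce pool).
 */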
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * Highmem version: map the destination page with kmap_atomic() and
 * copy into the vec.
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

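/* Without highmem, every page is permanently mapped: copy directly. */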
#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * Allocate pages in the DMA region for the ISA pool.
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Called every time a queue is initialized with BLK_BOUNCE_ISA as its
 * max bounce address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}
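
/*
 * Typical trigger (a sketch, not the only path): a driver whose device
 * can only DMA below 16MB sets its bounce limit accordingly, and
 * blk_queue_bounce_limit() in turn calls init_emergency_isa_pool():
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 */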

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue's gfp mask, *to may or may not be a highmem page; kmap it
 * either way and the right thing happens.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, *fromvec = from->bi_io_vec;
	struct bvec_iter iter;

	bio_for_each_segment(tovec, to, iter) {
		if (tovec.bv_page != fromvec->bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the
			 * original copy: bounce_copy_vec already uses
			 * tovec.bv_offset and tovec.bv_len.
			 */
			vfrom = page_address(fromvec->bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}

		fromvec++;
	}
}

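/*
 * Common completion for a bounced bio: return every bounce page to its
 * pool, complete the original bio with the same error status, and drop
 * the bounce clone.
 */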
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * Free up the bounce pages; segments that were not bounced still
	 * point at the original page and are skipped.
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{
	bounce_end_io(bio, isa_page_pool, err);
}

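/*
 * For reads, the data lands in the bounce pages first; copy it back to
 * the original (possibly highmem) pages, but only if the I/O succeeded.
 */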
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}

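/*
 * Writes to a device that requires stable pages must not modify the
 * page while it is under I/O; such bios are flagged BIO_SNAP_STABLE
 * and get bounced so the device sees a private snapshot of the data.
 */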
#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	if (bio_data_dir(bio) != WRITE)
		return 0;

	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */

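/*
 * Clone the bio and replace every segment that lies above the queue's
 * bounce pfn (or every segment, when @force is set for a stable-page
 * snapshot) with a page from the given mempool. Write payloads are
 * copied into the bounce pages here; read payloads are copied back on
 * completion.
 */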
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool, int force)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i;

	/*
	 * Quick scan: bounce only if some segment sits above the
	 * queue's bounce pfn.
	 */
	if (force)
		goto bounce;
	bio_for_each_segment(from, *bio_orig, iter)
		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
			continue;

		/*
		 * Allocate the bounce page first, so the NR_BOUNCE
		 * accounting is charged to the page that bounce_end_io()
		 * later decrements.
		 */
		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

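/**
 * blk_queue_bounce - bounce all unreachable pages in a bio, if needed
 * @q:        the request queue the bio will be submitted to
 * @bio_orig: the bio to check; on return it points at the bounce clone
 *            when bouncing was necessary, otherwise it is left untouched
 */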
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	int must_bounce;
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce.
	 */
	if (!bio_has_data(*bio_orig))
		return;

	must_bounce = must_snapshot_stable_pages(q, *bio_orig);

	/*
	 * For the non-ISA bounce case, just check whether the bounce pfn
	 * is equal to or bigger than the highest pfn in the system -- in
	 * that case, don't waste time iterating over bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * Slow path: clone and bounce.
	 */
	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
}
EXPORT_SYMBOL(blk_queue_bounce);