/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

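/*
 * Emergency pools: page_pool bounces highmem pages down into low
 * memory, isa_page_pool bounces into ISA DMA-able memory (the first
 * 16MB on x86).
 */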
static mempool_t *page_pool, *isa_page_pool;

#ifdef CONFIG_HIGHMEM
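/*
 * Boot-time setup for the highmem pool; machines without highmem
 * never need to bounce, so skip the allocation there.
 */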
static __init int init_emergency_pool(void)
{
	struct sysinfo i;
	si_meminfo(&i);
	si_swapinfo(&i);

	if (!i.totalhigh)
		return 0;

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

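/* run the pool setup during the boot-time initcall sequence */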
__initcall(init_emergency_pool);

/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

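/*
 * Without CONFIG_HIGHMEM every page already has a kernel mapping,
 * so a plain memcpy is all the bounce copy needs.
 */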
#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * gets called "every" time someone inits a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always; it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy;
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		flush_dcache_page(tovec->bv_page);
		bounce_copy_vec(tovec, vfrom);
	}
}

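/*
 * Common completion path: hand every bounced page back to its pool,
 * propagate EOPNOTSUPP, and complete the original bio.
 */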
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, bio_orig->bi_size, err);
	bio_put(bio);
}

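/*
 * Write completion: nothing to copy back, just release the bounce
 * pages. Under the old bi_end_io convention used here, returning 1
 * while bio->bi_size is still non-zero signals a partial completion.
 */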
static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bounce_end_io(bio, page_pool, err);
	return 0;
}

static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bounce_end_io(bio, isa_page_pool, err);
	return 0;
}

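/*
 * Read completion: the device wrote into the bounce pages, so on
 * success the data must be copied up to the original (possibly
 * highmem) pages before they are released.
 */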
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	__bounce_end_io_read(bio, page_pool, err);
	return 0;
}

static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	__bounce_end_io_read(bio, isa_page_pool, err);
	return 0;
}

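/*
 * Bounce each segment of *bio_orig that sits above q->bounce_pfn into
 * a page from the given pool; the clone bio only exists if at least
 * one segment had to be bounced. q->bounce_gfp normally allows the
 * allocation to wait, in which case mempool_alloc() will not fail.
 */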
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) <= q->bounce_pfn)
			continue;

		/*
		 * irk, bounce it
		 */
		if (!bio)
			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

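		/*
		 * For a WRITE the device will read from the bounce page,
		 * so the payload has to be copied down before submission.
		 */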
		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}

	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

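	/*
	 * Pick completion handlers that return the bounce pages to the
	 * pool they were allocated from.
	 */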
	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

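/*
 * blk_queue_bounce() is the entry point: it replaces *bio_orig with a
 * bounced clone when any of its pages lie above the queue's bounce
 * limit. As an illustrative example (not part of this file), a driver
 * whose hardware cannot address highmem would opt in by setting its
 * queue limit at init time:
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */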
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->bounce_pfn >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);