/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>

#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x)	(bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x)		((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x)	0
#define BIOVEC_VIRT_OVERSIZE(x)		0
#endif

#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY	0
#endif

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio_set;
struct bio;
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned short		bi_phys_segments;

	/* Number of segments after physical and DMA remapping
	 * hardware coalescing is performed.
	 */
	unsigned short		bi_hw_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max hw size, we account for the
	 * sizes of the first and last virtually mergeable segments
	 * in this bio
	 */
	unsigned int		bi_hw_front_size;
	unsigned int		bi_hw_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;

	bio_destructor_t	*bi_destructor;	/* destructor */
};

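/*
 * Example usage (an illustrative sketch only; 'bdev', 'sector', 'page',
 * 'ctx' and 'my_end_io' are placeholders, not part of this interface):
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = ctx;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 */
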
/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED	6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
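
/*
 * Example (sketch): a completion callback typically waits for the whole
 * bio to finish, then tests BIO_UPTODATE ('my_end_io' and 'handle_error'
 * are hypothetical names):
 *
 *	static int my_end_io(struct bio *bio, unsigned int bytes_done, int err)
 *	{
 *		if (bio->bi_size)
 *			return 1;
 *		if (!bio_flagged(bio, BIO_UPTODATE))
 *			handle_error(bio->bi_private);
 *		bio_put(bio);
 *		return 0;
 *	}
 */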

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 * bit 5 -- metadata request
 */
#define BIO_RW		0
#define BIO_RW_AHEAD	1
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4
#define BIO_RW_META	5

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {			\
	WARN_ON(prio >= (1 << IOPRIO_BITS));			\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);		\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

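/*
 * Example (sketch, assuming the IOPRIO_PRIO_VALUE() and IOPRIO_CLASS_BE
 * definitions from linux/ioprio.h): mark a bio best-effort, level 4:
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 */
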
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif

#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could potentially complete before
 * submit_bio returns, and then the bio would already be freed memory
 * by the time the if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
	atomic_t	cnt;
	int		error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);

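/*
 * Example (sketch, in the style of raid0-type users): split a bio whose
 * single page straddles a chunk boundary and submit both halves:
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */
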
extern struct bio_set *bioset_create(int, int, int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int);
struct sg_iovec;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern void bio_release_pages(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
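
/*
 * Example (sketch): bio_add_page() returns the number of bytes it
 * accepted, so a return shorter than 'len' means the bio is full and
 * should be submitted before building a new one ('rw', 'bdev', 'len'
 * and 'offset' are placeholders):
 *
 *	if (bio_add_page(bio, page, len, offset) < len) {
 *		submit_bio(rw, bio);
 *		bio = bio_alloc(GFP_NOIO, bio_get_nr_vecs(bdev));
 *	}
 */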

#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif
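
/*
 * Example (sketch): the map/unmap calls must stay strictly paired, with
 * interrupts left disabled in between ('data' is a placeholder):
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(buf, data, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */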

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

#endif /* __LINUX_BIO_H */