// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka ([email protected]), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

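/*
 * Translate a sector number through the hotfix map.  HPFS remaps
 * defective sectors to spare "hotfix" sectors; the from/to pairs live in
 * sbi->hotfix_from[] and sbi->hotfix_to[].  Return the replacement
 * sector if @sec is hotfixed, otherwise return @sec unchanged.
 */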
secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] == sec) {
			return sbi->hotfix_to[i];
		}
	}
	return sec;
}

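/*
 * Return how many sectors of the range [@sec, @sec + @n) form one
 * contiguous run before the first hotfixed sector is reached.  The
 * result is at most @n; hpfs_prefetch_sectors() below skips read-ahead
 * entirely when the run is shortened.
 */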
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
			n = sbi->hotfix_from[i] - sec;
		}
	}
	return n;
}

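/*
 * Issue read-ahead for @n sectors starting at @secno.  Do nothing if the
 * request is empty, the start lies beyond the end of the filesystem, the
 * range crosses a hotfixed sector, or the first sector is already cached
 * and up to date; otherwise submit the requests under a block plug so
 * the I/O can be merged.
 */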
void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
	struct buffer_head *bh;
	struct blk_plug plug;

	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
		return;

	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
		return;

	bh = sb_find_get_block(s, secno);
	if (bh) {
		if (buffer_uptodate(bh)) {
			brelse(bh);
			return;
		}
		brelse(bh);
	}

	blk_start_plug(&plug);
	while (n > 0) {
		if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
			break;
		sb_breadahead(s, secno);
		secno++;
		n--;
	}
	blk_finish_plug(&plug);
}

/* Map a sector into a buffer and return pointers to the data and to the buffer head. */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		      int ahead)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	hpfs_prefetch_sectors(s, secno, ahead);

	cond_resched();

	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
	if (bh != NULL)
		return bh->b_data;
	else {
		pr_err("%s(): read error\n", __func__);
		return NULL;
	}
}

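/*
 * Typical call pattern for hpfs_map_sector() (an illustrative sketch,
 * not code from this file; the caller must already hold the HPFS lock
 * and is responsible for releasing the buffer):
 *
 *	struct buffer_head *bh;
 *	void *data = hpfs_map_sector(s, secno, &bh, 0);
 *
 *	if (data) {
 *		... use or modify the 512-byte sector image ...
 *		mark_buffer_dirty(bh);	(only if it was modified)
 *		brelse(bh);
 *	}
 */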
/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}

/* Map 4 consecutive sectors into a quad buffer and return pointers to the data and to the buffers. */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
			int ahead)
{
	char *data;

	hpfs_lock_assert(s);

	cond_resched();

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	hpfs_prefetch_sectors(s, secno, 4 + ahead);

	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}

	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

	return data;

bail4:
	brelse(qbh->bh[3]);
bail3:
	brelse(qbh->bh[2]);
bail2:
	brelse(qbh->bh[1]);
bail1:
	brelse(qbh->bh[0]);
bail0:
	return NULL;
}

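/*
 * hpfs_map_4sectors() and hpfs_get_4sectors() fill a struct
 * quad_buffer_head.  When the four buffers happen to be contiguous in
 * memory, qbh->data points straight at bh[0]->b_data; otherwise a
 * 2048-byte bounce buffer is allocated, written back by
 * hpfs_mark_4buffers_dirty() and freed by hpfs_brelse4().  Usage sketch
 * (illustrative only, not code from this file):
 *
 *	struct quad_buffer_head qbh;
 *	void *data = hpfs_map_4sectors(s, secno, &qbh, 0);
 *
 *	if (data) {
 *		... work on the 2048-byte block ...
 *		hpfs_mark_4buffers_dirty(&qbh);	(only if it was modified)
 *		hpfs_brelse4(&qbh);
 *	}
 */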
/* Like hpfs_map_4sectors, but don't read the sectors */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
			struct quad_buffer_head *qbh)
{
	cond_resched();

	hpfs_lock_assert(s);

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}
	return qbh->data;

bail4:
	brelse(qbh->bh[3]);
bail3:
	brelse(qbh->bh[2]);
bail2:
	brelse(qbh->bh[1]);
bail1:
	brelse(qbh->bh[0]);
bail0:
	return NULL;
}

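/*
 * Release a quad buffer obtained from hpfs_map_4sectors() or
 * hpfs_get_4sectors(): free the bounce buffer if one was allocated and
 * drop the four buffer_head references.
 */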
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data))
		kfree(qbh->data);
	brelse(qbh->bh[0]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[3]);
}

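/*
 * Mark all four buffers dirty.  If a separate bounce buffer is in use,
 * copy its contents back into the individual buffers first so that the
 * dirty data actually reaches the buffer cache.
 */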
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	}
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}