// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#include "isofs.h"
#include "zisofs.h"

/* This should probably be global. */
static char zisofs_sink_page[PAGE_SIZE];

/*
 * This contains the zlib memory allocation and the mutex for the
 * allocation; this avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static DEFINE_MUTEX(zisofs_zlib_lock);

/*
 * Read data of @inode from @block_start to @block_end and uncompress
 * to one zisofs block. Store the data in the @pages array with @pcount
 * entries. Start storing at offset @poffset of the first page.
 */
static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
                                      loff_t block_end, int pcount,
                                      struct page **pages, unsigned poffset,
                                      int *errp)
{
        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
        unsigned int bufsize = ISOFS_BUFFER_SIZE(inode);
        unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
        unsigned int bufmask = bufsize - 1;
        int i, block_size = block_end - block_start;
        z_stream stream = { .total_out = 0,
                            .avail_in = 0,
                            .avail_out = 0, };
        int zerr;
        int needblocks = (block_size + (block_start & bufmask) + bufmask)
                                >> bufshift;
        int haveblocks;
        blkcnt_t blocknum;
        struct buffer_head **bhs;
        int curbh, curpage;

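        /*
         * Sanity check: a valid zisofs block can never be larger than the
         * zlib worst-case (deflateBound) size for one uncompressed block,
         * so a bigger value means the block pointer table is corrupt.
         */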
        if (block_size > deflateBound(1UL << zisofs_block_shift)) {
                *errp = -EIO;
                return 0;
        }
        /* Empty block? */
        if (block_size == 0) {
                for (i = 0; i < pcount; i++) {
                        if (!pages[i])
                                continue;
                        memset(page_address(pages[i]), 0, PAGE_SIZE);
                        flush_dcache_page(pages[i]);
                        SetPageUptodate(pages[i]);
                }
                return ((loff_t)pcount) << PAGE_SHIFT;
        }

        /* Because zlib is not thread-safe, do all the I/O at the top. */
        blocknum = block_start >> bufshift;
        bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
        if (!bhs) {
                *errp = -ENOMEM;
                return 0;
        }
        haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
        ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);

        curbh = 0;
        curpage = 0;
        /*
         * First block is special since it may be fractional. We also wait for
         * it before grabbing the zlib mutex; odds are that the subsequent
         * blocks are going to come in in short order so we don't hold the zlib
         * mutex longer than necessary.
         */

        if (!bhs[0])
                goto b_eio;

        wait_on_buffer(bhs[0]);
        if (!buffer_uptodate(bhs[0])) {
                *errp = -EIO;
                goto b_eio;
        }

        stream.workspace = zisofs_zlib_workspace;
        mutex_lock(&zisofs_zlib_lock);

        zerr = zlib_inflateInit(&stream);
        if (zerr != Z_OK) {
                if (zerr == Z_MEM_ERROR)
                        *errp = -ENOMEM;
                else
                        *errp = -EIO;
                printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
                       zerr);
                goto z_eio;
        }

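        /*
         * Main decompression loop: whenever the output buffer is exhausted,
         * point zlib at the next page (or at the sink page where pages[] has
         * a hole); whenever the input is exhausted, wait for the next
         * buffer_head and feed it in.  poffset only applies to the very
         * first page, so it is cleared after its first use.
         */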
        while (curpage < pcount && curbh < haveblocks &&
               zerr != Z_STREAM_END) {
                if (!stream.avail_out) {
                        if (pages[curpage]) {
                                stream.next_out = page_address(pages[curpage])
                                                + poffset;
                                stream.avail_out = PAGE_SIZE - poffset;
                                poffset = 0;
                        } else {
                                stream.next_out = (void *)&zisofs_sink_page;
                                stream.avail_out = PAGE_SIZE;
                        }
                }
                if (!stream.avail_in) {
                        wait_on_buffer(bhs[curbh]);
                        if (!buffer_uptodate(bhs[curbh])) {
                                *errp = -EIO;
                                break;
                        }
                        stream.next_in = bhs[curbh]->b_data +
                                                (block_start & bufmask);
                        stream.avail_in = min_t(unsigned, bufsize -
                                                (block_start & bufmask),
                                                block_size);
                        block_size -= stream.avail_in;
                        block_start = 0;
                }

                while (stream.avail_out && stream.avail_in) {
                        zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
                        if (zerr == Z_BUF_ERROR && stream.avail_in == 0)
                                break;
                        if (zerr == Z_STREAM_END)
                                break;
                        if (zerr != Z_OK) {
                                /* EOF, error, or trying to read beyond end of input */
                                if (zerr == Z_MEM_ERROR)
                                        *errp = -ENOMEM;
                                else {
                                        printk(KERN_DEBUG
                                               "zisofs: zisofs_inflate returned"
                                               " %d, inode = %lu,"
                                               " page idx = %d, bh idx = %d,"
                                               " avail_in = %ld,"
                                               " avail_out = %ld\n",
                                               zerr, inode->i_ino, curpage,
                                               curbh, stream.avail_in,
                                               stream.avail_out);
                                        *errp = -EIO;
                                }
                                goto inflate_out;
                        }
                }

                if (!stream.avail_out) {
                        /* This page completed */
                        if (pages[curpage]) {
                                flush_dcache_page(pages[curpage]);
                                SetPageUptodate(pages[curpage]);
                        }
                        curpage++;
                }
                if (!stream.avail_in)
                        curbh++;
        }
inflate_out:
        zlib_inflateEnd(&stream);

z_eio:
        mutex_unlock(&zisofs_zlib_lock);

b_eio:
        for (i = 0; i < haveblocks; i++)
                brelse(bhs[i]);
        kfree(bhs);
        return stream.total_out;
}

/*
 * Uncompress data so that pages[full_page] is fully uptodate and possibly
 * fills in other pages if we have data for them.
 */
static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
                             struct page **pages)
{
        loff_t start_off, end_off;
        loff_t block_start, block_end;
        unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
        unsigned int blockptr;
        loff_t poffset = 0;
        blkcnt_t cstart_block, cend_block;
        struct buffer_head *bh;
        unsigned int blkbits = ISOFS_BUFFER_BITS(inode);
        unsigned int blksize = 1 << blkbits;
        int err;
        loff_t ret;

        BUG_ON(!pages[full_page]);

        /*
         * We want to read at least 'full_page' page. Because we have to
         * uncompress the whole compression block anyway, fill the surrounding
         * pages with the data we have anyway...
         */
        start_off = page_offset(pages[full_page]);
        end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);

        cstart_block = start_off >> zisofs_block_shift;
        cend_block = (end_off + (1 << zisofs_block_shift) - 1)
                        >> zisofs_block_shift;

        WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
                ((cstart_block << zisofs_block_shift) & PAGE_MASK));

        /* Find the pointer to this specific chunk */
        /* Note: we're not using isonum_731() here because the data is known aligned */
        /* Note: header_size is in 32-bit words (4 bytes) */
        blockptr = (header_size + cstart_block) << 2;
        bh = isofs_bread(inode, blockptr >> blkbits);
        if (!bh)
                return -EIO;
        block_start = le32_to_cpu(*(__le32 *)
                                (bh->b_data + (blockptr & (blksize - 1))));

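        /*
         * Walk the block pointer table: each iteration reads the next table
         * entry to find where the current compressed block ends, decompresses
         * that block, and then advances past any pages it filled completely.
         */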
        while (cstart_block < cend_block && pcount > 0) {
                /* Load end of the compressed block in the file */
                blockptr += 4;
                /* Traversed to next block? */
                if (!(blockptr & (blksize - 1))) {
                        brelse(bh);

                        bh = isofs_bread(inode, blockptr >> blkbits);
                        if (!bh)
                                return -EIO;
                }
                block_end = le32_to_cpu(*(__le32 *)
                                (bh->b_data + (blockptr & (blksize - 1))));
                if (block_start > block_end) {
                        brelse(bh);
                        return -EIO;
                }
                err = 0;
                ret = zisofs_uncompress_block(inode, block_start, block_end,
                                              pcount, pages, poffset, &err);
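                /*
                 * Advance past the pages that are now completely filled.  If
                 * this moves us beyond the page the caller asked for,
                 * full_page goes negative, which the error path below uses to
                 * decide whether the critical page was still read in full.
                 */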
                poffset += ret;
                pages += poffset >> PAGE_SHIFT;
                pcount -= poffset >> PAGE_SHIFT;
                full_page -= poffset >> PAGE_SHIFT;
                poffset &= ~PAGE_MASK;

                if (err) {
                        brelse(bh);
                        /*
                         * Did we finish reading the page we really wanted
                         * to read?
                         */
                        if (full_page < 0)
                                return 0;
                        return err;
                }

                block_start = block_end;
                cstart_block++;
        }

        if (poffset && *pages) {
                memset(page_address(*pages) + poffset, 0,
                       PAGE_SIZE - poffset);
                flush_dcache_page(*pages);
                SetPageUptodate(*pages);
        }
        return 0;
}

/*
 * When decompressing, we typically obtain more than one page
 * per reference. We inject the additional pages into the page
 * cache as a form of readahead.
 */
static int zisofs_readpage(struct file *file, struct page *page)
{
        struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        int err;
        int i, pcount, full_page;
        unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
        unsigned int zisofs_pages_per_cblock =
                PAGE_SHIFT <= zisofs_block_shift ?
                (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
        struct page **pages;
        pgoff_t index = page->index, end_index;

        end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /*
         * If this page is wholly outside i_size we just return zero;
         * do_generic_file_read() will handle this for us
         */
        if (index >= end_index) {
                SetPageUptodate(page);
                unlock_page(page);
                return 0;
        }

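        /*
         * A compression block may span several pages; if so, work out which
         * page of that block we were asked for and how many pages of the
         * block lie below i_size.  If the page is at least as big as the
         * compression block, we only ever fill the single page we were given.
         */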
        if (PAGE_SHIFT <= zisofs_block_shift) {
                /* We have already been given one page, this is the one
                   we must do. */
                full_page = index & (zisofs_pages_per_cblock - 1);
                pcount = min_t(int, zisofs_pages_per_cblock,
                        end_index - (index & ~(zisofs_pages_per_cblock - 1)));
                index -= full_page;
        } else {
                full_page = 0;
                pcount = 1;
        }
        pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
                                        sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                unlock_page(page);
                return -ENOMEM;
        }
        pages[full_page] = page;

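        /*
         * Lock and map the other pages of the compression block without
         * blocking; any page we cannot grab stays NULL and its data is
         * simply discarded into the sink page during decompression.
         */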
        for (i = 0; i < pcount; i++, index++) {
                if (i != full_page)
                        pages[i] = grab_cache_page_nowait(mapping, index);
                if (pages[i]) {
                        ClearPageError(pages[i]);
                        kmap(pages[i]);
                }
        }

        err = zisofs_fill_pages(inode, full_page, pcount, pages);

        /* Release any residual pages, do not SetPageUptodate */
        for (i = 0; i < pcount; i++) {
                if (pages[i]) {
                        flush_dcache_page(pages[i]);
                        if (i == full_page && err)
                                SetPageError(pages[i]);
                        kunmap(pages[i]);
                        unlock_page(pages[i]);
                        if (i != full_page)
                                put_page(pages[i]);
                }
        }

        /* At this point, err contains 0 or -EIO depending on the "critical" page */
        kfree(pages);
        return err;
}

const struct address_space_operations zisofs_aops = {
        .readpage = zisofs_readpage,
        /* No bmap operation supported */
};

int __init zisofs_init(void)
{
        zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
        if (!zisofs_zlib_workspace)
                return -ENOMEM;

        return 0;
}

void zisofs_cleanup(void)
{
        vfree(zisofs_zlib_workspace);
}