]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 LT |
2 | /* |
3 | * linux/fs/ufs/ufs_dir.c | |
4 | * | |
5 | * Copyright (C) 1996 | |
6 | * Adrian Rodriguez ([email protected]) | |
7 | * Laboratory for Computer Science Research Computing Facility | |
8 | * Rutgers, The State University of New Jersey | |
9 | * | |
10 | * swab support by Francois-Rene Rideau <[email protected]> 19970406 | |
11 | * | |
12 | * 4.4BSD (FreeBSD) support added on February 1st 1998 by | |
13 | * Niels Kristian Bech Jensen <[email protected]> partially based | |
14 | * on code by Martin von Loewis <[email protected]>. | |
b71034e5 ED |
15 | * |
16 | * Migration to usage of "page cache" on May 2006 by | |
17 | * Evgeniy Dushistov <[email protected]> based on ext2 code base. | |
1da177e4 LT |
18 | */ |
19 | ||
20 | #include <linux/time.h> | |
21 | #include <linux/fs.h> | |
82b9d1d0 | 22 | #include <linux/swap.h> |
bb8c2d66 | 23 | #include <linux/iversion.h> |
1da177e4 | 24 | |
e5420598 | 25 | #include "ufs_fs.h" |
bcd6d4ec | 26 | #include "ufs.h" |
1da177e4 LT |
27 | #include "swab.h" |
28 | #include "util.h" | |
29 | ||
1da177e4 LT |
30 | /* |
31 | * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure. | |
32 | * | |
33 | * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller. | |
34 | */ | |
35 | static inline int ufs_match(struct super_block *sb, int len, | |
89031bc7 | 36 | const unsigned char *name, struct ufs_dir_entry *de) |
1da177e4 LT |
37 | { |
38 | if (len != ufs_get_de_namlen(sb, de)) | |
39 | return 0; | |
40 | if (!de->d_ino) | |
41 | return 0; | |
42 | return !memcmp(name, de->d_name, len); | |
43 | } | |
44 | ||
/*
 * Finish a modification of a directory chunk that was started with
 * ufs_prepare_chunk(): bump the directory's iversion (so readdir
 * cookies are revalidated), mark the written range up to date via
 * block_write_end(), extend i_size if the write went past it, and
 * unlock the folio.  The folio is left mapped; callers release it.
 */
static void ufs_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	struct address_space *mapping = folio->mapping;
	struct inode *dir = mapping->host;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, folio, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	folio_unlock(folio);
}
58 | ||
59 | static int ufs_handle_dirsync(struct inode *dir) | |
60 | { | |
61 | int err; | |
62 | ||
63 | err = filemap_write_and_wait(dir->i_mapping); | |
64 | if (!err) | |
65 | err = sync_inode_metadata(dir, 1); | |
b71034e5 ED |
66 | return err; |
67 | } | |
1da177e4 | 68 | |
89031bc7 | 69 | ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) |
b71034e5 ED |
70 | { |
71 | ino_t res = 0; | |
72 | struct ufs_dir_entry *de; | |
e95d2754 | 73 | struct folio *folio; |
b71034e5 | 74 | |
e95d2754 | 75 | de = ufs_find_entry(dir, qstr, &folio); |
b71034e5 ED |
76 | if (de) { |
77 | res = fs32_to_cpu(dir->i_sb, de->d_ino); | |
516b97cf | 78 | folio_release_kmap(folio, de); |
1da177e4 | 79 | } |
b71034e5 | 80 | return res; |
1da177e4 LT |
81 | } |
82 | ||
1da177e4 | 83 | |
b71034e5 ED |
/*
 * Point an existing directory entry @de at @inode, rewriting its inode
 * number and type in place.  Used by rename.  Releases the folio
 * (unmaps and drops the reference) before returning.
 *
 * @update_times: when true, also update the directory's ctime/mtime.
 */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
	struct folio *folio, struct inode *inode,
	bool update_times)
{
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	folio_lock(folio);
	err = ufs_prepare_chunk(folio, pos, len);
	/* The chunk is already mapped and in the page cache; failure here
	 * would indicate a filesystem bug, hence BUG_ON. */
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	ufs_commit_chunk(folio, pos, len);
	folio_release_kmap(folio, de);
	if (update_times)
		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	/* Best effort for sync directories; return value intentionally
	 * ignored (interface is void). */
	ufs_handle_dirsync(dir);
}
1da177e4 | 107 | |
/*
 * Validate every directory entry in @folio (whose mapping starts at
 * @kaddr): record lengths must be sane, aligned, large enough for the
 * name, must not cross a chunk boundary, and inode numbers must be
 * within the filesystem's inode range.  On success the folio is marked
 * checked so this scan runs only once per folio; on failure an
 * ufs_error() is logged and false is returned.
 */
static bool ufs_check_folio(struct folio *folio, char *kaddr)
{
	struct inode *dir = folio->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned offs, rec_len;
	unsigned limit = folio_size(folio);
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	/* The last folio may be only partially covered by i_size; a valid
	 * directory ends on a chunk boundary. */
	if (dir->i_size < folio_pos(folio) + limit) {
		limit = offset_in_folio(folio, dir->i_size);
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		/* First and last byte of the record must lie in the same
		 * directory chunk. */
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		/* Inode number must not exceed inodes-per-group * groups. */
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						 UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	folio_set_checked(folio);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, __func__,
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error(sb, __func__, "bad entry in directory #%lu: %s - "
		   "offset=%llu, rec_len=%d, name_len=%d",
		   dir->i_ino, error, folio_pos(folio) + offs,
		   rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		   "entry in directory #%lu spans the page boundary"
		   "offset=%llu",
		   dir->i_ino, folio_pos(folio) + offs);
fail:
	return false;
}
1da177e4 | 184 | |
5fe08b1d MWO |
/*
 * Read folio @n of directory @dir through the page cache, map it with
 * kmap_local_folio(), and validate its contents on first use (see
 * ufs_check_folio()).  On success, returns the kernel address of the
 * mapping and stores the folio in *@foliop; the caller must release
 * the pair with folio_release_kmap().  On failure, returns an
 * ERR_PTR() and leaves *@foliop untouched.
 */
static void *ufs_get_folio(struct inode *dir, unsigned long n,
		struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct folio *folio = read_mapping_folio(mapping, n, NULL);
	void *kaddr;

	if (IS_ERR(folio))
		return ERR_CAST(folio);
	kaddr = kmap_local_folio(folio, 0);
	/* Folios are checked once; the checked flag caches the verdict. */
	if (unlikely(!folio_test_checked(folio))) {
		if (!ufs_check_folio(folio, kaddr))
			goto fail;
	}
	*foliop = folio;
	return kaddr;

fail:
	folio_release_kmap(folio, kaddr);
	return ERR_PTR(-EIO);
}
206 | ||
b71034e5 ED |
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 *
 * Only the final page of the directory can be partially valid; for
 * every earlier page this returns PAGE_SIZE.  Note the arithmetic is
 * done in `unsigned`, deliberately truncating i_size — the caller is
 * expected to pass a page_nr within the directory.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}
221 | ||
b71034e5 ED |
222 | static inline struct ufs_dir_entry * |
223 | ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p) | |
1da177e4 | 224 | { |
b71034e5 ED |
225 | return (struct ufs_dir_entry *)((char *)p + |
226 | fs16_to_cpu(sb, p->d_reclen)); | |
1da177e4 | 227 | } |
b71034e5 | 228 | |
597697c5 | 229 | struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct folio **foliop) |
1da177e4 | 230 | { |
597697c5 | 231 | struct ufs_dir_entry *de = ufs_get_folio(dir, 0, foliop); |
5fe08b1d | 232 | |
597697c5 MWO |
233 | if (!IS_ERR(de)) |
234 | return ufs_next_entry(dir->i_sb, de); | |
1da177e4 | 235 | |
597697c5 | 236 | return NULL; |
1da177e4 LT |
237 | } |
238 | ||
b71034e5 ED |
/*
 * ufs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct folio **foliop)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	/* Start where the previous successful lookup left off and wrap
	 * around, so sequential lookups in large dirs stay O(1)-ish. */
	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr = ufs_get_folio(dir, n, foliop);

		/* An unreadable folio is skipped, not fatal. */
		if (!IS_ERR(kaddr)) {
			de = (struct ufs_dir_entry *)kaddr;
			/* Last address at which a full candidate fits. */
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			folio_release_kmap(*foliop, kaddr);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	/* Cache the folio index for the next lookup. */
	ui->i_dir_start_lookup = n;
	return de;
}
292 | ||
/*
 * Parent is locked.
 *
 * Add @inode under @dentry's name in the parent directory.  Scans for
 * free space (an unused entry, or slack inside a used entry) and, if
 * none is found, extends the directory by one chunk.  Returns 0 on
 * success, -EEXIST if the name already exists, or a negative errno.
 */
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct folio *folio = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	loff_t pos;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the folio
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {	/* note: <=, one past the end */
		char *kaddr = ufs_get_folio(dir, n, &folio);
		char *dir_end;

		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		/* Last position at which the new entry could still fit. */
		kaddr += folio_size(folio) - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __func__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			/* name_len = space actually used by this entry. */
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			/* Reuse a deleted entry whole... */
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			/* ...or split a live entry's slack space. */
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	/* The n == npages pass always finds room (new chunk), so falling
	 * out of the loop is impossible. */
	BUG();
	return -EINVAL;

got_it:
	pos = folio_pos(folio) + offset_in_folio(folio, de);
	err = ufs_prepare_chunk(folio, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		/* Splitting a live entry: shrink it to its used size and
		 * carve the new entry out of the remainder. */
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	ufs_commit_chunk(folio, pos, rec_len);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

	mark_inode_dirty(dir);
	err = ufs_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	folio_release_kmap(folio, de);
	return err;
out_unlock:
	folio_unlock(folio);
	goto out_put;
}
393 | ||
/*
 * Clamp a readdir offset onto a real entry boundary: rewind to the
 * start of the containing chunk (offset & mask) and walk forward
 * entry by entry until reaching or passing @offset.  Returns the
 * resulting offset from @base.
 */
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *limit = (struct ufs_dir_entry *)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry *)(base + (offset & mask));

	while ((char *)p < (char *)limit)
		p = ufs_next_entry(sb, p);
	return (char *)p - base;
}
1da177e4 | 404 | |
b71034e5 ED |
405 | |
/*
 * This is blatantly stolen from ext2fs
 *
 * Iterate directory entries for readdir/getdents.  The per-file
 * private_data holds a u64 iversion cookie (allocated in
 * ufs_dir_open()); when it no longer matches the inode's iversion,
 * the resume offset is revalidated onto an entry boundary.
 */
static int
ufs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		struct ufs_dir_entry *de;
		struct folio *folio;
		char *kaddr = ufs_get_folio(inode, n, &folio);
		char *limit;

		if (IS_ERR(kaddr)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			/* Skip past the bad page so a retry can make progress. */
			ctx->pos += PAGE_SIZE - offset;
			return PTR_ERR(kaddr);
		}
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			*(u64 *)file->private_data = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_ino) {	/* skip deleted entries */
				unsigned char d_type = DT_UNKNOWN;

				UFSD("filldir(%s,%u)\n", de->d_name,
				      fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				/* Only 4.4BSD-layout entries carry a type. */
				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				if (!dir_emit(ctx, de->d_name,
					       ufs_get_de_namlen(sb, de),
					       fs32_to_cpu(sb, de->d_ino),
					       d_type)) {
					/* User buffer full: stop, resume later. */
					folio_release_kmap(folio, de);
					return 0;
				}
			}
			ctx->pos += fs16_to_cpu(sb, de->d_reclen);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}
475 | ||
b71034e5 | 476 | |
1da177e4 LT |
/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 *
 * @dir is the entry to delete, mapped inside @folio.  The preceding
 * entry in the same chunk (if any) absorbs the deleted record's space
 * by growing its d_reclen; otherwise the entry is just cleared
 * (d_ino = 0).  Releases the folio.  Returns 0 or a negative errno.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct folio *folio)
{
	struct super_block *sb = inode->i_sb;
	size_t from, to;
	char *kaddr;
	loff_t pos;
	struct ufs_dir_entry *de, *pde = NULL;
	int err;

	UFSD("ENTER\n");

	from = offset_in_folio(folio, dir);
	to = from + fs16_to_cpu(sb, dir->d_reclen);
	kaddr = (char *)dir - from;
	/* Rewind `from' to the start of the containing chunk; entries
	 * never span chunks, so the predecessor lies within it. */
	from &= ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	de = (struct ufs_dir_entry *) (kaddr + from);

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	      fs32_to_cpu(sb, de->d_ino),
	      fs16_to_cpu(sb, de->d_reclen),
	      ufs_get_de_namlen(sb, de), de->d_name);

	/* Walk the chunk to find the entry immediately before @dir. */
	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __func__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = offset_in_folio(folio, pde);
	pos = folio_pos(folio) + from;
	folio_lock(folio);
	err = ufs_prepare_chunk(folio, pos, to - from);
	BUG_ON(err);
	if (pde)
		/* Predecessor swallows the deleted record. */
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	ufs_commit_chunk(folio, pos, to - from);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
	err = ufs_handle_dirsync(inode);
out:
	folio_release_kmap(folio, kaddr);
	UFSD("EXIT\n");
	return err;
}
532 | ||
/*
 * Write the initial "." and ".." entries into a fresh directory
 * @inode whose parent is @dir (used by mkdir).  The first chunk is
 * zeroed, "." points at @inode and ".." (taking the rest of the
 * chunk) points at @dir.  Returns 0 or a negative errno.
 */
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio = filemap_grab_folio(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	int err;
	char *kaddr;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	err = ufs_prepare_chunk(folio, 0, chunk_size);
	if (err) {
		folio_unlock(folio);
		goto fail;
	}

	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, folio_size(folio));

	de = (struct ufs_dir_entry *)kaddr;

	/* "." — self reference. */
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	/* ".." — parent reference, record spans the rest of the chunk. */
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");
	kunmap_local(kaddr);

	/* ufs_commit_chunk() unlocks the folio. */
	ufs_commit_chunk(folio, 0, chunk_size);
	err = ufs_handle_dirsync(inode);
fail:
	folio_put(folio);
	return err;
}
577 | ||
/*
 * routine to check that the specified directory is empty (for rmdir)
 *
 * Returns 1 if the directory contains nothing but "." and "..",
 * 0 otherwise (including on a corrupted zero-length entry).
 * Unreadable folios are skipped rather than treated as fatal.
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct folio *folio;
	char *kaddr;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		struct ufs_dir_entry *de;

		kaddr = ufs_get_folio(inode, i, &folio);
		if (IS_ERR(kaddr))
			continue;

		de = (struct ufs_dir_entry *)kaddr;
		/* Last address at which a minimal entry could start. */
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __func__,
					"zero-length directory entry: "
					"kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen=ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					/* "." must refer to this inode. */
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	folio_release_kmap(folio, kaddr);
	return 0;
}
629 | ||
0bea8287 CB |
630 | static int ufs_dir_open(struct inode *inode, struct file *file) |
631 | { | |
632 | file->private_data = kzalloc(sizeof(u64), GFP_KERNEL); | |
633 | if (!file->private_data) | |
634 | return -ENOMEM; | |
635 | return 0; | |
636 | } | |
637 | ||
/* Free the iversion cookie allocated in ufs_dir_open(). */
static int ufs_dir_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
643 | ||
/*
 * Seek within a directory, keeping the per-open u64 cookie (stored in
 * file->private_data) in step via generic_llseek_cookie().
 */
static loff_t ufs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_llseek_cookie(file, offset, whence,
				     (u64 *)file->private_data);
}
649 | ||
/* Directory file operations: iteration via ufs_readdir(), with a
 * per-open cookie managed by the open/release/llseek handlers above. */
const struct file_operations ufs_dir_operations = {
	.open		= ufs_dir_open,
	.release	= ufs_dir_release,
	.read		= generic_read_dir,
	.iterate_shared	= ufs_readdir,
	.fsync		= generic_file_fsync,
	.llseek		= ufs_dir_llseek,
};