// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/swap.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;
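
/*
 * On-disk layout, for reference: a directory is an array of fixed-size
 * slots of sbi->s_dirsize bytes. Each slot starts with the inode number
 * (16-bit for V1/V2 via struct minix_dir_entry, 32-bit for V3 via
 * struct minix3_dir_entry) followed by a name padded with NULs to
 * sbi->s_namelen. An inode number of zero marks a free slot.
 */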

static int minix_readdir(struct file *, struct dir_context *);

const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= minix_readdir,
	.fsync		= generic_file_fsync,
};

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
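/*
 * For example (assuming PAGE_SIZE == 4096): a directory with
 * i_size == 4160 yields 4096 for page 0 and 64 for page 1, so the
 * scan loops below never walk past i_size.
 */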
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_SHIFT))
		last_byte = inode->i_size & (PAGE_SIZE - 1);
	return last_byte;
}

static void dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;

	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	unlock_page(page);
}

static int minix_handle_dirsync(struct inode *dir)
{
	int err;

	err = filemap_write_and_wait(dir->i_mapping);
	if (!err)
		err = sync_inode_metadata(dir, 1);
	return err;
}
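
/*
 * Map page @n of the directory @dir. On success the caller gets a
 * kmap_local address back and *p points to the page; both must be
 * released with unmap_and_put_page() when the caller is done.
 */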
static void *dir_get_page(struct inode *dir, unsigned long n, struct page **p)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (IS_ERR(page))
		return ERR_CAST(page);
	*p = page;
	return kmap_local_page(page);
}

static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}

static int minix_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	unsigned long npages = dir_pages(inode);
	unsigned long pos = ctx->pos;
	unsigned offset;
	unsigned long n;

	ctx->pos = pos = ALIGN(pos, chunk_size);
	if (pos >= inode->i_size)
		return 0;

	offset = pos & ~PAGE_MASK;
	n = pos >> PAGE_SHIFT;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page;

		kaddr = dir_get_page(inode, n, &page);
		if (IS_ERR(kaddr))
			continue;
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			const char *name;
			__u32 inumber;

			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				unsigned l = strnlen(name, sbi->s_namelen);
				if (!dir_emit(ctx, name, l,
					      inumber, DT_UNKNOWN)) {
					unmap_and_put_page(page, p);
					return 0;
				}
			}
			ctx->pos += chunk_size;
		}
		unmap_and_put_page(page, kaddr);
	}
	return 0;
}
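
/*
 * Directory names are fixed-width and NUL-padded, so a candidate name
 * of length @len matches only if the on-disk name is either exactly
 * maxlen bytes long or terminated right at @len, and the first @len
 * bytes agree. For example, with s_namelen == 30, "foo" matches the
 * stored "foo\0\0..." but not "foobar", whose fourth byte is 'b'.
 */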
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). It does NOT read the inode of the
 * entry - you'll have to do that yourself if you want to.
 */
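/*
 * Note: on success the matching page is still mapped; the caller must
 * release it with unmap_and_put_page(*res_page, de) when finished, as
 * minix_inode_by_name() below does.
 */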
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = d_inode(dentry->d_parent);
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p, *namx;
	__u32 inumber;

	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		kaddr = dir_get_page(dir, n, &page);
		if (IS_ERR(kaddr))
			continue;
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		unmap_and_put_page(page, kaddr);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}

int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	char *namx = NULL;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		kaddr = dir_get_page(dir, n, &page);
		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		lock_page(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				continue;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		unmap_and_put_page(page, kaddr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + offset_in_page(p);
	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err)
		goto out_unlock;
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	dir_commit_chunk(page, pos, sbi->s_dirsize);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	err = minix_handle_dirsync(dir);
out_put:
	unmap_and_put_page(page, kaddr);
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
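
/*
 * Remove an entry by zeroing its inode number in place; the slot stays
 * allocated in the directory and can be reused later by
 * minix_add_link().
 */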
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = page_offset(page) + offset_in_page(de);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	unsigned len = sbi->s_dirsize;
	int err;

	lock_page(page);
	err = minix_prepare_chunk(page, pos, len);
	if (err) {
		unlock_page(page);
		return err;
	}
	if (sbi->s_version == MINIX_V3)
		((minix3_dirent *)de)->inode = 0;
	else
		de->inode = 0;
	dir_commit_chunk(page, pos, len);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);
	return minix_handle_dirsync(inode);
}

int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_local_page(page);
	memset(kaddr, 0, PAGE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_local(kaddr);

	dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
	err = minix_handle_dirsync(inode);
fail:
	put_page(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name, *kaddr;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *limit;

		kaddr = dir_get_page(inode, i, &page);
		if (IS_ERR(kaddr))
			continue;

		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		unmap_and_put_page(page, kaddr);
	}
	return 1;

not_empty:
	unmap_and_put_page(page, kaddr);
	return 0;
}

/* Releases the page */
int minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = page->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) + offset_in_page(de);
	int err;

	lock_page(page);
	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		return err;
	}
	if (sbi->s_version == MINIX_V3)
		((minix3_dirent *)de)->inode = inode->i_ino;
	else
		de->inode = inode->i_ino;
	dir_commit_chunk(page, pos, sbi->s_dirsize);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
	return minix_handle_dirsync(dir);
}
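
/*
 * Grab the ".." entry of @dir (the second slot in page 0). On success
 * *p holds the mapped page, which the caller must release with
 * unmap_and_put_page().
 */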
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = dir_get_page(dir, 0, p);

	if (!IS_ERR(de))
		return minix_next_entry(de, sbi);
	return NULL;
}

ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		struct address_space *mapping = page->mapping;
		struct inode *inode = mapping->host;
		struct minix_sb_info *sbi = minix_sb(inode->i_sb);

		if (sbi->s_version == MINIX_V3)
			res = ((minix3_dirent *) de)->inode;
		else
			res = de->inode;
		unmap_and_put_page(page, de);
	}
	return res;
}