/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 */

#include "minix.h"
#include <linux/highmem.h>
#include <linux/smp_lock.h>

typedef struct minix_dir_entry minix_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

const struct file_operations minix_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= minix_sync_file,
};

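/*
 * Release a directory page obtained from dir_get_page(): drop the
 * kmap() mapping and the page cache reference.
 */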
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}

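/*
 * Number of pages spanned by the directory, i.e. i_size rounded up
 * to a page boundary.
 */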
static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

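/*
 * Commit a modified chunk of a directory page: hand it to the address
 * space's commit_write() and, for a DIRSYNC directory, write the page
 * out synchronously; otherwise just unlock it.
 */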
static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
{
	struct inode *dir = (struct inode *)page->mapping->host;
	int err = 0;
	page->mapping->a_ops->commit_write(NULL, page, from, to);
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

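/*
 * Read page n of the directory through the page cache and return it
 * kmap()ed.  The caller must release it with dir_put_page().
 */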
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
	}
	return page;

fail:
	dir_put_page(page);
	return ERR_PTR(-EIO);
}

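/*
 * Step to the next fixed-size directory entry within a page.
 */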
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}

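/*
 * readdir: walk the directory a page at a time and feed every in-use
 * entry to filldir().  f_pos is encoded as the page number shifted by
 * PAGE_CACHE_SHIFT plus the byte offset of the entry within that page.
 */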
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;

	lock_kernel();

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit ; p = minix_next_entry(p, sbi)) {
			minix_dirent *de = (minix_dirent *)p;
			if (de->inode) {
				int over;
				unsigned l = strnlen(de->name,sbi->s_namelen);

				offset = p - kaddr;
				over = filldir(dirent, de->name, l,
						(n<<PAGE_CACHE_SHIFT) | offset,
						de->inode, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	unlock_kernel();
	return 0;
}

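/*
 * Compare a looked-up name of length 'len' against an on-disk name
 * field of up to 'maxlen' bytes: they match if the first 'len' bytes
 * are equal and the on-disk name does not continue past them.
 */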
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found (as *res_page), and the
 * entry itself.  It does NOT read the inode of the entry - you'll have
 * to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct minix_dir_entry *de;

	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr;
		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		de = (struct minix_dir_entry *) kaddr;
		kaddr += minix_last_byte(dir, n) - sbi->s_dirsize;
		for ( ; (char *) de <= kaddr ; de = minix_next_entry(de,sbi)) {
			if (!de->inode)
				continue;
			if (namecompare(namelen,sbi->s_namelen,name,de->name))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return de;
}

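/*
 * Add a directory entry for 'dentry' referring to 'inode'.  The first
 * empty slot is reused; if none exists the directory is extended by one
 * entry.  Fails with -EEXIST if the name is already present.
 */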
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	struct minix_dir_entry * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned from, to;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		de = (minix_dirent *)kaddr;
		kaddr += PAGE_CACHE_SIZE - sbi->s_dirsize;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				de->inode = 0;
				goto got_it;
			}
			if (!de->inode)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen,sbi->s_namelen,name,de->name))
				goto out_unlock;
			de = minix_next_entry(de, sbi);
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	from = (char*)de - (char*)page_address(page);
	to = from + sbi->s_dirsize;
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err)
		goto out_unlock;
	memcpy (de->name, name, namelen);
	memset (de->name + namelen, 0, sbi->s_dirsize - namelen - 2);
	de->inode = inode->i_ino;
	err = dir_commit_chunk(page, from, to);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

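/*
 * Remove the given entry from its directory page by clearing its inode
 * number, then commit the change.  Releases the page.
 */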
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = (struct inode*)mapping->host;
	char *kaddr = page_address(page);
	unsigned from = (char*)de - kaddr;
	unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
	int err;

	lock_page(page);
	err = mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err == 0) {
		de->inode = 0;
		err = dir_commit_chunk(page, from, to);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}

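/*
 * Initialise a freshly created directory 'inode' with its "." and ".."
 * entries ("." pointing at itself, ".." at the parent 'dir').
 */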
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	struct minix_sb_info * sbi = minix_sb(inode->i_sb);
	struct minix_dir_entry * de;
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	de = (struct minix_dir_entry *)kaddr;
	de->inode = inode->i_ino;
	strcpy(de->name,".");
	de = minix_next_entry(de, sbi);
	de->inode = dir->i_ino;
	strcpy(de->name,"..");
	kunmap_atomic(kaddr, KM_USER0);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		minix_dirent * de;
		page = dir_get_page(inode, i);

		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		de = (minix_dirent *)kaddr;
		kaddr += minix_last_byte(inode, i) - sbi->s_dirsize;

		while ((char *)de <= kaddr) {
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (!de->name[1]) {
					if (de->inode != inode->i_ino)
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
				else if (de->name[2])
					goto not_empty;
			}
			de = minix_next_entry(de, sbi);
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}

/* Point an existing directory entry at 'inode'.  Releases the page. */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = (struct inode*)page->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	unsigned from = (char *)de-(char*)page_address(page);
	unsigned to = from + sbi->s_dirsize;
	int err;

	lock_page(page);
	err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
	if (err == 0) {
		de->inode = inode->i_ino;
		err = dir_commit_chunk(page, from, to);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

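/*
 * Return the ".." entry of 'dir' (the second entry on page 0) and the
 * page it lives on via *p.  The caller must release the page.
 */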
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}

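/*
 * Look up the entry named by 'dentry' in its parent directory and
 * return its inode number, or 0 if the name is not found.
 */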
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		res = de->inode;
		dir_put_page(page);
	}
	return res;
}