/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <[email protected]>
 *
 * Fix by Harrison Xing <[email protected]>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <[email protected]>.
 * xattr consolidation Copyright (c) 2004 James Morris <[email protected]>,
 * Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
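/*
 * On-disk format, roughly (the authoritative struct definitions live in
 * xattr.h): each EA block starts with an ext2_xattr_header carrying a
 * magic number, a reference count, a block count and a hash of the whole
 * block, followed by ext2_xattr_entry descriptors. Each descriptor records
 * the name length, the name index (which selects one of the handlers
 * below), the offset and size of the value within this block, a per-entry
 * hash, and finally the name itself. e_value_block is expected to be zero;
 * values always live in the same block as their descriptors.
 */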

#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static struct mb_cache *ext2_xattr_cache;

static struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &ext2_xattr_acl_access_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED] = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY] = &ext2_xattr_security_handler,
#endif
};

struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&ext2_xattr_acl_access_handler,
	&ext2_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

static inline struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
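/*
 * A minimal caller sketch of the convention described above (the attribute
 * name "foo" is made up for the example): query the size with a NULL
 * buffer first, then fetch the value into a buffer of that size.
 *
 *	size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (size > 0) {
 *		buffer = kmalloc(size, GFP_KERNEL);
 *		if (buffer)
 *			size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *					      "foo", buffer, size);
 *	}
 */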
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	/* find named attribute */
	name_len = strlen(name);

	error = -ERANGE;
	if (name_len > 255)
		goto cleanup;
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	/* Check the remaining name entries */
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(inode, buffer, rest,
						    entry->e_name,
						    entry->e_name_len);
			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				buffer += size;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_sem: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry->d_inode, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	lock_super(sb);
	EXT2_SB(sb)->s_es->s_feature_compat |=
		cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
	sb->s_dirt = 1;
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
	unlock_super(sb);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Buffer
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE specify that an
 * extended attribute must already exist, or must not yet exist,
 * respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
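/*
 * Illustrative calls for the flag conventions described above (the
 * attribute name "foo" and value "bar" are made up for the example):
 *
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", "bar", 3, 0);
 *		creates or replaces the attribute;
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", "bar", 3,
 *		       XATTR_CREATE);
 *		fails with -EEXIST if the attribute already exists;
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0,
 *		       XATTR_REPLACE);
 *		removes the attribute, failing with -ENODATA if it is absent.
 */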
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (IS_RDONLY(inode))
		return -EROFS;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(here)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name, name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			free += EXT2_XATTR_SIZE(size);
		}
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		struct mb_cache_entry *ce;

		/* assert(header == HDR(bh)); */
		ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
					bh->b_blocknr);
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			ea_bdebug(bh, "modifying in-place");
			if (ce)
				mb_cache_entry_free(ce);
			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			if (ce)
				mb_cache_entry_release(ce);
			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kmalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		memset(header, 0, sb->s_blocksize);
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	brelse(bh);
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
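/*
 * Three cases are handled below: an identical block already exists in the
 * cache and is shared (its refcount is bumped), the modified block is the
 * one the inode already owned and is reused in place, or a fresh block is
 * allocated near the inode's block group and the new header is copied into
 * it. Passing header == NULL drops the inode's EA block reference entirely.
 */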
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = -EDQUOT;
				if (DQUOT_ALLOC_BLOCK(inode, 1)) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				HDR(new_bh)->h_refcount = cpu_to_le32(1 +
					le32_to_cpu(HDR(new_bh)->h_refcount));
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(new_bh);
		} else {
			/* We need to allocate a new block */
			int goal = le32_to_cpu(EXT2_SB(sb)->s_es->
						s_first_data_block) +
				   EXT2_I(inode)->i_block_group *
				   EXT2_BLOCKS_PER_GROUP(sb);
			int block = ext2_new_block(inode, goal,
						   NULL, NULL, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
				ext2_free_blocks(inode, block, 1);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode)) {
		error = ext2_sync_inode(inode);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh)
				DQUOT_FREE_BLOCK(inode, 1);
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		struct mb_cache_entry *ce;

		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
					old_bh->b_blocknr);
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			/* Free the old block. */
			if (ce)
				mb_cache_entry_free(ce);
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			HDR(old_bh)->h_refcount = cpu_to_le32(
				le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
			if (ce)
				mb_cache_entry_release(ce);
			DQUOT_FREE_BLOCK(inode, 1);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct mb_cache_entry *ce;

	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		if (ce)
			mb_cache_entry_free(ce);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
	} else {
		HDR(bh)->h_refcount = cpu_to_le32(
			le32_to_cpu(HDR(bh)->h_refcount) - 1);
		if (ce)
			mb_cache_entry_release(ce);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		DQUOT_FREE_BLOCK(inode, 1);
	}
	ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount) - 1);
	unlock_buffer(bh);
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext2_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(ext2_xattr_cache, sb->s_bdev);
}


/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext2_xattr_cache);
	if (!ce)
		return -ENOMEM;
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache (%d cache entries)",
				atomic_read(&ext2_xattr_cache->c_entry_count));
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
			  atomic_read(&ext2_xattr_cache->c_entry_count));
		mb_cache_entry_release(ce);
	}
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
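/*
 * Note that the buffer head is returned with its bh lock held, so that the
 * caller can bump the block's reference count before anyone else modifies
 * it; ext2_xattr_set2() drops the lock once the refcount has been updated.
 */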
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext2_xattr_cache, 0,
				       inode->i_sb->s_bdev, hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
			lock_buffer(bh);
			if (le32_to_cpu(HDR(bh)->h_refcount) >
			    EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_release(ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
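/*
 * The name bytes are folded into the hash with a 5-bit rotate-and-xor, and
 * the (padded) value words with a 16-bit rotate-and-xor, i.e. roughly
 *
 *	hash = (hash << 5) ^ (hash >> 27) ^ c;		for each name byte c
 *	hash = (hash << 16) ^ (hash >> 16) ^ w;		for each value word w
 *
 * using the NAME_HASH_SHIFT and VALUE_HASH_SHIFT constants defined above.
 */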
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
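/*
 * The block hash is folded together from the individual entry hashes with
 * a 16-bit rotate-and-xor (BLOCK_HASH_SHIFT). If any entry has a zero hash,
 * the block hash is forced to zero as well, which marks the block as
 * unshareable; ext2_xattr_cache_find() refuses to share such blocks.
 */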
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

int __init
init_ext2_xattr(void)
{
	ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
		sizeof(struct mb_cache_entry) +
		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
	if (!ext2_xattr_cache)
		return -ENOMEM;
	return 0;
}

void
exit_ext2_xattr(void)
{
	mb_cache_destroy(ext2_xattr_cache);
}