/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <[email protected]>
 *
 * Fix by Harrison Xing <[email protected]>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <[email protected]>.
 * xattr consolidation Copyright (c) 2004 James Morris <[email protected]>,
 * Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
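
/*
 * For orientation, the on-disk structures used throughout this file are
 * declared in xattr.h. A rough sketch follows (the authoritative
 * definitions, including the exact field types, live in xattr.h):
 *
 *      struct ext2_xattr_header {
 *              __le32  h_magic;        -- EXT2_XATTR_MAGIC
 *              __le32  h_refcount;     -- number of inodes sharing the block
 *              __le32  h_blocks;       -- number of disk blocks used (1)
 *              __le32  h_hash;         -- hash over all entries
 *      };
 *
 *      struct ext2_xattr_entry {
 *              __u8    e_name_len;     -- length of the attribute name
 *              __u8    e_name_index;   -- selects the attribute handler
 *              __le16  e_value_offs;   -- offset of the value in the block
 *              __le32  e_value_block;  -- unused here, must be zero
 *              __le32  e_value_size;   -- size of the attribute value
 *              __le32  e_hash;         -- hash over name and value
 *              char    e_name[];       -- attribute name, not null-terminated
 *      };
 *
 * Entry descriptors and values are rounded up to EXT2_XATTR_PAD byte
 * boundaries by the EXT2_XATTR_LEN() and EXT2_XATTR_SIZE() macros.
 */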

#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
                printk(KERN_DEBUG "inode %s:%ld: ", \
                        inode->i_sb->s_id, inode->i_ino); \
                printk(f); \
                printk("\n"); \
        } while (0)
# define ea_bdebug(bh, f...) do { \
                char b[BDEVNAME_SIZE]; \
                printk(KERN_DEBUG "block %s:%lu: ", \
                        bdevname(bh->b_bdev, b), \
                        (unsigned long) bh->b_blocknr); \
                printk(f); \
                printk("\n"); \
        } while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
                           struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
                                                 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
                              struct ext2_xattr_entry *);

static struct mb_cache *ext2_xattr_cache;

static const struct xattr_handler *ext2_xattr_handler_map[] = {
        [EXT2_XATTR_INDEX_USER]              = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
        [EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext2_xattr_acl_access_handler,
        [EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
#endif
        [EXT2_XATTR_INDEX_TRUSTED]           = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
        [EXT2_XATTR_INDEX_SECURITY]          = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
        &ext2_xattr_user_handler,
        &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
        &ext2_xattr_acl_access_handler,
        &ext2_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
        &ext2_xattr_security_handler,
#endif
        NULL
};

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
        const struct xattr_handler *handler = NULL;

        if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
                handler = ext2_xattr_handler_map[name_index];
        return handler;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
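/*
 * A typical calling pattern (illustrative sketch only; the attribute name
 * "foo" and the error handling are made up for the example):
 *
 *      int err = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *                               NULL, 0);      -- query required size
 *      if (err < 0)
 *              return err;
 *      err = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *                           buffer, err);      -- copy the value
 */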
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
               void *buffer, size_t buffer_size)
{
        struct buffer_head *bh = NULL;
        struct ext2_xattr_entry *entry;
        size_t name_len, size;
        char *end;
        int error;

        ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
                  name_index, name, buffer, (long)buffer_size);

        if (name == NULL)
                return -EINVAL;
        down_read(&EXT2_I(inode)->xattr_sem);
        error = -ENODATA;
        if (!EXT2_I(inode)->i_file_acl)
                goto cleanup;
        ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
        bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
        error = -EIO;
        if (!bh)
                goto cleanup;
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
        end = bh->b_data + bh->b_size;
        if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
            HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:      ext2_error(inode->i_sb, "ext2_xattr_get",
                        "inode %ld: bad block %d", inode->i_ino,
                        EXT2_I(inode)->i_file_acl);
                error = -EIO;
                goto cleanup;
        }
        /* find named attribute */
        name_len = strlen(name);

        error = -ERANGE;
        if (name_len > 255)
                goto cleanup;
        entry = FIRST_ENTRY(bh);
        while (!IS_LAST_ENTRY(entry)) {
                struct ext2_xattr_entry *next =
                        EXT2_XATTR_NEXT(entry);
                if ((char *)next >= end)
                        goto bad_block;
                if (name_index == entry->e_name_index &&
                    name_len == entry->e_name_len &&
                    memcmp(name, entry->e_name, name_len) == 0)
                        goto found;
                entry = next;
        }
        if (ext2_xattr_cache_insert(bh))
                ea_idebug(inode, "cache insert failed");
        error = -ENODATA;
        goto cleanup;
found:
        /* check the buffer size */
        if (entry->e_value_block != 0)
                goto bad_block;
        size = le32_to_cpu(entry->e_value_size);
        if (size > inode->i_sb->s_blocksize ||
            le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
                goto bad_block;

        if (ext2_xattr_cache_insert(bh))
                ea_idebug(inode, "cache insert failed");
        if (buffer) {
                error = -ERANGE;
                if (size > buffer_size)
                        goto cleanup;
                /* return value of attribute */
                memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
                        size);
        }
        error = size;

cleanup:
        brelse(bh);
        up_read(&EXT2_I(inode)->xattr_sem);

        return error;
}

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
        struct inode *inode = dentry->d_inode;
        struct buffer_head *bh = NULL;
        struct ext2_xattr_entry *entry;
        char *end;
        size_t rest = buffer_size;
        int error;

        ea_idebug(inode, "buffer=%p, buffer_size=%ld",
                  buffer, (long)buffer_size);

        down_read(&EXT2_I(inode)->xattr_sem);
        error = 0;
        if (!EXT2_I(inode)->i_file_acl)
                goto cleanup;
        ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
        bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
        error = -EIO;
        if (!bh)
                goto cleanup;
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
        end = bh->b_data + bh->b_size;
        if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
            HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:      ext2_error(inode->i_sb, "ext2_xattr_list",
                        "inode %ld: bad block %d", inode->i_ino,
                        EXT2_I(inode)->i_file_acl);
                error = -EIO;
                goto cleanup;
        }

        /* check the on-disk data structure */
        entry = FIRST_ENTRY(bh);
        while (!IS_LAST_ENTRY(entry)) {
                struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

                if ((char *)next >= end)
                        goto bad_block;
                entry = next;
        }
        if (ext2_xattr_cache_insert(bh))
                ea_idebug(inode, "cache insert failed");

        /* list the attribute names */
        for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
             entry = EXT2_XATTR_NEXT(entry)) {
                const struct xattr_handler *handler =
                        ext2_xattr_handler(entry->e_name_index);

                if (handler) {
                        size_t size = handler->list(dentry, buffer, rest,
                                                    entry->e_name,
                                                    entry->e_name_len,
                                                    handler->flags);
                        if (buffer) {
                                if (size > rest) {
                                        error = -ERANGE;
                                        goto cleanup;
                                }
                                buffer += size;
                        }
                        rest -= size;
                }
        }
        error = buffer_size - rest;  /* total size */

cleanup:
        brelse(bh);
        up_read(&EXT2_I(inode)->xattr_sem);

        return error;
}

/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
        return ext2_xattr_list(dentry, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
        if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
                return;

        spin_lock(&EXT2_SB(sb)->s_lock);
        EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
        spin_unlock(&EXT2_SB(sb)->s_lock);
        sb->s_dirt = 1;
        mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
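/*
 * Illustrative examples (the name and values are made up):
 *
 *      -- create "foo" in the user namespace, -EEXIST if it already exists:
 *      ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", "bar", 3,
 *                     XATTR_CREATE);
 *
 *      -- replace an existing attribute, -ENODATA if it does not exist:
 *      ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", "baz", 3,
 *                     XATTR_REPLACE);
 *
 *      -- remove the attribute (a NULL value means removal):
 *      ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0, 0);
 */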
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
               const void *value, size_t value_len, int flags)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh = NULL;
        struct ext2_xattr_header *header = NULL;
        struct ext2_xattr_entry *here, *last;
        size_t name_len, free, min_offs = sb->s_blocksize;
        int not_found = 1, error;
        char *end;

        /*
         * header -- Points either into bh, or to a temporarily
         *           allocated buffer.
         * here -- The named entry found, or the place for inserting, within
         *         the block pointed to by header.
         * last -- Points right after the last named entry within the block
         *         pointed to by header.
         * min_offs -- The offset of the first value (values are aligned
         *             towards the end of the block).
         * end -- Points right after the block pointed to by header.
         */

        ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
                  name_index, name, value, (long)value_len);

        if (value == NULL)
                value_len = 0;
        if (name == NULL)
                return -EINVAL;
        name_len = strlen(name);
        if (name_len > 255 || value_len > sb->s_blocksize)
                return -ERANGE;
        down_write(&EXT2_I(inode)->xattr_sem);
        if (EXT2_I(inode)->i_file_acl) {
                /* The inode already has an extended attribute block. */
                bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
                error = -EIO;
                if (!bh)
                        goto cleanup;
                ea_bdebug(bh, "b_count=%d, refcount=%d",
                        atomic_read(&(bh->b_count)),
                        le32_to_cpu(HDR(bh)->h_refcount));
                header = HDR(bh);
                end = bh->b_data + bh->b_size;
                if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
                    header->h_blocks != cpu_to_le32(1)) {
bad_block:              ext2_error(sb, "ext2_xattr_set",
                                "inode %ld: bad block %d", inode->i_ino,
                                EXT2_I(inode)->i_file_acl);
                        error = -EIO;
                        goto cleanup;
                }
                /* Find the named attribute. */
                here = FIRST_ENTRY(bh);
                while (!IS_LAST_ENTRY(here)) {
                        struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
                        if ((char *)next >= end)
                                goto bad_block;
                        if (!here->e_value_block && here->e_value_size) {
                                size_t offs = le16_to_cpu(here->e_value_offs);
                                if (offs < min_offs)
                                        min_offs = offs;
                        }
                        not_found = name_index - here->e_name_index;
                        if (!not_found)
                                not_found = name_len - here->e_name_len;
                        if (!not_found)
                                not_found = memcmp(name, here->e_name, name_len);
                        if (not_found <= 0)
                                break;
                        here = next;
                }
                last = here;
                /* We still need to compute min_offs and last. */
                while (!IS_LAST_ENTRY(last)) {
                        struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
                        if ((char *)next >= end)
                                goto bad_block;
                        if (!last->e_value_block && last->e_value_size) {
                                size_t offs = le16_to_cpu(last->e_value_offs);
                                if (offs < min_offs)
                                        min_offs = offs;
                        }
                        last = next;
                }

                /* Check whether we have enough space left. */
                free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
        } else {
                /* We will use a new extended attribute block. */
                free = sb->s_blocksize -
                        sizeof(struct ext2_xattr_header) - sizeof(__u32);
                here = last = NULL;  /* avoid gcc uninitialized warning. */
        }

        if (not_found) {
                /* Request to remove a nonexistent attribute? */
                error = -ENODATA;
                if (flags & XATTR_REPLACE)
                        goto cleanup;
                error = 0;
                if (value == NULL)
                        goto cleanup;
        } else {
                /* Request to create an existing attribute? */
                error = -EEXIST;
                if (flags & XATTR_CREATE)
                        goto cleanup;
                if (!here->e_value_block && here->e_value_size) {
                        size_t size = le32_to_cpu(here->e_value_size);

                        if (le16_to_cpu(here->e_value_offs) + size >
                            sb->s_blocksize || size > sb->s_blocksize)
                                goto bad_block;
                        free += EXT2_XATTR_SIZE(size);
                }
                free += EXT2_XATTR_LEN(name_len);
        }
        error = -ENOSPC;
        if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
                goto cleanup;

        /* Here we know that we can set the new attribute. */

        if (header) {
                struct mb_cache_entry *ce;

                /* assert(header == HDR(bh)); */
                ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
                                        bh->b_blocknr);
                lock_buffer(bh);
                if (header->h_refcount == cpu_to_le32(1)) {
                        ea_bdebug(bh, "modifying in-place");
                        if (ce)
                                mb_cache_entry_free(ce);
                        /* keep the buffer locked while modifying it. */
                } else {
                        int offset;

                        if (ce)
                                mb_cache_entry_release(ce);
                        unlock_buffer(bh);
                        ea_bdebug(bh, "cloning");
                        header = kmalloc(bh->b_size, GFP_KERNEL);
                        error = -ENOMEM;
                        if (header == NULL)
                                goto cleanup;
                        memcpy(header, HDR(bh), bh->b_size);
                        header->h_refcount = cpu_to_le32(1);

                        offset = (char *)here - bh->b_data;
                        here = ENTRY((char *)header + offset);
                        offset = (char *)last - bh->b_data;
                        last = ENTRY((char *)header + offset);
                }
        } else {
                /* Allocate a buffer where we construct the new block. */
                header = kzalloc(sb->s_blocksize, GFP_KERNEL);
                error = -ENOMEM;
                if (header == NULL)
                        goto cleanup;
                end = (char *)header + sb->s_blocksize;
                header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
                header->h_blocks = header->h_refcount = cpu_to_le32(1);
                last = here = ENTRY(header+1);
        }

        /* Iff we are modifying the block in-place, bh is locked here. */

        if (not_found) {
                /* Insert the new name. */
                size_t size = EXT2_XATTR_LEN(name_len);
                size_t rest = (char *)last - (char *)here;
                memmove((char *)here + size, here, rest);
                memset(here, 0, size);
                here->e_name_index = name_index;
                here->e_name_len = name_len;
                memcpy(here->e_name, name, name_len);
        } else {
                if (!here->e_value_block && here->e_value_size) {
                        char *first_val = (char *)header + min_offs;
                        size_t offs = le16_to_cpu(here->e_value_offs);
                        char *val = (char *)header + offs;
                        size_t size = EXT2_XATTR_SIZE(
                                le32_to_cpu(here->e_value_size));

                        if (size == EXT2_XATTR_SIZE(value_len)) {
                                /* The old and the new value have the same
                                   size. Just replace. */
                                here->e_value_size = cpu_to_le32(value_len);
                                memset(val + size - EXT2_XATTR_PAD, 0,
                                       EXT2_XATTR_PAD); /* Clear pad bytes. */
                                memcpy(val, value, value_len);
                                goto skip_replace;
                        }

                        /* Remove the old value. */
                        memmove(first_val + size, first_val, val - first_val);
                        memset(first_val, 0, size);
                        here->e_value_offs = 0;
                        min_offs += size;

                        /* Adjust all value offsets. */
                        last = ENTRY(header+1);
                        while (!IS_LAST_ENTRY(last)) {
                                size_t o = le16_to_cpu(last->e_value_offs);
                                if (!last->e_value_block && o < offs)
                                        last->e_value_offs =
                                                cpu_to_le16(o + size);
                                last = EXT2_XATTR_NEXT(last);
                        }
                }
                if (value == NULL) {
                        /* Remove the old name. */
                        size_t size = EXT2_XATTR_LEN(name_len);
                        last = ENTRY((char *)last - size);
                        memmove(here, (char*)here + size,
                                (char*)last - (char*)here);
                        memset(last, 0, size);
                }
        }

        if (value != NULL) {
                /* Insert the new value. */
                here->e_value_size = cpu_to_le32(value_len);
                if (value_len) {
                        size_t size = EXT2_XATTR_SIZE(value_len);
                        char *val = (char *)header + min_offs - size;
                        here->e_value_offs =
                                cpu_to_le16((char *)val - (char *)header);
                        memset(val + size - EXT2_XATTR_PAD, 0,
                               EXT2_XATTR_PAD); /* Clear the pad bytes. */
                        memcpy(val, value, value_len);
                }
        }

skip_replace:
        if (IS_LAST_ENTRY(ENTRY(header+1))) {
                /* This block is now empty. */
                if (bh && header == HDR(bh))
                        unlock_buffer(bh);  /* we were modifying in-place. */
                error = ext2_xattr_set2(inode, bh, NULL);
        } else {
                ext2_xattr_rehash(header, here);
                if (bh && header == HDR(bh))
                        unlock_buffer(bh);  /* we were modifying in-place. */
                error = ext2_xattr_set2(inode, bh, header);
        }

cleanup:
        brelse(bh);
        if (!(bh && header == HDR(bh)))
                kfree(header);
        up_write(&EXT2_I(inode)->xattr_sem);

        return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                struct ext2_xattr_header *header)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *new_bh = NULL;
        int error;

        if (header) {
                new_bh = ext2_xattr_cache_find(inode, header);
                if (new_bh) {
                        /* We found an identical block in the cache. */
                        if (new_bh == old_bh) {
                                ea_bdebug(new_bh, "keeping this block");
                        } else {
                                /* The old block is released after updating
                                   the inode. */
                                ea_bdebug(new_bh, "reusing block");

                                error = dquot_alloc_block(inode, 1);
                                if (error) {
                                        unlock_buffer(new_bh);
                                        goto cleanup;
                                }
                                le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
                                ea_bdebug(new_bh, "refcount now=%d",
                                        le32_to_cpu(HDR(new_bh)->h_refcount));
                        }
                        unlock_buffer(new_bh);
                } else if (old_bh && header == HDR(old_bh)) {
                        /* Keep this block. No need to lock the block as we
                           don't need to change the reference count. */
                        new_bh = old_bh;
                        get_bh(new_bh);
                        ext2_xattr_cache_insert(new_bh);
                } else {
                        /* We need to allocate a new block */
                        ext2_fsblk_t goal = ext2_group_first_block_no(sb,
                                                EXT2_I(inode)->i_block_group);
                        int block = ext2_new_block(inode, goal, &error);
                        if (error)
                                goto cleanup;
                        ea_idebug(inode, "creating block %d", block);

                        new_bh = sb_getblk(sb, block);
                        if (!new_bh) {
                                ext2_free_blocks(inode, block, 1);
                                mark_inode_dirty(inode);
                                error = -EIO;
                                goto cleanup;
                        }
                        lock_buffer(new_bh);
                        memcpy(new_bh->b_data, header, new_bh->b_size);
                        set_buffer_uptodate(new_bh);
                        unlock_buffer(new_bh);
                        ext2_xattr_cache_insert(new_bh);

                        ext2_xattr_update_super_block(sb);
                }
                mark_buffer_dirty(new_bh);
                if (IS_SYNC(inode)) {
                        sync_dirty_buffer(new_bh);
                        error = -EIO;
                        if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
                                goto cleanup;
                }
        }

        /* Update the inode. */
        EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
        inode->i_ctime = CURRENT_TIME_SEC;
        if (IS_SYNC(inode)) {
                error = sync_inode_metadata(inode, 1);
                /* In case sync failed due to ENOSPC the inode was actually
                 * written (only some dirty data were not) so we just proceed
                 * as if nothing happened and cleanup the unused block */
                if (error && error != -ENOSPC) {
                        if (new_bh && new_bh != old_bh) {
                                dquot_free_block_nodirty(inode, 1);
                                mark_inode_dirty(inode);
                        }
                        goto cleanup;
                }
        } else
                mark_inode_dirty(inode);

        error = 0;
        if (old_bh && old_bh != new_bh) {
                struct mb_cache_entry *ce;

                /*
                 * If there was an old block and we are no longer using it,
                 * release the old block.
                 */
                ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
                                        old_bh->b_blocknr);
                lock_buffer(old_bh);
                if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
                        /* Free the old block. */
                        if (ce)
                                mb_cache_entry_free(ce);
                        ea_bdebug(old_bh, "freeing");
                        ext2_free_blocks(inode, old_bh->b_blocknr, 1);
                        mark_inode_dirty(inode);
                        /* We let our caller release old_bh, so we
                         * need to duplicate the buffer before. */
                        get_bh(old_bh);
                        bforget(old_bh);
                } else {
                        /* Decrement the refcount only. */
                        le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
                        if (ce)
                                mb_cache_entry_release(ce);
                        dquot_free_block_nodirty(inode, 1);
                        mark_inode_dirty(inode);
                        mark_buffer_dirty(old_bh);
                        ea_bdebug(old_bh, "refcount now=%d",
                                le32_to_cpu(HDR(old_bh)->h_refcount));
                }
                unlock_buffer(old_bh);
        }

cleanup:
        brelse(new_bh);

        return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
        struct buffer_head *bh = NULL;
        struct mb_cache_entry *ce;

        down_write(&EXT2_I(inode)->xattr_sem);
        if (!EXT2_I(inode)->i_file_acl)
                goto cleanup;
        bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
        if (!bh) {
                ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
                        "inode %ld: block %d read error", inode->i_ino,
                        EXT2_I(inode)->i_file_acl);
                goto cleanup;
        }
        ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
        if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
            HDR(bh)->h_blocks != cpu_to_le32(1)) {
                ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
                        "inode %ld: bad block %d", inode->i_ino,
                        EXT2_I(inode)->i_file_acl);
                goto cleanup;
        }
        ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
        lock_buffer(bh);
        if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
                if (ce)
                        mb_cache_entry_free(ce);
                ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
                get_bh(bh);
                bforget(bh);
                unlock_buffer(bh);
        } else {
                le32_add_cpu(&HDR(bh)->h_refcount, -1);
                if (ce)
                        mb_cache_entry_release(ce);
                ea_bdebug(bh, "refcount now=%d",
                        le32_to_cpu(HDR(bh)->h_refcount));
                unlock_buffer(bh);
                mark_buffer_dirty(bh);
                if (IS_SYNC(inode))
                        sync_dirty_buffer(bh);
                dquot_free_block_nodirty(inode, 1);
        }
        EXT2_I(inode)->i_file_acl = 0;

cleanup:
        brelse(bh);
        up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext2_xattr_put_super(struct super_block *sb)
{
        mb_cache_shrink(sb->s_bdev);
}


/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct buffer_head *bh)
{
        __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
        struct mb_cache_entry *ce;
        int error;

        ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
        if (!ce)
                return -ENOMEM;
        error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
        if (error) {
                mb_cache_entry_free(ce);
                if (error == -EBUSY) {
                        ea_bdebug(bh, "already in cache (%d cache entries)",
                                atomic_read(&ext2_xattr_cache->c_entry_count));
                        error = 0;
                }
        } else {
                ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
                          atomic_read(&ext2_xattr_cache->c_entry_count));
                mb_cache_entry_release(ce);
        }
        return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
               struct ext2_xattr_header *header2)
{
        struct ext2_xattr_entry *entry1, *entry2;

        entry1 = ENTRY(header1+1);
        entry2 = ENTRY(header2+1);
        while (!IS_LAST_ENTRY(entry1)) {
                if (IS_LAST_ENTRY(entry2))
                        return 1;
                if (entry1->e_hash != entry2->e_hash ||
                    entry1->e_name_index != entry2->e_name_index ||
                    entry1->e_name_len != entry2->e_name_len ||
                    entry1->e_value_size != entry2->e_value_size ||
                    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
                        return 1;
                if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
                        return -EIO;
                if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
                           (char *)header2 + le16_to_cpu(entry2->e_value_offs),
                           le32_to_cpu(entry1->e_value_size)))
                        return 1;

                entry1 = EXT2_XATTR_NEXT(entry1);
                entry2 = EXT2_XATTR_NEXT(entry2);
        }
        if (!IS_LAST_ENTRY(entry2))
                return 1;
        return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
        __u32 hash = le32_to_cpu(header->h_hash);
        struct mb_cache_entry *ce;

        if (!header->h_hash)
                return NULL;  /* never share */
        ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
        ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev,
                                       hash);
        while (ce) {
                struct buffer_head *bh;

                if (IS_ERR(ce)) {
                        if (PTR_ERR(ce) == -EAGAIN)
                                goto again;
                        break;
                }

                bh = sb_bread(inode->i_sb, ce->e_block);
                if (!bh) {
                        ext2_error(inode->i_sb, "ext2_xattr_cache_find",
                                "inode %ld: block %ld read error",
                                inode->i_ino, (unsigned long) ce->e_block);
                } else {
                        lock_buffer(bh);
                        if (le32_to_cpu(HDR(bh)->h_refcount) >
                            EXT2_XATTR_REFCOUNT_MAX) {
                                ea_idebug(inode, "block %ld refcount %d>%d",
                                          (unsigned long) ce->e_block,
                                          le32_to_cpu(HDR(bh)->h_refcount),
                                          EXT2_XATTR_REFCOUNT_MAX);
                        } else if (!ext2_xattr_cmp(header, HDR(bh))) {
                                ea_bdebug(bh, "b_count=%d",
                                          atomic_read(&(bh->b_count)));
                                mb_cache_entry_release(ce);
                                return bh;
                        }
                        unlock_buffer(bh);
                        brelse(bh);
                }
                ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
        }
        return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
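/*
 * In other words, the loops below fold the name in byte by byte and the
 * value in 32-bit little-endian words:
 *
 *      hash = (hash <<  5) ^ (hash >> 27) ^ name[i]          (per name byte)
 *      hash = (hash << 16) ^ (hash >> 16) ^ le32(value[j])   (per value word)
 */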
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
                                         struct ext2_xattr_entry *entry)
{
        __u32 hash = 0;
        char *name = entry->e_name;
        int n;

        for (n = 0; n < entry->e_name_len; n++) {
                hash = (hash << NAME_HASH_SHIFT) ^
                       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
                       *name++;
        }

        if (entry->e_value_block == 0 && entry->e_value_size != 0) {
                __le32 *value = (__le32 *)((char *)header +
                        le16_to_cpu(entry->e_value_offs));
                for (n = (le32_to_cpu(entry->e_value_size) +
                     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
                        hash = (hash << VALUE_HASH_SHIFT) ^
                               (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
                               le32_to_cpu(*value++);
                }
        }
        entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
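/*
 * The block hash simply folds together the per-entry hashes:
 *
 *      hash = (hash << 16) ^ (hash >> 16) ^ entry->e_hash    (per entry)
 *
 * An entry hash of zero marks the block as unshareable, in which case the
 * block hash is forced to zero as well (see ext2_xattr_cache_find()).
 */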
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
                              struct ext2_xattr_entry *entry)
{
        struct ext2_xattr_entry *here;
        __u32 hash = 0;

        ext2_xattr_hash_entry(header, entry);
        here = ENTRY(header+1);
        while (!IS_LAST_ENTRY(here)) {
                if (!here->e_hash) {
                        /* Block is not shared if an entry's hash value == 0 */
                        hash = 0;
                        break;
                }
                hash = (hash << BLOCK_HASH_SHIFT) ^
                       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
                       le32_to_cpu(here->e_hash);
                here = EXT2_XATTR_NEXT(here);
        }
        header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

int __init
init_ext2_xattr(void)
{
        ext2_xattr_cache = mb_cache_create("ext2_xattr", 6);
        if (!ext2_xattr_cache)
                return -ENOMEM;
        return 0;
}

void
exit_ext2_xattr(void)
{
        mb_cache_destroy(ext2_xattr_cache);
}