/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <[email protected]>
 *
 * Fix by Harrison Xing <[email protected]>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <[email protected]>.
 * xattr consolidation Copyright (c) 2004 James Morris <[email protected]>,
 *  Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */

#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

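/*
 * Helpers for walking an extended attribute block: HDR() maps a
 * buffer_head's data to the block header (h_magic, h_refcount, h_blocks,
 * h_hash), FIRST_ENTRY() points just behind that header, and the entry
 * list is terminated by four null bytes (IS_LAST_ENTRY).  Each entry
 * descriptor records the attribute's name index, name length and inline
 * name, plus the offset and size of its value within this same block;
 * a non-zero e_value_block is treated as corruption throughout this file.
 */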
#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static struct mb_cache *ext2_xattr_cache;

static struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]              = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext2_xattr_acl_access_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]           = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]          = &ext2_xattr_security_handler,
#endif
};

struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&ext2_xattr_acl_access_handler,
	&ext2_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

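/*
 * ext2_xattr_handler_map is indexed by the on-disk e_name_index values and
 * is only consulted by ext2_xattr_handler() below.  ext2_xattr_handlers is
 * the NULL-terminated list used by the generic xattr code (it is installed
 * on the superblock at mount time).
 */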
static inline struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	/* find named attribute */
	name_len = strlen(name);

	error = -ERANGE;
	if (name_len > 255)
		goto cleanup;
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	/* Check the remaining name entries */
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
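	/*
	 * Found the entry: its value must live in this block (e_value_block
	 * is always zero here) and must fit inside it before it is copied
	 * out below.
	 */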
found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");

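	/*
	 * Each handler's ->list callback emits the attribute name including
	 * its namespace prefix and returns the number of bytes it needs;
	 * with a NULL buffer nothing is copied, so the loop below doubles as
	 * the required-size computation.
	 */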
	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(inode, buffer, rest,
						    entry->e_name,
						    entry->e_name_len);
			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				buffer += size;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry->d_inode, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	lock_super(sb);
	EXT2_SB(sb)->s_es->s_feature_compat |=
		cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
	sb->s_dirt = 1;
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
	unlock_super(sb);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Buffer
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(here)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name,name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

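	/*
	 * free is now the gap between the end of the entry descriptors and
	 * the first value, minus the four null bytes terminating the entry
	 * list; not_found is zero only if an entry with the requested name
	 * index and name was found above.  For an existing attribute, the
	 * space its current name and value occupy is credited back before
	 * checking whether the new attribute fits.
	 */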
	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			free += EXT2_XATTR_SIZE(size);
		}
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

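	/*
	 * Prepare a block to modify: a block used by this inode alone
	 * (refcount 1) is modified in place with its buffer lock held; a
	 * shared block is cloned into a temporary buffer; with no block yet,
	 * an empty header is built in a temporary buffer.
	 */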
	if (header) {
		struct mb_cache_entry *ce;

		/* assert(header == HDR(bh)); */
		ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
					bh->b_blocknr);
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			ea_bdebug(bh, "modifying in-place");
			if (ce)
				mb_cache_entry_free(ce);
			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			if (ce)
				mb_cache_entry_release(ce);
			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kmalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		memset(header, 0, sb->s_blocksize);
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	brelse(bh);
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;

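	/*
	 * If header is non-NULL, find a block to hold it: an identical block
	 * already in the cache (its refcount is raised and one block of
	 * quota is charged), the inode's existing private block, or a newly
	 * allocated block near the inode's block group.  A NULL header means
	 * the last attribute was removed; the old block is released further
	 * down.
	 */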
	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = -EDQUOT;
				if (DQUOT_ALLOC_BLOCK(inode, 1)) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				HDR(new_bh)->h_refcount = cpu_to_le32(1 +
					le32_to_cpu(HDR(new_bh)->h_refcount));
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(new_bh);
		} else {
			/* We need to allocate a new block */
			int goal = le32_to_cpu(EXT2_SB(sb)->s_es->
						s_first_data_block) +
				   EXT2_I(inode)->i_block_group *
				   EXT2_BLOCKS_PER_GROUP(sb);
			int block = ext2_new_block(inode, goal,
						   NULL, NULL, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
				ext2_free_blocks(inode, block, 1);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode)) {
		error = ext2_sync_inode (inode);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh)
				DQUOT_FREE_BLOCK(inode, 1);
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		struct mb_cache_entry *ce;

		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
					old_bh->b_blocknr);
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			/* Free the old block. */
			if (ce)
				mb_cache_entry_free(ce);
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			HDR(old_bh)->h_refcount = cpu_to_le32(
				le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
			if (ce)
				mb_cache_entry_release(ce);
			DQUOT_FREE_BLOCK(inode, 1);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct mb_cache_entry *ce;

	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		if (ce)
			mb_cache_entry_free(ce);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		HDR(bh)->h_refcount = cpu_to_le32(
			le32_to_cpu(HDR(bh)->h_refcount) - 1);
		if (ce)
			mb_cache_entry_release(ce);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		DQUOT_FREE_BLOCK(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext2_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}


/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext2_xattr_cache);
	if (!ce)
		return -ENOMEM;
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache (%d cache entries)",
				atomic_read(&ext2_xattr_cache->c_entry_count));
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
			  atomic_read(&ext2_xattr_cache->c_entry_count));
		mb_cache_entry_release(ce);
	}
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext2_xattr_cache, 0,
				       inode->i_sb->s_bdev, hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}

		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
			lock_buffer(bh);
			if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_release(ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16
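/*
 * The entry hash mixes the name one byte at a time with a 5-bit
 * rotate-and-xor, then the value as little-endian 32-bit words with a
 * 16-bit rotate-and-xor.  It depends only on data stored in the block, so
 * identical blocks hash identically.
 */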

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n=0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
			 EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
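/*
 * The block hash folds the hashes of all entries together with a 16-bit
 * rotate-and-xor.  An entry whose hash has not been computed (e_hash == 0)
 * forces the block hash to zero, and ext2_xattr_cache_find() never shares
 * blocks whose hash is zero.
 */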
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

int __init
init_ext2_xattr(void)
{
	ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
		sizeof(struct mb_cache_entry) +
		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
	if (!ext2_xattr_cache)
		return -ENOMEM;
	return 0;
}

void
exit_ext2_xattr(void)
{
	mb_cache_destroy(ext2_xattr_cache);
}