// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}
#define	XFS_ISRESET_CURSOR(cursor)	\
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hashvalue and sort them before
 * we can begin returning them to the user.
 */
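/*
 * For example (illustrative numbers): if only half of the entries fit in the
 * caller's buffer, the next attr_list() call must resume at the saved
 * (cursor->hashval, cursor->offset) position.  That position is only
 * meaningful if entries are handed out in hashval order, with offset
 * disambiguating entries that happen to share a hashval, hence the sort
 * below when the fast path cannot be used.
 */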
STATIC int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);
	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
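	/*
	 * Worked example with made-up numbers: for 3 entries occupying
	 * if_bytes == 100, the fast path is taken only when
	 * 100 + 3 * 16 == 148 is still smaller than context->bufsize; the
	 * 16 bytes of slack per entry roughly cover the attrlist_ent
	 * header, NUL termination, padding and the al_offset[] slot that
	 * put_listent adds on output.
	 */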
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			context->put_listent(context,
					     sfe->flags,
					     sfe->nameval,
					     (int)sfe->namelen,
					     (int)sfe->valuelen);
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}
	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;
	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe,
					     sizeof(*sfe));
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}
	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);
	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}
	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		context->put_listent(context,
				     sbp->flags,
				     sbp->name,
				     sbp->namelen,
				     sbp->valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}
/*
 * We didn't find the block & hash mentioned in the cursor state, so
 * walk down the attr btree looking for the hash.
 */
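/*
 * In rough pseudocode, the descent below reads the root (block 0), and while
 * the block is an intermediate node it picks the first btree entry whose
 * hashval is >= cursor->hashval and follows its "before" pointer, stopping
 * at the first leaf and handing that buffer back via *pbp.  The
 * expected_level accounting merely verifies that each child sits exactly one
 * level below its parent and that the walk reaches a leaf only once
 * expected_level has counted down to zero.
 */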
STATIC int
xfs_attr_node_list_lookup(
	struct xfs_attr_list_context	*context,
	struct attrlist_cursor_kern	*cursor,
	struct xfs_buf			**pbp)
{
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_intnode		*node;
	struct xfs_da_node_entry	*btree;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	struct xfs_trans		*tp = context->tp;
	struct xfs_buf			*bp;
	int				i;
	int				error = 0;
	unsigned int			expected_level = 0;
	uint16_t			magic;

	ASSERT(*pbp == NULL);
	cursor->blkno = 0;
	for (;;) {
		error = xfs_da3_node_read(tp, dp, cursor->blkno, -1, &bp,
				XFS_ATTR_FORK);
		if (error)
			return error;
		node = bp->b_addr;
		magic = be16_to_cpu(node->hdr.info.magic);
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC)
			break;
		if (magic != XFS_DA_NODE_MAGIC &&
		    magic != XFS_DA3_NODE_MAGIC) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					node, sizeof(*node));
			goto out_corruptbuf;
		}

		dp->d_ops->node_hdr_from_disk(&nodehdr, node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			goto out_corruptbuf;

		/* Check the level from the root node. */
		if (cursor->blkno == 0)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			goto out_corruptbuf;
		else
			expected_level--;

		btree = dp->d_ops->node_tree_p(node);
		for (i = 0; i < nodehdr.count; btree++, i++) {
			if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
				cursor->blkno = be32_to_cpu(btree->before);
				trace_xfs_attr_list_node_descend(context,
						btree);
				break;
			}
		}
		xfs_trans_brelse(tp, bp);

		if (i == nodehdr.count)
			return 0;

		/* We can't point back to the root. */
		if (cursor->blkno == 0)
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		goto out_corruptbuf;

	*pbp = bp;
	return 0;

out_corruptbuf:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
STATIC int
xfs_attr_node_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_da_intnode		*node;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	int				error = 0;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;
	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
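	/*
	 * The cursor can go stale between calls: the leaf it pointed at may
	 * have been split, joined, or converted back to a node while the
	 * inode was unlocked.  For example, if cursor->hashval is now above
	 * the last entry's hashval (or at/below the first entry's), the
	 * saved block no longer covers that hash and the code falls back to
	 * a fresh lookup from the root.
	 */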
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
					  &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}
	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		error = xfs_attr_node_list_lookup(context, cursor, &bp);
		if (error || !bp)
			return error;
	}
	ASSERT(bp != NULL);
	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		xfs_attr3_leaf_list_int(bp, context);
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(context->tp, bp);
		error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno,
					    -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(context->tp, bp);
	return 0;
}
/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
void
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;
	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;
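	/*
	 * Example of the offset/dupcnt dance above, with a made-up hash:
	 * different names may collide on hashval, so the cursor stores
	 * (hashval, offset) rather than a name.  If three names hash to
	 * 0x1234 and the previous call stopped after returning the second
	 * one, the cursor reads (0x1234, 2); on resync, dupcnt counts the
	 * matching entries until it equals cursor->offset, so copying
	 * resumes at the third entry.
	 */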
	/*
	 * We have found our place, start copying out the new attributes.
	 */
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
		    !(context->flags & ATTR_INCOMPLETE))
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		context->put_listent(context, entry->flags,
				     name, namelen, valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
}
/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int			error;
	struct xfs_buf		*bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp);
	if (error)
		return error;

	xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(context->tp, bp);
	return 0;
}
int
xfs_attr_list_int_ilocked(
	struct xfs_attr_list_context	*context)
{
	struct xfs_inode		*dp = context->dp;

	ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp))
		return 0;
	else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_list(context);
	else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
		return xfs_attr_leaf_list(context);
	return xfs_attr_node_list(context);
}
int
xfs_attr_list_int(
	xfs_attr_list_context_t	*context)
{
	int			error;
	xfs_inode_t		*dp = context->dp;
	uint			lock_mode;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	lock_mode = xfs_ilock_attr_map_shared(dp);
	error = xfs_attr_list_int_ilocked(context);
	xfs_iunlock(dp, lock_mode);
	return error;
}
#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \
	 & ~(sizeof(uint32_t)-1))
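/*
 * Worked example, assuming the usual layout of struct attrlist_ent (a
 * 4-byte a_valuelen followed by a_name): ATTR_ENTBASESIZE is the offset of
 * a_name, i.e. 4, so a 5-character name needs 4 + 5 + 1 (NUL) = 10 bytes,
 * which ATTR_ENTSIZE() rounds up to 12 to keep every entry uint32_t aligned.
 */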
/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
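/*
 * Rough picture of the buffer layout this relies on: struct attrlist and
 * its al_offset[] array grow upward from offset 0, while the variable-size
 * attrlist_ent records are packed downward from the end of the buffer,
 * with context->firstu tracking the lowest byte used by an entry:
 *
 *	0                    firstu                           bufsize
 *	| al_count ... al_offset[0..count-1] | free | ... ent1 ent0 |
 *
 * Once the two regions would meet, the listing stops and al_more is set so
 * the caller knows to come back with the cursor.
 */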
STATIC void
xfs_attr_put_listent(
	xfs_attr_list_context_t	*context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	int			valuelen)
{
	struct attrlist *alist = (struct attrlist *)context->alist;
	attrlist_ent_t *aep;
	int arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);
	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return;
	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
}
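/*
 * Sketch of how a caller is expected to drive this interface (hypothetical
 * caller, error handling elided):
 *
 *	struct attrlist_cursor_kern cursor = { 0 };
 *	struct attrlist *alist = (struct attrlist *)buffer;
 *
 *	do {
 *		error = xfs_attr_list(dp, buffer, bufsize, flags, &cursor);
 *		if (error)
 *			break;
 *		walk alist->al_count entries via alist->al_offset[];
 *	} while (alist->al_more);
 */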
/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  Positive return value follows the XFS
 * convention of being an error, zero or negative return code
 * is the length of the buffer returned (negated), indicating
 * success.
 */
int
xfs_attr_list(
	xfs_inode_t		*dp,
	char			*buffer,
	int			bufsize,
	int			flags,
	attrlist_cursor_kern_t	*cursor)
{
	xfs_attr_list_context_t	context;
	struct attrlist		*alist;
	int			error;
	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return -EINVAL;
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return -EINVAL;
	/* Only internal consumers can retrieve incomplete attrs. */
	if (flags & ATTR_INCOMPLETE)
		return -EINVAL;
	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return -EFAULT;
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;
	/*
	 * Initialize the output buffer.
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));	/* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);