/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/export.h>
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

#define PNFS_DEVICE_RETRY_TIMEOUT	(120*HZ)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
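
/*
 * Writers (insert, delete, purge) serialize on nfs4_deviceid_lock;
 * lookups walk the hash buckets under RCU, and every cached node is
 * reference counted through its 'ref' field.
 */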

void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @clp nfs_client associated with deviceid
 * @id deviceid to look up
 */
99 static struct nfs4_deviceid_node *
100 _find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
101 const struct nfs_client *clp, const struct nfs4_deviceid *id,
104 struct nfs4_deviceid_node *d;
107 d = _lookup_deviceid(ld, clp, id, hash);

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
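
/*
 * Typical lookup pattern for a layout driver (illustrative sketch only;
 * 'server' and 'dev_id' stand in for whatever the caller has at hand):
 *
 *	struct nfs4_deviceid_node *node;
 *
 *	node = nfs4_find_get_deviceid(server->pnfs_curr_ld,
 *				      server->nfs_client, dev_id);
 *	if (node) {
 *		... use the cached device ...
 *		nfs4_put_deviceid_node(node);
 *	}
 *
 * A successful lookup returns with an extra reference that the caller
 * must drop with nfs4_put_deviceid_node().
 */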

/*
 * Remove a deviceid from cache
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * If found, the node is unhashed and its cache reference is dropped; it
 * is freed once the last reference to it is put.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();

	/* balance the initial ref set in nfs4_insert_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
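
/*
 * nfs4_delete_deviceid() is typically driven by the server telling the
 * client that a device ID is no longer valid (for example through a
 * device ID notification); normal teardown goes through
 * nfs4_put_deviceid_node() or nfs4_deviceid_purge_client() instead.
 */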

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Uniquely initialize and insert a deviceid node into cache
 *
 * @new new deviceid node
 *      Note that the caller must set up the following members:
 *        new->ld
 *        new->nfs_client
 *        new->deviceid
 *
 * @ret the inserted node if no matching entry was found, otherwise the
 *      existing entry.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);
	atomic_inc(&new->ref);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
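
/*
 * A common get-or-create sequence (sketch; the embedding structure and
 * its 'id_node' member are hypothetical, standing in for a layout
 * driver's own device representation):
 *
 *	nfs4_init_deviceid_node(&new->id_node, ld, clp, dev_id);
 *	d = nfs4_insert_deviceid_node(&new->id_node);
 *	if (d != &new->id_node) {
 *		(raced with another insert: free the local allocation
 *		 and use the already-cached node 'd' instead)
 *	}
 *
 * In either case the node returned by nfs4_insert_deviceid_node()
 * carries a reference for the caller, to be released later with
 * nfs4_put_deviceid_node().
 */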

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);
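
/*
 * Intended use (sketch): an I/O path that hits a fatal error talking to
 * a data server can mark the device with nfs4_mark_deviceid_unavailable(),
 * and later callers test the node before reusing it, for example:
 *
 *	if (nfs4_test_deviceid_unavailable(node))
 *		(skip this device / fall back to I/O through the MDS)
 *
 * The flag clears itself once PNFS_DEVICE_RETRY_TIMEOUT has elapsed.
 */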

static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}
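
/*
 * Note the distinction between the two client-wide teardown paths:
 * nfs4_deviceid_purge_client() unhashes the client's nodes and drops the
 * cache's references, so each node is freed once its last reference is
 * put, while nfs4_deviceid_mark_client_invalid() only sets
 * NFS_DEVICEID_INVALID so existing holders stop trusting their nodes;
 * it frees nothing.
 */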