Merge remote-tracking branch 'staging/staging-next'
[deliverable/linux.git] / drivers / staging / lustre / lustre / llite / xattr_cache.c
CommitLineData
7fc1f831
AP
1/*
2 * Copyright 2012 Xyratex Technology Limited
3 *
1dc563a6
AD
4 * Copyright (c) 2013, 2015, Intel Corporation.
5 *
7fc1f831
AP
6 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
7 *
8 */
9
10#define DEBUG_SUBSYSTEM S_LLITE
11
12#include <linux/fs.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
67a235f5
GKH
15#include "../include/obd_support.h"
16#include "../include/lustre_lite.h"
17#include "../include/lustre_dlm.h"
18#include "../include/lustre_ver.h"
7fc1f831
AP
19#include "llite_internal.h"
20
/* If we ever have hundreds of extended attributes, we might want to consider
 * using a hash or a tree structure instead of list for faster lookups.
 */
struct ll_xattr_entry {
	struct list_head	xe_list;    /* protected with
					     * lli_xattrs_list_rwsem
					     */
	char			*xe_name;   /* xattr name, \0-terminated */
	char			*xe_value;  /* xattr value */
	unsigned		xe_namelen; /* strlen(xe_name) + 1 */
	unsigned		xe_vallen;  /* xattr value length */
};

/* Slab cache for struct ll_xattr_entry; created by ll_xattr_init(). */
static struct kmem_cache *xattr_kmem;
static struct lu_kmem_descr xattr_caches[] = {
	{
		.ckd_cache = &xattr_kmem,
		.ckd_name  = "xattr_kmem",
		.ckd_size  = sizeof(struct ll_xattr_entry)
	},
	{
		.ckd_cache = NULL	/* terminator for lu_kmem_init() */
	}
};
45
/**
 * Set up the slab cache backing cached xattr entries.
 *
 * \retval 0 on success, negative error code from lu_kmem_init() otherwise
 */
int ll_xattr_init(void)
{
	return lu_kmem_init(xattr_caches);
}
50
/**
 * Tear down the xattr entry slab cache created by ll_xattr_init().
 */
void ll_xattr_fini(void)
{
	lu_kmem_fini(xattr_caches);
}
55
/**
 * Initializes xattr cache for an inode.
 *
 * This initializes the xattr list and marks cache presence.
 *
 * NOTE(review): callers appear to hold lli_xattrs_list_rwsem for write
 * (see ll_xattr_cache_refill(), which runs with the write lock taken by
 * ll_xattr_find_get_lock()) — confirm before calling from new paths.
 */
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
	INIT_LIST_HEAD(&lli->lli_xattrs);
	lli->lli_flags |= LLIF_XATTR_CACHE;
}
66
67/**
68 * This looks for a specific extended attribute.
69 *
70 * Find in @cache and return @xattr_name attribute in @xattr,
71 * for the NULL @xattr_name return the first cached @xattr.
72 *
73 * \retval 0 success
74 * \retval -ENODATA if not found
75 */
76static int ll_xattr_cache_find(struct list_head *cache,
77 const char *xattr_name,
78 struct ll_xattr_entry **xattr)
79{
80 struct ll_xattr_entry *entry;
81
7fc1f831
AP
82 list_for_each_entry(entry, cache, xe_list) {
83 /* xattr_name == NULL means look for any entry */
6e16818b 84 if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
7fc1f831
AP
85 *xattr = entry;
86 CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
87 entry->xe_name, entry->xe_vallen,
88 entry->xe_value);
89 return 0;
90 }
91 }
92
93 return -ENODATA;
94}
95
96/**
e93a3082 97 * This adds an xattr.
7fc1f831
AP
98 *
99 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
7fc1f831
AP
100 *
101 * \retval 0 success
102 * \retval -ENOMEM if no memory could be allocated for the cached attr
e93a3082 103 * \retval -EPROTO if duplicate xattr is being added
7fc1f831
AP
104 */
105static int ll_xattr_cache_add(struct list_head *cache,
106 const char *xattr_name,
107 const char *xattr_val,
108 unsigned xattr_val_len)
109{
110 struct ll_xattr_entry *xattr;
111
7fc1f831 112 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
e93a3082
AP
113 CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
114 return -EPROTO;
7fc1f831
AP
115 }
116
21068c46 117 xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
6e16818b 118 if (!xattr) {
7fc1f831
AP
119 CDEBUG(D_CACHE, "failed to allocate xattr\n");
120 return -ENOMEM;
121 }
122
b3dd8957 123 xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
7fc1f831
AP
124 if (!xattr->xe_name) {
125 CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
126 xattr->xe_namelen);
127 goto err_name;
128 }
9cda6856 129 xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
695a0666 130 if (!xattr->xe_value)
7fc1f831 131 goto err_value;
7fc1f831 132
7fc1f831
AP
133 xattr->xe_vallen = xattr_val_len;
134 list_add(&xattr->xe_list, cache);
135
e15ba45d
OD
136 CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len,
137 xattr_val);
7fc1f831
AP
138
139 return 0;
140err_value:
97903a26 141 kfree(xattr->xe_name);
7fc1f831 142err_name:
50d30362 143 kmem_cache_free(xattr_kmem, xattr);
7fc1f831
AP
144
145 return -ENOMEM;
146}
147
148/**
149 * This removes an extended attribute from cache.
150 *
151 * Remove @xattr_name attribute from @cache.
152 *
153 * \retval 0 success
154 * \retval -ENODATA if @xattr_name is not cached
155 */
156static int ll_xattr_cache_del(struct list_head *cache,
157 const char *xattr_name)
158{
159 struct ll_xattr_entry *xattr;
160
7fc1f831
AP
161 CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
162
163 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
164 list_del(&xattr->xe_list);
97903a26
JL
165 kfree(xattr->xe_name);
166 kfree(xattr->xe_value);
50d30362 167 kmem_cache_free(xattr_kmem, xattr);
7fc1f831
AP
168
169 return 0;
170 }
171
172 return -ENODATA;
173}
174
175/**
176 * This iterates cached extended attributes.
177 *
178 * Walk over cached attributes in @cache and
179 * fill in @xld_buffer or only calculate buffer
180 * size if @xld_buffer is NULL.
181 *
182 * \retval >= 0 buffer list size
183 * \retval -ENODATA if the list cannot fit @xld_size buffer
184 */
185static int ll_xattr_cache_list(struct list_head *cache,
186 char *xld_buffer,
187 int xld_size)
188{
189 struct ll_xattr_entry *xattr, *tmp;
190 int xld_tail = 0;
191
7fc1f831
AP
192 list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
193 CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
e15ba45d 194 xld_buffer, xld_tail, xattr->xe_name);
7fc1f831
AP
195
196 if (xld_buffer) {
197 xld_size -= xattr->xe_namelen;
198 if (xld_size < 0)
199 break;
200 memcpy(&xld_buffer[xld_tail],
201 xattr->xe_name, xattr->xe_namelen);
202 }
203 xld_tail += xattr->xe_namelen;
204 }
205
206 if (xld_size < 0)
207 return -ERANGE;
208
209 return xld_tail;
210}
211
212/**
213 * Check if the xattr cache is initialized (filled).
214 *
215 * \retval 0 @cache is not initialized
216 * \retval 1 @cache is initialized
217 */
2d95f10e 218static int ll_xattr_cache_valid(struct ll_inode_info *lli)
7fc1f831
AP
219{
220 return !!(lli->lli_flags & LLIF_XATTR_CACHE);
221}
222
223/**
224 * This finalizes the xattr cache.
225 *
226 * Free all xattr memory. @lli is the inode info pointer.
227 *
d0a0acc3 228 * \retval 0 no error occurred
7fc1f831
AP
229 */
230static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
231{
7fc1f831
AP
232 if (!ll_xattr_cache_valid(lli))
233 return 0;
234
235 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
236 ; /* empty loop */
237 lli->lli_flags &= ~LLIF_XATTR_CACHE;
238
239 return 0;
240}
241
/**
 * Free the whole xattr cache of @inode.
 *
 * Takes lli_xattrs_list_rwsem for write around the actual teardown.
 *
 * \retval 0 no error occurred
 */
int ll_xattr_cache_destroy(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc;

	down_write(&lli->lli_xattrs_list_rwsem);
	rc = ll_xattr_cache_destroy_locked(lli);
	up_write(&lli->lli_xattrs_list_rwsem);

	return rc;
}
253
/**
 * Match or enqueue a PR lock.
 *
 * Find or request an LDLM lock with xattr data.
 * Since LDLM does not provide API for atomic match_or_enqueue,
 * the function handles it with a separate enq lock.
 * If successful, the function exits with the list lock held.
 *
 * On success (\retval 0) the caller holds lli_xattrs_list_rwsem for
 * write; *@req is left NULL when an existing lock was matched locally.
 *
 * \retval 0 no error occurred
 * \retval -ENOMEM not enough memory
 */
static int ll_xattr_find_get_lock(struct inode *inode,
				  struct lookup_intent *oit,
				  struct ptlrpc_request **req)
{
	enum ldlm_mode mode;
	struct lustre_handle lockh = { 0 };
	struct md_op_data *op_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ldlm_enqueue_info einfo = {
		.ei_type = LDLM_IBITS,
		.ei_mode = it_to_lock_mode(oit),
		.ei_cb_bl = &ll_md_blocking_ast,
		.ei_cb_cp = &ldlm_completion_ast,
	};
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_export *exp = sbi->ll_md_exp;
	int rc;

	/* Serialize concurrent match/enqueue attempts on this inode. */
	mutex_lock(&lli->lli_xattrs_enq_lock);
	/* inode may have been shrunk and recreated, so data is gone, match lock
	 * only when data exists.
	 */
	if (ll_xattr_cache_valid(lli)) {
		/* Try matching first. */
		mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
				       LCK_PR);
		if (mode != 0) {
			/* fake oit in mdc_revalidate_lock() manner */
			oit->it_lock_handle = lockh.cookie;
			oit->it_lock_mode = mode;
			goto out;
		}
	}

	/* Enqueue if the lock isn't cached locally. */
	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return PTR_ERR(op_data);
	}

	/* Request both the xattr values and the name list in one RPC. */
	op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;

	rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
	ll_finish_md_op_data(op_data);

	if (rc < 0) {
		CDEBUG(D_CACHE,
		       "md_intent_lock failed with %d for fid "DFID"\n",
		       rc, PFID(ll_inode2fid(inode)));
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return rc;
	}

	/* Hand the intent's reply to the caller; it owns the reference. */
	*req = oit->it_request;
out:
	/* Take the list lock before releasing the enq mutex so a parallel
	 * refill cannot sneak in between match/enqueue and list update.
	 */
	down_write(&lli->lli_xattrs_list_rwsem);
	mutex_unlock(&lli->lli_xattrs_enq_lock);

	return 0;
}
327
/**
 * Refill the xattr cache.
 *
 * Fetch and cache the whole of xattrs for @inode, acquiring
 * a read or a write xattr lock depending on operation in @oit.
 * Intent is dropped on exit unless the operation is setxattr.
 *
 * On success (\retval 0) this returns with lli_xattrs_list_rwsem still
 * held for write (taken by ll_xattr_find_get_lock()); on failure the
 * rwsem is released before returning.
 *
 * \retval 0 no error occurred
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 */
static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	const char *xdata, *xval, *xtail, *xvtail;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body;
	__u32 *xsizes;
	int rc, i;

	rc = ll_xattr_find_get_lock(inode, oit, &req);
	if (rc)
		goto out_no_unlock;

	/* Do we have the data at this point? */
	if (ll_xattr_cache_valid(lli)) {
		/* Another thread refilled while we waited for the lock. */
		ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
		rc = 0;
		goto out_maybe_drop;
	}

	/* Matched but no cache? Cancelled on error by a parallel refill. */
	if (unlikely(!req)) {
		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
		rc = -EIO;
		goto out_maybe_drop;
	}

	if (oit->it_status < 0) {
		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
		       oit->it_status, PFID(ll_inode2fid(inode)));
		rc = oit->it_status;
		/* xattr data is so large that we don't want to cache it */
		if (rc == -ERANGE)
			rc = -EAGAIN;
		goto out_destroy;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (!body) {
		CERROR("no MDT BODY in the refill xattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}
	/* do not need swab xattr data */
	xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
					     body->mbo_eadatasize);
	xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
					    body->mbo_aclsize);
	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
					      body->mbo_max_mdsize * sizeof(__u32));
	if (!xdata || !xval || !xsizes) {
		CERROR("wrong setxattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}

	/* End-of-buffer sentinels for the name and value streams. */
	xtail = xdata + body->mbo_eadatasize;
	xvtail = xval + body->mbo_aclsize;

	CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);

	ll_xattr_cache_init(lli);

	/* Walk the parallel name/value/size arrays from the reply. */
	for (i = 0; i < body->mbo_max_mdsize; i++) {
		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
		/* Perform consistency checks: attr names and vals in pill */
		if (!memchr(xdata, 0, xtail - xdata)) {
			CERROR("xattr protocol violation (names are broken)\n");
			rc = -EPROTO;
		} else if (xval + *xsizes > xvtail) {
			CERROR("xattr protocol violation (vals are broken)\n");
			rc = -EPROTO;
		} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
			/* Fault-injection point for testing the error path. */
			rc = -ENOMEM;
		} else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
			/* Filter out ACL ACCESS since it's cached separately */
			CDEBUG(D_CACHE, "not caching %s\n",
			       XATTR_NAME_ACL_ACCESS);
			rc = 0;
		} else {
			rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
						*xsizes);
		}
		if (rc < 0) {
			ll_xattr_cache_destroy_locked(lli);
			goto out_destroy;
		}
		xdata += strlen(xdata) + 1;
		xval += *xsizes;
		xsizes++;
	}

	if (xdata != xtail || xval != xvtail)
		CERROR("a hole in xattr data\n");

	ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);

	goto out_maybe_drop;
out_maybe_drop:

	ll_intent_drop_lock(oit);

	/* Keep the write rwsem held on success; caller downgrades it. */
	if (rc != 0)
		up_write(&lli->lli_xattrs_list_rwsem);
out_no_unlock:
	ptlrpc_req_finished(req);

	return rc;

out_destroy:
	/* Cache is unusable: drop the rwsem and cancel the lock so a
	 * later getxattr re-fetches from the MDS.
	 */
	up_write(&lli->lli_xattrs_list_rwsem);

	ldlm_lock_decref_and_cancel((struct lustre_handle *)
				    &oit->it_lock_handle,
				    oit->it_lock_mode);

	goto out_no_unlock;
}
458
459/**
460 * Get an xattr value or list xattrs using the write-through cache.
461 *
462 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
463 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
464 * The resulting value/list is stored in @buffer if the former
465 * is not larger than @size.
466 *
d0a0acc3 467 * \retval 0 no error occurred
7fc1f831
AP
468 * \retval -EPROTO network protocol error
469 * \retval -ENOMEM not enough memory for the cache
470 * \retval -ERANGE the buffer is not large enough
471 * \retval -ENODATA no such attr or the list is empty
472 */
e15ba45d
OD
473int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer,
474 size_t size, __u64 valid)
7fc1f831
AP
475{
476 struct lookup_intent oit = { .it_op = IT_GETXATTR };
477 struct ll_inode_info *lli = ll_i2info(inode);
478 int rc = 0;
479
7fc1f831
AP
480 LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
481
482 down_read(&lli->lli_xattrs_list_rwsem);
483 if (!ll_xattr_cache_valid(lli)) {
484 up_read(&lli->lli_xattrs_list_rwsem);
485 rc = ll_xattr_cache_refill(inode, &oit);
486 if (rc)
487 return rc;
488 downgrade_write(&lli->lli_xattrs_list_rwsem);
489 } else {
490 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
491 }
492
493 if (valid & OBD_MD_FLXATTR) {
494 struct ll_xattr_entry *xattr;
495
496 rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
497 if (rc == 0) {
498 rc = xattr->xe_vallen;
499 /* zero size means we are only requested size in rc */
500 if (size != 0) {
501 if (size >= xattr->xe_vallen)
502 memcpy(buffer, xattr->xe_value,
e15ba45d 503 xattr->xe_vallen);
7fc1f831
AP
504 else
505 rc = -ERANGE;
506 }
507 }
508 } else if (valid & OBD_MD_FLXATTRLS) {
509 rc = ll_xattr_cache_list(&lli->lli_xattrs,
510 size ? buffer : NULL, size);
511 }
512
34e1f2bb 513 goto out;
7fc1f831
AP
514out:
515 up_read(&lli->lli_xattrs_list_rwsem);
516
517 return rc;
518}
This page took 0.504118 seconds and 5 git commands to generate.