/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/llite/dir.c
 *
 * Directory code for lustre client.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/buffer_head.h>	/* for wait_on_buffer */
#include <linux/pagevec.h>
#include <linux/prefetch.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/obd_support.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "../include/lustre/lustre_idl.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_kernelcomm.h"
#include "llite_internal.h"

/*
 * (new) readdir implementation overview.
 *
 * The original Lustre readdir implementation cached an exact copy of the
 * raw directory pages on the client. These pages were indexed in the
 * client page cache by the logical offset in the directory file. This
 * design, while very simple and intuitive, had some inherent problems:
 *
 * . it implies that the byte offset to a directory entry serves as a
 * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
 * ext3/htree directory entries may move due to splits, and more
 * importantly,
 *
 * . it is incompatible with the design of split directories for cmd3,
 * which assumes that names are distributed across nodes based on their
 * hash, and so readdir should be done in hash order.
 *
 * The new readdir implementation does readdir in hash order, and uses the
 * hash of a file name as a telldir/seekdir cookie. This led to a number
 * of complications:
 *
 * . the hash is not unique, so it cannot be used to index cached
 * directory pages on the client (note that it requires a whole pageful of
 * hash collided entries to cause two pages to have identical hashes);
 *
 * . the hash is not unique, so it cannot, strictly speaking, be used as
 * an entry cookie. ext3/htree has the same problem and the Lustre
 * implementation mimics their solution: seekdir(hash) positions the
 * directory at the first entry with the given hash.
 *
 * Client side.
 *
 * 0. caching
 *
 * The client caches directory pages using the hash of the first entry as
 * an index. As noted above the hash is not unique, so this solution
 * doesn't work as is: special processing is needed for "page hash chains"
 * (i.e., sequences of pages filled with entries all having the same hash
 * value).
 *
 * First, such chains have to be detected. To this end, the server returns
 * to the client the hash of the first entry on the page next to the one
 * returned. When the client detects that this hash is the same as the
 * hash of the first entry on the returned page, a page hash collision has
 * to be handled. Pages in the hash chain, except the first one, are
 * termed "overflow pages".
 *
 * The solution to the index uniqueness problem is to not cache overflow
 * pages. Instead, when a page hash collision is detected, all overflow
 * pages from the emerging chain are immediately requested from the server
 * and placed in a special data structure (struct ll_dir_chain). This data
 * structure is used by ll_readdir() to process entries from overflow
 * pages. When a readdir invocation finishes, overflow pages are
 * discarded. If the page hash collision chain wasn't completely
 * processed, the next call to readdir will again detect the collision,
 * again read the overflow pages in, process the next portion of entries
 * and again discard the pages. This is not as wasteful as it looks,
 * because, given a reasonable hash, page hash collisions are extremely
 * rare.
 *
 * 1. directory positioning
 *
 * When seekdir(hash) is called, original
 *
 *
 *
 *
 *
 *
 *
 *
 * Server.
 *
 * identification of and access to overflow pages
 *
 * page format
 *
 * A page in an MDS_READPAGE RPC is packed in LU_PAGE_SIZE, and each page
 * contains a header lu_dirpage which describes the start/end hash, and
 * whether this page is empty (contains no dir entry) or whether its hash
 * collides with the next page. After the client receives the reply,
 * several pages will be integrated into one dir page of PAGE_CACHE_SIZE
 * (if PAGE_CACHE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage
 * for this integrated page will be adjusted. See lmv_adjust_dirpages().
 *
 */

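/*
 * Illustrative sketch (not part of the original driver): how one kmapped
 * directory page is walked.  lu_dirent_start()/lu_dirent_next() and the
 * lde_* fields are the real accessors used by ll_dir_read() below; the
 * function itself is hypothetical and kept under #if 0 so it is never
 * compiled.
 */
#if 0
static void example_walk_dirpage(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;

	for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) {
		/* the name hash doubles as the telldir/seekdir cookie */
		__u64 hash = le64_to_cpu(ent->lde_hash);
		int namelen = le16_to_cpu(ent->lde_namelen);

		if (namelen == 0)
			continue;	/* dummy record, skip it */
		CDEBUG(D_VFSTRACE, "entry %.*s hash %#llx\n",
		       namelen, ent->lde_name, hash);
	}
}
#endif
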
/* returns the page unlocked, but with a reference */
static int ll_dir_filler(void *_hash, struct page *page0)
{
	struct inode *inode = page0->mapping->host;
	int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
	struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
	struct ptlrpc_request *request;
	struct mdt_body *body;
	struct md_op_data *op_data;
	__u64 hash = *((__u64 *)_hash);
	struct page **page_pool;
	struct page *page;
	struct lu_dirpage *dp;
	int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
	int nrdpgs = 0; /* number of pages actually read */
	int npages;
	int i;
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
	       inode->i_ino, inode->i_generation, inode, hash);

	LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);

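	/*
	 * Try to batch up to ll_md_brw_size worth of pages into a single
	 * MDS_READPAGE RPC; if the pointer array cannot be allocated, fall
	 * back to reading only the page we were asked for.
	 */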
	page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
	if (page_pool) {
		page_pool[0] = page0;
	} else {
		page_pool = &page0;
		max_pages = 1;
	}
	for (npages = 1; npages < max_pages; npages++) {
		page = page_cache_alloc_cold(inode->i_mapping);
		if (!page)
			break;
		page_pool[npages] = page;
	}

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		/* fall through to the same cleanup as a failed read */
		rc = PTR_ERR(op_data);
		request = NULL;
	} else {
		op_data->op_npages = npages;
		op_data->op_offset = hash;
		rc = md_readpage(exp, op_data, page_pool, &request);
		ll_finish_md_op_data(op_data);
	}
	if (rc < 0) {
		/* page0 is special, which was added into page cache early */
		delete_from_page_cache(page0);
	} else if (rc == 0) {
		body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
		/* Checked by mdc_readpage() */
		if (body->valid & OBD_MD_FLSIZE)
			cl_isize_write(inode, body->size);

		nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
			 >> PAGE_CACHE_SHIFT;
		SetPageUptodate(page0);
	}
	unlock_page(page0);
	ptlrpc_req_finished(request);

	CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);

	for (i = 1; i < npages; i++) {
		unsigned long offset;
		int ret;

		page = page_pool[i];

		if (rc < 0 || i >= nrdpgs) {
			page_cache_release(page);
			continue;
		}

		SetPageUptodate(page);

		dp = kmap(page);
		hash = le64_to_cpu(dp->ldp_hash_start);
		kunmap(page);

		offset = hash_x_index(hash, hash64);

		prefetchw(&page->flags);
		ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
					    GFP_NOFS);
		if (ret == 0) {
			unlock_page(page);
		} else {
			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
			       offset, ret);
		}
		page_cache_release(page);
	}

	if (page_pool != &page0)
		kfree(page_pool);
	return rc;
}

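/*
 * Drop the kmap and the page reference taken by ll_dir_page_locate()/
 * ll_get_dir_page(); if @remove is set, also evict the page from the page
 * cache so that a later lookup refetches it from the MDS.
 */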
void ll_release_page(struct page *page, int remove)
{
	kunmap(page);
	if (remove) {
		lock_page(page);
		if (likely(page->mapping))
			truncate_complete_page(page->mapping, page);
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * Find, kmap and return page that contains given hash.
 */
static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
				       __u64 *start, __u64 *end)
{
	int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
	struct address_space *mapping = dir->i_mapping;
	/*
	 * Complement of hash is used as an index so that
	 * radix_tree_gang_lookup() can be used to find a page with starting
	 * hash _smaller_ than one we are looking for.
	 */
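	/*
	 * Concretely: a gang lookup from the complemented index returns the
	 * cached page with the greatest start hash that is still <= *hash.
	 */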
	unsigned long offset = hash_x_index(*hash, hash64);
	struct page *page;
	int found;

	spin_lock_irq(&mapping->tree_lock);
	found = radix_tree_gang_lookup(&mapping->page_tree,
				       (void **)&page, offset, 1);
	if (found > 0 && !radix_tree_exceptional_entry(page)) {
		struct lu_dirpage *dp;

		page_cache_get(page);
		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * In contrast to find_lock_page() we are sure that directory
		 * page cannot be truncated (while DLM lock is held) and,
		 * hence, can avoid restart.
		 *
		 * In fact, page cannot be locked here at all, because
		 * ll_dir_filler() does synchronous io.
		 */
		wait_on_page_locked(page);
		if (PageUptodate(page)) {
			dp = kmap(page);
			if (BITS_PER_LONG == 32 && hash64) {
				*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
				*end = le64_to_cpu(dp->ldp_hash_end) >> 32;
				*hash = *hash >> 32;
			} else {
				*start = le64_to_cpu(dp->ldp_hash_start);
				*end = le64_to_cpu(dp->ldp_hash_end);
			}
			LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n",
				 *start, *end, *hash);
			CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n",
			       offset, *start, *end, *hash);
			if (*hash > *end) {
				ll_release_page(page, 0);
				page = NULL;
			} else if (*end != *start && *hash == *end) {
				/*
				 * upon hash collision, remove this page,
				 * otherwise put page reference, and
				 * ll_get_dir_page() will issue RPC to fetch
				 * the page we want.
				 */
				ll_release_page(page,
						le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
				page = NULL;
			}
		} else {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}

	} else {
		spin_unlock_irq(&mapping->tree_lock);
		page = NULL;
	}
	return page;
}

struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
			     struct ll_dir_chain *chain)
{
	ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
	struct address_space *mapping = dir->i_mapping;
	struct lustre_handle lockh;
	struct lu_dirpage *dp;
	struct page *page;
	enum ldlm_mode mode;
	int rc;
	__u64 start = 0;
	__u64 end = 0;
	__u64 lhash = hash;
	struct ll_inode_info *lli = ll_i2info(dir);
	int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;

	mode = LCK_PR;
	rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
			   ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
	if (!rc) {
		struct ldlm_enqueue_info einfo = {
			.ei_type = LDLM_IBITS,
			.ei_mode = mode,
			.ei_cb_bl = ll_md_blocking_ast,
			.ei_cb_cp = ldlm_completion_ast,
		};
		struct lookup_intent it = { .it_op = IT_READDIR };
		struct ptlrpc_request *request;
		struct md_op_data *op_data;

		op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
					     LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data))
			return ERR_CAST(op_data);

		rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
				op_data, &lockh, NULL, 0, NULL, 0);

		ll_finish_md_op_data(op_data);

		request = (struct ptlrpc_request *)it.d.lustre.it_data;
		if (request)
			ptlrpc_req_finished(request);
		if (rc < 0) {
			CERROR("lock enqueue: " DFID " at %llu: rc %d\n",
			       PFID(ll_inode2fid(dir)), hash, rc);
			return ERR_PTR(rc);
		}

		CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
		       dir, dir->i_ino, dir->i_generation);
		md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
				 &it.d.lustre.it_lock_handle, dir, NULL);
	} else {
		/* for cross-ref object, l_ast_data of the lock may not be set,
		 * we reset it here
		 */
		md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
				 dir, NULL);
	}
	ldlm_lock_dump_handle(D_OTHER, &lockh);

	mutex_lock(&lli->lli_readdir_mutex);
	page = ll_dir_page_locate(dir, &lhash, &start, &end);
	if (IS_ERR(page)) {
		CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
		       PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
		goto out_unlock;
	} else if (page) {
		/*
		 * XXX nikita: not entirely correct handling of a corner case:
		 * suppose hash chain of entries with hash value HASH crosses
		 * border between pages P0 and P1. First both P0 and P1 are
		 * cached, seekdir() is called for some entry from the P0 part
		 * of the chain. Later P0 goes out of cache. telldir(HASH)
		 * happens and finds P1, as it starts with matching hash
		 * value. Remaining entries from P0 part of the chain are
		 * skipped. (Is that really a bug?)
		 *
		 * Possible solutions: 0. don't cache P1 in such a case,
		 * handle it as an "overflow" page. 1. invalidate all pages
		 * at once. 2. use HASH|1 as an index for P1.
		 */
		goto hash_collision;
	}

	page = read_cache_page(mapping, hash_x_index(hash, hash64),
			       ll_dir_filler, &lhash);
	if (IS_ERR(page)) {
		CERROR("read cache page: "DFID" at %llu: rc %ld\n",
		       PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
		goto out_unlock;
	}

	wait_on_page_locked(page);
	(void)kmap(page);
	if (!PageUptodate(page)) {
		CERROR("page not updated: "DFID" at %llu: rc %d\n",
		       PFID(ll_inode2fid(dir)), hash, -EIO);
		goto fail;
	}
	if (!PageChecked(page))
		/* XXX: check page format later */
		SetPageChecked(page);
	if (PageError(page)) {
		CERROR("page error: "DFID" at %llu: rc %d\n",
		       PFID(ll_inode2fid(dir)), hash, -EIO);
		goto fail;
	}
hash_collision:
	dp = page_address(page);
	if (BITS_PER_LONG == 32 && hash64) {
		start = le64_to_cpu(dp->ldp_hash_start) >> 32;
		end = le64_to_cpu(dp->ldp_hash_end) >> 32;
		lhash = hash >> 32;
	} else {
		start = le64_to_cpu(dp->ldp_hash_start);
		end = le64_to_cpu(dp->ldp_hash_end);
		lhash = hash;
	}
	if (end == start) {
		LASSERT(start == lhash);
		CWARN("Page-wide hash collision: %llu\n", end);
		if (BITS_PER_LONG == 32 && hash64)
			CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
			      le64_to_cpu(dp->ldp_hash_start),
			      le64_to_cpu(dp->ldp_hash_end), hash);
		/*
		 * Fetch whole overflow chain...
		 *
		 * XXX not yet.
		 */
		goto fail;
	}
out_unlock:
	mutex_unlock(&lli->lli_readdir_mutex);
	ldlm_lock_decref(&lockh, mode);
	return page;

fail:
	ll_release_page(page, 1);
	page = ERR_PTR(-EIO);
	goto out_unlock;
}

int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
	struct ll_inode_info *info = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	__u64 pos = ctx->pos;
	int api32 = ll_need_32bit_api(sbi);
	int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
	struct page *page;
	struct ll_dir_chain chain;
	int done = 0;
	int rc = 0;

	ll_dir_chain_init(&chain);

	page = ll_get_dir_page(inode, pos, &chain);

	while (rc == 0 && !done) {
		struct lu_dirpage *dp;
		struct lu_dirent *ent;

		if (!IS_ERR(page)) {
			/*
			 * If page is empty (end of directory is reached),
			 * use this value.
			 */
			__u64 hash = MDS_DIR_END_OFF;
			__u64 next;

			dp = page_address(page);
			for (ent = lu_dirent_start(dp); ent && !done;
			     ent = lu_dirent_next(ent)) {
				__u16 type;
				int namelen;
				struct lu_fid fid;
				__u64 lhash;
				__u64 ino;

				/*
				 * XXX: implement correct swabbing here.
				 */

				hash = le64_to_cpu(ent->lde_hash);
				if (hash < pos)
					/*
					 * Skip until we find target hash
					 * value.
					 */
					continue;

				namelen = le16_to_cpu(ent->lde_namelen);
				if (namelen == 0)
					/*
					 * Skip dummy record.
					 */
					continue;

				if (api32 && hash64)
					lhash = hash >> 32;
				else
					lhash = hash;
				fid_le_to_cpu(&fid, &ent->lde_fid);
				ino = cl_fid_build_ino(&fid, api32);
				type = ll_dirent_type_get(ent);
				ctx->pos = lhash;
				/* For 'll_nfs_get_name_filldir()', it will try
				 * to access the 'ent' through its 'lde_name',
				 * so the parameter 'name' for 'ctx->actor()'
				 * must be part of the 'ent'.
				 */
				done = !dir_emit(ctx, ent->lde_name,
						 namelen, ino, type);
			}
			next = le64_to_cpu(dp->ldp_hash_end);
			if (!done) {
				pos = next;
				if (pos == MDS_DIR_END_OFF) {
					/*
					 * End of directory reached.
					 */
					done = 1;
					ll_release_page(page, 0);
				} else if (1 /* chain is exhausted */) {
					/*
					 * Normal case: continue to the next
					 * page.
					 */
					ll_release_page(page,
							le32_to_cpu(dp->ldp_flags) &
							LDF_COLLIDE);
					next = pos;
					page = ll_get_dir_page(inode, pos,
							       &chain);
				} else {
					/*
					 * go into overflow page.
					 */
					LASSERT(le32_to_cpu(dp->ldp_flags) &
						LDF_COLLIDE);
					ll_release_page(page, 1);
				}
			} else {
				pos = hash;
				ll_release_page(page, 0);
			}
		} else {
			rc = PTR_ERR(page);
			CERROR("error reading dir "DFID" at %lu: rc %d\n",
			       PFID(&info->lli_fid), (unsigned long)pos, rc);
		}
	}

	ctx->pos = pos;
	ll_dir_chain_fini(&chain);
	return rc;
}

static int ll_readdir(struct file *filp, struct dir_context *ctx)
{
	struct inode *inode = file_inode(filp);
	struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
	int api32 = ll_need_32bit_api(sbi);
	int rc;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
	       inode->i_ino, inode->i_generation,
	       inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);

	if (lfd->lfd_pos == MDS_DIR_END_OFF) {
		/*
		 * end-of-file.
		 */
		rc = 0;
		goto out;
	}

	ctx->pos = lfd->lfd_pos;
	rc = ll_dir_read(inode, ctx);
	lfd->lfd_pos = ctx->pos;
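	/*
	 * lfd_pos keeps the raw hash cookie for the next call; ctx->pos
	 * reported below is converted for 32-bit userspace when necessary.
	 */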
	if (ctx->pos == MDS_DIR_END_OFF) {
		if (api32)
			ctx->pos = LL_DIR_END_OFF_32BIT;
		else
			ctx->pos = LL_DIR_END_OFF;
	} else {
		if (api32 && hash64)
			ctx->pos >>= 32;
	}
	filp->f_version = inode->i_version;

out:
	if (!rc)
		ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);

	return rc;
}

static int ll_send_mgc_param(struct obd_export *mgc, char *string)
{
	struct mgs_send_param *msp;
	int rc = 0;

	msp = kzalloc(sizeof(*msp), GFP_NOFS);
	if (!msp)
		return -ENOMEM;

	strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param));
	rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
				sizeof(struct mgs_send_param), msp, NULL);
	if (rc)
		CERROR("Failed to set parameter: %d\n", rc);
	kfree(msp);

	return rc;
}

static int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
			       char *filename)
{
	struct ptlrpc_request *request = NULL;
	struct md_op_data *op_data;
	struct ll_sb_info *sbi = ll_i2sbi(dir);
	int mode;
	int err;

	mode = (~current_umask() & 0755) | S_IFDIR;
	op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
				     strlen(filename), mode, LUSTRE_OPC_MKDIR,
				     lump);
	if (IS_ERR(op_data)) {
		err = PTR_ERR(op_data);
		goto err_exit;
	}

	op_data->op_cli_flags |= CLI_SET_MEA;
	err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
			from_kuid(&init_user_ns, current_fsuid()),
			from_kgid(&init_user_ns, current_fsgid()),
			cfs_curproc_cap_pack(), 0, &request);
	ll_finish_md_op_data(op_data);
err_exit:
	ptlrpc_req_finished(request);
	return err;
}

int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
		     int set_default)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	struct ptlrpc_request *req = NULL;
	int rc = 0;
	struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
	struct obd_device *mgc = lsi->lsi_mgc;
	int lum_size;

	if (lump) {
		/*
		 * This is coming from userspace, so should be in
		 * local endian. But the MDS would like it in little
		 * endian, so we swab it before we send it.
		 */
		switch (lump->lmm_magic) {
		case LOV_USER_MAGIC_V1: {
			if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
				lustre_swab_lov_user_md_v1(lump);
			lum_size = sizeof(struct lov_user_md_v1);
			break;
		}
		case LOV_USER_MAGIC_V3: {
			if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
				lustre_swab_lov_user_md_v3(
					(struct lov_user_md_v3 *)lump);
			lum_size = sizeof(struct lov_user_md_v3);
			break;
		}
		default: {
			CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
			       lump->lmm_magic, LOV_USER_MAGIC_V1,
			       LOV_USER_MAGIC_V3);
			return -EINVAL;
		}
		}
	} else {
		lum_size = sizeof(struct lov_user_md_v1);
	}

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
		op_data->op_cli_flags |= CLI_SET_MEA;

	/* swabbing is done in lov_setstripe() on server side */
	rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
			NULL, 0, &req, NULL);
	ll_finish_md_op_data(op_data);
	ptlrpc_req_finished(req);
	if (rc) {
		if (rc != -EPERM && rc != -EACCES)
			CERROR("mdc_setattr fails: rc = %d\n", rc);
	}

	/* In the following we use the fact that LOV_USER_MAGIC_V1 and
	 * LOV_USER_MAGIC_V3 have the same initial fields so we do not
	 * need to make the distinction between the 2 versions
	 */
	if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
		char *param = NULL;
		char *buf;

		param = kzalloc(MGS_PARAM_MAXLEN, GFP_NOFS);
		if (!param)
			return -ENOMEM;

		buf = param;
		/* Get fsname and assume devname to be -MDT0000. */
		ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
		strcat(buf, "-MDT0000.lov");
		buf += strlen(buf);
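		/*
		 * buf now points just past e.g. "myfs-MDT0000.lov" (the
		 * fsname "myfs" is hypothetical); each setting below is
		 * appended to that prefix before being sent to the MGS.
		 */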

		/* Set root stripesize */
		sprintf(buf, ".stripesize=%u",
			lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
		rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
		if (rc)
			goto end;

		/* Set root stripecount */
		sprintf(buf, ".stripecount=%hd",
			lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
		rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
		if (rc)
			goto end;

		/* Set root stripeoffset */
		sprintf(buf, ".stripeoffset=%hd",
			lump ? le16_to_cpu(lump->lmm_stripe_offset) :
			(typeof(lump->lmm_stripe_offset))(-1));
		rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);

end:
		kfree(param);
	}
	return rc;
}

int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
		     int *lmm_size, struct ptlrpc_request **request)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct mdt_body *body;
	struct lov_mds_md *lmm = NULL;
	struct ptlrpc_request *req = NULL;
	int rc, lmmsize;
	struct md_op_data *op_data;

	rc = ll_get_default_mdsize(sbi, &lmmsize);
	if (rc)
		return rc;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
				     0, lmmsize, LUSTRE_OPC_ANY,
				     NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
	rc = md_getattr(sbi->ll_md_exp, op_data, &req);
	ll_finish_md_op_data(op_data);
	if (rc < 0) {
		CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
		       inode->i_ino,
		       inode->i_generation, rc);
		goto out;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);

	lmmsize = body->eadatasize;

	if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
	    lmmsize == 0) {
		rc = -ENODATA;
		goto out;
	}

	lmm = req_capsule_server_sized_get(&req->rq_pill,
					   &RMF_MDT_MD, lmmsize);

	/*
	 * This is coming from the MDS, so is probably in
	 * little endian. We convert it to host endian before
	 * passing it to userspace.
	 */
	/* We don't swab objects for directories */
	switch (le32_to_cpu(lmm->lmm_magic)) {
	case LOV_MAGIC_V1:
		if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
			lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
		break;
	case LOV_MAGIC_V3:
		if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC)
			lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
		break;
	default:
		CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
		rc = -EPROTO;
	}
out:
	*lmmp = lmm;
	*lmm_size = lmmsize;
	*request = req;
	return rc;
}

/*
 * Get MDT index for the inode.
 */
int ll_get_mdt_idx(struct inode *inode)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct md_op_data *op_data;
	int rc, mdtidx;

	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
				     0, LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data))
		return PTR_ERR(op_data);

	op_data->op_flags |= MF_GET_MDT_IDX;
	rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
	mdtidx = op_data->op_mds;
	ll_finish_md_op_data(op_data);
	if (rc < 0) {
		CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
		return rc;
	}
	return mdtidx;
}

/**
 * Generic handler to do any pre-copy work.
 *
 * It sends an initial hsm_progress (with extent length == 0) to the
 * coordinator as a first notification that the real work has started.
 *
 * Moreover, for an ARCHIVE request, it will sample the file data version
 * and store it in \a copy.
 *
 * \return 0 on success.
 */
static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct hsm_progress_kernel hpk;
	int rc;

	/* Forge a hsm_progress based on data from copy. */
	hpk.hpk_fid = copy->hc_hai.hai_fid;
	hpk.hpk_cookie = copy->hc_hai.hai_cookie;
	hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
	hpk.hpk_extent.length = 0;
	hpk.hpk_flags = 0;
	hpk.hpk_errval = 0;
	hpk.hpk_data_version = 0;

	/* For archive request, we need to read the current file version. */
	if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
		struct inode *inode;
		__u64 data_version = 0;

		/* Get inode for this fid */
		inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
		if (IS_ERR(inode)) {
			hpk.hpk_flags |= HP_FLAG_RETRY;
			/* hpk_errval is >= 0 */
			hpk.hpk_errval = -PTR_ERR(inode);
			rc = PTR_ERR(inode);
			goto progress;
		}

		/* Read current file data version */
		rc = ll_data_version(inode, &data_version, 1);
		iput(inode);
		if (rc != 0) {
			CDEBUG(D_HSM, "Could not read file data version of "
			       DFID" (rc = %d). Archive request (%#llx) could not be done.\n",
			       PFID(&copy->hc_hai.hai_fid), rc,
			       copy->hc_hai.hai_cookie);
			hpk.hpk_flags |= HP_FLAG_RETRY;
			/* hpk_errval must be >= 0 */
			hpk.hpk_errval = -rc;
			goto progress;
		}

		/* Store in the hsm_copy for later copytool use.
		 * Always modified even if no lsm.
		 */
		copy->hc_data_version = data_version;
	}

progress:
	rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
			   &hpk, NULL);

	return rc;
}

/**
 * Generic handler to do any post-copy work.
 *
 * It will send the last hsm_progress update to coordinator to inform it
 * that copy is finished and whether it was successful or not.
 *
 * Moreover,
 * - for ARCHIVE request, it will sample the file data version and compare it
 *   with the version saved in ll_ioc_copy_start(). If they do not match, copy
 *   will be considered as failed.
 * - for RESTORE request, it will sample the file data version and send it to
 *   coordinator which is useful if the file was imported as 'released'.
 *
 * \return 0 on success.
 */
static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
{
	struct ll_sb_info *sbi = ll_s2sbi(sb);
	struct hsm_progress_kernel hpk;
	int rc;

	/* If you modify the logic here, also check llapi_hsm_copy_end(). */
	/* Take care: copy->hc_hai.hai_action, len, gid and data are not
	 * initialized if copy_end was called with copy == NULL.
	 */

	/* Forge a hsm_progress based on data from copy. */
	hpk.hpk_fid = copy->hc_hai.hai_fid;
	hpk.hpk_cookie = copy->hc_hai.hai_cookie;
	hpk.hpk_extent = copy->hc_hai.hai_extent;
	hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
	hpk.hpk_errval = copy->hc_errval;
	hpk.hpk_data_version = 0;

	/* For archive request, we need to check the file data was not changed.
	 *
	 * For restore request, we need to send the file data version, this is
	 * useful when the file was created using hsm_import.
	 */
	if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
	     (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
	    (copy->hc_errval == 0)) {
		struct inode *inode;
		__u64 data_version = 0;

		/* Get lsm for this fid */
		inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
		if (IS_ERR(inode)) {
			hpk.hpk_flags |= HP_FLAG_RETRY;
			/* hpk_errval must be >= 0 */
			hpk.hpk_errval = -PTR_ERR(inode);
			rc = PTR_ERR(inode);
			goto progress;
		}

		rc = ll_data_version(inode, &data_version,
				     copy->hc_hai.hai_action == HSMA_ARCHIVE);
		iput(inode);
		if (rc) {
			CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
			if (hpk.hpk_errval == 0)
				hpk.hpk_errval = -rc;
			goto progress;
		}

		/* Store in the hsm_copy for later copytool use.
		 * Always modified even if no lsm.
		 */
		hpk.hpk_data_version = data_version;

		/* File could have been stripped during archiving, so we need
		 * to check anyway.
		 */
		if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
		    (copy->hc_data_version != data_version)) {
			CDEBUG(D_HSM, "File data version mismatch. File content was changed during archiving. "
			       DFID", start:%#llx current:%#llx\n",
			       PFID(&copy->hc_hai.hai_fid),
			       copy->hc_data_version, data_version);
			/* File was changed, send error to cdt. Do not ask for
			 * retry because if a file is modified frequently,
			 * the cdt will loop on retried archive requests.
			 * The policy engine will ask for a new archive later
			 * when the file will not be modified for some tunable
			 * time.
			 */
			/* we do not notify caller */
			hpk.hpk_flags &= ~HP_FLAG_RETRY;
			/* hpk_errval must be >= 0 */
			hpk.hpk_errval = EBUSY;
		}
	}

progress:
	rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
			   &hpk, NULL);

	return rc;
}

static int copy_and_ioctl(int cmd, struct obd_export *exp,
			  const void __user *data, size_t size)
{
	void *copy;
	int rc;

	copy = kzalloc(size, GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	if (copy_from_user(copy, data, size)) {
		rc = -EFAULT;
		goto out;
	}

	rc = obd_iocontrol(cmd, exp, size, copy, NULL);
out:
	kfree(copy);

	return rc;
}

static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
{
	int cmd = qctl->qc_cmd;
	int type = qctl->qc_type;
	int id = qctl->qc_id;
	int valid = qctl->qc_valid;
	int rc = 0;

	switch (cmd) {
	case LUSTRE_Q_INVALIDATE:
	case LUSTRE_Q_FINVALIDATE:
	case Q_QUOTAON:
	case Q_QUOTAOFF:
	case Q_SETQUOTA:
	case Q_SETINFO:
		if (!capable(CFS_CAP_SYS_ADMIN) ||
		    sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EPERM;
		break;
	case Q_GETQUOTA:
		if (((type == USRQUOTA &&
		      !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
		     (type == GRPQUOTA &&
		      !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
		    (!capable(CFS_CAP_SYS_ADMIN) ||
		     sbi->ll_flags & LL_SBI_RMT_CLIENT))
			return -EPERM;
		break;
	case Q_GETINFO:
		break;
	default:
		CERROR("unsupported quotactl op: %#x\n", cmd);
		return -ENOTTY;
	}

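	/*
	 * Per-target queries (QC_MDTIDX/QC_OSTIDX/QC_UUID) are rewritten
	 * into their OBD-level counterparts and routed to the matching
	 * export; QC_GENERAL goes through obd_quotactl() against the MDT.
	 */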
	if (valid != QC_GENERAL) {
		if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EOPNOTSUPP;

		if (cmd == Q_GETINFO)
			qctl->qc_cmd = Q_GETOINFO;
		else if (cmd == Q_GETQUOTA)
			qctl->qc_cmd = Q_GETOQUOTA;
		else
			return -EINVAL;

		switch (valid) {
		case QC_MDTIDX:
			rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
					   sizeof(*qctl), qctl, NULL);
			break;
		case QC_OSTIDX:
			rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
					   sizeof(*qctl), qctl, NULL);
			break;
		case QC_UUID:
			rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
					   sizeof(*qctl), qctl, NULL);
			if (rc == -EAGAIN)
				rc = obd_iocontrol(OBD_IOC_QUOTACTL,
						   sbi->ll_dt_exp,
						   sizeof(*qctl), qctl, NULL);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		if (rc)
			return rc;

		qctl->qc_cmd = cmd;
	} else {
		struct obd_quotactl *oqctl;

		oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
		if (!oqctl)
			return -ENOMEM;

		QCTL_COPY(oqctl, qctl);
		rc = obd_quotactl(sbi->ll_md_exp, oqctl);
		if (rc) {
			if (rc != -EALREADY && cmd == Q_QUOTAON) {
				oqctl->qc_cmd = Q_QUOTAOFF;
				obd_quotactl(sbi->ll_md_exp, oqctl);
			}
			kfree(oqctl);
			return rc;
		}
		/* If QIF_SPACE is not set, client should collect the
		 * space usage from OSSs by itself
		 */
		if (cmd == Q_GETQUOTA &&
		    !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
		    !oqctl->qc_dqblk.dqb_curspace) {
			struct obd_quotactl *oqctl_tmp;

			oqctl_tmp = kzalloc(sizeof(*oqctl_tmp), GFP_NOFS);
			if (!oqctl_tmp) {
				rc = -ENOMEM;
				goto out;
			}

			oqctl_tmp->qc_cmd = Q_GETOQUOTA;
			oqctl_tmp->qc_id = oqctl->qc_id;
			oqctl_tmp->qc_type = oqctl->qc_type;

			/* collect space usage from OSTs */
			oqctl_tmp->qc_dqblk.dqb_curspace = 0;
			rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
			if (!rc || rc == -EREMOTEIO) {
				oqctl->qc_dqblk.dqb_curspace =
					oqctl_tmp->qc_dqblk.dqb_curspace;
				oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
			}

			/* collect space & inode usage from MDTs */
			oqctl_tmp->qc_dqblk.dqb_curspace = 0;
			oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
			rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
			if (!rc || rc == -EREMOTEIO) {
				oqctl->qc_dqblk.dqb_curspace +=
					oqctl_tmp->qc_dqblk.dqb_curspace;
				oqctl->qc_dqblk.dqb_curinodes =
					oqctl_tmp->qc_dqblk.dqb_curinodes;
				oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
			} else {
				oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
			}

			kfree(oqctl_tmp);
		}
out:
		QCTL_COPY(qctl, oqctl);
		kfree(oqctl);
	}

	return rc;
}

/* This function tries to get a single name component,
 * to send to the server. No actual path traversal involved,
 * so we limit to NAME_MAX
 */
static char *ll_getname(const char __user *filename)
{
	int ret = 0, len;
	char *tmp;

	tmp = kzalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);

	len = strncpy_from_user(tmp, filename, NAME_MAX + 1);
	if (len < 0)
		ret = len;
	else if (len == 0)
		ret = -ENOENT;
	else if (len > NAME_MAX && tmp[NAME_MAX] != 0)
		ret = -ENAMETOOLONG;

	if (ret) {
		kfree(tmp);
		tmp = ERR_PTR(ret);
	}
	return tmp;
}

#define ll_putname(filename) kfree(filename)

static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_ioctl_data *data;
	int rc = 0;

	CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
	       inode->i_ino, inode->i_generation, inode, cmd);

	/* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
	if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
		return -ENOTTY;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
	switch (cmd) {
	case FSFILT_IOC_GETFLAGS:
	case FSFILT_IOC_SETFLAGS:
		return ll_iocontrol(inode, file, cmd, arg);
	case FSFILT_IOC_GETVERSION_OLD:
	case FSFILT_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);
	/* We need to special case any other ioctls we want to handle,
	 * to send them to the MDS/OST as appropriate and to properly
	 * network encode the arg field.
	case FSFILT_IOC_SETVERSION_OLD:
	case FSFILT_IOC_SETVERSION:
	*/
	case LL_IOC_GET_MDTIDX: {
		int mdtidx;

		mdtidx = ll_get_mdt_idx(inode);
		if (mdtidx < 0)
			return mdtidx;

		if (put_user((int)mdtidx, (int __user *)arg))
			return -EFAULT;

		return 0;
	}
	case IOC_MDC_LOOKUP: {
		struct ptlrpc_request *request = NULL;
		int namelen, len = 0;
		char *buf = NULL;
		char *filename;
		struct md_op_data *op_data;

		rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
		if (rc)
			return rc;
		data = (void *)buf;

		filename = data->ioc_inlbuf1;
		namelen = strlen(filename);

		if (namelen < 1) {
			CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
			rc = -EINVAL;
			goto out_free;
		}

		op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
					     0, LUSTRE_OPC_ANY, NULL);
		if (IS_ERR(op_data)) {
			rc = PTR_ERR(op_data);
			goto out_free;
		}

		op_data->op_valid = OBD_MD_FLID;
		rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
		ll_finish_md_op_data(op_data);
		if (rc < 0) {
			CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
			goto out_free;
		}
		ptlrpc_req_finished(request);
out_free:
		obd_ioctl_freedata(buf, len);
		return rc;
	}
	case LL_IOC_LMV_SETSTRIPE: {
		struct lmv_user_md *lum;
		char *buf = NULL;
		char *filename;
		int namelen = 0;
		int lumlen = 0;
		int len;
		int rc;

		rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
		if (rc)
			return rc;

		data = (void *)buf;
		if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
		    data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
			rc = -EINVAL;
			goto lmv_out_free;
		}

		filename = data->ioc_inlbuf1;
		namelen = data->ioc_inllen1;

		if (namelen < 1) {
			CDEBUG(D_INFO, "LL_IOC_LMV_SETSTRIPE missing filename\n");
			rc = -EINVAL;
			goto lmv_out_free;
		}
		lum = (struct lmv_user_md *)data->ioc_inlbuf2;
		lumlen = data->ioc_inllen2;

		if (lum->lum_magic != LMV_USER_MAGIC ||
		    lumlen != sizeof(*lum)) {
			CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
			       filename, lum->lum_magic, lumlen, -EINVAL);
			rc = -EINVAL;
			goto lmv_out_free;
		}

		/**
		 * ll_dir_setdirstripe will be used to set dir stripe
		 *  mdc_create--->mdt_reint_create (with dirstripe)
		 */
		rc = ll_dir_setdirstripe(inode, lum, filename);
lmv_out_free:
		obd_ioctl_freedata(buf, len);
		return rc;

	}
	case LL_IOC_LOV_SETSTRIPE: {
		struct lov_user_md_v3 lumv3;
		struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
		struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
		struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;

		int set_default = 0;

		LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
		LASSERT(sizeof(lumv3.lmm_objects[0]) ==
			sizeof(lumv3p->lmm_objects[0]));
		/* first try with v1 which is smaller than v3 */
		if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
			return -EFAULT;

		if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
			if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
				return -EFAULT;
		}

		if (is_root_inode(inode))
			set_default = 1;

		/* in v1 and v3 cases lumv1 points to data */
		rc = ll_dir_setstripe(inode, lumv1, set_default);

		return rc;
	}
	case LL_IOC_LMV_GETSTRIPE: {
		struct lmv_user_md __user *lump = (void __user *)arg;
		struct lmv_user_md lum;
		struct lmv_user_md *tmp;
		int lum_size;
		int rc = 0;
		int mdtindex;

		if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
			return -EFAULT;

		if (lum.lum_magic != LMV_MAGIC_V1)
			return -EINVAL;

		lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
		tmp = kzalloc(lum_size, GFP_NOFS);
		if (!tmp) {
			rc = -ENOMEM;
			goto free_lmv;
		}

		*tmp = lum;
		tmp->lum_type = LMV_STRIPE_TYPE;
		tmp->lum_stripe_count = 1;
		mdtindex = ll_get_mdt_idx(inode);
		if (mdtindex < 0) {
			rc = mdtindex;
			goto free_lmv;
		}

		tmp->lum_stripe_offset = mdtindex;
		tmp->lum_objects[0].lum_mds = mdtindex;
		memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
		       sizeof(struct lu_fid));
		if (copy_to_user((void __user *)arg, tmp, lum_size)) {
			rc = -EFAULT;
			goto free_lmv;
		}
free_lmv:
		kfree(tmp);
		return rc;
	}
	case LL_IOC_LOV_SWAP_LAYOUTS:
		return -EPERM;
	case LL_IOC_OBD_STATFS:
		return ll_obd_statfs(inode, (void __user *)arg);
	case LL_IOC_LOV_GETSTRIPE:
	case LL_IOC_MDC_GETINFO:
	case IOC_MDC_GETFILEINFO:
	case IOC_MDC_GETFILESTRIPE: {
		struct ptlrpc_request *request = NULL;
		struct lov_user_md __user *lump;
		struct lov_mds_md *lmm = NULL;
		struct mdt_body *body;
		char *filename = NULL;
		int lmmsize;

		if (cmd == IOC_MDC_GETFILEINFO ||
		    cmd == IOC_MDC_GETFILESTRIPE) {
			filename = ll_getname((const char __user *)arg);
			if (IS_ERR(filename))
				return PTR_ERR(filename);

			rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
						      &lmmsize, &request);
		} else {
			rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
		}

		if (request) {
			body = req_capsule_server_get(&request->rq_pill,
						      &RMF_MDT_BODY);
			LASSERT(body);
		} else {
			goto out_req;
		}

		if (rc < 0) {
			if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
					       cmd == LL_IOC_MDC_GETINFO)) {
				rc = 0;
				goto skip_lmm;
			}
			goto out_req;
		}

		if (cmd == IOC_MDC_GETFILESTRIPE ||
		    cmd == LL_IOC_LOV_GETSTRIPE) {
			lump = (struct lov_user_md __user *)arg;
		} else {
			struct lov_user_mds_data __user *lmdp;

			lmdp = (struct lov_user_mds_data __user *)arg;
			lump = &lmdp->lmd_lmm;
		}
		if (copy_to_user(lump, lmm, lmmsize)) {
			if (copy_to_user(lump, lmm, sizeof(*lump))) {
				rc = -EFAULT;
				goto out_req;
			}
			rc = -EOVERFLOW;
		}
skip_lmm:
		if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
			struct lov_user_mds_data __user *lmdp;
			lstat_t st = { 0 };

			st.st_dev = inode->i_sb->s_dev;
			st.st_mode = body->mode;
			st.st_nlink = body->nlink;
			st.st_uid = body->uid;
			st.st_gid = body->gid;
			st.st_rdev = body->rdev;
			st.st_size = body->size;
			st.st_blksize = PAGE_CACHE_SIZE;
			st.st_blocks = body->blocks;
			st.st_atime = body->atime;
			st.st_mtime = body->mtime;
			st.st_ctime = body->ctime;
			st.st_ino = inode->i_ino;

			lmdp = (struct lov_user_mds_data __user *)arg;
			if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
				rc = -EFAULT;
				goto out_req;
			}
		}

out_req:
		ptlrpc_req_finished(request);
		if (filename)
			ll_putname(filename);
		return rc;
	}
	case IOC_LOV_GETINFO: {
		struct lov_user_mds_data __user *lumd;
		struct lov_stripe_md *lsm;
		struct lov_user_md __user *lum;
		struct lov_mds_md *lmm;
		int lmmsize;
		lstat_t st;

		lumd = (struct lov_user_mds_data __user *)arg;
		lum = &lumd->lmd_lmm;

		rc = ll_get_max_mdsize(sbi, &lmmsize);
		if (rc)
			return rc;

		lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
		if (!lmm)
			return -ENOMEM;
		if (copy_from_user(lmm, lum, lmmsize)) {
			rc = -EFAULT;
			goto free_lmm;
		}

		switch (lmm->lmm_magic) {
		case LOV_USER_MAGIC_V1:
			if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
				break;
			/* swab objects first so that stripes num will be sane */
			lustre_swab_lov_user_md_objects(
				((struct lov_user_md_v1 *)lmm)->lmm_objects,
				((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
			lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
			break;
		case LOV_USER_MAGIC_V3:
			if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
				break;
			/* swab objects first so that stripes num will be sane */
			lustre_swab_lov_user_md_objects(
				((struct lov_user_md_v3 *)lmm)->lmm_objects,
				((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
			lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
			break;
		default:
			rc = -EINVAL;
			goto free_lmm;
		}

		rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
		if (rc < 0)
			goto free_lmm;

		/* Perform glimpse_size operation. */
		memset(&st, 0, sizeof(st));

		rc = ll_glimpse_ioctl(sbi, lsm, &st);
		if (rc)
			goto free_lsm;

		if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
			rc = -EFAULT;
			goto free_lsm;
		}

free_lsm:
		obd_free_memmd(sbi->ll_dt_exp, &lsm);
free_lmm:
		kvfree(lmm);
		return rc;
	}
	case OBD_IOC_LLOG_CATINFO: {
		return -EOPNOTSUPP;
	}
	case OBD_IOC_QUOTACHECK: {
		struct obd_quotactl *oqctl;
		int error = 0;

		if (!capable(CFS_CAP_SYS_ADMIN) ||
		    sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EPERM;

		oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
		if (!oqctl)
			return -ENOMEM;
		oqctl->qc_type = arg;
		rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
		if (rc < 0) {
			CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
			error = rc;
		}

		rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
		if (rc < 0)
			CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);

		kfree(oqctl);
		return error ?: rc;
	}
	case OBD_IOC_POLL_QUOTACHECK: {
		struct if_quotacheck *check;

		if (!capable(CFS_CAP_SYS_ADMIN) ||
		    sbi->ll_flags & LL_SBI_RMT_CLIENT)
			return -EPERM;

		check = kzalloc(sizeof(*check), GFP_NOFS);
		if (!check)
			return -ENOMEM;

		rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
				   NULL);
		if (rc) {
			CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
			if (copy_to_user((void __user *)arg, check,
					 sizeof(*check)))
				CDEBUG(D_QUOTA, "copy_to_user failed\n");
			goto out_poll;
		}

		rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
				   NULL);
		if (rc) {
			CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
			if (copy_to_user((void __user *)arg, check,
					 sizeof(*check)))
				CDEBUG(D_QUOTA, "copy_to_user failed\n");
			goto out_poll;
		}
out_poll:
		kfree(check);
		return rc;
	}
	case LL_IOC_QUOTACTL: {
		struct if_quotactl *qctl;

		qctl = kzalloc(sizeof(*qctl), GFP_NOFS);
		if (!qctl)
			return -ENOMEM;

		if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) {
			rc = -EFAULT;
			goto out_quotactl;
		}

		rc = quotactl_ioctl(sbi, qctl);

		if (rc == 0 && copy_to_user((void __user *)arg, qctl,
					    sizeof(*qctl)))
			rc = -EFAULT;

out_quotactl:
		kfree(qctl);
		return rc;
	}
	case OBD_IOC_GETDTNAME:
	case OBD_IOC_GETMDNAME:
		return ll_get_obd_name(inode, cmd, arg);
	case LL_IOC_FLUSHCTX:
		return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
	case LL_IOC_RMTACL: {
		if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
			struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

			rc = rct_add(&sbi->ll_rct, current_pid(), arg);
			if (!rc)
				fd->fd_flags |= LL_FILE_RMTACL;
			return rc;
		}
		return 0;
	}
#endif
	case LL_IOC_GETOBDCOUNT: {
		int count, vallen;
		struct obd_export *exp;

		if (copy_from_user(&count, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		/* get ost count when count is zero, get mdt count otherwise */
		exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
		vallen = sizeof(count);
		rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
				  KEY_TGT_COUNT, &vallen, &count, NULL);
		if (rc) {
			CERROR("get target count failed: %d\n", rc);
			return rc;
		}

		if (copy_to_user((int __user *)arg, &count, sizeof(int)))
			return -EFAULT;

		return 0;
	}
	case LL_IOC_PATH2FID:
		if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
				 sizeof(struct lu_fid)))
			return -EFAULT;
		return 0;
	case LL_IOC_GET_CONNECT_FLAGS: {
		return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL,
				     (void __user *)arg);
	}
	case OBD_IOC_CHANGELOG_SEND:
	case OBD_IOC_CHANGELOG_CLEAR:
		if (!capable(CFS_CAP_SYS_ADMIN))
			return -EPERM;

		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
				    sizeof(struct ioc_changelog));
		return rc;
	case OBD_IOC_FID2PATH:
		return ll_fid2path(inode, (void __user *)arg);
	case LL_IOC_HSM_REQUEST: {
		struct hsm_user_request *hur;
		ssize_t totalsize;

		hur = memdup_user((void __user *)arg, sizeof(*hur));
		if (IS_ERR(hur))
			return PTR_ERR(hur);

		/* Compute the whole struct size */
		totalsize = hur_len(hur);
		kfree(hur);
		if (totalsize < 0)
			return -E2BIG;

		/* Final size will be more than double totalsize */
		if (totalsize >= MDS_MAXREQSIZE / 3)
			return -E2BIG;

		hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
		if (!hur)
			return -ENOMEM;

		/* Copy the whole struct */
		if (copy_from_user(hur, (void __user *)arg, totalsize)) {
			kvfree(hur);
			return -EFAULT;
		}

		if (hur->hur_request.hr_action == HUA_RELEASE) {
			const struct lu_fid *fid;
			struct inode *f;
			int i;

			for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
				fid = &hur->hur_user_item[i].hui_fid;
				f = search_inode_for_lustre(inode->i_sb, fid);
				if (IS_ERR(f)) {
					rc = PTR_ERR(f);
					break;
				}

				rc = ll_hsm_release(f);
				iput(f);
				if (rc != 0)
					break;
			}
		} else {
			rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
					   hur, NULL);
		}

		kvfree(hur);

		return rc;
	}
	case LL_IOC_HSM_PROGRESS: {
		struct hsm_progress_kernel hpk;
		struct hsm_progress hp;

		if (copy_from_user(&hp, (void __user *)arg, sizeof(hp)))
			return -EFAULT;

		hpk.hpk_fid = hp.hp_fid;
		hpk.hpk_cookie = hp.hp_cookie;
		hpk.hpk_extent = hp.hp_extent;
		hpk.hpk_flags = hp.hp_flags;
		hpk.hpk_errval = hp.hp_errval;
		hpk.hpk_data_version = 0;

		/* File may not exist in Lustre; all progress
		 * reported to Lustre root
		 */
		rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
				   NULL);
		return rc;
	}
	case LL_IOC_HSM_CT_START:
		rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
				    sizeof(struct lustre_kernelcomm));
		return rc;

	case LL_IOC_HSM_COPY_START: {
		struct hsm_copy *copy;
		int rc;

		copy = memdup_user((char __user *)arg, sizeof(*copy));
		if (IS_ERR(copy))
			return PTR_ERR(copy);

		rc = ll_ioc_copy_start(inode->i_sb, copy);
		if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
			rc = -EFAULT;

		kfree(copy);
		return rc;
	}
	case LL_IOC_HSM_COPY_END: {
		struct hsm_copy *copy;
		int rc;

		copy = memdup_user((char __user *)arg, sizeof(*copy));
		if (IS_ERR(copy))
			return PTR_ERR(copy);

		rc = ll_ioc_copy_end(inode->i_sb, copy);
		if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
			rc = -EFAULT;

		kfree(copy);
		return rc;
	}
	default:
		return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
				     (void __user *)arg);
	}
}

static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	int api32 = ll_need_32bit_api(sbi);
	loff_t ret = -EINVAL;

	inode_lock(inode);
	switch (origin) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		if (offset > 0)
			goto out;
		if (api32)
			offset += LL_DIR_END_OFF_32BIT;
		else
			offset += LL_DIR_END_OFF;
		break;
	default:
		goto out;
	}

	if (offset >= 0 &&
	    ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
	     (!api32 && offset <= LL_DIR_END_OFF))) {
		if (offset != file->f_pos) {
			if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
			    (!api32 && offset == LL_DIR_END_OFF))
				fd->lfd_pos = MDS_DIR_END_OFF;
			else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
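				/* 32-bit cookie from userspace, 64-bit hash
				 * on the wire: restore the high word
				 */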
				fd->lfd_pos = offset << 32;
			else
				fd->lfd_pos = offset;
			file->f_pos = offset;
			file->f_version = 0;
		}
		ret = offset;
	}

out:
	inode_unlock(inode);
	return ret;
}

static int ll_dir_open(struct inode *inode, struct file *file)
{
	return ll_file_open(inode, file);
}

static int ll_dir_release(struct inode *inode, struct file *file)
{
	return ll_file_release(inode, file);
}

const struct file_operations ll_dir_operations = {
	.llseek		= ll_dir_seek,
	.open		= ll_dir_open,
	.release	= ll_dir_release,
	.read		= generic_read_dir,
	.iterate	= ll_readdir,
	.unlocked_ioctl	= ll_dir_ioctl,
	.fsync		= ll_fsync,
};