Merge remote-tracking branch 'staging/staging-next'
[deliverable/linux.git] / drivers / staging / lustre / lustre / llite / vvp_dev.c
CommitLineData
d7e09d03
PT
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
6a5b99a4 18 * http://www.gnu.org/licenses/gpl-2.0.html
d7e09d03 19 *
d7e09d03
PT
20 * GPL HEADER END
21 */
22/*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
1dc563a6 26 * Copyright (c) 2012, 2015, Intel Corporation.
d7e09d03
PT
27 */
28/*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * cl_device and cl_device_type implementation for VVP layer.
33 *
34 * Author: Nikita Danilov <nikita.danilov@sun.com>
d9d47901 35 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
d7e09d03
PT
36 */
37
38#define DEBUG_SUBSYSTEM S_LLITE
39
67a235f5
GKH
40#include "../include/obd.h"
41#include "../include/lustre_lite.h"
2d95f10e 42#include "llite_internal.h"
d7e09d03
PT
43#include "vvp_internal.h"
44
45/*****************************************************************************
46 *
47 * Vvp device and device type functions.
48 *
49 */
50
51/*
52 * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to historical
53 * "llite_" (var. "ll_") prefix.
54 */
55
9989a58e 56static struct kmem_cache *ll_thread_kmem;
4a4eee07 57struct kmem_cache *vvp_lock_kmem;
8c7b0e1a 58struct kmem_cache *vvp_object_kmem;
103b8bda 59struct kmem_cache *vvp_req_kmem;
d7e09d03 60static struct kmem_cache *vvp_session_kmem;
9acc4500
JH
61static struct kmem_cache *vvp_thread_kmem;
62
d7e09d03 63static struct lu_kmem_descr vvp_caches[] = {
9989a58e
JH
64 {
65 .ckd_cache = &ll_thread_kmem,
66 .ckd_name = "ll_thread_kmem",
67 .ckd_size = sizeof(struct ll_thread_info),
68 },
4a4eee07
JH
69 {
70 .ckd_cache = &vvp_lock_kmem,
71 .ckd_name = "vvp_lock_kmem",
72 .ckd_size = sizeof(struct vvp_lock),
73 },
8c7b0e1a
JH
74 {
75 .ckd_cache = &vvp_object_kmem,
76 .ckd_name = "vvp_object_kmem",
77 .ckd_size = sizeof(struct vvp_object),
78 },
103b8bda
JH
79 {
80 .ckd_cache = &vvp_req_kmem,
81 .ckd_name = "vvp_req_kmem",
82 .ckd_size = sizeof(struct vvp_req),
83 },
d7e09d03
PT
84 {
85 .ckd_cache = &vvp_session_kmem,
86 .ckd_name = "vvp_session_kmem",
245cbcff 87 .ckd_size = sizeof(struct vvp_session)
d7e09d03 88 },
9acc4500
JH
89 {
90 .ckd_cache = &vvp_thread_kmem,
91 .ckd_name = "vvp_thread_kmem",
92 .ckd_size = sizeof(struct vvp_thread_info),
93 },
d7e09d03
PT
94 {
95 .ckd_cache = NULL
96 }
97};
98
9989a58e
JH
99static void *ll_thread_key_init(const struct lu_context *ctx,
100 struct lu_context_key *key)
d7e09d03
PT
101{
102 struct vvp_thread_info *info;
103
9989a58e 104 info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
6e16818b 105 if (!info)
d7e09d03
PT
106 info = ERR_PTR(-ENOMEM);
107 return info;
108}
109
9989a58e
JH
110static void ll_thread_key_fini(const struct lu_context *ctx,
111 struct lu_context_key *key, void *data)
d7e09d03
PT
112{
113 struct vvp_thread_info *info = data;
245cbcff 114
9989a58e 115 kmem_cache_free(ll_thread_kmem, info);
d7e09d03
PT
116}
117
9989a58e
JH
118struct lu_context_key ll_thread_key = {
119 .lct_tags = LCT_CL_THREAD,
120 .lct_init = ll_thread_key_init,
121 .lct_fini = ll_thread_key_fini
122};
123
d7e09d03
PT
124static void *vvp_session_key_init(const struct lu_context *ctx,
125 struct lu_context_key *key)
126{
127 struct vvp_session *session;
128
21068c46 129 session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS);
6e16818b 130 if (!session)
d7e09d03
PT
131 session = ERR_PTR(-ENOMEM);
132 return session;
133}
134
135static void vvp_session_key_fini(const struct lu_context *ctx,
136 struct lu_context_key *key, void *data)
137{
138 struct vvp_session *session = data;
245cbcff 139
50d30362 140 kmem_cache_free(vvp_session_kmem, session);
d7e09d03
PT
141}
142
d7e09d03
PT
143struct lu_context_key vvp_session_key = {
144 .lct_tags = LCT_SESSION,
145 .lct_init = vvp_session_key_init,
146 .lct_fini = vvp_session_key_fini
147};
148
e3c9078a
TH
149static void *vvp_thread_key_init(const struct lu_context *ctx,
150 struct lu_context_key *key)
9acc4500
JH
151{
152 struct vvp_thread_info *vti;
153
154 vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
155 if (!vti)
156 vti = ERR_PTR(-ENOMEM);
157 return vti;
158}
159
e3c9078a
TH
160static void vvp_thread_key_fini(const struct lu_context *ctx,
161 struct lu_context_key *key, void *data)
9acc4500
JH
162{
163 struct vvp_thread_info *vti = data;
164
165 kmem_cache_free(vvp_thread_kmem, vti);
166}
167
168struct lu_context_key vvp_thread_key = {
169 .lct_tags = LCT_CL_THREAD,
170 .lct_init = vvp_thread_key_init,
171 .lct_fini = vvp_thread_key_fini
172};
173
d7e09d03 174/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
9acc4500 175LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
d7e09d03
PT
176
177static const struct lu_device_operations vvp_lu_ops = {
178 .ldo_object_alloc = vvp_object_alloc
179};
180
181static const struct cl_device_operations vvp_cl_ops = {
103b8bda 182 .cdo_req_init = vvp_req_init
d7e09d03
PT
183};
184
3c95b839
JH
185static struct lu_device *vvp_device_free(const struct lu_env *env,
186 struct lu_device *d)
187{
188 struct vvp_device *vdv = lu2vvp_dev(d);
189 struct cl_site *site = lu2cl_site(d->ld_site);
190 struct lu_device *next = cl2lu_dev(vdv->vdv_next);
191
192 if (d->ld_site) {
193 cl_site_fini(site);
194 kfree(site);
195 }
196 cl_device_fini(lu2cl_dev(d));
197 kfree(vdv);
198 return next;
199}
200
d7e09d03
PT
201static struct lu_device *vvp_device_alloc(const struct lu_env *env,
202 struct lu_device_type *t,
203 struct lustre_cfg *cfg)
204{
3c95b839
JH
205 struct vvp_device *vdv;
206 struct lu_device *lud;
207 struct cl_site *site;
208 int rc;
209
210 vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
211 if (!vdv)
212 return ERR_PTR(-ENOMEM);
213
214 lud = &vdv->vdv_cl.cd_lu_dev;
215 cl_device_init(&vdv->vdv_cl, t);
216 vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
217 vdv->vdv_cl.cd_ops = &vvp_cl_ops;
218
219 site = kzalloc(sizeof(*site), GFP_NOFS);
220 if (site) {
221 rc = cl_site_init(site, &vdv->vdv_cl);
222 if (rc == 0) {
223 rc = lu_site_init_finish(&site->cs_lu);
224 } else {
225 LASSERT(!lud->ld_site);
226 CERROR("Cannot init lu_site, rc %d.\n", rc);
227 kfree(site);
228 }
229 } else {
230 rc = -ENOMEM;
231 }
232 if (rc != 0) {
233 vvp_device_free(env, lud);
234 lud = ERR_PTR(rc);
235 }
236 return lud;
237}
238
239static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
240 const char *name, struct lu_device *next)
241{
242 struct vvp_device *vdv;
243 int rc;
244
245 vdv = lu2vvp_dev(d);
246 vdv->vdv_next = lu2cl_dev(next);
247
248 LASSERT(d->ld_site && next->ld_type);
249 next->ld_site = d->ld_site;
250 rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
251 next->ld_type->ldt_name,
252 NULL);
253 if (rc == 0) {
254 lu_device_get(next);
255 lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
256 }
257 return rc;
258}
259
260static struct lu_device *vvp_device_fini(const struct lu_env *env,
261 struct lu_device *d)
262{
263 return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
d7e09d03
PT
264}
265
266static const struct lu_device_type_operations vvp_device_type_ops = {
267 .ldto_init = vvp_type_init,
268 .ldto_fini = vvp_type_fini,
269
270 .ldto_start = vvp_type_start,
271 .ldto_stop = vvp_type_stop,
272
273 .ldto_device_alloc = vvp_device_alloc,
3c95b839
JH
274 .ldto_device_free = vvp_device_free,
275 .ldto_device_init = vvp_device_init,
276 .ldto_device_fini = vvp_device_fini,
d7e09d03
PT
277};
278
279struct lu_device_type vvp_device_type = {
280 .ldt_tags = LU_DEVICE_CL,
281 .ldt_name = LUSTRE_VVP_NAME,
282 .ldt_ops = &vvp_device_type_ops,
283 .ldt_ctx_tags = LCT_CL_THREAD
284};
285
286/**
287 * A mutex serializing calls to vvp_inode_fini() under extreme memory
288 * pressure, when environments cannot be allocated.
289 */
290int vvp_global_init(void)
291{
a37bec74 292 int rc;
d7e09d03 293
a37bec74
JH
294 rc = lu_kmem_init(vvp_caches);
295 if (rc != 0)
296 return rc;
297
298 rc = lu_device_type_init(&vvp_device_type);
299 if (rc != 0)
300 goto out_kmem;
301
302 return 0;
303
304out_kmem:
305 lu_kmem_fini(vvp_caches);
306
307 return rc;
d7e09d03
PT
308}
309
310void vvp_global_fini(void)
311{
a37bec74 312 lu_device_type_fini(&vvp_device_type);
d7e09d03
PT
313 lu_kmem_fini(vvp_caches);
314}
315
d7e09d03
PT
316/*****************************************************************************
317 *
318 * mirror obd-devices into cl devices.
319 *
320 */
321
322int cl_sb_init(struct super_block *sb)
323{
324 struct ll_sb_info *sbi;
325 struct cl_device *cl;
326 struct lu_env *env;
327 int rc = 0;
328 int refcheck;
329
330 sbi = ll_s2sbi(sb);
331 env = cl_env_get(&refcheck);
332 if (!IS_ERR(env)) {
333 cl = cl_type_setup(env, NULL, &vvp_device_type,
334 sbi->ll_dt_exp->exp_obd->obd_lu_dev);
335 if (!IS_ERR(cl)) {
3c95b839 336 cl2vvp_dev(cl)->vdv_sb = sb;
d7e09d03
PT
337 sbi->ll_cl = cl;
338 sbi->ll_site = cl2lu_dev(cl)->ld_site;
339 }
340 cl_env_put(env, &refcheck);
da5ecb4d 341 } else {
d7e09d03 342 rc = PTR_ERR(env);
da5ecb4d 343 }
0a3bdb00 344 return rc;
d7e09d03
PT
345}
346
347int cl_sb_fini(struct super_block *sb)
348{
349 struct ll_sb_info *sbi;
350 struct lu_env *env;
351 struct cl_device *cld;
352 int refcheck;
353 int result;
354
d7e09d03
PT
355 sbi = ll_s2sbi(sb);
356 env = cl_env_get(&refcheck);
357 if (!IS_ERR(env)) {
358 cld = sbi->ll_cl;
359
6e16818b 360 if (cld) {
d7e09d03
PT
361 cl_stack_fini(env, cld);
362 sbi->ll_cl = NULL;
363 sbi->ll_site = NULL;
364 }
365 cl_env_put(env, &refcheck);
366 result = 0;
367 } else {
368 CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
369 result = PTR_ERR(env);
370 }
0a3bdb00 371 return result;
d7e09d03
PT
372}
373
374/****************************************************************************
375 *
ae7c0f48 376 * debugfs/lustre/llite/$MNT/dump_page_cache
d7e09d03
PT
377 *
378 ****************************************************************************/
379
380/*
381 * To represent contents of a page cache as a byte stream, following
382 * information if encoded in 64bit offset:
383 *
384 * - file hash bucket in lu_site::ls_hash[] 28bits
385 *
386 * - how far file is from bucket head 4bits
387 *
388 * - page index 32bits
389 *
390 * First two data identify a file in the cache uniquely.
391 */
392
393#define PGC_OBJ_SHIFT (32 + 4)
394#define PGC_DEPTH_SHIFT (32)
395
396struct vvp_pgcache_id {
397 unsigned vpi_bucket;
398 unsigned vpi_depth;
399 uint32_t vpi_index;
400
401 unsigned vpi_curdep;
402 struct lu_object_header *vpi_obj;
403};
404
405static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
406{
407 CLASSERT(sizeof(pos) == sizeof(__u64));
408
409 id->vpi_index = pos & 0xffffffff;
410 id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
a354e0d8 411 id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
d7e09d03
PT
412}
413
414static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
415{
416 return
417 ((__u64)id->vpi_index) |
418 ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) |
419 ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
420}
421
6da6eabe 422static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
d7e09d03
PT
423 struct hlist_node *hnode, void *data)
424{
425 struct vvp_pgcache_id *id = data;
426 struct lu_object_header *hdr = cfs_hash_object(hs, hnode);
427
428 if (id->vpi_curdep-- > 0)
429 return 0; /* continue */
430
431 if (lu_object_is_dying(hdr))
432 return 1;
433
434 cfs_hash_get(hs, hnode);
435 id->vpi_obj = hdr;
436 return 1;
437}
438
439static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
440 struct lu_device *dev,
441 struct vvp_pgcache_id *id)
442{
443 LASSERT(lu_device_is_cl(dev));
444
445 id->vpi_depth &= 0xf;
446 id->vpi_obj = NULL;
447 id->vpi_curdep = id->vpi_depth;
448
449 cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
450 vvp_pgcache_obj_get, id);
6e16818b 451 if (id->vpi_obj) {
d7e09d03
PT
452 struct lu_object *lu_obj;
453
454 lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
6e16818b 455 if (lu_obj) {
d7e09d03
PT
456 lu_object_ref_add(lu_obj, "dump", current);
457 return lu2cl(lu_obj);
458 }
459 lu_object_put(env, lu_object_top(id->vpi_obj));
460
461 } else if (id->vpi_curdep > 0) {
462 id->vpi_depth = 0xf;
463 }
464 return NULL;
465}
466
467static loff_t vvp_pgcache_find(const struct lu_env *env,
468 struct lu_device *dev, loff_t pos)
469{
470 struct cl_object *clob;
471 struct lu_site *site;
472 struct vvp_pgcache_id id;
473
474 site = dev->ld_site;
475 vvp_pgcache_id_unpack(pos, &id);
476
477 while (1) {
478 if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
479 return ~0ULL;
480 clob = vvp_pgcache_obj(env, dev, &id);
6e16818b 481 if (clob) {
8c7b0e1a 482 struct inode *inode = vvp_object_inode(clob);
d9d47901
JX
483 struct page *vmpage;
484 int nr;
d7e09d03 485
d9d47901
JX
486 nr = find_get_pages_contig(inode->i_mapping,
487 id.vpi_index, 1, &vmpage);
d7e09d03 488 if (nr > 0) {
d9d47901 489 id.vpi_index = vmpage->index;
d7e09d03 490 /* Cant support over 16T file */
d9d47901 491 nr = !(vmpage->index > 0xffffffff);
5f479924 492 put_page(vmpage);
d7e09d03 493 }
d7e09d03
PT
494
495 lu_object_ref_del(&clob->co_lu, "dump", current);
496 cl_object_put(env, clob);
497 if (nr > 0)
498 return vvp_pgcache_id_pack(&id);
499 }
500 /* to the next object. */
501 ++id.vpi_depth;
502 id.vpi_depth &= 0xf;
503 if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
504 return ~0ULL;
505 id.vpi_index = 0;
506 }
507}
508
/*
 * Print "FLAG" (or "|FLAG" when something was already printed) to @seq if
 * PG_<flag> is set on @page; @has_flags records whether any flag printed.
 */
#define seq_page_flag(seq, page, flag, has_flags) do {		  \
	if (test_bit(PG_##flag, &(page)->flags)) {		  \
		seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
		has_flags = 1;					  \
	}							  \
} while (0)
d7e09d03
PT
515
516static void vvp_pgcache_page_show(const struct lu_env *env,
517 struct seq_file *seq, struct cl_page *page)
518{
3a52f803 519 struct vvp_page *vpg;
d7e09d03
PT
520 struct page *vmpage;
521 int has_flags;
522
3a52f803
JH
523 vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
524 vmpage = vpg->vpg_page;
97a075cd 525 seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
d7e09d03 526 0 /* gen */,
3a52f803 527 vpg, page,
d7e09d03 528 "none",
3a52f803
JH
529 vpg->vpg_write_queued ? "wq" : "- ",
530 vpg->vpg_defer_uptodate ? "du" : "- ",
d7e09d03 531 PageWriteback(vmpage) ? "wb" : "-",
97a075cd 532 vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
d7e09d03
PT
533 vmpage->mapping->host, vmpage->index,
534 page_count(vmpage));
535 has_flags = 0;
536 seq_page_flag(seq, vmpage, locked, has_flags);
537 seq_page_flag(seq, vmpage, error, has_flags);
538 seq_page_flag(seq, vmpage, referenced, has_flags);
539 seq_page_flag(seq, vmpage, uptodate, has_flags);
540 seq_page_flag(seq, vmpage, dirty, has_flags);
541 seq_page_flag(seq, vmpage, writeback, has_flags);
542 seq_printf(seq, "%s]\n", has_flags ? "" : "-");
543}
544
545static int vvp_pgcache_show(struct seq_file *f, void *v)
546{
547 loff_t pos;
548 struct ll_sb_info *sbi;
549 struct cl_object *clob;
550 struct lu_env *env;
d7e09d03
PT
551 struct vvp_pgcache_id id;
552 int refcheck;
553 int result;
554
555 env = cl_env_get(&refcheck);
556 if (!IS_ERR(env)) {
9797fb0e 557 pos = *(loff_t *)v;
d7e09d03
PT
558 vvp_pgcache_id_unpack(pos, &id);
559 sbi = f->private;
560 clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
6e16818b 561 if (clob) {
8c7b0e1a 562 struct inode *inode = vvp_object_inode(clob);
d9d47901
JX
563 struct cl_page *page = NULL;
564 struct page *vmpage;
565
566 result = find_get_pages_contig(inode->i_mapping,
567 id.vpi_index, 1,
568 &vmpage);
569 if (result > 0) {
570 lock_page(vmpage);
571 page = cl_vmpage_page(vmpage, clob);
572 unlock_page(vmpage);
5f479924 573 put_page(vmpage);
d9d47901 574 }
d7e09d03 575
d9d47901
JX
576 seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
577 PFID(lu_object_fid(&clob->co_lu)));
6e16818b 578 if (page) {
d7e09d03
PT
579 vvp_pgcache_page_show(env, f, page);
580 cl_page_put(env, page);
da5ecb4d 581 } else {
d7e09d03 582 seq_puts(f, "missing\n");
da5ecb4d 583 }
d7e09d03
PT
584 lu_object_ref_del(&clob->co_lu, "dump", current);
585 cl_object_put(env, clob);
da5ecb4d 586 } else {
d7e09d03 587 seq_printf(f, "%llx missing\n", pos);
da5ecb4d 588 }
d7e09d03
PT
589 cl_env_put(env, &refcheck);
590 result = 0;
da5ecb4d 591 } else {
d7e09d03 592 result = PTR_ERR(env);
da5ecb4d 593 }
d7e09d03
PT
594 return result;
595}
596
597static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
598{
599 struct ll_sb_info *sbi;
600 struct lu_env *env;
601 int refcheck;
602
603 sbi = f->private;
604
605 env = cl_env_get(&refcheck);
606 if (!IS_ERR(env)) {
607 sbi = f->private;
608 if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
609 pos = ERR_PTR(-EFBIG);
610 else {
611 *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
612 *pos);
613 if (*pos == ~0ULL)
614 pos = NULL;
615 }
616 cl_env_put(env, &refcheck);
617 }
618 return pos;
619}
620
621static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
622{
623 struct ll_sb_info *sbi;
624 struct lu_env *env;
625 int refcheck;
626
627 env = cl_env_get(&refcheck);
628 if (!IS_ERR(env)) {
629 sbi = f->private;
630 *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
631 if (*pos == ~0ULL)
632 pos = NULL;
633 cl_env_put(env, &refcheck);
634 }
635 return pos;
636}
637
638static void vvp_pgcache_stop(struct seq_file *f, void *v)
639{
640 /* Nothing to do */
641}
642
02b31079 643static const struct seq_operations vvp_pgcache_ops = {
d7e09d03
PT
644 .start = vvp_pgcache_start,
645 .next = vvp_pgcache_next,
646 .stop = vvp_pgcache_stop,
647 .show = vvp_pgcache_show
648};
649
650static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
651{
ae7c0f48
OD
652 struct seq_file *seq;
653 int rc;
d7e09d03 654
ae7c0f48
OD
655 rc = seq_open(filp, &vvp_pgcache_ops);
656 if (rc)
657 return rc;
658
659 seq = filp->private_data;
e4ba525e 660 seq->private = inode->i_private;
ae7c0f48
OD
661
662 return 0;
d7e09d03
PT
663}
664
2d95f10e 665const struct file_operations vvp_dump_pgcache_file_ops = {
d7e09d03
PT
666 .owner = THIS_MODULE,
667 .open = vvp_dump_pgcache_seq_open,
668 .read = seq_read,
669 .llseek = seq_lseek,
670 .release = seq_release,
671};
This page took 0.487956 seconds and 5 git commands to generate.