drivers/staging/lustre/lustre/lov/lov_object.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2015, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * Implementation of cl_object for LOV layer.
33 *
34 * Author: Nikita Danilov <nikita.danilov@sun.com>
35 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
36 */
37
38 #define DEBUG_SUBSYSTEM S_LOV
39
40 #include "lov_cl_internal.h"
41
42 /** \addtogroup lov
43 * @{
44 */
45
46 /*****************************************************************************
47 *
48 * Layout operations.
49 *
50 */
51
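/*
 * Per-layout method table: each supported layout type (empty, raid0,
 * released) supplies its own implementation of these hooks; the right
 * entry is selected through the lov_dispatch[] array below.
 */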
52 struct lov_layout_operations {
53 int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
54 struct lov_object *lov,
55 const struct cl_object_conf *conf,
56 union lov_layout_state *state);
57 int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
58 union lov_layout_state *state);
59 void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
60 union lov_layout_state *state);
61 void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
62 union lov_layout_state *state);
63 int (*llo_print)(const struct lu_env *env, void *cookie,
64 lu_printer_t p, const struct lu_object *o);
65 int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
66 struct cl_page *page, pgoff_t index);
67 int (*llo_lock_init)(const struct lu_env *env,
68 struct cl_object *obj, struct cl_lock *lock,
69 const struct cl_io *io);
70 int (*llo_io_init)(const struct lu_env *env,
71 struct cl_object *obj, struct cl_io *io);
72 int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
73 struct cl_attr *attr);
74 };
75
76 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
77
78 /*****************************************************************************
79 *
80 * Lov object layout operations.
81 *
82 */
83
84 static void lov_install_empty(const struct lu_env *env,
85 struct lov_object *lov,
86 union lov_layout_state *state)
87 {
88 /*
89 * File without objects.
90 */
91 }
92
93 static int lov_init_empty(const struct lu_env *env,
94 struct lov_device *dev, struct lov_object *lov,
95 const struct cl_object_conf *conf,
96 union lov_layout_state *state)
97 {
98 return 0;
99 }
100
101 static void lov_install_raid0(const struct lu_env *env,
102 struct lov_object *lov,
103 union lov_layout_state *state)
104 {
105 }
106
107 static struct cl_object *lov_sub_find(const struct lu_env *env,
108 struct cl_device *dev,
109 const struct lu_fid *fid,
110 const struct cl_object_conf *conf)
111 {
112 struct lu_object *o;
113
114 o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
115 LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
116 return lu2cl(o);
117 }
118
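/*
 * Attach one stripe sub-object to its parent lov_object: record the parent
 * in the stripe's cl_object_header and store the stripe in r0->lo_sub[idx].
 * Returns -EAGAIN if the stripe still belongs to a parent whose layout is
 * marked invalid, and -EIO if another parent with a valid layout already
 * owns it.
 */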
119 static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
120 struct cl_object *stripe, struct lov_layout_raid0 *r0,
121 int idx)
122 {
123 struct cl_object_header *hdr;
124 struct cl_object_header *subhdr;
125 struct cl_object_header *parent;
126 struct lov_oinfo *oinfo;
127 int result;
128
129 if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
130 /* For sanity:test_206.
131 * Do not leave the object in the cache, to avoid accessing
132 * freed memory: the osc_object refers to the lov_oinfo in
133 * lsm_stripe_data, which will be freed because of this
134 * failure.
135 */
136 cl_object_kill(env, stripe);
137 cl_object_put(env, stripe);
138 return -EIO;
139 }
140
141 hdr = cl_object_header(lov2cl(lov));
142 subhdr = cl_object_header(stripe);
143
144 oinfo = lov->lo_lsm->lsm_oinfo[idx];
145 CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
146 " idx: %d gen: %d\n",
147 PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
148 PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
149 oinfo->loi_ost_idx, oinfo->loi_ost_gen);
150
151 /* reuse ->coh_attr_guard to protect coh_parent change */
152 spin_lock(&subhdr->coh_attr_guard);
153 parent = subhdr->coh_parent;
154 if (!parent) {
155 subhdr->coh_parent = hdr;
156 spin_unlock(&subhdr->coh_attr_guard);
157 subhdr->coh_nesting = hdr->coh_nesting + 1;
158 lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
159 r0->lo_sub[idx] = cl2lovsub(stripe);
160 r0->lo_sub[idx]->lso_super = lov;
161 r0->lo_sub[idx]->lso_index = idx;
162 result = 0;
163 } else {
164 struct lu_object *old_obj;
165 struct lov_object *old_lov;
166 unsigned int mask = D_INODE;
167
168 spin_unlock(&subhdr->coh_attr_guard);
169 old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
170 LASSERT(old_obj);
171 old_lov = cl2lov(lu2cl(old_obj));
172 if (old_lov->lo_layout_invalid) {
173 /* the object's layout has already changed but has not
174 * been refreshed yet
175 */
176 lu_object_unhash(env, &stripe->co_lu);
177 result = -EAGAIN;
178 } else {
179 mask = D_ERROR;
180 result = -EIO;
181 }
182
183 LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
184 "stripe %d is already owned.", idx);
185 LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
186 LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
187 cl_object_put(env, stripe);
188 }
189 return result;
190 }
191
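/*
 * Shift the cl_page slice offsets of a stripe object past the page slices of
 * the top-level lov object, and return the stripe's per-page buffer size so
 * that the caller can grow the parent's coh_page_bufsize accordingly.
 */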
192 static int lov_page_slice_fixup(struct lov_object *lov,
193 struct cl_object *stripe)
194 {
195 struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
196 struct cl_object *o;
197
198 cl_object_for_each(o, stripe)
199 o->co_slice_off += hdr->coh_page_bufsize;
200
201 return cl_object_header(stripe)->coh_page_bufsize;
202 }
203
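/*
 * Set up a raid0 layout: take a reference on the striping descriptor (lsm),
 * allocate the lo_sub[] array, and find or create one sub-object per stripe,
 * binding each of them to this lov_object through lov_init_sub().
 */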
204 static int lov_init_raid0(const struct lu_env *env,
205 struct lov_device *dev, struct lov_object *lov,
206 const struct cl_object_conf *conf,
207 union lov_layout_state *state)
208 {
209 int result;
210 int i;
211
212 struct cl_object *stripe;
213 struct lov_thread_info *lti = lov_env_info(env);
214 struct cl_object_conf *subconf = &lti->lti_stripe_conf;
215 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
216 struct lu_fid *ofid = &lti->lti_fid;
217 struct lov_layout_raid0 *r0 = &state->raid0;
218
219 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
220 dump_lsm(D_ERROR, lsm);
221 LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
222 LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
223 }
224
225 LASSERT(!lov->lo_lsm);
226 lov->lo_lsm = lsm_addref(lsm);
227 r0->lo_nr = lsm->lsm_stripe_count;
228 LASSERT(r0->lo_nr <= lov_targets_nr(dev));
229
230 r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
231 GFP_NOFS);
232 if (r0->lo_sub) {
233 int psz = 0;
234
235 result = 0;
236 subconf->coc_inode = conf->coc_inode;
237 spin_lock_init(&r0->lo_sub_lock);
238 /*
239 * Create stripe cl_objects.
240 */
241 for (i = 0; i < r0->lo_nr && result == 0; ++i) {
242 struct cl_device *subdev;
243 struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
244 int ost_idx = oinfo->loi_ost_idx;
245
246 if (lov_oinfo_is_dummy(oinfo))
247 continue;
248
249 result = ostid_to_fid(ofid, &oinfo->loi_oi,
250 oinfo->loi_ost_idx);
251 if (result != 0)
252 goto out;
253
254 subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
255 subconf->u.coc_oinfo = oinfo;
256 LASSERTF(subdev, "not init ost %d\n", ost_idx);
257 /* In the function below, .hs_keycmp resolves to
258 * lu_obj_hop_keycmp()
259 */
260 /* coverity[overrun-buffer-val] */
261 stripe = lov_sub_find(env, subdev, ofid, subconf);
262 if (!IS_ERR(stripe)) {
263 result = lov_init_sub(env, lov, stripe, r0, i);
264 if (result == -EAGAIN) { /* try again */
265 --i;
266 result = 0;
267 continue;
268 }
269 } else {
270 result = PTR_ERR(stripe);
271 }
272
273 if (result == 0) {
274 int sz = lov_page_slice_fixup(lov, stripe);
275
276 LASSERT(ergo(psz > 0, psz == sz));
277 psz = sz;
278 }
279 }
280 if (result == 0)
281 cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
282 } else {
283 result = -ENOMEM;
284 }
285 out:
286 return result;
287 }
288
289 static int lov_init_released(const struct lu_env *env,
290 struct lov_device *dev, struct lov_object *lov,
291 const struct cl_object_conf *conf,
292 union lov_layout_state *state)
293 {
294 struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
295
296 LASSERT(lsm);
297 LASSERT(lsm_is_released(lsm));
298 LASSERT(!lov->lo_lsm);
299
300 lov->lo_lsm = lsm_addref(lsm);
301 return 0;
302 }
303
304 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
305 union lov_layout_state *state)
306 {
307 LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
308
309 lov_layout_wait(env, lov);
310 return 0;
311 }
312
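/*
 * Destroy a single stripe sub-object and wait until it is actually freed,
 * i.e. until lovsub_object_fini() has cleared the parent's ->lo_sub[idx]
 * slot.
 */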
313 static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
314 struct lovsub_object *los, int idx)
315 {
316 struct cl_object *sub;
317 struct lov_layout_raid0 *r0;
318 struct lu_site *site;
319 struct lu_site_bkt_data *bkt;
320 wait_queue_t *waiter;
321
322 r0 = &lov->u.raid0;
323 LASSERT(r0->lo_sub[idx] == los);
324
325 sub = lovsub2cl(los);
326 site = sub->co_lu.lo_dev->ld_site;
327 bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
328
329 cl_object_kill(env, sub);
330 /* release a reference to the sub-object and ... */
331 lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
332 cl_object_put(env, sub);
333
334 /* ... wait until it is actually destroyed: the sub-object clears
335 * the parent's ->lo_sub[] slot in lovsub_object_fini()
336 */
337 if (r0->lo_sub[idx] == los) {
338 waiter = &lov_env_info(env)->lti_waiter;
339 init_waitqueue_entry(waiter, current);
340 add_wait_queue(&bkt->lsb_marche_funebre, waiter);
341 set_current_state(TASK_UNINTERRUPTIBLE);
342 while (1) {
343 /* this wait-queue is signaled at the end of
344 * lu_object_free().
345 */
346 set_current_state(TASK_UNINTERRUPTIBLE);
347 spin_lock(&r0->lo_sub_lock);
348 if (r0->lo_sub[idx] == los) {
349 spin_unlock(&r0->lo_sub_lock);
350 schedule();
351 } else {
352 spin_unlock(&r0->lo_sub_lock);
353 set_current_state(TASK_RUNNING);
354 break;
355 }
356 }
357 remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
358 }
359 LASSERT(!r0->lo_sub[idx]);
360 }
361
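/*
 * Delete a raid0 layout: wait for all active IO to drain, then prune the
 * cached pages of every instantiated stripe and destroy the stripe
 * sub-objects.
 */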
362 static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
363 union lov_layout_state *state)
364 {
365 struct lov_layout_raid0 *r0 = &state->raid0;
366 struct lov_stripe_md *lsm = lov->lo_lsm;
367 int i;
368
369 dump_lsm(D_INODE, lsm);
370
371 lov_layout_wait(env, lov);
372 if (r0->lo_sub) {
373 for (i = 0; i < r0->lo_nr; ++i) {
374 struct lovsub_object *los = r0->lo_sub[i];
375
376 if (los) {
377 cl_object_prune(env, &los->lso_cl);
378 /*
379 * If the top-level object is to be evicted from
380 * the cache, so are its sub-objects.
381 */
382 lov_subobject_kill(env, lov, los, i);
383 }
384 }
385 }
386 return 0;
387 }
388
389 static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
390 union lov_layout_state *state)
391 {
392 LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
393 }
394
395 static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
396 union lov_layout_state *state)
397 {
398 struct lov_layout_raid0 *r0 = &state->raid0;
399
400 if (r0->lo_sub) {
401 kvfree(r0->lo_sub);
402 r0->lo_sub = NULL;
403 }
404
405 dump_lsm(D_INODE, lov->lo_lsm);
406 lov_free_memmd(&lov->lo_lsm);
407 }
408
409 static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
410 union lov_layout_state *state)
411 {
412 dump_lsm(D_INODE, lov->lo_lsm);
413 lov_free_memmd(&lov->lo_lsm);
414 }
415
416 static int lov_print_empty(const struct lu_env *env, void *cookie,
417 lu_printer_t p, const struct lu_object *o)
418 {
419 (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
420 return 0;
421 }
422
423 static int lov_print_raid0(const struct lu_env *env, void *cookie,
424 lu_printer_t p, const struct lu_object *o)
425 {
426 struct lov_object *lov = lu2lov(o);
427 struct lov_layout_raid0 *r0 = lov_r0(lov);
428 struct lov_stripe_md *lsm = lov->lo_lsm;
429 int i;
430
431 (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
432 r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
433 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
434 lsm->lsm_stripe_count, lsm->lsm_layout_gen);
435 for (i = 0; i < r0->lo_nr; ++i) {
436 struct lu_object *sub;
437
438 if (r0->lo_sub[i]) {
439 sub = lovsub2lu(r0->lo_sub[i]);
440 lu_object_print(env, cookie, p, sub);
441 } else {
442 (*p)(env, cookie, "sub %d absent\n", i);
443 }
444 }
445 return 0;
446 }
447
448 static int lov_print_released(const struct lu_env *env, void *cookie,
449 lu_printer_t p, const struct lu_object *o)
450 {
451 struct lov_object *lov = lu2lov(o);
452 struct lov_stripe_md *lsm = lov->lo_lsm;
453
454 (*p)(env, cookie,
455 "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
456 lov->lo_layout_invalid ? "invalid" : "valid", lsm,
457 lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
458 lsm->lsm_stripe_count, lsm->lsm_layout_gen);
459 return 0;
460 }
461
462 /**
463 * Implements cl_object_operations::coo_attr_get() method for an object
464 * without stripes (LLT_EMPTY layout type).
465 *
466 * The only attribute this layer is authoritative for in this case is
467 * cl_attr::cat_blocks, which is 0.
468 */
469 static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
470 struct cl_attr *attr)
471 {
472 attr->cat_blocks = 0;
473 return 0;
474 }
475
476 static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
477 struct cl_attr *attr)
478 {
479 struct lov_object *lov = cl2lov(obj);
480 struct lov_layout_raid0 *r0 = lov_r0(lov);
481 struct cl_attr *lov_attr = &r0->lo_attr;
482 int result = 0;
483
484 /* This is called without holding the type guard, so it must be
485 * inside an ongoing IO; otherwise the lsm may be replaced.
486 * LU-2117: it turns out there is one exception. For mmapped files,
487 * a lock on such a file may be requested in another file's IO
488 * context, and this function is then called from ccc_lock_state(),
489 * which would hit this assertion.
490 * It is still okay to call attr_get without the type guard, as the
491 * layout cannot change while locks exist.
492 */
493 /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
494
495 if (!r0->lo_attr_valid) {
496 struct lov_stripe_md *lsm = lov->lo_lsm;
497 struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
498 __u64 kms = 0;
499
500 memset(lvb, 0, sizeof(*lvb));
501 /* XXX: sanity:test_39m shows that timestamps can be negative;
502 * how can that be?
503 */
504 lvb->lvb_atime = LLONG_MIN;
505 lvb->lvb_ctime = LLONG_MIN;
506 lvb->lvb_mtime = LLONG_MIN;
507
508 /*
509 * XXX: this should be replaced with a loop over the sub-objects,
510 * calling cl_object_attr_get() on each of them. For now, reuse
511 * the old lov code.
512 */
513
514 /*
515 * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
516 * happy. It's not needed, because new code uses
517 * ->coh_attr_guard spin-lock to protect consistency of
518 * sub-object attributes.
519 */
520 lov_stripe_lock(lsm);
521 result = lov_merge_lvb_kms(lsm, lvb, &kms);
522 lov_stripe_unlock(lsm);
523 if (result == 0) {
524 cl_lvb2attr(lov_attr, lvb);
525 lov_attr->cat_kms = kms;
526 r0->lo_attr_valid = 1;
527 }
528 }
529 if (result == 0) { /* merge results */
530 attr->cat_blocks = lov_attr->cat_blocks;
531 attr->cat_size = lov_attr->cat_size;
532 attr->cat_kms = lov_attr->cat_kms;
533 if (attr->cat_atime < lov_attr->cat_atime)
534 attr->cat_atime = lov_attr->cat_atime;
535 if (attr->cat_ctime < lov_attr->cat_ctime)
536 attr->cat_ctime = lov_attr->cat_ctime;
537 if (attr->cat_mtime < lov_attr->cat_mtime)
538 attr->cat_mtime = lov_attr->cat_mtime;
539 }
540 return result;
541 }
542
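/*
 * Layout dispatch table, indexed by enum lov_layout_type.  All layout-aware
 * entry points below pick their implementation from this array.
 */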
543 static const struct lov_layout_operations lov_dispatch[] = {
544 [LLT_EMPTY] = {
545 .llo_init = lov_init_empty,
546 .llo_delete = lov_delete_empty,
547 .llo_fini = lov_fini_empty,
548 .llo_install = lov_install_empty,
549 .llo_print = lov_print_empty,
550 .llo_page_init = lov_page_init_empty,
551 .llo_lock_init = lov_lock_init_empty,
552 .llo_io_init = lov_io_init_empty,
553 .llo_getattr = lov_attr_get_empty
554 },
555 [LLT_RAID0] = {
556 .llo_init = lov_init_raid0,
557 .llo_delete = lov_delete_raid0,
558 .llo_fini = lov_fini_raid0,
559 .llo_install = lov_install_raid0,
560 .llo_print = lov_print_raid0,
561 .llo_page_init = lov_page_init_raid0,
562 .llo_lock_init = lov_lock_init_raid0,
563 .llo_io_init = lov_io_init_raid0,
564 .llo_getattr = lov_attr_get_raid0
565 },
566 [LLT_RELEASED] = {
567 .llo_init = lov_init_released,
568 .llo_delete = lov_delete_empty,
569 .llo_fini = lov_fini_released,
570 .llo_install = lov_install_empty,
571 .llo_print = lov_print_released,
572 .llo_page_init = lov_page_init_empty,
573 .llo_lock_init = lov_lock_init_empty,
574 .llo_io_init = lov_io_init_released,
575 .llo_getattr = lov_attr_get_empty
576 }
577 };
578
579 /**
580 * Performs a double-dispatch based on the layout type of an object.
581 */
582 #define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
583 ({ \
584 struct lov_object *__obj = (obj); \
585 enum lov_layout_type __llt; \
586 \
587 __llt = __obj->lo_type; \
588 LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
589 lov_dispatch[__llt].op(__VA_ARGS__); \
590 })
591
592 /**
593 * Return lov_layout_type associated with a given lsm
594 */
595 static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
596 {
597 if (!lsm)
598 return LLT_EMPTY;
599 if (lsm_is_released(lsm))
600 return LLT_RELEASED;
601 return LLT_RAID0;
602 }
603
604 static inline void lov_conf_freeze(struct lov_object *lov)
605 {
606 if (lov->lo_owner != current)
607 down_read(&lov->lo_type_guard);
608 }
609
610 static inline void lov_conf_thaw(struct lov_object *lov)
611 {
612 if (lov->lo_owner != current)
613 up_read(&lov->lo_type_guard);
614 }
615
616 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
617 ({ \
618 struct lov_object *__obj = (obj); \
619 int __lock = !!(lock); \
620 typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
621 \
622 if (__lock) \
623 lov_conf_freeze(__obj); \
624 __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
625 if (__lock) \
626 lov_conf_thaw(__obj); \
627 __result; \
628 })
629
630 /**
631 * Performs a locked double-dispatch based on the layout type of an object.
632 */
633 #define LOV_2DISPATCH(obj, op, ...) \
634 LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
635
636 #define LOV_2DISPATCH_VOID(obj, op, ...) \
637 do { \
638 struct lov_object *__obj = (obj); \
639 enum lov_layout_type __llt; \
640 \
641 lov_conf_freeze(__obj); \
642 __llt = __obj->lo_type; \
643 LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
644 lov_dispatch[__llt].op(__VA_ARGS__); \
645 lov_conf_thaw(__obj); \
646 } while (0)
647
648 static void lov_conf_lock(struct lov_object *lov)
649 {
650 LASSERT(lov->lo_owner != current);
651 down_write(&lov->lo_type_guard);
652 LASSERT(!lov->lo_owner);
653 lov->lo_owner = current;
654 }
655
656 static void lov_conf_unlock(struct lov_object *lov)
657 {
658 lov->lo_owner = NULL;
659 up_write(&lov->lo_type_guard);
660 }
661
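/*
 * Wait until all IO against this object has finished, i.e. until
 * lo_active_ios drops to zero.
 */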
662 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
663 {
664 struct l_wait_info lwi = { 0 };
665
666 while (atomic_read(&lov->lo_active_ios) > 0) {
667 CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n",
668 PFID(lu_object_fid(lov2lu(lov))),
669 atomic_read(&lov->lo_active_ios));
670
671 l_wait_event(lov->lo_waitq,
672 atomic_read(&lov->lo_active_ios) == 0, &lwi);
673 }
674 return 0;
675 }
676
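/*
 * Switch the object to a new layout: prune cached pages, delete and finalize
 * the current layout state, then initialize and install the state for the
 * new layout type.  If installing the new layout fails, the object is left
 * as an LLT_EMPTY file.
 */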
677 static int lov_layout_change(const struct lu_env *unused,
678 struct lov_object *lov,
679 const struct cl_object_conf *conf)
680 {
681 int result;
682 enum lov_layout_type llt = LLT_EMPTY;
683 union lov_layout_state *state = &lov->u;
684 const struct lov_layout_operations *old_ops;
685 const struct lov_layout_operations *new_ops;
686
687 void *cookie;
688 struct lu_env *env;
689 int refcheck;
690
691 LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
692
693 if (conf->u.coc_md)
694 llt = lov_type(conf->u.coc_md->lsm);
695 LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
696
697 cookie = cl_env_reenter();
698 env = cl_env_get(&refcheck);
699 if (IS_ERR(env)) {
700 cl_env_reexit(cookie);
701 return PTR_ERR(env);
702 }
703
704 CDEBUG(D_INODE, DFID" from %s to %s\n",
705 PFID(lu_object_fid(lov2lu(lov))),
706 llt2str(lov->lo_type), llt2str(llt));
707
708 old_ops = &lov_dispatch[lov->lo_type];
709 new_ops = &lov_dispatch[llt];
710
711 result = cl_object_prune(env, &lov->lo_cl);
712 if (result != 0)
713 goto out;
714
715 result = old_ops->llo_delete(env, lov, &lov->u);
716 if (result == 0) {
717 old_ops->llo_fini(env, lov, &lov->u);
718
719 LASSERT(atomic_read(&lov->lo_active_ios) == 0);
720
721 lov->lo_type = LLT_EMPTY;
722 result = new_ops->llo_init(env,
723 lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
724 lov, conf, state);
725 if (result == 0) {
726 new_ops->llo_install(env, lov, state);
727 lov->lo_type = llt;
728 } else {
729 new_ops->llo_delete(env, lov, state);
730 new_ops->llo_fini(env, lov, state);
731 /* this file becomes an EMPTY file. */
732 }
733 }
734
735 out:
736 cl_env_put(env, &refcheck);
737 cl_env_reexit(cookie);
738 return result;
739 }
740
741 /*****************************************************************************
742 *
743 * Lov object operations.
744 *
745 */
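/*
 * lu_object_operations::loo_object_init() for lov: choose the layout type
 * from the object configuration and delegate the rest of the setup to the
 * matching llo_init() and llo_install() methods.
 */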
746 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
747 const struct lu_object_conf *conf)
748 {
749 struct lov_device *dev = lu2lov_dev(obj->lo_dev);
750 struct lov_object *lov = lu2lov(obj);
751 const struct cl_object_conf *cconf = lu2cl_conf(conf);
752 union lov_layout_state *set = &lov->u;
753 const struct lov_layout_operations *ops;
754 int result;
755
756 init_rwsem(&lov->lo_type_guard);
757 atomic_set(&lov->lo_active_ios, 0);
758 init_waitqueue_head(&lov->lo_waitq);
759
760 cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
761
762 /* no locking is necessary, as the object is being created */
763 lov->lo_type = lov_type(cconf->u.coc_md->lsm);
764 ops = &lov_dispatch[lov->lo_type];
765 result = ops->llo_init(env, dev, lov, cconf, set);
766 if (result == 0)
767 ops->llo_install(env, lov, set);
768 return result;
769 }
770
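/*
 * Handle a layout configuration request (coo_conf_set): depending on
 * conf->coc_opc this invalidates the current layout, waits for layout users
 * to finish, or installs a new striping descriptor.  Returns -EBUSY if the
 * layout would have to change while IO is still active.
 */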
771 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
772 const struct cl_object_conf *conf)
773 {
774 struct lov_stripe_md *lsm = NULL;
775 struct lov_object *lov = cl2lov(obj);
776 int result = 0;
777
778 lov_conf_lock(lov);
779 if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
780 lov->lo_layout_invalid = true;
781 result = 0;
782 goto out;
783 }
784
785 if (conf->coc_opc == OBJECT_CONF_WAIT) {
786 if (lov->lo_layout_invalid &&
787 atomic_read(&lov->lo_active_ios) > 0) {
788 lov_conf_unlock(lov);
789 result = lov_layout_wait(env, lov);
790 lov_conf_lock(lov);
791 }
792 goto out;
793 }
794
795 LASSERT(conf->coc_opc == OBJECT_CONF_SET);
796
797 if (conf->u.coc_md)
798 lsm = conf->u.coc_md->lsm;
799 if ((!lsm && !lov->lo_lsm) ||
800 ((lsm && lov->lo_lsm) &&
801 (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
802 (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
803 /* same version of layout */
804 lov->lo_layout_invalid = false;
805 result = 0;
806 goto out;
807 }
808
809 /* about to change the layout - check whether there is still active IO. */
810 if (atomic_read(&lov->lo_active_ios) > 0) {
811 lov->lo_layout_invalid = true;
812 result = -EBUSY;
813 goto out;
814 }
815
816 result = lov_layout_change(env, lov, conf);
817 lov->lo_layout_invalid = result != 0;
818
819 out:
820 lov_conf_unlock(lov);
821 CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
822 PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
823 return result;
824 }
825
826 static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
827 {
828 struct lov_object *lov = lu2lov(obj);
829
830 LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
831 }
832
833 static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
834 {
835 struct lov_object *lov = lu2lov(obj);
836
837 LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
838 lu_object_fini(obj);
839 kmem_cache_free(lov_object_kmem, lov);
840 }
841
842 static int lov_object_print(const struct lu_env *env, void *cookie,
843 lu_printer_t p, const struct lu_object *o)
844 {
845 return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
846 }
847
848 int lov_page_init(const struct lu_env *env, struct cl_object *obj,
849 struct cl_page *page, pgoff_t index)
850 {
851 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
852 index);
853 }
854
855 /**
856 * Implements the cl_object_operations::coo_io_init() method for the lov
857 * layer. Dispatches to the appropriate layout IO initialization method.
858 */
859 int lov_io_init(const struct lu_env *env, struct cl_object *obj,
860 struct cl_io *io)
861 {
862 CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
863 return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
864 !io->ci_ignore_layout, env, obj, io);
865 }
866
867 /**
868 * An implementation of the cl_object_operations::coo_attr_get() method for
869 * the lov layer. For the raid0 layout this collects and merges the
870 * attributes of all sub-objects.
871 */
872 static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
873 struct cl_attr *attr)
874 {
875 /* do not take the lock, as this function is called under a
876 * spin-lock. The layout is protected from changes by ongoing IO.
877 */
878 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
879 }
880
881 static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
882 const struct cl_attr *attr, unsigned valid)
883 {
884 /*
885 * No dispatch is required here, as no layout implements this.
886 */
887 return 0;
888 }
889
890 int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
891 struct cl_lock *lock, const struct cl_io *io)
892 {
893 /* No need to lock because we hold a reference on the layout. */
894 return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
895 io);
896 }
897
898 static const struct cl_object_operations lov_ops = {
899 .coo_page_init = lov_page_init,
900 .coo_lock_init = lov_lock_init,
901 .coo_io_init = lov_io_init,
902 .coo_attr_get = lov_attr_get,
903 .coo_attr_set = lov_attr_set,
904 .coo_conf_set = lov_conf_set
905 };
906
907 static const struct lu_object_operations lov_lu_obj_ops = {
908 .loo_object_init = lov_object_init,
909 .loo_object_delete = lov_object_delete,
910 .loo_object_release = NULL,
911 .loo_object_free = lov_object_free,
912 .loo_object_print = lov_object_print,
913 .loo_object_invariant = NULL
914 };
915
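/*
 * Allocate a new lov_object.  Only the generic parts are set up here; the
 * layout-specific operations are installed later by lov_object_init(), once
 * the layout configuration is known.
 */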
916 struct lu_object *lov_object_alloc(const struct lu_env *env,
917 const struct lu_object_header *unused,
918 struct lu_device *dev)
919 {
920 struct lov_object *lov;
921 struct lu_object *obj;
922
923 lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS);
924 if (lov) {
925 obj = lov2lu(lov);
926 lu_object_init(obj, NULL, dev);
927 lov->lo_cl.co_ops = &lov_ops;
928 lov->lo_type = -1; /* invalid, to catch uninitialized type */
929 /*
930 * The object IO operation vector (cl_object::co_iop) is installed
931 * later, in lov_object_init(), as different vectors are used
932 * for objects with different layouts.
933 */
934 obj->lo_ops = &lov_lu_obj_ops;
935 } else {
936 obj = NULL;
937 }
938 return obj;
939 }
940
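/*
 * Take an extra reference on the object's current striping descriptor while
 * holding the layout guard, so that the lsm cannot be released underneath
 * the caller.
 */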
941 static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
942 {
943 struct lov_stripe_md *lsm = NULL;
944
945 lov_conf_freeze(lov);
946 if (lov->lo_lsm) {
947 lsm = lsm_addref(lov->lo_lsm);
948 CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
949 lsm, atomic_read(&lsm->lsm_refc),
950 lov->lo_layout_invalid, current);
951 }
952 lov_conf_thaw(lov);
953 return lsm;
954 }
955
956 struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
957 {
958 struct lu_object *luobj;
959 struct lov_stripe_md *lsm = NULL;
960
961 if (!clobj)
962 return NULL;
963
964 luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
965 &lov_device_type);
966 if (luobj)
967 lsm = lov_lsm_addref(lu2lov(luobj));
968 return lsm;
969 }
970 EXPORT_SYMBOL(lov_lsm_get);
971
972 void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
973 {
974 if (lsm)
975 lov_free_memmd(&lsm);
976 }
977 EXPORT_SYMBOL(lov_lsm_put);
978
979 int lov_read_and_clear_async_rc(struct cl_object *clob)
980 {
981 struct lu_object *luobj;
982 int rc = 0;
983
984 luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
985 &lov_device_type);
986 if (luobj) {
987 struct lov_object *lov = lu2lov(luobj);
988
989 lov_conf_freeze(lov);
990 switch (lov->lo_type) {
991 case LLT_RAID0: {
992 struct lov_stripe_md *lsm;
993 int i;
994
995 lsm = lov->lo_lsm;
996 for (i = 0; i < lsm->lsm_stripe_count; i++) {
997 struct lov_oinfo *loi = lsm->lsm_oinfo[i];
998
999 if (lov_oinfo_is_dummy(loi))
1000 continue;
1001
1002 if (loi->loi_ar.ar_rc && !rc)
1003 rc = loi->loi_ar.ar_rc;
1004 loi->loi_ar.ar_rc = 0;
1005 }
1006 }
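/* fall through */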
1007 case LLT_RELEASED:
1008 case LLT_EMPTY:
1009 break;
1010 default:
1011 LBUG();
1012 }
1013 lov_conf_thaw(lov);
1014 }
1015 return rc;
1016 }
1017 EXPORT_SYMBOL(lov_read_and_clear_async_rc);
1018
1019 /** @} lov */