4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mgc/mgc_request.c
38 * Author: Nathan Rutman <nathan@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_MGC
42 #define D_MGC D_CONFIG /*|D_WARNING*/
44 #include <linux/module.h>
45 #include "../include/obd_class.h"
46 #include "../include/lustre_dlm.h"
47 #include "../include/lprocfs_status.h"
48 #include "../include/lustre_log.h"
49 #include "../include/lustre_disk.h"
51 #include "mgc_internal.h"
53 static int mgc_name2resid(char *name
, int len
, struct ldlm_res_id
*res_id
,
58 if (len
> sizeof(resname
)) {
59 CERROR("name too long: %s\n", name
);
63 CERROR("missing name: %s\n", name
);
66 memcpy(&resname
, name
, len
);
68 /* Always use the same endianness for the resid */
69 memset(res_id
, 0, sizeof(*res_id
));
70 res_id
->name
[0] = cpu_to_le64(resname
);
71 /* XXX: unfortunately, sptlprc and config llog share one lock */
74 case CONFIG_T_SPTLRPC
:
77 case CONFIG_T_RECOVER
:
84 res_id
->name
[1] = cpu_to_le64(resname
);
85 CDEBUG(D_MGC
, "log %s to resid %#llx/%#llx (%.8s)\n", name
,
86 res_id
->name
[0], res_id
->name
[1], (char *)&res_id
->name
[0]);
/* Build the LDLM resid for a filesystem name (e.g. "lustre", "SUN-000").
 * fsname is at most 8 chars long, maybe contain "-".
 */
int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
{
	return mgc_name2resid(fsname, strlen(fsname), res_id, type);
}
EXPORT_SYMBOL(mgc_fsname2resid);
/* Build the LDLM resid for a log name; only the fsname part (before the
 * last '-') contributes to the resid so all logs of one fs share it.
 */
static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id,
			     int type)
{
	char *name_end;
	int len;

	/* logname consists of "fsname-nodetype".
	 * e.g. "lustre-MDT0001", "SUN-000-client"
	 * there is an exception: llog "params"
	 */
	name_end = strrchr(logname, '-');
	if (!name_end)
		len = strlen(logname);
	else
		len = name_end - logname;
	return mgc_name2resid(logname, len, res_id, type);
}
116 /********************** config llog list **********************/
/* List of all active config logs; every entry holds refcount > 0.
 * Protected by config_list_lock.
 */
static LIST_HEAD(config_llog_list);
static DEFINE_SPINLOCK(config_list_lock);
120 /* Take a reference to a config log */
121 static int config_log_get(struct config_llog_data
*cld
)
123 atomic_inc(&cld
->cld_refcount
);
124 CDEBUG(D_INFO
, "log %s refs %d\n", cld
->cld_logname
,
125 atomic_read(&cld
->cld_refcount
));
129 /* Drop a reference to a config log. When no longer referenced,
130 * we can free the config log data
132 static void config_log_put(struct config_llog_data
*cld
)
134 CDEBUG(D_INFO
, "log %s refs %d\n", cld
->cld_logname
,
135 atomic_read(&cld
->cld_refcount
));
136 LASSERT(atomic_read(&cld
->cld_refcount
) > 0);
138 /* spinlock to make sure no item with 0 refcount in the list */
139 if (atomic_dec_and_lock(&cld
->cld_refcount
, &config_list_lock
)) {
140 list_del(&cld
->cld_list_chain
);
141 spin_unlock(&config_list_lock
);
143 CDEBUG(D_MGC
, "dropping config log %s\n", cld
->cld_logname
);
145 if (cld
->cld_recover
)
146 config_log_put(cld
->cld_recover
);
147 if (cld
->cld_sptlrpc
)
148 config_log_put(cld
->cld_sptlrpc
);
150 config_log_put(cld
->cld_params
);
151 if (cld_is_sptlrpc(cld
))
152 sptlrpc_conf_log_stop(cld
->cld_logname
);
154 class_export_put(cld
->cld_mgcexp
);
159 /* Find a config log by name */
161 struct config_llog_data
*config_log_find(char *logname
,
162 struct config_llog_instance
*cfg
)
164 struct config_llog_data
*cld
;
165 struct config_llog_data
*found
= NULL
;
170 instance
= cfg
? cfg
->cfg_instance
: NULL
;
171 spin_lock(&config_list_lock
);
172 list_for_each_entry(cld
, &config_llog_list
, cld_list_chain
) {
173 /* check if instance equals */
174 if (instance
!= cld
->cld_cfg
.cfg_instance
)
177 /* instance may be NULL, should check name */
178 if (strcmp(logname
, cld
->cld_logname
) == 0) {
184 atomic_inc(&found
->cld_refcount
);
185 LASSERT(found
->cld_stopping
== 0 || cld_is_sptlrpc(found
) == 0);
187 spin_unlock(&config_list_lock
);
192 struct config_llog_data
*do_config_log_add(struct obd_device
*obd
,
195 struct config_llog_instance
*cfg
,
196 struct super_block
*sb
)
198 struct config_llog_data
*cld
;
201 CDEBUG(D_MGC
, "do adding config log %s:%p\n", logname
,
202 cfg
? cfg
->cfg_instance
: NULL
);
204 cld
= kzalloc(sizeof(*cld
) + strlen(logname
) + 1, GFP_NOFS
);
206 return ERR_PTR(-ENOMEM
);
208 strcpy(cld
->cld_logname
, logname
);
212 cld
->cld_cfg
.cfg_callback
= class_config_llog_handler
;
213 mutex_init(&cld
->cld_lock
);
214 cld
->cld_cfg
.cfg_last_idx
= 0;
215 cld
->cld_cfg
.cfg_flags
= 0;
216 cld
->cld_cfg
.cfg_sb
= sb
;
217 cld
->cld_type
= type
;
218 atomic_set(&cld
->cld_refcount
, 1);
220 /* Keep the mgc around until we are done */
221 cld
->cld_mgcexp
= class_export_get(obd
->obd_self_export
);
223 if (cld_is_sptlrpc(cld
)) {
224 sptlrpc_conf_log_start(logname
);
225 cld
->cld_cfg
.cfg_obdname
= obd
->obd_name
;
228 rc
= mgc_logname2resid(logname
, &cld
->cld_resid
, type
);
230 spin_lock(&config_list_lock
);
231 list_add(&cld
->cld_list_chain
, &config_llog_list
);
232 spin_unlock(&config_list_lock
);
239 if (cld_is_sptlrpc(cld
)) {
240 rc
= mgc_process_log(obd
, cld
);
241 if (rc
&& rc
!= -ENOENT
)
242 CERROR("failed processing sptlrpc log: %d\n", rc
);
248 static struct config_llog_data
*
249 config_recover_log_add(struct obd_device
*obd
, char *fsname
,
250 struct config_llog_instance
*cfg
,
251 struct super_block
*sb
)
253 struct config_llog_instance lcfg
= *cfg
;
254 struct config_llog_data
*cld
;
257 /* we have to use different llog for clients and mdts for cmd
258 * where only clients are notified if one of cmd server restarts
260 LASSERT(strlen(fsname
) < sizeof(logname
) / 2);
261 strcpy(logname
, fsname
);
262 LASSERT(lcfg
.cfg_instance
);
263 strcat(logname
, "-cliir");
265 cld
= do_config_log_add(obd
, logname
, CONFIG_T_RECOVER
, &lcfg
, sb
);
269 static struct config_llog_data
*
270 config_params_log_add(struct obd_device
*obd
,
271 struct config_llog_instance
*cfg
, struct super_block
*sb
)
273 struct config_llog_instance lcfg
= *cfg
;
274 struct config_llog_data
*cld
;
276 lcfg
.cfg_instance
= sb
;
278 cld
= do_config_log_add(obd
, PARAMS_FILENAME
, CONFIG_T_PARAMS
,
284 /** Add this log to the list of active logs watched by an MGC.
285 * Active means we're watching for updates.
286 * We have one active log per "mount" - client instance or servername.
287 * Each instance may be at a different point in the log.
289 static int config_log_add(struct obd_device
*obd
, char *logname
,
290 struct config_llog_instance
*cfg
,
291 struct super_block
*sb
)
293 struct lustre_sb_info
*lsi
= s2lsi(sb
);
294 struct config_llog_data
*cld
;
295 struct config_llog_data
*sptlrpc_cld
;
296 struct config_llog_data
*params_cld
;
301 CDEBUG(D_MGC
, "adding config log %s:%p\n", logname
, cfg
->cfg_instance
);
304 * for each regular log, the depended sptlrpc log name is
305 * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
307 ptr
= strrchr(logname
, '-');
308 if (!ptr
|| ptr
- logname
> 8) {
309 CERROR("logname %s is too long\n", logname
);
313 memcpy(seclogname
, logname
, ptr
- logname
);
314 strcpy(seclogname
+ (ptr
- logname
), "-sptlrpc");
316 sptlrpc_cld
= config_log_find(seclogname
, NULL
);
318 sptlrpc_cld
= do_config_log_add(obd
, seclogname
,
319 CONFIG_T_SPTLRPC
, NULL
, NULL
);
320 if (IS_ERR(sptlrpc_cld
)) {
321 CERROR("can't create sptlrpc log: %s\n", seclogname
);
322 rc
= PTR_ERR(sptlrpc_cld
);
326 params_cld
= config_params_log_add(obd
, cfg
, sb
);
327 if (IS_ERR(params_cld
)) {
328 rc
= PTR_ERR(params_cld
);
329 CERROR("%s: can't create params log: rc = %d\n",
334 cld
= do_config_log_add(obd
, logname
, CONFIG_T_CONFIG
, cfg
, sb
);
336 CERROR("can't create log: %s\n", logname
);
341 cld
->cld_sptlrpc
= sptlrpc_cld
;
342 cld
->cld_params
= params_cld
;
344 LASSERT(lsi
->lsi_lmd
);
345 if (!(lsi
->lsi_lmd
->lmd_flags
& LMD_FLG_NOIR
)) {
346 struct config_llog_data
*recover_cld
;
348 ptr
= strrchr(seclogname
, '-');
352 CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n",
353 obd
->obd_name
, seclogname
, -EINVAL
);
357 recover_cld
= config_recover_log_add(obd
, seclogname
, cfg
, sb
);
358 if (IS_ERR(recover_cld
)) {
359 rc
= PTR_ERR(recover_cld
);
362 cld
->cld_recover
= recover_cld
;
371 config_log_put(params_cld
);
374 config_log_put(sptlrpc_cld
);
/* Serializes llog processing across the mgc. */
DEFINE_MUTEX(llog_process_lock);
382 /** Stop watching for updates on this log.
384 static int config_log_end(char *logname
, struct config_llog_instance
*cfg
)
386 struct config_llog_data
*cld
;
387 struct config_llog_data
*cld_sptlrpc
= NULL
;
388 struct config_llog_data
*cld_params
= NULL
;
389 struct config_llog_data
*cld_recover
= NULL
;
392 cld
= config_log_find(logname
, cfg
);
396 mutex_lock(&cld
->cld_lock
);
398 * if cld_stopping is set, it means we didn't start the log thus
399 * not owning the start ref. this can happen after previous umount:
400 * the cld still hanging there waiting for lock cancel, and we
401 * remount again but failed in the middle and call log_end without
404 if (unlikely(cld
->cld_stopping
)) {
405 mutex_unlock(&cld
->cld_lock
);
406 /* drop the ref from the find */
411 cld
->cld_stopping
= 1;
413 cld_recover
= cld
->cld_recover
;
414 cld
->cld_recover
= NULL
;
415 mutex_unlock(&cld
->cld_lock
);
418 mutex_lock(&cld_recover
->cld_lock
);
419 cld_recover
->cld_stopping
= 1;
420 mutex_unlock(&cld_recover
->cld_lock
);
421 config_log_put(cld_recover
);
424 spin_lock(&config_list_lock
);
425 cld_sptlrpc
= cld
->cld_sptlrpc
;
426 cld
->cld_sptlrpc
= NULL
;
427 cld_params
= cld
->cld_params
;
428 cld
->cld_params
= NULL
;
429 spin_unlock(&config_list_lock
);
432 config_log_put(cld_sptlrpc
);
435 mutex_lock(&cld_params
->cld_lock
);
436 cld_params
->cld_stopping
= 1;
437 mutex_unlock(&cld_params
->cld_lock
);
438 config_log_put(cld_params
);
441 /* drop the ref from the find */
443 /* drop the start ref */
446 CDEBUG(D_MGC
, "end config log %s (%d)\n", logname
? logname
: "client",
451 int lprocfs_mgc_rd_ir_state(struct seq_file
*m
, void *data
)
453 struct obd_device
*obd
= data
;
454 struct obd_import
*imp
;
455 struct obd_connect_data
*ocd
;
456 struct config_llog_data
*cld
;
459 rc
= lprocfs_climp_check(obd
);
463 imp
= obd
->u
.cli
.cl_import
;
464 ocd
= &imp
->imp_connect_data
;
466 seq_printf(m
, "imperative_recovery: %s\n",
467 OCD_HAS_FLAG(ocd
, IMP_RECOV
) ? "ENABLED" : "DISABLED");
468 seq_printf(m
, "client_state:\n");
470 spin_lock(&config_list_lock
);
471 list_for_each_entry(cld
, &config_llog_list
, cld_list_chain
) {
472 if (!cld
->cld_recover
)
474 seq_printf(m
, " - { client: %s, nidtbl_version: %u }\n",
476 cld
->cld_recover
->cld_cfg
.cfg_last_idx
);
478 spin_unlock(&config_list_lock
);
480 up_read(&obd
->u
.cli
.cl_sem
);
484 /* reenqueue any lost locks */
485 #define RQ_RUNNING 0x1
489 #define RQ_PRECLEANUP 0x10
491 static wait_queue_head_t rq_waitq
;
492 static DECLARE_COMPLETION(rq_exit
);
493 static DECLARE_COMPLETION(rq_start
);
495 static void do_requeue(struct config_llog_data
*cld
)
497 LASSERT(atomic_read(&cld
->cld_refcount
) > 0);
499 /* Do not run mgc_process_log on a disconnected export or an
500 * export which is being disconnected. Take the client
501 * semaphore to make the check non-racy.
503 down_read(&cld
->cld_mgcexp
->exp_obd
->u
.cli
.cl_sem
);
504 if (cld
->cld_mgcexp
->exp_obd
->u
.cli
.cl_conn_count
!= 0) {
505 CDEBUG(D_MGC
, "updating log %s\n", cld
->cld_logname
);
506 mgc_process_log(cld
->cld_mgcexp
->exp_obd
, cld
);
508 CDEBUG(D_MGC
, "disconnecting, won't update log %s\n",
511 up_read(&cld
->cld_mgcexp
->exp_obd
->u
.cli
.cl_sem
);
/* this timeout represents how many seconds MGC should wait before
 * requeue config and recover lock to the MGS. We need to randomize this
 * in order to not flood the MGS.
 */
#define MGC_TIMEOUT_MIN_SECONDS		5
#define MGC_TIMEOUT_RAND_CENTISEC	0x1ff /* ~500 */
521 static int mgc_requeue_thread(void *data
)
525 CDEBUG(D_MGC
, "Starting requeue thread\n");
527 /* Keep trying failed locks periodically */
528 spin_lock(&config_list_lock
);
529 rq_state
|= RQ_RUNNING
;
531 struct l_wait_info lwi
;
532 struct config_llog_data
*cld
, *cld_prev
;
533 int rand
= cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC
;
534 int stopped
= !!(rq_state
& RQ_STOP
);
537 /* Any new or requeued lostlocks will change the state */
538 rq_state
&= ~(RQ_NOW
| RQ_LATER
);
539 spin_unlock(&config_list_lock
);
546 /* Always wait a few seconds to allow the server who
547 * caused the lock revocation to finish its setup, plus some
548 * random so everyone doesn't try to reconnect at once.
550 to
= MGC_TIMEOUT_MIN_SECONDS
* HZ
;
551 to
+= rand
* HZ
/ 100; /* rand is centi-seconds */
552 lwi
= LWI_TIMEOUT(to
, NULL
, NULL
);
553 l_wait_event(rq_waitq
, rq_state
& (RQ_STOP
| RQ_PRECLEANUP
),
557 * iterate & processing through the list. for each cld, process
558 * its depending sptlrpc cld firstly (if any) and then itself.
560 * it's guaranteed any item in the list must have
561 * reference > 0; and if cld_lostlock is set, at
562 * least one reference is taken by the previous enqueue.
566 spin_lock(&config_list_lock
);
567 rq_state
&= ~RQ_PRECLEANUP
;
568 list_for_each_entry(cld
, &config_llog_list
, cld_list_chain
) {
569 if (!cld
->cld_lostlock
)
572 spin_unlock(&config_list_lock
);
574 LASSERT(atomic_read(&cld
->cld_refcount
) > 0);
576 /* Whether we enqueued again or not in mgc_process_log,
577 * we're done with the ref from the old enqueue
580 config_log_put(cld_prev
);
583 cld
->cld_lostlock
= 0;
584 if (likely(!stopped
))
587 spin_lock(&config_list_lock
);
589 spin_unlock(&config_list_lock
);
591 config_log_put(cld_prev
);
593 /* break after scanning the list so that we can drop
594 * refcount to losing lock clds
596 if (unlikely(stopped
)) {
597 spin_lock(&config_list_lock
);
601 /* Wait a bit to see if anyone else needs a requeue */
602 lwi
= (struct l_wait_info
) { 0 };
603 l_wait_event(rq_waitq
, rq_state
& (RQ_NOW
| RQ_STOP
),
605 spin_lock(&config_list_lock
);
607 /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
608 rq_state
&= ~RQ_RUNNING
;
609 spin_unlock(&config_list_lock
);
613 CDEBUG(D_MGC
, "Ending requeue thread\n");
617 /* Add a cld to the list to requeue. Start the requeue thread if needed.
618 * We are responsible for dropping the config log reference from here on out.
620 static void mgc_requeue_add(struct config_llog_data
*cld
)
622 CDEBUG(D_INFO
, "log %s: requeue (r=%d sp=%d st=%x)\n",
623 cld
->cld_logname
, atomic_read(&cld
->cld_refcount
),
624 cld
->cld_stopping
, rq_state
);
625 LASSERT(atomic_read(&cld
->cld_refcount
) > 0);
627 mutex_lock(&cld
->cld_lock
);
628 if (cld
->cld_stopping
|| cld
->cld_lostlock
) {
629 mutex_unlock(&cld
->cld_lock
);
632 /* this refcount will be released in mgc_requeue_thread. */
634 cld
->cld_lostlock
= 1;
635 mutex_unlock(&cld
->cld_lock
);
637 /* Hold lock for rq_state */
638 spin_lock(&config_list_lock
);
639 if (rq_state
& RQ_STOP
) {
640 spin_unlock(&config_list_lock
);
641 cld
->cld_lostlock
= 0;
645 spin_unlock(&config_list_lock
);
650 static int mgc_llog_init(const struct lu_env
*env
, struct obd_device
*obd
)
652 struct llog_ctxt
*ctxt
;
655 /* setup only remote ctxt, the local disk context is switched per each
656 * filesystem during mgc_fs_setup()
658 rc
= llog_setup(env
, obd
, &obd
->obd_olg
, LLOG_CONFIG_REPL_CTXT
, obd
,
663 ctxt
= llog_get_context(obd
, LLOG_CONFIG_REPL_CTXT
);
666 llog_initiator_connect(ctxt
);
672 static int mgc_llog_fini(const struct lu_env
*env
, struct obd_device
*obd
)
674 struct llog_ctxt
*ctxt
;
676 ctxt
= llog_get_context(obd
, LLOG_CONFIG_REPL_CTXT
);
678 llog_cleanup(env
, ctxt
);
683 static atomic_t mgc_count
= ATOMIC_INIT(0);
684 static int mgc_precleanup(struct obd_device
*obd
, enum obd_cleanup_stage stage
)
690 case OBD_CLEANUP_EARLY
:
692 case OBD_CLEANUP_EXPORTS
:
693 if (atomic_dec_and_test(&mgc_count
)) {
694 LASSERT(rq_state
& RQ_RUNNING
);
695 /* stop requeue thread */
698 /* wakeup requeue thread to clean our cld */
699 temp
= RQ_NOW
| RQ_PRECLEANUP
;
701 spin_lock(&config_list_lock
);
703 spin_unlock(&config_list_lock
);
706 wait_for_completion(&rq_exit
);
707 obd_cleanup_client_import(obd
);
708 rc
= mgc_llog_fini(NULL
, obd
);
710 CERROR("failed to cleanup llogging subsystems\n");
716 static int mgc_cleanup(struct obd_device
*obd
)
718 /* COMPAT_146 - old config logs may have added profiles we don't
721 if (obd
->obd_type
->typ_refcnt
<= 1)
722 /* Only for the last mgc */
723 class_del_profiles();
725 lprocfs_obd_cleanup(obd
);
728 return client_obd_cleanup(obd
);
731 static int mgc_setup(struct obd_device
*obd
, struct lustre_cfg
*lcfg
)
733 struct lprocfs_static_vars lvars
= { NULL
};
734 struct task_struct
*task
;
739 rc
= client_obd_setup(obd
, lcfg
);
743 rc
= mgc_llog_init(NULL
, obd
);
745 CERROR("failed to setup llogging subsystems\n");
749 lprocfs_mgc_init_vars(&lvars
);
750 lprocfs_obd_setup(obd
, lvars
.obd_vars
, lvars
.sysfs_vars
);
751 sptlrpc_lprocfs_cliobd_attach(obd
);
753 if (atomic_inc_return(&mgc_count
) == 1) {
755 init_waitqueue_head(&rq_waitq
);
757 /* start requeue thread */
758 task
= kthread_run(mgc_requeue_thread
, NULL
, "ll_cfg_requeue");
761 CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n",
765 /* rc is the task_struct pointer of mgc_requeue_thread. */
767 wait_for_completion(&rq_start
);
773 client_obd_cleanup(obd
);
779 /* based on ll_mdc_blocking_ast */
780 static int mgc_blocking_ast(struct ldlm_lock
*lock
, struct ldlm_lock_desc
*desc
,
781 void *data
, int flag
)
783 struct lustre_handle lockh
;
784 struct config_llog_data
*cld
= data
;
788 case LDLM_CB_BLOCKING
:
789 /* mgs wants the lock, give it up... */
790 LDLM_DEBUG(lock
, "MGC blocking CB");
791 ldlm_lock2handle(lock
, &lockh
);
792 rc
= ldlm_cli_cancel(&lockh
, LCF_ASYNC
);
794 case LDLM_CB_CANCELING
:
795 /* We've given up the lock, prepare ourselves to update. */
796 LDLM_DEBUG(lock
, "MGC cancel CB");
798 CDEBUG(D_MGC
, "Lock res "DLDLMRES
" (%.8s)\n",
799 PLDLMRES(lock
->l_resource
),
800 (char *)&lock
->l_resource
->lr_name
.name
[0]);
803 CDEBUG(D_INFO
, "missing data, won't requeue\n");
807 /* held at mgc_process_log(). */
808 LASSERT(atomic_read(&cld
->cld_refcount
) > 0);
809 /* Are we done with this log? */
810 if (cld
->cld_stopping
) {
811 CDEBUG(D_MGC
, "log %s: stopping, won't requeue\n",
816 /* Make sure not to re-enqueue when the mgc is stopping
817 * (we get called from client_disconnect_export)
819 if (!lock
->l_conn_export
||
820 !lock
->l_conn_export
->exp_obd
->u
.cli
.cl_conn_count
) {
821 CDEBUG(D_MGC
, "log %.8s: disconnecting, won't requeue\n",
828 mgc_requeue_add(cld
);
/* Not sure where this should go... */
/* This is the timeout value for MGS_CONNECT request plus a ping interval, such
 * that we can have a chance to try the secondary MGS if any.
 */
#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
			   + PING_INTERVAL)
#define MGC_TARGET_REG_LIMIT	10
#define MGC_SEND_PARAM_LIMIT	10
847 /* Send parameter to MGS*/
848 static int mgc_set_mgs_param(struct obd_export
*exp
,
849 struct mgs_send_param
*msp
)
851 struct ptlrpc_request
*req
;
852 struct mgs_send_param
*req_msp
, *rep_msp
;
855 req
= ptlrpc_request_alloc_pack(class_exp2cliimp(exp
),
856 &RQF_MGS_SET_INFO
, LUSTRE_MGS_VERSION
,
861 req_msp
= req_capsule_client_get(&req
->rq_pill
, &RMF_MGS_SEND_PARAM
);
863 ptlrpc_req_finished(req
);
867 memcpy(req_msp
, msp
, sizeof(*req_msp
));
868 ptlrpc_request_set_replen(req
);
870 /* Limit how long we will wait for the enqueue to complete */
871 req
->rq_delay_limit
= MGC_SEND_PARAM_LIMIT
;
872 rc
= ptlrpc_queue_wait(req
);
874 rep_msp
= req_capsule_server_get(&req
->rq_pill
, &RMF_MGS_SEND_PARAM
);
875 memcpy(msp
, rep_msp
, sizeof(*rep_msp
));
878 ptlrpc_req_finished(req
);
883 /* Take a config lock so we can get cancel notifications */
884 static int mgc_enqueue(struct obd_export
*exp
, struct lov_stripe_md
*lsm
,
885 __u32 type
, ldlm_policy_data_t
*policy
, __u32 mode
,
886 __u64
*flags
, void *bl_cb
, void *cp_cb
, void *gl_cb
,
887 void *data
, __u32 lvb_len
, void *lvb_swabber
,
888 struct lustre_handle
*lockh
)
890 struct config_llog_data
*cld
= data
;
891 struct ldlm_enqueue_info einfo
= {
894 .ei_cb_bl
= mgc_blocking_ast
,
895 .ei_cb_cp
= ldlm_completion_ast
,
897 struct ptlrpc_request
*req
;
898 int short_limit
= cld_is_sptlrpc(cld
);
901 CDEBUG(D_MGC
, "Enqueue for %s (res %#llx)\n", cld
->cld_logname
,
902 cld
->cld_resid
.name
[0]);
904 /* We need a callback for every lockholder, so don't try to
905 * ldlm_lock_match (see rev 1.1.2.11.2.47)
907 req
= ptlrpc_request_alloc_pack(class_exp2cliimp(exp
),
908 &RQF_LDLM_ENQUEUE
, LUSTRE_DLM_VERSION
,
913 req_capsule_set_size(&req
->rq_pill
, &RMF_DLM_LVB
, RCL_SERVER
, 0);
914 ptlrpc_request_set_replen(req
);
916 /* Limit how long we will wait for the enqueue to complete */
917 req
->rq_delay_limit
= short_limit
? 5 : MGC_ENQUEUE_LIMIT
;
918 rc
= ldlm_cli_enqueue(exp
, &req
, &einfo
, &cld
->cld_resid
, NULL
, flags
,
919 NULL
, 0, LVB_T_NONE
, lockh
, 0);
920 /* A failed enqueue should still call the mgc_blocking_ast,
921 * where it will be requeued if needed ("grant failed").
923 ptlrpc_req_finished(req
);
927 static void mgc_notify_active(struct obd_device
*unused
)
929 /* wakeup mgc_requeue_thread to requeue mgc lock */
930 spin_lock(&config_list_lock
);
932 spin_unlock(&config_list_lock
);
935 /* TODO: Help the MGS rebuild nidtbl. -jay */
938 /* Send target_reg message to MGS */
939 static int mgc_target_register(struct obd_export
*exp
,
940 struct mgs_target_info
*mti
)
942 struct ptlrpc_request
*req
;
943 struct mgs_target_info
*req_mti
, *rep_mti
;
946 req
= ptlrpc_request_alloc_pack(class_exp2cliimp(exp
),
947 &RQF_MGS_TARGET_REG
, LUSTRE_MGS_VERSION
,
952 req_mti
= req_capsule_client_get(&req
->rq_pill
, &RMF_MGS_TARGET_INFO
);
954 ptlrpc_req_finished(req
);
958 memcpy(req_mti
, mti
, sizeof(*req_mti
));
959 ptlrpc_request_set_replen(req
);
960 CDEBUG(D_MGC
, "register %s\n", mti
->mti_svname
);
961 /* Limit how long we will wait for the enqueue to complete */
962 req
->rq_delay_limit
= MGC_TARGET_REG_LIMIT
;
964 rc
= ptlrpc_queue_wait(req
);
966 rep_mti
= req_capsule_server_get(&req
->rq_pill
,
967 &RMF_MGS_TARGET_INFO
);
968 memcpy(mti
, rep_mti
, sizeof(*rep_mti
));
969 CDEBUG(D_MGC
, "register %s got index = %d\n",
970 mti
->mti_svname
, mti
->mti_stripe_index
);
972 ptlrpc_req_finished(req
);
977 static int mgc_set_info_async(const struct lu_env
*env
, struct obd_export
*exp
,
978 u32 keylen
, void *key
, u32 vallen
,
979 void *val
, struct ptlrpc_request_set
*set
)
983 /* Turn off initial_recov after we try all backup servers once */
984 if (KEY_IS(KEY_INIT_RECOV_BACKUP
)) {
985 struct obd_import
*imp
= class_exp2cliimp(exp
);
988 if (vallen
!= sizeof(int))
991 CDEBUG(D_MGC
, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
992 imp
->imp_obd
->obd_name
, value
,
993 imp
->imp_deactive
, imp
->imp_invalid
,
994 imp
->imp_replayable
, imp
->imp_obd
->obd_replayable
,
995 ptlrpc_import_state_name(imp
->imp_state
));
996 /* Resurrect if we previously died */
997 if ((imp
->imp_state
!= LUSTRE_IMP_FULL
&&
998 imp
->imp_state
!= LUSTRE_IMP_NEW
) || value
> 1)
999 ptlrpc_reconnect_import(imp
);
1002 if (KEY_IS(KEY_SET_INFO
)) {
1003 struct mgs_send_param
*msp
;
1006 rc
= mgc_set_mgs_param(exp
, msp
);
1009 if (KEY_IS(KEY_MGSSEC
)) {
1010 struct client_obd
*cli
= &exp
->exp_obd
->u
.cli
;
1011 struct sptlrpc_flavor flvr
;
1014 * empty string means using current flavor, if which haven't
1015 * been set yet, set it as null.
1017 * if flavor has been set previously, check the asking flavor
1018 * must match the existing one.
1021 if (cli
->cl_flvr_mgc
.sf_rpc
!= SPTLRPC_FLVR_INVALID
)
1027 rc
= sptlrpc_parse_flavor(val
, &flvr
);
1029 CERROR("invalid sptlrpc flavor %s to MGS\n",
1035 * caller already hold a mutex
1037 if (cli
->cl_flvr_mgc
.sf_rpc
== SPTLRPC_FLVR_INVALID
) {
1038 cli
->cl_flvr_mgc
= flvr
;
1039 } else if (memcmp(&cli
->cl_flvr_mgc
, &flvr
,
1040 sizeof(flvr
)) != 0) {
1043 sptlrpc_flavor2name(&cli
->cl_flvr_mgc
,
1045 LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
1055 static int mgc_get_info(const struct lu_env
*env
, struct obd_export
*exp
,
1056 __u32 keylen
, void *key
, __u32
*vallen
, void *val
,
1057 struct lov_stripe_md
*unused
)
1061 if (KEY_IS(KEY_CONN_DATA
)) {
1062 struct obd_import
*imp
= class_exp2cliimp(exp
);
1063 struct obd_connect_data
*data
= val
;
1065 if (*vallen
== sizeof(*data
)) {
1066 *data
= imp
->imp_connect_data
;
1074 static int mgc_import_event(struct obd_device
*obd
,
1075 struct obd_import
*imp
,
1076 enum obd_import_event event
)
1078 LASSERT(imp
->imp_obd
== obd
);
1079 CDEBUG(D_MGC
, "import event %#x\n", event
);
1082 case IMP_EVENT_DISCON
:
1083 /* MGC imports should not wait for recovery */
1084 if (OCD_HAS_FLAG(&imp
->imp_connect_data
, IMP_RECOV
))
1085 ptlrpc_pinger_ir_down();
1087 case IMP_EVENT_INACTIVE
:
1089 case IMP_EVENT_INVALIDATE
: {
1090 struct ldlm_namespace
*ns
= obd
->obd_namespace
;
1092 ldlm_namespace_cleanup(ns
, LDLM_FL_LOCAL_ONLY
);
1095 case IMP_EVENT_ACTIVE
:
1096 CDEBUG(D_INFO
, "%s: Reactivating import\n", obd
->obd_name
);
1097 /* Clearing obd_no_recov allows us to continue pinging */
1098 obd
->obd_no_recov
= 0;
1099 mgc_notify_active(obd
);
1100 if (OCD_HAS_FLAG(&imp
->imp_connect_data
, IMP_RECOV
))
1101 ptlrpc_pinger_ir_up();
1105 case IMP_EVENT_DEACTIVATE
:
1106 case IMP_EVENT_ACTIVATE
:
1109 CERROR("Unknown import event %#x\n", event
);
1116 CONFIG_READ_NRPAGES_INIT
= 1 << (20 - PAGE_SHIFT
),
1117 CONFIG_READ_NRPAGES
= 4
1120 static int mgc_apply_recover_logs(struct obd_device
*mgc
,
1121 struct config_llog_data
*cld
,
1123 void *data
, int datalen
, bool mne_swab
)
1125 struct config_llog_instance
*cfg
= &cld
->cld_cfg
;
1126 struct mgs_nidtbl_entry
*entry
;
1127 struct lustre_cfg
*lcfg
;
1128 struct lustre_cfg_bufs bufs
;
1129 u64 prev_version
= 0;
1137 LASSERT(cfg
->cfg_instance
);
1138 LASSERT(cfg
->cfg_sb
== cfg
->cfg_instance
);
1140 inst
= kzalloc(PAGE_SIZE
, GFP_KERNEL
);
1144 pos
= snprintf(inst
, PAGE_SIZE
, "%p", cfg
->cfg_instance
);
1145 if (pos
>= PAGE_SIZE
) {
1152 bufsz
= PAGE_SIZE
- pos
;
1154 while (datalen
> 0) {
1155 int entry_len
= sizeof(*entry
);
1157 struct obd_device
*obd
;
1164 if (datalen
< sizeof(*entry
))
1167 entry
= (typeof(entry
))(data
+ off
);
1170 if (entry
->mne_nid_type
!= 0) /* only support type 0 for ipv4 */
1172 if (entry
->mne_nid_count
== 0) /* at least one nid entry */
1174 if (entry
->mne_nid_size
!= sizeof(lnet_nid_t
))
1177 entry_len
+= entry
->mne_nid_count
* entry
->mne_nid_size
;
1178 if (datalen
< entry_len
) /* must have entry_len at least */
1181 /* Keep this swab for normal mixed endian handling. LU-1644 */
1183 lustre_swab_mgs_nidtbl_entry(entry
);
1184 if (entry
->mne_length
> PAGE_SIZE
) {
1185 CERROR("MNE too large (%u)\n", entry
->mne_length
);
1189 if (entry
->mne_length
< entry_len
)
1192 off
+= entry
->mne_length
;
1193 datalen
-= entry
->mne_length
;
1197 if (entry
->mne_version
> max_version
) {
1198 CERROR("entry index(%lld) is over max_index(%lld)\n",
1199 entry
->mne_version
, max_version
);
1203 if (prev_version
>= entry
->mne_version
) {
1204 CERROR("index unsorted, prev %lld, now %lld\n",
1205 prev_version
, entry
->mne_version
);
1208 prev_version
= entry
->mne_version
;
1211 * Write a string with format "nid::instance" to
1212 * lustre/<osc|mdc>/<target>-<osc|mdc>-<instance>/import.
1215 is_ost
= entry
->mne_type
== LDD_F_SV_TYPE_OST
;
1216 memset(buf
, 0, bufsz
);
1220 /* lustre-OST0001-osc-<instance #> */
1221 strcpy(obdname
, cld
->cld_logname
);
1222 cname
= strrchr(obdname
, '-');
1224 CERROR("mgc %s: invalid logname %s\n",
1225 mgc
->obd_name
, obdname
);
1229 pos
= cname
- obdname
;
1231 pos
+= sprintf(obdname
+ pos
, "-%s%04x",
1232 is_ost
? "OST" : "MDT", entry
->mne_index
);
1234 cname
= is_ost
? "osc" : "mdc";
1235 pos
+= sprintf(obdname
+ pos
, "-%s-%s", cname
, inst
);
1236 lustre_cfg_bufs_reset(&bufs
, obdname
);
1238 /* find the obd by obdname */
1239 obd
= class_name2obd(obdname
);
1241 CDEBUG(D_INFO
, "mgc %s: cannot find obdname %s\n",
1242 mgc
->obd_name
, obdname
);
1244 /* this is a safe race, when the ost is starting up...*/
1248 /* osc.import = "connection=<Conn UUID>::<target instance>" */
1251 pos
+= sprintf(params
, "%s.import=%s", cname
, "connection=");
1254 down_read(&obd
->u
.cli
.cl_sem
);
1255 if (!obd
->u
.cli
.cl_import
) {
1256 /* client does not connect to the OST yet */
1257 up_read(&obd
->u
.cli
.cl_sem
);
1262 /* TODO: iterate all nids to find one */
1263 /* find uuid by nid */
1264 rc
= client_import_find_conn(obd
->u
.cli
.cl_import
,
1266 (struct obd_uuid
*)uuid
);
1267 up_read(&obd
->u
.cli
.cl_sem
);
1269 CERROR("mgc: cannot find uuid by nid %s\n",
1270 libcfs_nid2str(entry
->u
.nids
[0]));
1274 CDEBUG(D_INFO
, "Find uuid %s by nid %s\n",
1275 uuid
, libcfs_nid2str(entry
->u
.nids
[0]));
1277 pos
+= strlen(uuid
);
1278 pos
+= sprintf(buf
+ pos
, "::%u", entry
->mne_instance
);
1279 LASSERT(pos
< bufsz
);
1281 lustre_cfg_bufs_set_string(&bufs
, 1, params
);
1284 lcfg
= lustre_cfg_new(LCFG_PARAM
, &bufs
);
1286 CERROR("mgc: cannot allocate memory\n");
1290 CDEBUG(D_INFO
, "ir apply logs %lld/%lld for %s -> %s\n",
1291 prev_version
, max_version
, obdname
, params
);
1293 rc
= class_process_config(lcfg
);
1294 lustre_cfg_free(lcfg
);
1296 CDEBUG(D_INFO
, "process config for %s error %d\n",
1299 /* continue, even one with error */
1307 * This function is called if this client was notified for target restarting
1308 * by the MGS. A CONFIG_READ RPC is going to send to fetch recovery logs.
1310 static int mgc_process_recover_log(struct obd_device
*obd
,
1311 struct config_llog_data
*cld
)
1313 struct ptlrpc_request
*req
= NULL
;
1314 struct config_llog_instance
*cfg
= &cld
->cld_cfg
;
1315 struct mgs_config_body
*body
;
1316 struct mgs_config_res
*res
;
1317 struct ptlrpc_bulk_desc
*desc
;
1318 struct page
**pages
;
1326 /* allocate buffer for bulk transfer.
1327 * if this is the first time for this mgs to read logs,
1328 * CONFIG_READ_NRPAGES_INIT will be used since it will read all logs
1329 * once; otherwise, it only reads increment of logs, this should be
1330 * small and CONFIG_READ_NRPAGES will be used.
1332 nrpages
= CONFIG_READ_NRPAGES
;
1333 if (cfg
->cfg_last_idx
== 0) /* the first time */
1334 nrpages
= CONFIG_READ_NRPAGES_INIT
;
1336 pages
= kcalloc(nrpages
, sizeof(*pages
), GFP_KERNEL
);
1342 for (i
= 0; i
< nrpages
; i
++) {
1343 pages
[i
] = alloc_page(GFP_KERNEL
);
1351 LASSERT(cld_is_recover(cld
));
1352 LASSERT(mutex_is_locked(&cld
->cld_lock
));
1353 req
= ptlrpc_request_alloc(class_exp2cliimp(cld
->cld_mgcexp
),
1354 &RQF_MGS_CONFIG_READ
);
1360 rc
= ptlrpc_request_pack(req
, LUSTRE_MGS_VERSION
, MGS_CONFIG_READ
);
1365 body
= req_capsule_client_get(&req
->rq_pill
, &RMF_MGS_CONFIG_BODY
);
1366 LASSERT(sizeof(body
->mcb_name
) > strlen(cld
->cld_logname
));
1367 if (strlcpy(body
->mcb_name
, cld
->cld_logname
, sizeof(body
->mcb_name
))
1368 >= sizeof(body
->mcb_name
)) {
1372 body
->mcb_offset
= cfg
->cfg_last_idx
+ 1;
1373 body
->mcb_type
= cld
->cld_type
;
1374 body
->mcb_bits
= PAGE_SHIFT
;
1375 body
->mcb_units
= nrpages
;
1377 /* allocate bulk transfer descriptor */
1378 desc
= ptlrpc_prep_bulk_imp(req
, nrpages
, 1, BULK_PUT_SINK
,
1385 for (i
= 0; i
< nrpages
; i
++)
1386 ptlrpc_prep_bulk_page_pin(desc
, pages
[i
], 0, PAGE_SIZE
);
1388 ptlrpc_request_set_replen(req
);
1389 rc
= ptlrpc_queue_wait(req
);
1393 res
= req_capsule_server_get(&req
->rq_pill
, &RMF_MGS_CONFIG_RES
);
1394 if (res
->mcr_size
< res
->mcr_offset
) {
1399 /* always update the index even though it might have errors with
1400 * handling the recover logs
1402 cfg
->cfg_last_idx
= res
->mcr_offset
;
1403 eof
= res
->mcr_offset
== res
->mcr_size
;
1405 CDEBUG(D_INFO
, "Latest version %lld, more %d.\n",
1406 res
->mcr_offset
, eof
== false);
1408 ealen
= sptlrpc_cli_unwrap_bulk_read(req
, req
->rq_bulk
, 0);
1414 if (ealen
> nrpages
<< PAGE_SHIFT
) {
1419 if (ealen
== 0) { /* no logs transferred */
1425 mne_swab
= !!ptlrpc_rep_need_swab(req
);
1426 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
1427 /* This import flag means the server did an extra swab of IR MNE
1428 * records (fixed in LU-1252), reverse it here if needed. LU-1644
1430 if (unlikely(req
->rq_import
->imp_need_mne_swab
))
1431 mne_swab
= !mne_swab
;
1433 #warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
1436 for (i
= 0; i
< nrpages
&& ealen
> 0; i
++) {
1440 ptr
= kmap(pages
[i
]);
1441 rc2
= mgc_apply_recover_logs(obd
, cld
, res
->mcr_offset
, ptr
,
1442 min_t(int, ealen
, PAGE_SIZE
),
1446 CWARN("Process recover log %s error %d\n",
1447 cld
->cld_logname
, rc2
);
1456 ptlrpc_req_finished(req
);
1458 if (rc
== 0 && !eof
)
1462 for (i
= 0; i
< nrpages
; i
++) {
1465 __free_page(pages
[i
]);
1472 /* local_only means it cannot get remote llogs */
1473 static int mgc_process_cfg_log(struct obd_device
*mgc
,
1474 struct config_llog_data
*cld
, int local_only
)
1476 struct llog_ctxt
*ctxt
;
1477 struct lustre_sb_info
*lsi
= NULL
;
1479 bool sptlrpc_started
= false;
1483 LASSERT(mutex_is_locked(&cld
->cld_lock
));
1486 * local copy of sptlrpc log is controlled elsewhere, don't try to
1489 if (cld_is_sptlrpc(cld
) && local_only
)
1492 if (cld
->cld_cfg
.cfg_sb
)
1493 lsi
= s2lsi(cld
->cld_cfg
.cfg_sb
);
1495 env
= kzalloc(sizeof(*env
), GFP_KERNEL
);
1499 rc
= lu_env_init(env
, LCT_MG_THREAD
);
1503 ctxt
= llog_get_context(mgc
, LLOG_CONFIG_REPL_CTXT
);
1506 if (local_only
) /* no local log at client side */ {
1511 if (cld_is_sptlrpc(cld
)) {
1512 sptlrpc_conf_log_update_begin(cld
->cld_logname
);
1513 sptlrpc_started
= true;
1516 /* logname and instance info should be the same, so use our
1517 * copy of the instance for the update. The cfg_last_idx will
1520 rc
= class_config_parse_llog(env
, ctxt
, cld
->cld_logname
,
1524 __llog_ctxt_put(env
, ctxt
);
1527 * update settings on existing OBDs. doing it inside
1528 * of llog_process_lock so no device is attaching/detaching
1530 * the logname must be <fsname>-sptlrpc
1532 if (sptlrpc_started
) {
1533 LASSERT(cld_is_sptlrpc(cld
));
1534 sptlrpc_conf_log_update_end(cld
->cld_logname
);
1535 class_notify_sptlrpc_conf(cld
->cld_logname
,
1536 strlen(cld
->cld_logname
) -
1537 strlen("-sptlrpc"));
1546 /** Get a config log from the MGS and process it.
1547 * This func is called for both clients and servers.
1548 * Copy the log locally before parsing it if appropriate (non-MGS server)
1550 int mgc_process_log(struct obd_device
*mgc
, struct config_llog_data
*cld
)
1552 struct lustre_handle lockh
= { 0 };
1553 __u64 flags
= LDLM_FL_NO_LRU
;
1558 /* I don't want multiple processes running process_log at once --
1559 * sounds like badness. It actually might be fine, as long as
1560 * we're not trying to update from the same log
1561 * simultaneously (in which case we should use a per-log sem.)
1563 mutex_lock(&cld
->cld_lock
);
1564 if (cld
->cld_stopping
) {
1565 mutex_unlock(&cld
->cld_lock
);
1569 OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG
, 20);
1571 CDEBUG(D_MGC
, "Process log %s:%p from %d\n", cld
->cld_logname
,
1572 cld
->cld_cfg
.cfg_instance
, cld
->cld_cfg
.cfg_last_idx
+ 1);
1574 /* Get the cfg lock on the llog */
1575 rcl
= mgc_enqueue(mgc
->u
.cli
.cl_mgc_mgsexp
, NULL
, LDLM_PLAIN
, NULL
,
1576 LCK_CR
, &flags
, NULL
, NULL
, NULL
,
1577 cld
, 0, NULL
, &lockh
);
1579 /* Get the cld, it will be released in mgc_blocking_ast. */
1580 config_log_get(cld
);
1581 rc
= ldlm_lock_set_data(&lockh
, (void *)cld
);
1584 CDEBUG(D_MGC
, "Can't get cfg lock: %d\n", rcl
);
1586 /* mark cld_lostlock so that it will requeue
1587 * after MGC becomes available.
1589 cld
->cld_lostlock
= 1;
1590 /* Get extra reference, it will be put in requeue thread */
1591 config_log_get(cld
);
1594 if (cld_is_recover(cld
)) {
1595 rc
= 0; /* this is not a fatal error for recover log */
1597 rc
= mgc_process_recover_log(mgc
, cld
);
1599 rc
= mgc_process_cfg_log(mgc
, cld
, rcl
!= 0);
1602 CDEBUG(D_MGC
, "%s: configuration from log '%s' %sed (%d).\n",
1603 mgc
->obd_name
, cld
->cld_logname
, rc
? "fail" : "succeed", rc
);
1605 mutex_unlock(&cld
->cld_lock
);
1607 /* Now drop the lock so MGS can revoke it */
1609 ldlm_lock_decref(&lockh
, LCK_CR
);
1614 /** Called from lustre_process_log.
1615 * LCFG_LOG_START gets the config log from the MGS, processes it to start
1616 * any services, and adds it to the list logs to watch (follow).
1618 static int mgc_process_config(struct obd_device
*obd
, u32 len
, void *buf
)
1620 struct lustre_cfg
*lcfg
= buf
;
1621 struct config_llog_instance
*cfg
= NULL
;
1625 switch (lcfg
->lcfg_command
) {
1626 case LCFG_LOV_ADD_OBD
: {
1627 /* Overloading this cfg command: register a new target */
1628 struct mgs_target_info
*mti
;
1630 if (LUSTRE_CFG_BUFLEN(lcfg
, 1) !=
1631 sizeof(struct mgs_target_info
)) {
1636 mti
= (struct mgs_target_info
*)lustre_cfg_buf(lcfg
, 1);
1637 CDEBUG(D_MGC
, "add_target %s %#x\n",
1638 mti
->mti_svname
, mti
->mti_flags
);
1639 rc
= mgc_target_register(obd
->u
.cli
.cl_mgc_mgsexp
, mti
);
1642 case LCFG_LOV_DEL_OBD
:
1643 /* Unregister has no meaning at the moment. */
1644 CERROR("lov_del_obd unimplemented\n");
1647 case LCFG_SPTLRPC_CONF
: {
1648 rc
= sptlrpc_process_config(lcfg
);
1651 case LCFG_LOG_START
: {
1652 struct config_llog_data
*cld
;
1653 struct super_block
*sb
;
1655 logname
= lustre_cfg_string(lcfg
, 1);
1656 cfg
= (struct config_llog_instance
*)lustre_cfg_buf(lcfg
, 2);
1657 sb
= *(struct super_block
**)lustre_cfg_buf(lcfg
, 3);
1659 CDEBUG(D_MGC
, "parse_log %s from %d\n", logname
,
1662 /* We're only called through here on the initial mount */
1663 rc
= config_log_add(obd
, logname
, cfg
, sb
);
1666 cld
= config_log_find(logname
, cfg
);
1673 /* FIXME only set this for old logs! Right now this forces
1674 * us to always skip the "inside markers" check
1676 cld
->cld_cfg
.cfg_flags
|= CFG_F_COMPAT146
;
1678 rc
= mgc_process_log(obd
, cld
);
1679 if (rc
== 0 && cld
->cld_recover
) {
1680 if (OCD_HAS_FLAG(&obd
->u
.cli
.cl_import
->
1681 imp_connect_data
, IMP_RECOV
)) {
1682 rc
= mgc_process_log(obd
, cld
->cld_recover
);
1684 struct config_llog_data
*cir
= cld
->cld_recover
;
1686 cld
->cld_recover
= NULL
;
1687 config_log_put(cir
);
1690 CERROR("Cannot process recover llog %d\n", rc
);
1693 if (rc
== 0 && cld
->cld_params
) {
1694 rc
= mgc_process_log(obd
, cld
->cld_params
);
1695 if (rc
== -ENOENT
) {
1697 "There is no params config file yet\n");
1700 /* params log is optional */
1703 "%s: can't process params llog: rc = %d\n",
1706 config_log_put(cld
);
1710 case LCFG_LOG_END
: {
1711 logname
= lustre_cfg_string(lcfg
, 1);
1713 if (lcfg
->lcfg_bufcount
>= 2)
1714 cfg
= (struct config_llog_instance
*)lustre_cfg_buf(
1716 rc
= config_log_end(logname
, cfg
);
1720 CERROR("Unknown command: %d\n", lcfg
->lcfg_command
);
1730 static struct obd_ops mgc_obd_ops
= {
1731 .owner
= THIS_MODULE
,
1733 .precleanup
= mgc_precleanup
,
1734 .cleanup
= mgc_cleanup
,
1735 .add_conn
= client_import_add_conn
,
1736 .del_conn
= client_import_del_conn
,
1737 .connect
= client_connect_import
,
1738 .disconnect
= client_disconnect_export
,
1739 /* .enqueue = mgc_enqueue, */
1740 /* .iocontrol = mgc_iocontrol, */
1741 .set_info_async
= mgc_set_info_async
,
1742 .get_info
= mgc_get_info
,
1743 .import_event
= mgc_import_event
,
1744 .process_config
= mgc_process_config
,
1747 static int __init
mgc_init(void)
1749 return class_register_type(&mgc_obd_ops
, NULL
,
1750 LUSTRE_MGC_NAME
, NULL
);
1753 static void /*__exit*/ mgc_exit(void)
1755 class_unregister_type(LUSTRE_MGC_NAME
);
1758 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
1759 MODULE_DESCRIPTION("Lustre Management Client");
1760 MODULE_VERSION(LUSTRE_VERSION_STRING
);
1761 MODULE_LICENSE("GPL");
1763 module_init(mgc_init
);
1764 module_exit(mgc_exit
);