/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include "../../include/linux/libcfs/libcfs.h"
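/*
 * Global tracing state (summarizing how the code below uses it):
 * cfs_trace_data[] is the per-type, per-CPU trace buffer table;
 * cfs_tracefile/cfs_tracefile_size name and bound the optional dump file;
 * trace_tctl, thread_running and cfs_trace_thread_mutex control the single
 * "ktracefiled" daemon thread; cfs_tage_allocated counts live trace pages.
 */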
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
struct page_collection {
        struct list_head        pc_pages;
        /*
         * if this flag is set, collect_pages() will spill both
         * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
         * only ->tcd_pages are spilled.
         */
        int                     pc_want_daemon_pages;
};
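/*
 * Control block for the tracefiled daemon thread: completions for the
 * start/stop handshake, the wait queue the thread sleeps on between passes,
 * and the shutdown flag it polls (see tracefiled(), cfs_trace_start_thread()
 * and cfs_trace_stop_thread() below).
 */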
struct tracefiled_ctl {
        struct completion       tctl_start;
        struct completion       tctl_stop;
        wait_queue_head_t       tctl_waitq;
        atomic_t                tctl_shutdown;
};
/*
 * small data-structure for each page owned by tracefiled.
 */
struct cfs_trace_page {
        /* page itself */
        struct page             *page;
        /*
         * linkage into one of the lists in trace_data_union or
         * page_collection
         */
        struct list_head        linkage;
        /* number of bytes used within this page */
        unsigned int            used;
        /* cpu that owns this page */
        unsigned short          cpu;
        /* type(context) of this page */
        unsigned short          type;
};
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
        struct page *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (!page)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (!tage) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);
        return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        list_move_tail(&tage->linkage, queue);
}
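/*
 * Pre-allocate trace pages onto 'stock' until the per-CPU stock holds
 * TCD_STOCK_PAGES pages, so that cfs_trace_get_tage_try() below can pull a
 * ready-made page instead of allocating one on the hot path.
 */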
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
                           struct list_head *stock)
{
        int i;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
                struct cfs_trace_page *tage;

                tage = cfs_tage_alloc(gfp);
                if (!tage)
                        break;
                list_add_tail(&tage->linkage, stock);
        }
        return i;
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(!tage)) {
                                if ((!memory_pressure_get() ||
                                     in_interrupt()) && printk_ratelimit())
                                        printk(KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}
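/*
 * Called when the per-CPU buffer has overflowed: move roughly 10% of the
 * oldest pages out of ->tcd_pages.  They are not freed here; they are handed
 * to put_pages_on_tcd_daemon_list() so the debug daemon can still dump them.
 */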
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(KERN_WARNING
                       "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_SIZE) {
                pr_err("cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}
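/*
 * libcfs_debug_msg() is the simple entry point: it just packages its varargs
 * and hands them to libcfs_debug_vmsg2() with no secondary format string.
 */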
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        va_list args;
        int rc;

        va_start(args, format);
        rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
        va_end(args);

        return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
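/*
 * Core message writer.  It formats the message into the current CPU's trace
 * page (allocating a new one through cfs_trace_get_tage() when needed) and
 * then, if the mask requests console output, echoes it to the console,
 * subject to the cfs_debug_limit_state rate limiting handled below.
 */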
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                       const char *format1, va_list args,
                       const char *format2, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header header = {0};
        struct cfs_trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int depth;
        int i;
        int remain;
        int mask = msgdata->msg_mask;
        const char *file = kbasename(msgdata->msg_file);
        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU. This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled.
         */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (!tcd)                       /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        depth = __current_nesting_level();
        known_size = strlen(file) + 1 + depth;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * '2' used because vsnprintf returns the real size required for the
         * output _without_ the terminating NUL, so the loop below gets one
         * retry in case 'needed' is too small for this format.
         */
        for (i = 0; i < 2; i++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (!tage) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %d\n",
                               max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf + needed, remain,
                                            format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }
        if (*(string_buf + needed - 1) != '\n')
                printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
                       file, msgdata->msg_line, msgdata->msg_fn);

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        /* indent message according to the nesting level */
        while (depth-- > 0) {
                *(debug_buf++) = '.';
                ++tage->used;
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strcpy(debug_buf, msgdata->msg_fn);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_SIZE);
console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (cfs_time_after(cfs_time_current(),
                                   cdls->cdls_next + libcfs_console_max_delay +
                                   cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
        }
        if (tcd) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf,
                                           CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                           format1, ap);
                        va_end(ap);
                }
                if (format2) {
                        remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf + needed, remain,
                                                    format2, ap);
                                va_end(ap);
                        }
                }
                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);
        }

        if (cdls && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);
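/*
 * Called on assertion failure: mark that a panic is in progress, push the
 * assertion text straight to the console and then panic the machine.
 */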
void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");
}
static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch.
         */
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                 &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}
static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}
static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon.
 */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}
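/*
 * Dump every buffered trace record (daemon pages included) straight to the
 * console: walk each collected page, decode the packed records it holds
 * (header, file name, function name, then the message text), print them with
 * cfs_print_to_console() and free the page.
 */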
void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
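/*
 * Dump the whole trace buffer to 'filename': the file is created exclusively
 * (O_CREAT | O_EXCL), every collected page is written out with vfs_write()
 * and freed, the file is fsync'ed, and the tracefile write lock serializes
 * the whole operation.
 */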
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        char *buf;
        int rc;

        cfs_tracefile_write_lock();

        filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
                         0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                pr_err("LustreError: can't open %s for dump: rc %d\n",
                       filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO
         */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                buf = kmap(tage->page);
                rc = vfs_write(filp, (__force const char __user *)buf,
                               tage->used, &filp->f_pos);
                kunmap(tage->page);

                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "wanted to write %u but wrote %d\n",
                               tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }

        rc = vfs_fsync(filp, 1);
        if (rc)
                pr_err("sync returns %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        cfs_tracefile_write_unlock();
        return rc;
}
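/*
 * Throw away everything that is currently buffered: collect all pages
 * (daemon pages included) and free them without writing them anywhere.
 */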
void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char __user *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user((void *)knl_buffer,
                           usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (nob-- >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /*
         * NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
         */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_SIZE)        /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
        if (!*str)
                return -ENOMEM;

        return 0;
}
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc)
                goto out;

        rc = cfs_tracefile_dump_all_pages(str);
out:
        kfree(str);
        return rc;
}
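/*
 * Parse a debug daemon control string.  Judging from the branches below, the
 * accepted forms are:
 *
 *   "stop"       - shut the daemon thread down and forget the file name
 *   "size=<MB>"  - set cfs_tracefile_size; values outside 10..20480 MB fall
 *                  back to CFS_TRACEFILE_SIZE
 *   "/abs/path"  - record the output file name and start the daemon thread
 *
 * Anything else (a name that is too long, or not an absolute path) is
 * rejected.
 */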
int cfs_trace_daemon_command(char *str)
{
        int rc = 0;

        cfs_tracefile_write_lock();

        if (strcmp(str, "stop") == 0) {
                cfs_tracefile_write_unlock();
                cfs_trace_stop_thread();
                cfs_tracefile_write_lock();
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                unsigned long tmp;

                rc = kstrtoul(str + 5, 10, &tmp);
                if (!rc) {
                        if (tmp < 10 || tmp > 20480)
                                cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                        else
                                cfs_tracefile_size = tmp << 20;
                }
        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                printk(KERN_INFO
                       "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
                       cfs_tracefile,
                       (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        cfs_tracefile_write_unlock();
        return rc;
}
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (!rc)
                rc = cfs_trace_daemon_command(str);

        kfree(str);
        return rc;
}
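/*
 * Resize the trace buffers to roughly 'mb' megabytes in total.  The request
 * is clamped between num_possible_cpus() MB and cfs_trace_max_debug_mb(),
 * split evenly across the possible CPUs, and each per-CPU buffer gets its
 * share scaled by tcd_pages_factor.
 */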
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        int pages;
        int limit = cfs_trace_max_debug_mb();
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
                       mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
                       mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        cfs_tracefile_write_lock();

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        cfs_tracefile_write_unlock();

        return 0;
}
int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        cfs_tracefile_read_lock();

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        cfs_tracefile_read_unlock();

        return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
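/*
 * Main loop of the "ktracefiled" daemon: once a second (or when woken), it
 * collects freshly written pages, appends them to cfs_tracefile if a file
 * name is configured, then parks the pages on the per-CPU daemon ring
 * buffers.  It keeps going until tctl_shutdown is set, with one extra pass
 * to drain whatever is left.
 */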
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        struct file *filp;
        char *buf;
        int last_loop = 0;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml? what on earth is going on? */

        complete(&tctl->tctl_start);

        while (1) {
                wait_queue_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                cfs_tracefile_read_lock();
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                printk(KERN_WARNING "couldn't open %s: %d\n",
                                       cfs_tracefile, rc);
                        }
                }
                cfs_tracefile_read_unlock();
                if (!filp) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > i_size_read(file_inode(filp)))
                                f_pos = i_size_read(file_inode(filp));

                        buf = kmap(tage->page);
                        rc = vfs_write(filp, (__force const char __user *)buf,
                                       tage->used, &f_pos);
                        kunmap(tage->page);

                        if (rc != (int)tage->used) {
                                printk(KERN_WARNING "wanted to write %u but wrote %d\n",
                                       tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
                        pr_err("total cpus(%d): ", num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        pr_cont("%d(on) ", i);
                                else
                                        pr_cont("%d(off) ", i);
                        pr_cont("\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                pr_err("page %d belongs to cpu %d\n",
                                       ++i, tage->cpu);
                        pr_err("There are %d pages unwritten\n", i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}
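/*
 * Start the daemon thread if it is not already running; the caller returns
 * only after the thread has signalled tctl_start.  Protected by
 * cfs_trace_thread_mutex.
 */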
int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        struct task_struct *task;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        task = kthread_run(tracefiled, tctl, "ktracefiled");
        if (IS_ERR(task)) {
                rc = PTR_ERR(task);
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}
void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                printk(KERN_INFO
                       "Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;
        int rc;
        int factor;

        rc = cfs_tracefile_init_arch();
        if (rc)
                return rc;

        cfs_tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
                factor = tcd->tcd_pages_factor;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }
        return 0;
}
static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }

                        tcd->tcd_cur_pages = 0;
                }
        }
}
static void cfs_trace_cleanup(void)
{
        struct page_collection pc;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        cfs_tracefile_fini_arch();
}
void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}