/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include "../../include/linux/libcfs/libcfs.h"

/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);

struct page_collection {
	struct list_head pc_pages;
	/*
	 * if this flag is set, collect_pages() will spill both
	 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
	 * only ->tcd_pages are spilled.
	 */
	int pc_want_daemon_pages;
};

struct tracefiled_ctl {
	struct completion tctl_start;
	struct completion tctl_stop;
	wait_queue_head_t tctl_waitq;
	pid_t tctl_pid;
	atomic_t tctl_shutdown;
};

/*
 * small data-structure for each page owned by tracefiled.
 */
struct cfs_trace_page {
	/*
	 * page itself
	 */
	struct page *page;
	/*
	 * linkage into one of the lists in trace_data_union or
	 * page_collection
	 */
	struct list_head linkage;
	/*
	 * number of bytes used within this page
	 */
	unsigned int used;
	/*
	 * cpu that owns this page
	 */
	unsigned short cpu;
	/*
	 * type(context) of this page
	 */
	unsigned short type;
};

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}

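/*
 * Allocate a trace page and its bookkeeping structure. Returns NULL on
 * allocation failure, or immediately when the caller is itself trying
 * to free memory, so callers must tolerate a NULL result.
 */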
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && memory_pressure_get())
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (!page)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (!tage) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}

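/* release a trace page and its bookkeeping structure */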
static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	list_move_tail(&tage->linkage, queue);
}

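/*
 * Pre-allocate pages onto 'stock' until the per-CPU stock is full;
 * returns the number of pages actually added (allocation may stop
 * early under memory pressure).
 */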
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
			   struct list_head *stock)
{
	int i;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
		struct cfs_trace_page *tage;

		tage = cfs_tage_alloc(gfp);
		if (!tage)
			break;
		list_add_tail(&tage->linkage, stock);
	}
	return i;
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(!tage)) {
				if ((!memory_pressure_get() ||
				     in_interrupt()) && printk_ratelimit())
					printk(KERN_WARNING
					       "cannot allocate a tage (%ld)\n",
					       tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}

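/*
 * The trace buffer is full: discard the oldest tenth of the pages,
 * moving them onto the per-CPU daemon ringbuffer so they remain
 * available for a crash dump.
 */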
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (printk_ratelimit())
		printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
		       pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (len > PAGE_SIZE) {
		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}

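/* printf-style wrapper around libcfs_debug_vmsg2() */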
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
	va_end(args);

	return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);

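/*
 * Core debug-message routine: format the message into the current
 * CPU's trace buffer and, subject to the console mask and rate
 * limiting, echo it to the console as well.
 */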
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
		       const char *format1, va_list args,
		       const char *format2, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header header = {0};
	struct cfs_trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85; /* average message length */
	int max_nob;
	va_list ap;
	int depth;
	int i;
	int remain;
	int mask = msgdata->msg_mask;
	const char *file = kbasename(msgdata->msg_file);
	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

	tcd = cfs_trace_get_tcd();

	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
	 * pins us to a particular CPU. This avoids an smp_processor_id()
	 * warning on Linux when debugging is enabled.
	 */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

	if (!tcd) /* arch may not log in IRQ context */
		goto console;

	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;

	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	depth = __current_nesting_level();
	known_size = strlen(file) + 1 + depth;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;

	if (libcfs_debug_binary)
		known_size += sizeof(header);

	/*
	 * We try twice: vsnprintf() returns the size required for the
	 * output _without_ the terminating NUL, so if the initial
	 * 'needed' estimate was too small for this format, the second
	 * pass retries with the size the first pass reported.
	 */
	for (i = 0; i < 2; i++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (!tage) {
			if (needed + known_size > PAGE_SIZE)
				mask |= D_ERROR;

			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
			     tage->used + known_size;

		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %d\n",
			       max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}

		if (format2) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;

			va_start(ap, format2);
			needed += vsnprintf(string_buf + needed, remain,
					    format2, ap);
			va_end(ap);
		}

		if (needed < max_nob) /* the message fit, we are done */
			break;
	}

	if (*(string_buf + needed - 1) != '\n')
		printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
		       file, msgdata->msg_line, msgdata->msg_fn);

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	/* indent message according to the nesting level */
	while (depth-- > 0) {
		*(debug_buf++) = '.';
		++tage->used;
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (msgdata->msg_fn) {
		strcpy(debug_buf, msgdata->msg_fn);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}

	__LASSERT(debug_buf == string_buf);

	tage->used += needed;
	__LASSERT(tage->used <= PAGE_SIZE);

console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd)
			cfs_trace_put_tcd(tcd);
		return 1;
	}

	if (cdls) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 && /* not first time ever */
		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd)
				cfs_trace_put_tcd(tcd);
			return 1;
		}

		if (cfs_time_after(cfs_time_current(),
				   cdls->cdls_next + libcfs_console_max_delay +
				   cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}

		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
	}

	if (tcd) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();

		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf,
					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
					   format1, ap);
			va_end(ap);
		}
		if (format2) {
			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf + needed, remain,
						    format2, ap);
				va_end(ap);
			}
		}
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		put_cpu();
	}

	if (cdls && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();

		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				  "Skipped %d previous similar message%s\n",
				  cdls->cdls_count,
				  (cdls->cdls_count > 1) ? "s" : "");

		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		put_cpu();
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);

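/* report an assertion failure on the console, then panic */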
void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	mb();

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");

	/* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic. If this isn't true for some
	 * arch, this will have to be implemented separately in each arch.
	 */
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}

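/* gather the trace pages of every CPU onto pc->pc_pages */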
static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}

static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon.
 */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;

		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;

		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;

			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

			__LASSERT_TAGE_INVARIANT(victim);

			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}

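/* dump every collected trace record straight to the console */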
void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;

		__LASSERT_TAGE_INVARIANT(tage);

		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;

			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;
			len = hdr->ph_len - (int)(p - (char *)hdr);

			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

			p += len;
		}

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}

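/* write all collected trace pages to 'filename' and sync it to disk */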
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection pc;
	struct file *filp;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	char *buf;
	int rc;

	DECL_MMSPACE;

	cfs_tracefile_write_lock();

	filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
			 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		pr_err("LustreError: can't open %s for dump: rc %d\n",
		       filename, rc);
		goto out;
	}

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}

	/* ok, for now, just write the pages. in the future we'll be building
	 * iobufs with the pages and calling generic_direct_IO
	 */
	MMSPACE_OPEN;
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		buf = kmap(tage->page);
		rc = vfs_write(filp, (__force const char __user *)buf,
			       tage->used, &filp->f_pos);
		kunmap(tage->page);

		if (rc != (int)tage->used) {
			printk(KERN_WARNING "wanted to write %u but wrote %d\n",
			       tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
	MMSPACE_CLOSE;
	rc = vfs_fsync(filp, 1);
	if (rc)
		pr_err("sync returns %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	cfs_tracefile_write_unlock();
	return rc;
}

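/* discard all collected trace pages without writing them anywhere */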
void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}

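/*
 * Copy a string in from user space, stripping trailing whitespace and
 * NUL-terminating the result; fails if the string is empty or does not
 * fit in the kernel buffer.
 */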
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user((void *)knl_buffer,
			   usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (nob-- >= 0) /* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0) /* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob) /* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0; /* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/*
	 * NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
	 */
	int nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (!*str)
		return -ENOMEM;

	return 0;
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;

	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	kfree(str);
	return rc;
}

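/*
 * Handle a debug-daemon control string: "stop", "size=<MB>", or an
 * absolute path for the daemon to start writing to.
 */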
int cfs_trace_daemon_command(char *str)
{
	int rc = 0;

	cfs_tracefile_write_lock();

	if (strcmp(str, "stop") == 0) {
		cfs_tracefile_write_unlock();
		cfs_trace_stop_thread();
		cfs_tracefile_write_lock();
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		unsigned long tmp;

		rc = kstrtoul(str + 5, 10, &tmp);
		if (!rc) {
			if (tmp < 10 || tmp > 20480)
				cfs_tracefile_size = CFS_TRACEFILE_SIZE;
			else
				cfs_tracefile_size = tmp << 20;
		}
	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		printk(KERN_INFO
		       "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
		       cfs_tracefile,
		       (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	cfs_tracefile_write_unlock();
	return rc;
}

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);

	kfree(str);
	return rc;
}

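/*
 * Resize the trace buffers to 'mb' megabytes in total, clamped to
 * [num_possible_cpus(), cfs_trace_max_debug_mb()] and split between
 * CPUs according to each context's tcd_pages_factor.
 */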
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	int pages;
	int limit = cfs_trace_max_debug_mb();
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
		       mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
		       mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);

	cfs_tracefile_write_lock();

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	cfs_tracefile_write_unlock();

	return 0;
}

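/* report the current total trace buffer capacity, in megabytes */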
int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	cfs_tracefile_read_lock();

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	cfs_tracefile_read_unlock();

	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}

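/*
 * Main loop of the trace daemon: periodically collect dirty trace
 * pages, append them to cfs_tracefile (wrapping at cfs_tracefile_size)
 * and recycle the written pages onto the per-CPU daemon ringbuffers.
 */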
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	struct file *filp;
	char *buf;
	int last_loop = 0;
	int rc;

	DECL_MMSPACE;

	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml? what on earth is going on? */

	complete(&tctl->tctl_start);

	while (1) {
		wait_queue_t __wait;

		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;

		filp = NULL;
		cfs_tracefile_read_lock();
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				printk(KERN_WARNING "couldn't open %s: %d\n",
				       cfs_tracefile, rc);
			}
		}
		cfs_tracefile_read_unlock();
		if (!filp) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}

		MMSPACE_OPEN;

		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
			static loff_t f_pos;

			__LASSERT_TAGE_INVARIANT(tage);

			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > i_size_read(file_inode(filp)))
				f_pos = i_size_read(file_inode(filp));

			buf = kmap(tage->page);
			rc = vfs_write(filp, (__force const char __user *)buf,
				       tage->used, &f_pos);
			kunmap(tage->page);

			if (rc != (int)tage->used) {
				printk(KERN_WARNING "wanted to write %u but wrote %d\n",
				       tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}
		MMSPACE_CLOSE;

		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;

			printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
			pr_err("total cpus(%d): ", num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					pr_cont("%d(on) ", i);
				else
					pr_cont("%d(off) ", i);
			pr_cont("\n");

			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				pr_err("page %d belongs to cpu %d\n",
				       ++i, tage->cpu);
			pr_err("There are %d pages unwritten\n", i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}

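/* start the trace daemon thread if it is not already running */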
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	struct task_struct *task;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	task = kthread_run(tracefiled, tctl, "ktracefiled");
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}

void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		printk(KERN_INFO
		       "Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}

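/*
 * Set up the per-CPU trace buffers; 'max_pages' is apportioned per CPU
 * and context according to tcd_pages_factor.
 */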
int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int j;
	int rc;
	int factor;

	rc = cfs_tracefile_init_arch();
	if (rc != 0)
		return rc;

	cfs_tcd_for_each(tcd, i, j) {
		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch() */
		factor = tcd->tcd_pages_factor;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}

	return 0;
}

static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}

			tcd->tcd_cur_pages = 0;
		}
	}
}

static void cfs_trace_cleanup(void)
{
	struct page_collection pc;

	INIT_LIST_HEAD(&pc.pc_pages);

	trace_cleanup_on_all_cpus();

	cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}