Commit | Line | Data |
---|---|---|
852c2936 MD |
1 | #ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H |
2 | #define _LINUX_RING_BUFFER_FRONTEND_TYPES_H | |
3 | ||
4 | /* | |
5 | * linux/ringbuffer/frontend_types.h | |
6 | * | |
7 | * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
8 | * | |
9 | * Ring Buffer Library Synchronization Header (types). | |
10 | * | |
11 | * Author: | |
12 | * Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
13 | * | |
14 | * See ring_buffer_frontend.c for more information on wait-free algorithms. | |
15 | * | |
16 | * Dual LGPL v2.1/GPL v2 license. | |
17 | */ | |
18 | ||
a6352fd4 MD |
19 | #include <string.h> |
20 | ||
14641deb MD |
21 | #include <urcu/list.h> |
22 | #include <urcu/uatomic.h> | |
23 | #include <urcu/ref.h> | |
24 | ||
25 | #include "ust/core.h" | |
26 | ||
27 | #include "usterr_signal_safe.h" | |
4931a13e MD |
28 | #include "config.h" |
29 | #include "backend_types.h" | |
a6352fd4 | 30 | #include "shm.h" |
852c2936 MD |
31 | |
/*
 * A sub-buffer switch is done during tracing (SWITCH_ACTIVE) or as a final
 * flush after tracing (SWITCH_FLUSH, so it won't write in the new
 * sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
37 | ||
/*
 * channel: collection of per-cpu ring buffers.
 *
 * Lives in shared memory (see the shm_header member); field layout is part
 * of the shared-memory ABI, so fields must not be reordered.
 */
struct channel {
	int record_disabled;		/* Non-zero disables recording on the
					 * whole channel; incremented by
					 * CHAN_WARN_ON() on internal error. */
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index.
						 */

	struct channel_backend backend;	/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	//wait_queue_head_t read_wait;	/* reader wait queue */
	int finalized;			/* Has channel been finalized */
	struct urcu_ref ref;		/* Reference count */
	DECLARE_SHMP(struct shm_header, shm_header);	/* Shared-memory
							 * mapping header. */
} ____cacheline_aligned;
852c2936 MD |
57 | |
58 | /* Per-subbuffer commit counters used on the hot path */ | |
59 | struct commit_counters_hot { | |
60 | union v_atomic cc; /* Commit counter */ | |
61 | union v_atomic seq; /* Consecutive commits */ | |
a6352fd4 | 62 | } ____cacheline_aligned; |
852c2936 MD |
63 | |
/*
 * Per-subbuffer commit counters used only on cold paths.
 * Kept in a separate cacheline-aligned structure so cold-path updates do not
 * contend with the hot-path counters above.
 */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
} ____cacheline_aligned;
852c2936 | 68 | |
/*
 * lib_ring_buffer: per-buffer ring buffer state.
 *
 * The field order is deliberate: the members up to record_disabled form the
 * write-side cache-hot cacheline. Layout is part of the shared-memory ABI;
 * do not reorder fields.
 */
struct lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
					/* Commit count per sub-buffer */
	long consumed;			/*
					 * Current reader offset in the buffer,
					 * standard atomic access (shared)
					 */
	int record_disabled;		/* Non-zero disables recording in this
					 * buffer only. */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the
					 * buffer.
					 */

	struct lib_ring_buffer_backend backend;	/* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
					/* Commit count per sub-buffer */
	long active_readers;		/*
					 * Active readers count,
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;	/* Number of records written */
	union v_atomic records_overrun;	/* Number of overwritten records */
	//wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	int finalized;			/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;	/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	int get_subbuf:1;		/* Sub-buffer being held by reader */
	int switch_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
	int read_timer_enabled:1;	/* Protected by ring_buffer_nohz_lock */
} ____cacheline_aligned;
852c2936 MD |
110 | |
111 | static inline | |
112 | void *channel_get_private(struct channel *chan) | |
113 | { | |
114 | return chan->backend.priv; | |
115 | } | |
116 | ||
/*
 * Issue a warning and disable the channel upon internal error.
 *
 * "c" may point to either a struct channel or a struct channel_backend
 * (dispatched at compile time with __same_type); any other type hits
 * BUG_ON(1). On error the channel's record_disabled counter is
 * incremented, which stops further event recording on that channel,
 * and WARN_ON(1) reports the problem.
 *
 * Evaluates to the (int) truth value of "cond", so it can be used inside
 * an if () condition like the kernel's WARN_ON().
 *
 * NOTE(review): the local names __chan/_____ret use leading underscores —
 * presumably deliberate to avoid capturing caller identifiers in "cond",
 * but they fall in the implementation-reserved namespace; confirm against
 * project convention before renaming.
 */
#define CHAN_WARN_ON(c, cond)					\
	({							\
		struct channel *__chan;				\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = caa_container_of((void *) (c),	\
						struct channel,		\
						backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			uatomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
140 | ||
141 | #endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */ |