Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * ItLpQueue.c | |
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | */ | |
10 | ||
11 | #include <linux/stddef.h> | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/sched.h> | |
512d31d6 | 14 | #include <linux/bootmem.h> |
7b01328d ME |
15 | #include <linux/seq_file.h> |
16 | #include <linux/proc_fs.h> | |
1da177e4 LT |
17 | #include <asm/system.h> |
18 | #include <asm/paca.h> | |
19 | #include <asm/iSeries/ItLpQueue.h> | |
20 | #include <asm/iSeries/HvLpEvent.h> | |
21 | #include <asm/iSeries/HvCallEvent.h> | |
1da177e4 | 22 | |
ab354b63 ME |
/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 * Forcing it into .data guarantees the storage is materialized in the
 * kernel image before the hypervisor's first write.
 */
struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));
30 | ||
7b01328d ME |
/*
 * Printable names for each LpEvent type, indexed identically to
 * xItLpQueue.xLpIntCountByType (see proc_lpevents_show()).  Trailing
 * tabs pad the names so the /proc counters line up in one column.
 * const: this table is read-only display data and must never be
 * modified at runtime.
 */
static const char *event_types[9] = {
	"Hypervisor\t\t",
	"Machine Facilities\t",
	"Session Manager\t",
	"SPD I/O\t\t",
	"Virtual Bus\t\t",
	"PCI I/O\t\t",
	"RIO I/O\t\t",
	"Virtual Lan\t\t",
	"Virtual I/O\t\t"
};
42 | ||
/*
 * Try to claim the queue-processing lock word xInUseWord.
 *
 * Returns 1 if this caller acquired the word (it was 0 and we stored 1),
 * 0 if it was already non-zero (another context is draining the queue).
 * Implemented as the usual PowerPC lwarx/stwcx. load-reserve /
 * store-conditional loop; the trailing eieio orders the successful
 * store before the caller's subsequent queue accesses.
 */
static __inline__ int set_inUse(void)
{
	int t;
	u32 * inUseP = &xItLpQueue.xInUseWord;

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%2		\n\
	cmpwi	0,%0,0		\n\
	li	%0,0		\n\
	bne-	2f		\n\
	addi	%0,%0,1		\n\
	stwcx.	%0,0,%2		\n\
	bne-	1b		\n\
2:	eieio"
		: "=&r" (t), "=m" (xItLpQueue.xInUseWord)
		: "r" (inUseP), "m" (xItLpQueue.xInUseWord)
		: "cc");

	return t;
}
63 | ||
/*
 * Release the queue-processing lock word taken by set_inUse().
 *
 * Note: no barrier here -- the only caller (ItLpQueue_process) issues
 * mb() immediately before calling, so all event processing is ordered
 * before the lock word clears.
 */
static __inline__ void clear_inUse(void)
{
	xItLpQueue.xInUseWord = 0;
}
68 | ||
/* Array of LpEvent handler functions, indexed by HvLpEvent type;
 * defined elsewhere and dispatched from ItLpQueue_process(). */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];

/* Non-zero while ItLpQueue_process() is running.  Used purely as a
 * recursion check (BUG() if re-entered); serialized by xInUseWord,
 * so no atomic type is needed. */
unsigned long ItLpQueueInProcess = 0;
72 | ||
/*
 * Return the event at the queue cursor if it is valid, else NULL.
 *
 * On success the cursor (xSlicCurEventPtr) advances past the event by
 * its size rounded up to LpEventAlign, wrapping back to the start of
 * the event stack once past xSlicLastValidEventPtr (the last slot
 * that can hold a maximal event).  The returned event is still marked
 * valid; the caller clears it via ItLpQueue_clearValid() when done.
 */
static struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
{
	struct HvLpEvent * nextLpEvent =
		(struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
	if ( nextLpEvent->xFlags.xValid ) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
				LpEventAlign ) /
				LpEventAlign ) *
				LpEventAlign;
		/* Wrap to beginning if no room at end */
		if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
			xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
	}
	else
		nextLpEvent = NULL;

	return nextLpEvent;
}
94 | ||
/* Number of processors allowed to take part in lp event processing;
 * defaults to all, overridable via the "spread_lpevents=" boot arg. */
static unsigned long spread_lpevents = NR_CPUS;

/*
 * Return non-zero if there is LpQueue work pending for this CPU:
 * either the event at the cursor is valid or the hypervisor has
 * flagged a queue overflow.  CPUs outside the spread_lpevents range
 * always report no work.  The bitwise '|' (not '||') reads both flags
 * unconditionally; the result is only tested for zero/non-zero.
 */
int ItLpQueue_isLpIntPending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
	return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
}
107 | ||
/*
 * Hand an event slot back to the hypervisor.
 *
 * Clears the valid bit of the event, and first clears the bits at
 * each LpEventAlign boundary inside a multi-block event that a queue
 * scan could mistake for valid bits.  mb() orders the sub-block
 * clears before the real valid bit -- presumably so the other side
 * never observes a half-cleared event.
 */
static void ItLpQueue_clearValid( struct HvLpEvent * event )
{
	/* Clear the valid bit of the event
	 * Also clear bits within this event that might
	 * look like valid bits (on 64-byte boundaries)
	 */
	unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
			  LpEventAlign ) - 1;
	switch ( extra ) {
	case 3:
		((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
		/* fall through */
	case 2:
		((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
		/* fall through */
	case 1:
		((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
		/* fall through */
	case 0:
		;
	}
	mb();	/* sub-block clears visible before the real valid bit */
	event->xFlags.xValid = 0;
}
129 | ||
/*
 * Drain the LpQueue: dispatch each valid event to its registered
 * handler, then ask the hypervisor for any overflow events, repeating
 * until the queue is empty.
 *
 * Returns the number of events processed; returns 0 immediately if
 * another context already holds the queue lock (recursion/SMP race).
 * Also accumulates the count into this CPU's paca lpevent_count,
 * which proc_lpevents_show() reports.
 */
unsigned ItLpQueue_process(struct pt_regs *regs)
{
	unsigned numIntsProcessed = 0;
	struct HvLpEvent * nextLpEvent;

	/* If we have recursed, just return */
	if ( !set_inUse() )
		return 0;

	/* Sanity check: with the lock held, this flag must be clear;
	 * a set flag means broken locking somewhere. */
	if (ItLpQueueInProcess == 0)
		ItLpQueueInProcess = 1;
	else
		BUG();

	for (;;) {
		nextLpEvent = ItLpQueue_getNextLpEvent();
		if ( nextLpEvent ) {
			/* Count events to return to caller
			 * and count processed events in xItLpQueue
			 */
			++numIntsProcessed;
			xItLpQueue.xLpIntCount++;
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent.  The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half. (perhaps for
			 * an ACK)
			 *
			 * Handlers are responsible for ACK processing
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
				xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
			     lpEventHandler[nextLpEvent->xType] )
				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );

			ItLpQueue_clearValid( nextLpEvent );
		} else if ( xItLpQueue.xPlicOverflowIntPending )
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
			HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
		else
			break;
	}

	ItLpQueueInProcess = 0;
	mb();	/* order all event processing before releasing the lock */
	clear_inUse();

	get_paca()->lpevent_count += numIntsProcessed;

	return numIntsProcessed;
}
0c885c17 ME |
192 | |
193 | static int set_spread_lpevents(char *str) | |
194 | { | |
195 | unsigned long val = simple_strtoul(str, NULL, 0); | |
196 | ||
197 | /* | |
198 | * The parameter is the number of processors to share in processing | |
199 | * lp events. | |
200 | */ | |
201 | if (( val > 0) && (val <= NR_CPUS)) { | |
202 | spread_lpevents = val; | |
203 | printk("lpevent processing spread over %ld processors\n", val); | |
204 | } else { | |
205 | printk("invalid spread_lpevents %ld\n", val); | |
206 | } | |
207 | ||
208 | return 1; | |
209 | } | |
210 | __setup("spread_lpevents=", set_spread_lpevents); | |
211 | ||
512d31d6 ME |
/*
 * Boot-time setup: allocate the LpEvent stack, register it with the
 * hypervisor, and initialize the queue cursors.
 */
void setup_hvlpevent_queue(void)
{
	void *eventStack;

	/*
	 * Allocate a page for the Event Stack and zero it before handing
	 * it to the hypervisor.
	 *
	 * NOTE(review): an older comment claimed the absolute real
	 * address is computed here by subtracting KERNELBASE, but no such
	 * adjustment appears in this code -- presumably any translation
	 * happens inside HvCallEvent_setLpEventStack(); confirm.
	 */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	/* Cursor starts at the base; the wrap point is the last slot at
	 * which a maximal-size event still fits (see
	 * ItLpQueue_getNextLpEvent()). */
	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	xItLpQueue.xIndex = 0;
}
7b01328d ME |
233 | |
234 | static int proc_lpevents_show(struct seq_file *m, void *v) | |
235 | { | |
236 | unsigned int i; | |
237 | ||
238 | seq_printf(m, "LpEventQueue 0\n"); | |
239 | seq_printf(m, " events processed:\t%lu\n", | |
240 | (unsigned long)xItLpQueue.xLpIntCount); | |
241 | ||
242 | for (i = 0; i < 9; ++i) | |
243 | seq_printf(m, " %s %10lu\n", event_types[i], | |
244 | (unsigned long)xItLpQueue.xLpIntCountByType[i]); | |
245 | ||
246 | seq_printf(m, "\n events processed by processor:\n"); | |
247 | ||
248 | for_each_online_cpu(i) | |
249 | seq_printf(m, " CPU%02d %10u\n", i, paca[i].lpevent_count); | |
250 | ||
251 | return 0; | |
252 | } | |
253 | ||
/* seq_file open hook: single-shot show function, no private data. */
static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}
258 | ||
/* File operations for /proc/iSeries/lpevents; standard read-only
 * seq_file plumbing around proc_lpevents_show(). */
static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
265 | ||
266 | static int __init proc_lpevents_init(void) | |
267 | { | |
268 | struct proc_dir_entry *e; | |
269 | ||
270 | e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL); | |
271 | if (e) | |
272 | e->proc_fops = &proc_lpevents_operations; | |
273 | ||
274 | return 0; | |
275 | } | |
276 | __initcall(proc_lpevents_init); | |
277 |