/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  dpcsup.c
 *
 * Abstract: All DPC processing routines for the cyclone board occur here.
 *
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <asm/semaphore.h>

#include "aacraid.h"
/**
 *	aac_response_normal	-	Handle command replies
 *	@q: Queue to read from
 *
 *	This DPC routine will be run when the adapter interrupts us to let us
 *	know there is a response on our normal priority queue. We will pull off
 *	all QE there are and wake up all the waiters before exiting. We will
 *	take a spinlock out on the queue before operating on it.
 *
 *	Returns 0 always.
 */

unsigned int aac_response_normal(struct aac_queue * q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	struct hw_fib * hwfib;
	struct fib * fib;
	int consumed = 0;	/* number of QEs drained on this pass */
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		int fast;
		/*
		 * The queue entry encodes both the fib index and flags:
		 * bit 0 marks a "fast" response (adapter returned only a
		 * status, not a full fib), bits 2.. index into dev->fibs.
		 */
		u32 index = le32_to_cpu(entry->addr);
		fast = index & 0x01;
		fib = &dev->fibs[index >> 2];
		hwfib = fib->hw_fib_va;

		aac_consumer_free(dev, q, HostNormRespQueue);
		/*
		 *	Remove this fib from the Outstanding I/O queue.
		 *	But only if it has not already been timed out.
		 *
		 *	If the fib has been timed out already, then just
		 *	continue. The caller has already been notified that
		 *	the fib timed out.
		 */
		dev->queues->queue[AdapNormCmdQueue].numpending--;

		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
			/* Drop the queue lock around completion: the
			 * complete/free paths may sleep or take other locks. */
			spin_unlock_irqrestore(q->lock, flags);
			aac_fib_complete(fib);
			aac_fib_free(fib);
			spin_lock_irqsave(q->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(q->lock, flags);

		if (fast) {
			/*
			 *	Doctor the fib: a fast response carries no
			 *	payload, so synthesize an ST_OK status and mark
			 *	the fib as processed by the adapter.
			 */
			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
		}

		FIB_COUNTER_INCREMENT(aac_config.FibRecved);

		if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
		{
			/* NuFileSystem commands: squash any high-half status
			 * bits down to a plain ST_OK. */
			__le32 *pstatus = (__le32 *)hwfib->data;
			if (*pstatus & cpu_to_le32(0xffff0000))
				*pstatus = cpu_to_le32(ST_OK);
		}
		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
		{
			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
			else
				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
			/*
			 *	NOTE:  we cannot touch the fib after this
			 *	    call, because it may have been deallocated.
			 */
			fib->callback(fib->callback_data, fib);
		} else {
			/* Synchronous caller is sleeping on event_wait:
			 * mark done and wake it under the fib's event lock. */
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			if (!fib->done)
				fib->done = 1;
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
			/* done == 2 means the waiter has abandoned the fib
			 * (e.g. interrupted); we own cleanup here. */
			if (fib->done == 2) {
				aac_fib_complete(fib);
				aac_fib_free(fib);
			}
		}
		consumed++;
		spin_lock_irqsave(q->lock, flags);
	}

	/* Bookkeeping for /proc style statistics. */
	if (consumed > aac_config.peak_fibs)
		aac_config.peak_fibs = consumed;
	if (consumed == 0)
		aac_config.zero_fibs++;

	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
149 | ||
150 | ||
/**
 *	aac_command_normal	-	handle commands
 *	@q: queue to process
 *
 *	This DPC routine will be queued when the adapter interrupts us to
 *	let us know there is a command on our normal priority queue. We will
 *	pull off all QE there are and wake up all the waiters before exiting.
 *	We will take a spinlock out on the queue before operating on it.
 *
 *	Returns 0 always.
 */

unsigned int aac_command_normal(struct aac_queue *q)
{
	struct aac_dev * dev = q->dev;
	struct aac_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(q->lock, flags);

	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system.
	 */
	while(aac_consumer_get(dev, q, &entry))
	{
		struct fib fibctx;	/* stack fallback fib */
		struct hw_fib * hw_fib;
		u32 index;
		struct fib *fib = &fibctx;

		/* entry->addr is a byte offset into the AIF area; convert
		 * it to an index into the mapped hw_fib array. */
		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
		hw_fib = &dev->aif_base_va[index];

		/*
		 *	Allocate a FIB at all costs. For non queued stuff
		 *	we can just use the stack so we are happy. We need
		 *	a fib object in order to manage the linked lists.
		 *	If the atomic allocation fails we fall back to the
		 *	stack fib and complete the command inline below.
		 */
		if (dev->aif_thread)
			if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
				fib = &fibctx;

		memset(fib, 0, sizeof(struct fib));
		INIT_LIST_HEAD(&fib->fiblink);
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;


		if (dev->aif_thread && fib != &fibctx) {
			/* Heap fib: hand it to the AIF thread via cmdq;
			 * the thread owns and frees it from here on
			 * (presumably — verify against the aif thread). */
			list_add_tail(&fib->fiblink, &q->cmdq);
			aac_consumer_free(dev, q, HostNormCmdQueue);
			wake_up_interruptible(&q->cmdready);
		} else {
			aac_consumer_free(dev, q, HostNormCmdQueue);
			/* Must drop the queue lock: adapter completion may
			 * sleep/take other locks. */
			spin_unlock_irqrestore(q->lock, flags);
			/*
			 *	Set the status of this FIB
			 */
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, sizeof(u32));
			spin_lock_irqsave(q->lock, flags);
		}
	}
	spin_unlock_irqrestore(q->lock, flags);
	return 0;
}
8e0c5ebd MH |
220 | |
221 | ||
222 | /** | |
223 | * aac_intr_normal - Handle command replies | |
224 | * @dev: Device | |
225 | * @index: completion reference | |
226 | * | |
227 | * This DPC routine will be run when the adapter interrupts us to let us | |
228 | * know there is a response on our normal priority queue. We will pull off | |
229 | * all QE there are and wake up all the waiters before exiting. | |
230 | */ | |
231 | ||
232 | unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index) | |
233 | { | |
234 | u32 index = le32_to_cpu(Index); | |
235 | ||
236 | dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index)); | |
237 | if ((index & 0x00000002L)) { | |
238 | struct hw_fib * hw_fib; | |
239 | struct fib * fib; | |
240 | struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; | |
241 | unsigned long flags; | |
242 | ||
243 | if (index == 0xFFFFFFFEL) /* Special Case */ | |
244 | return 0; /* Do nothing */ | |
245 | /* | |
246 | * Allocate a FIB. For non queued stuff we can just use | |
247 | * the stack so we are happy. We need a fib object in order to | |
248 | * manage the linked lists. | |
249 | */ | |
250 | if ((!dev->aif_thread) | |
4dbc22d7 | 251 | || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC)))) |
8e0c5ebd | 252 | return 1; |
4dbc22d7 | 253 | if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) { |
8e0c5ebd MH |
254 | kfree (fib); |
255 | return 1; | |
256 | } | |
142956af | 257 | memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + |
4dfb7cbe | 258 | (index & ~0x00000002L)), sizeof(struct hw_fib)); |
8e0c5ebd MH |
259 | INIT_LIST_HEAD(&fib->fiblink); |
260 | fib->type = FSAFS_NTC_FIB_CONTEXT; | |
261 | fib->size = sizeof(struct fib); | |
a8166a52 | 262 | fib->hw_fib_va = hw_fib; |
8e0c5ebd MH |
263 | fib->data = hw_fib->data; |
264 | fib->dev = dev; | |
265 | ||
266 | spin_lock_irqsave(q->lock, flags); | |
267 | list_add_tail(&fib->fiblink, &q->cmdq); | |
268 | wake_up_interruptible(&q->cmdready); | |
269 | spin_unlock_irqrestore(q->lock, flags); | |
270 | return 1; | |
271 | } else { | |
272 | int fast = index & 0x01; | |
273 | struct fib * fib = &dev->fibs[index >> 2]; | |
a8166a52 | 274 | struct hw_fib * hwfib = fib->hw_fib_va; |
8e0c5ebd MH |
275 | |
276 | /* | |
277 | * Remove this fib from the Outstanding I/O queue. | |
278 | * But only if it has not already been timed out. | |
279 | * | |
280 | * If the fib has been timed out already, then just | |
281 | * continue. The caller has already been notified that | |
282 | * the fib timed out. | |
283 | */ | |
03d44337 MH |
284 | dev->queues->queue[AdapNormCmdQueue].numpending--; |
285 | ||
286 | if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { | |
287 | aac_fib_complete(fib); | |
288 | aac_fib_free(fib); | |
8e0c5ebd MH |
289 | return 0; |
290 | } | |
291 | ||
8e0c5ebd MH |
292 | if (fast) { |
293 | /* | |
294 | * Doctor the fib | |
295 | */ | |
296 | *(__le32 *)hwfib->data = cpu_to_le32(ST_OK); | |
297 | hwfib->header.XferState |= cpu_to_le32(AdapterProcessed); | |
298 | } | |
299 | ||
300 | FIB_COUNTER_INCREMENT(aac_config.FibRecved); | |
301 | ||
302 | if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) | |
303 | { | |
304 | u32 *pstatus = (u32 *)hwfib->data; | |
305 | if (*pstatus & cpu_to_le32(0xffff0000)) | |
306 | *pstatus = cpu_to_le32(ST_OK); | |
307 | } | |
308 | if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) | |
309 | { | |
310 | if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected)) | |
311 | FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved); | |
312 | else | |
313 | FIB_COUNTER_INCREMENT(aac_config.AsyncRecved); | |
314 | /* | |
315 | * NOTE: we cannot touch the fib after this | |
316 | * call, because it may have been deallocated. | |
317 | */ | |
318 | fib->callback(fib->callback_data, fib); | |
319 | } else { | |
320 | unsigned long flagv; | |
321 | dprintk((KERN_INFO "event_wait up\n")); | |
322 | spin_lock_irqsave(&fib->event_lock, flagv); | |
c8f7b073 MH |
323 | if (!fib->done) |
324 | fib->done = 1; | |
8e0c5ebd MH |
325 | up(&fib->event_wait); |
326 | spin_unlock_irqrestore(&fib->event_lock, flagv); | |
327 | FIB_COUNTER_INCREMENT(aac_config.NormalRecved); | |
328 | } | |
329 | return 0; | |
330 | } | |
331 | } |