/*
 * Filename: dma.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
        struct list_head        list;
        u8                      cmd;
        unsigned int            laddr;  /* Logical address */
        struct {
                u32             off;
                u32             cnt;
        } sub_page;
        dma_addr_t              dma_addr;
        struct page             *page;
        unsigned int            pg_off; /* Page Offset */
        rsxx_dma_cb             cb;
        void                    *cb_data;
};
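
/*
 * Note: sub_page.off and sub_page.cnt are in 512-byte sectors (hence the
 * "<< 9" conversions elsewhere in this file), so a single descriptor
 * never spans more than one RSXX_HW_BLK_SIZE hardware block; a cnt of 0
 * means a full block (see get_dma_size()).
 */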

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT	msecs_to_jiffies(10000)

struct hw_status {
        u8      status;
        u8      tag;
        __le16  count;
        __le32  _rsvd2;
        __le64  _rsvd3;
} __packed;

enum rsxx_dma_status {
        DMA_SW_ERR    = 0x1,
        DMA_HW_FAULT  = 0x2,
        DMA_CANCELLED = 0x4,
};

struct hw_cmd {
        u8      command;
        u8      tag;
        u8      _rsvd;
        u8      sub_page; /* Bit[0:2]: 512byte offset */
                          /* Bit[4:6]: 512byte count */
        __le32  device_addr;
        __le64  host_addr;
} __packed;
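
/*
 * Illustrative encoding (not from the original source): a 1 KiB transfer
 * starting one sector into a block (sub_page.off = 1, sub_page.cnt = 2)
 * packs into the sub_page byte as ((2 & 0x7) << 4) | (1 & 0x7) = 0x21;
 * see rsxx_issue_dmas() below.
 */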

enum rsxx_hw_cmd {
        HW_CMD_BLK_DISCARD      = 0x70,
        HW_CMD_BLK_WRITE        = 0x80,
        HW_CMD_BLK_READ         = 0xC0,
        HW_CMD_BLK_RECON_READ   = 0xE0,
};

enum rsxx_hw_status {
        HW_STATUS_CRC           = 0x01,
        HW_STATUS_HARD_ERR      = 0x02,
        HW_STATUS_SOFT_ERR      = 0x04,
        HW_STATUS_FAULT         = 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
        int                     next_tag;
        struct rsxx_dma         *dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
		(sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
        spinlock_t              lock;
        int                     head;
        struct dma_tracker      list[0];
};
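
/*
 * The trackers double as the tag allocator: free entries are chained
 * through next_tag into a singly linked free list rooted at head, and
 * head == -1 means all RSXX_MAX_OUTSTANDING_CMDS tags are in flight.
 */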

/*----------------- Misc Utility Functions -------------------*/
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
        unsigned long long tgt_addr8;

        tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
                      card->_stripe.upper_mask) |
                    ((addr8) & card->_stripe.lower_mask);
        do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
        return tgt_addr8;
}

static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
        unsigned int tgt;

        tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

        return tgt;
}

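/*
 * Worked example (illustrative, assuming stripe_size8 == RSXX_HW_BLK_SIZE
 * == 4096 and four targets): rsxx_dma_stripe_setup() below yields
 * target_shift = 12, target_mask = 0x3 and upper_shift = 2, so byte
 * address 0x5000 (hardware block 5) resolves to target 1, logical
 * address 1 -- a round-robin stripe across the targets.
 */
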
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
        if (dma->sub_page.cnt)
                return dma->sub_page.cnt << 9;
        else
                return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
static void set_tracker_dma(struct dma_tracker_list *trackers,
                            int tag,
                            struct rsxx_dma *dma)
{
        trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
                                        int tag)
{
        return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
        int tag;

        spin_lock(&trackers->lock);
        tag = trackers->head;
        if (tag != -1) {
                trackers->head = trackers->list[tag].next_tag;
                trackers->list[tag].next_tag = -1;
        }
        spin_unlock(&trackers->lock);

        return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
        spin_lock(&trackers->lock);
        trackers->list[tag].next_tag = trackers->head;
        trackers->head = tag;
        trackers->list[tag].dma = NULL;
        spin_unlock(&trackers->lock);
}


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 *	Reserved [31:25]
 */
#define INTR_COAL_LATENCY_MASK       (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT        16
#define INTR_COAL_COUNT_BITS         9
#define INTR_COAL_COUNT_MASK         (((1 << INTR_COAL_COUNT_BITS) - 1) << \
                                        INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS   64


static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
        u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

        if (mode == RSXX_INTR_COAL_DISABLED)
                return 0;

        return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
                        (latency_units & INTR_COAL_LATENCY_MASK);
}
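
/*
 * Example (illustrative values): count = 32 and latency = 5120 ns gives
 * latency_units = 80, so the register is programmed with
 * (32 << 16) | 80 = 0x00200050.
 */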

static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
        int i;
        u32 q_depth = 0;
        u32 intr_coal;

        if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
            unlikely(card->eeh_state))
                return;

        for (i = 0; i < card->n_targets; i++)
                q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      q_depth / 2,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
        if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
                pci_unmap_page(ctrl->card->dev, dma->dma_addr,
                               get_dma_size(dma),
                               dma->cmd == HW_CMD_BLK_WRITE ?
                                           PCI_DMA_TODEVICE :
                                           PCI_DMA_FROMDEVICE);
        }

        kmem_cache_free(rsxx_dma_pool, dma);
}

static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                              struct rsxx_dma *dma,
                              unsigned int status)
{
        if (status & DMA_SW_ERR)
                ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
                ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
                ctrl->stats.dma_cancelled++;

        if (dma->cb)
                dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

        rsxx_free_dma(ctrl, dma);
}

int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
                           struct list_head *q, unsigned int done)
{
        struct rsxx_dma *dma;
        struct rsxx_dma *tmp;
        int cnt = 0;

        list_for_each_entry_safe(dma, tmp, q, list) {
                list_del(&dma->list);
                if (done & COMPLETE_DMA)
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                else
                        rsxx_free_dma(ctrl, dma);
                cnt++;
        }

        return cnt;
}

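/*
 * Callers pass COMPLETE_DMA when the upper layers still expect completion
 * callbacks (each DMA is finished with DMA_CANCELLED) and FREE_DMA when
 * the descriptors were never issued and can simply be released.
 */
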
static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
                             struct rsxx_dma *dma)
{
        /*
         * Requeued DMAs go to the front of the queue so they are issued
         * first.
         */
        spin_lock_bh(&ctrl->queue_lock);
        ctrl->stats.sw_q_depth++;
        list_add(&dma->list, &ctrl->queue);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  u8 hw_st)
{
        unsigned int status = 0;
        int requeue_cmd = 0;

        dev_dbg(CARD_TO_DEV(ctrl->card),
                "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
                dma->cmd, dma->laddr, hw_st);

        if (hw_st & HW_STATUS_CRC)
                ctrl->stats.crc_errors++;
        if (hw_st & HW_STATUS_HARD_ERR)
                ctrl->stats.hard_errors++;
        if (hw_st & HW_STATUS_SOFT_ERR)
                ctrl->stats.soft_errors++;

        switch (dma->cmd) {
        case HW_CMD_BLK_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        if (ctrl->card->scrub_hard) {
                                dma->cmd = HW_CMD_BLK_RECON_READ;
                                requeue_cmd = 1;
                                ctrl->stats.reads_retried++;
                        } else {
                                status |= DMA_HW_FAULT;
                                ctrl->stats.reads_failed++;
                        }
                } else if (hw_st & HW_STATUS_FAULT) {
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_RECON_READ:
                if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
                        /* Data could not be reconstructed. */
                        status |= DMA_HW_FAULT;
                        ctrl->stats.reads_failed++;
                }

                break;
        case HW_CMD_BLK_WRITE:
                status |= DMA_HW_FAULT;
                ctrl->stats.writes_failed++;

                break;
        case HW_CMD_BLK_DISCARD:
                status |= DMA_HW_FAULT;
                ctrl->stats.discards_failed++;

                break;
        default:
                dev_err(CARD_TO_DEV(ctrl->card),
                        "Unknown command in DMA! (cmd: x%02x laddr x%08x st: x%02x)\n",
                        dma->cmd, dma->laddr, hw_st);
                status |= DMA_SW_ERR;

                break;
        }

        if (requeue_cmd)
                rsxx_requeue_dma(ctrl, dma);
        else
                rsxx_complete_dma(ctrl, dma, status);
}

static void dma_engine_stalled(unsigned long data)
{
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
        int cnt;

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
            unlikely(ctrl->card->eeh_state))
                return;

        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
                /*
                 * The dma engine was stalled because the SW_CMD_IDX write
                 * was lost. Issue it again to recover.
                 */
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "SW_CMD_IDX write was lost, re-writing...\n");
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);
        } else {
                dev_warn(CARD_TO_DEV(ctrl->card),
                         "DMA channel %d has stalled, faulting interface.\n",
                         ctrl->id);
                ctrl->card->dma_fault = 1;

                /* Clean up the DMA queue */
                spin_lock(&ctrl->queue_lock);
                cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
                spin_unlock(&ctrl->queue_lock);

                cnt += rsxx_dma_cancel(ctrl);

                if (cnt)
                        dev_info(CARD_TO_DEV(ctrl->card),
                                 "Freed %d queued DMAs on channel %d\n",
                                 cnt, ctrl->id);
        }
}

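/*
 * Issue path: DMAs are popped off the software queue, assigned a tag from
 * the tracker free list, mapped for DMA (discards carry no payload and
 * skip the mapping), and written into the command ring. The SW_CMD_IDX
 * write at the bottom is what actually hands the batch to the hardware.
 */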
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int tag;
        int cmds_pending = 0;
        struct hw_cmd *hw_cmd_buf;
        int dir;

        hw_cmd_buf = ctrl->cmd.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->eeh_state))
                return;

        while (1) {
                spin_lock_bh(&ctrl->queue_lock);
                if (list_empty(&ctrl->queue)) {
                        spin_unlock_bh(&ctrl->queue_lock);
                        break;
                }
                spin_unlock_bh(&ctrl->queue_lock);

                tag = pop_tracker(ctrl->trackers);
                if (tag == -1)
                        break;

                spin_lock_bh(&ctrl->queue_lock);
                dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                list_del(&dma->list);
                ctrl->stats.sw_q_depth--;
                spin_unlock_bh(&ctrl->queue_lock);

                /*
                 * This will catch any DMAs that slipped in right before the
                 * fault, but were queued after all the other DMAs were
                 * cancelled.
                 */
                if (unlikely(ctrl->card->dma_fault)) {
                        push_tracker(ctrl->trackers, tag);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        continue;
                }

                if (dma->cmd != HW_CMD_BLK_DISCARD) {
                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                dir = PCI_DMA_TODEVICE;
                        else
                                dir = PCI_DMA_FROMDEVICE;

                        /*
                         * The function pci_map_page is placed here because we
                         * can only, by design, issue up to 255 commands to the
                         * hardware at one time per DMA channel. So the maximum
                         * amount of mapped memory would be 255 * 4 channels *
                         * 4096 Bytes, which is less than 2GB, the limit of an
                         * x8 Non-HWWD PCIe slot. This way the pci_map_page
                         * function should never fail because of a lack of
                         * mappable memory.
                         */
                        dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
                                        dma->pg_off, dma->sub_page.cnt << 9, dir);
                        if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
                                push_tracker(ctrl->trackers, tag);
                                rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                                continue;
                        }
                }

                set_tracker_dma(ctrl->trackers, tag, dma);
                hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
                hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
                hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
                hw_cmd_buf[ctrl->cmd.idx].sub_page =
                                        ((dma->sub_page.cnt & 0x7) << 4) |
                                         (dma->sub_page.off & 0x7);

                hw_cmd_buf[ctrl->cmd.idx].device_addr =
                                        cpu_to_le32(dma->laddr);

                hw_cmd_buf[ctrl->cmd.idx].host_addr =
                                        cpu_to_le64(dma->dma_addr);

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Issue DMA%d(laddr %d tag %d) to idx %d\n",
                        ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

                ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
                cmds_pending++;

                if (dma->cmd == HW_CMD_BLK_WRITE)
                        ctrl->stats.writes_issued++;
                else if (dma->cmd == HW_CMD_BLK_DISCARD)
                        ctrl->stats.discards_issued++;
                else
                        ctrl->stats.reads_issued++;
        }

        /* Let HW know we've queued commands. */
        if (cmds_pending) {
                atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (unlikely(ctrl->card->eeh_state)) {
                        del_timer_sync(&ctrl->activity_timer);
                        return;
                }

                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
        }
}

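/*
 * Completion path: the hardware stamps each status-buffer entry with an
 * incrementing 16-bit event count, so an entry is new exactly when its
 * count matches the driver's expected count (e_cnt); the loop below
 * consumes entries until the counts stop matching.
 */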
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        unsigned long flags;
        u16 count;
        u8 status;
        u8 tag;
        struct hw_status *hw_st_buf;

        hw_st_buf = ctrl->status.buf;

        if (unlikely(ctrl->card->halt) ||
            unlikely(ctrl->card->dma_fault) ||
            unlikely(ctrl->card->eeh_state))
                return;

        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

        while (count == ctrl->e_cnt) {
                /*
                 * The read memory-barrier is necessary to keep aggressive
                 * processors/optimizers (such as the PPC Apple G5) from
                 * reordering the following status-buffer tag & status read
                 * *before* the count read on subsequent iterations of the
                 * loop!
                 */
                rmb();

                status = hw_st_buf[ctrl->status.idx].status;
                tag    = hw_st_buf[ctrl->status.idx].tag;

                dma = get_tracker_dma(ctrl->trackers, tag);
                if (dma == NULL) {
                        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
                        rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
                        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

                        dev_err(CARD_TO_DEV(ctrl->card),
                                "No tracker for tag %d (idx %d id %d)\n",
                                tag, ctrl->status.idx, ctrl->id);
                        return;
                }

                dev_dbg(CARD_TO_DEV(ctrl->card),
                        "Completing DMA%d(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
                        ctrl->id, dma->laddr, tag, status, count,
                        ctrl->status.idx);

                atomic_dec(&ctrl->stats.hw_q_depth);

                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);

                if (status)
                        rsxx_handle_dma_error(ctrl, dma, status);
                else
                        rsxx_complete_dma(ctrl, dma, 0);

                push_tracker(ctrl->trackers, tag);

                ctrl->status.idx = (ctrl->status.idx + 1) &
                                   RSXX_CS_IDX_MASK;
                ctrl->e_cnt++;

                count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
        }

        dma_intr_coal_auto_tune(ctrl->card);

        if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
                del_timer_sync(&ctrl->activity_timer);

        spin_lock_irqsave(&ctrl->card->irq_lock, flags);
        rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

        spin_lock_bh(&ctrl->queue_lock);
        if (ctrl->stats.sw_q_depth)
                queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
        spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_issue_dmas(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
        struct rsxx_dma_ctrl *ctrl;

        ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

        mutex_lock(&ctrl->work_lock);
        rsxx_dma_done(ctrl);
        mutex_unlock(&ctrl->work_lock);
}

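/*
 * Discards carry no data payload: page, pg_off and dma_addr stay zeroed,
 * and rsxx_issue_dmas() skips the PCI mapping step for
 * HW_CMD_BLK_DISCARD commands.
 */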
static int rsxx_queue_discard(struct rsxx_cardinfo *card,
                              struct list_head *q,
                              unsigned int laddr,
                              rsxx_dma_cb cb,
                              void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->cmd = HW_CMD_BLK_DISCARD;
        dma->laddr = laddr;
        dma->dma_addr = 0;
        dma->sub_page.off = 0;
        dma->sub_page.cnt = 0;
        dma->page = NULL;
        dma->pg_off = 0;
        dma->cb = cb;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

        list_add_tail(&dma->list, q);

        return 0;
}

static int rsxx_queue_dma(struct rsxx_cardinfo *card,
                          struct list_head *q,
                          int dir,
                          unsigned int dma_off,
                          unsigned int dma_len,
                          unsigned int laddr,
                          struct page *page,
                          unsigned int pg_off,
                          rsxx_dma_cb cb,
                          void *cb_data)
{
        struct rsxx_dma *dma;

        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
                return -ENOMEM;

        dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr = laddr;
        dma->sub_page.off = (dma_off >> 9);
        dma->sub_page.cnt = (dma_len >> 9);
        dma->page = page;
        dma->pg_off = pg_off;
        dma->cb = cb;
        dma->cb_data = cb_data;

        dev_dbg(CARD_TO_DEV(card),
                "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
                dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
                dma->sub_page.cnt, dma->page, dma->pg_off);

        /* Queue the DMA */
        list_add_tail(&dma->list, q);

        return 0;
}

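/*
 * Splits a bio into per-target DMA descriptors: each fragment is capped at
 * the distance to the next hardware-block boundary and routed to its
 * stripe target. The descriptors are built on local lists and only
 * spliced onto the live channel queues once the whole bio has been
 * carved up, so a mid-bio allocation failure unwinds without issuing
 * anything.
 */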
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                       struct bio *bio,
                       atomic_t *n_dmas,
                       rsxx_dma_cb cb,
                       void *cb_data)
{
        struct list_head dma_list[RSXX_MAX_TARGETS];
        struct bio_vec *bvec;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
        unsigned int bv_off;
        unsigned int dma_off;
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];
        int tgt;
        int st;
        int i;

        addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&dma_list[i]);
                dma_cnt[i] = 0;
        }

        if (bio->bi_rw & REQ_DISCARD) {
                bv_len = bio->bi_size;

                while (bv_len > 0) {
                        tgt   = rsxx_get_dma_tgt(card, addr8);
                        laddr = rsxx_addr8_to_laddr(addr8, card);

                        st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
                                                cb, cb_data);
                        if (st)
                                goto bvec_err;

                        dma_cnt[tgt]++;
                        atomic_inc(n_dmas);
                        addr8  += RSXX_HW_BLK_SIZE;
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
                bio_for_each_segment(bvec, bio, i) {
                        bv_len = bvec->bv_len;
                        bv_off = bvec->bv_offset;

                        while (bv_len > 0) {
                                tgt   = rsxx_get_dma_tgt(card, addr8);
                                laddr = rsxx_addr8_to_laddr(addr8, card);
                                dma_off = addr8 & RSXX_HW_BLK_MASK;
                                dma_len = min(bv_len,
                                              RSXX_HW_BLK_SIZE - dma_off);

                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                    bio_data_dir(bio),
                                                    dma_off, dma_len,
                                                    laddr, bvec->bv_page,
                                                    bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;

                                dma_cnt[tgt]++;
                                atomic_inc(n_dmas);
                                addr8  += dma_len;
                                bv_off += dma_len;
                                bv_len -= dma_len;
                        }
                }
        }

        for (i = 0; i < card->n_targets; i++) {
                if (!list_empty(&dma_list[i])) {
                        spin_lock_bh(&card->ctrl[i].queue_lock);
                        card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                        list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
                        spin_unlock_bh(&card->ctrl[i].queue_lock);

                        queue_work(card->ctrl[i].issue_wq,
                                   &card->ctrl[i].issue_dma_work);
                }
        }

        return 0;

bvec_err:
        for (i = 0; i < card->n_targets; i++)
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
                                       FREE_DMA);

        return st;
}


/*----------------- DMA Engine Initialization & Setup -------------------*/
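/*
 * The status buffer is pre-filled with 0xac and the command buffer with
 * 0x83 -- presumably distinctive poison patterns that make never-written
 * entries recognizable (an assumption; the original source does not
 * explain the constants).
 */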
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
        ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
                                                &ctrl->status.dma_addr);
        ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
                                             &ctrl->cmd.dma_addr);
        if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
                return -ENOMEM;

        memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->status.dma_addr),
                  ctrl->regmap + SB_ADD_HI);

        memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
        iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
        iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

        ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
        if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
                         ctrl->status.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
        iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

        ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
        if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
                dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
                         ctrl->cmd.idx);
                return -EINVAL;
        }
        iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
        iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

        return 0;
}

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
                              struct rsxx_dma_ctrl *ctrl)
{
        int i;
        int st;

        memset(&ctrl->stats, 0, sizeof(ctrl->stats));

        ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
        if (!ctrl->trackers)
                return -ENOMEM;

        ctrl->trackers->head = 0;
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                ctrl->trackers->list[i].next_tag = i + 1;
                ctrl->trackers->list[i].dma = NULL;
        }
        ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
        spin_lock_init(&ctrl->trackers->lock);

        spin_lock_init(&ctrl->queue_lock);
        mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);

        setup_timer(&ctrl->activity_timer, dma_engine_stalled,
                    (unsigned long)ctrl);

        ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
        if (!ctrl->issue_wq)
                return -ENOMEM;

        ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
        if (!ctrl->done_wq)
                return -ENOMEM;

        INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
        INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

        st = rsxx_hw_buffers_init(dev, ctrl);
        if (st)
                return st;

        return 0;
}

static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
                                 unsigned int stripe_size8)
{
        if (!is_power_of_2(stripe_size8)) {
                dev_err(CARD_TO_DEV(card),
                        "stripe_size is NOT a power of 2!\n");
                return -EINVAL;
        }

        card->_stripe.lower_mask = stripe_size8 - 1;

        card->_stripe.upper_mask  = ~(card->_stripe.lower_mask);
        card->_stripe.upper_shift = ffs(card->n_targets) - 1;

        card->_stripe.target_mask  = card->n_targets - 1;
        card->_stripe.target_shift = ffs(stripe_size8) - 1;

        dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask   = x%016llx\n",
                card->_stripe.lower_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift  = x%016llx\n",
                card->_stripe.upper_shift);
        dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask   = x%016llx\n",
                card->_stripe.upper_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask  = x%016llx\n",
                card->_stripe.target_mask);
        dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
                card->_stripe.target_shift);

        return 0;
}

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
        u32 intr_coal;

        intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
                                      card->config.data.intr_coal.count,
                                      card->config.data.intr_coal.latency);
        iowrite32(intr_coal, card->regmap + INTR_COAL);

        return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
        unsigned long flags;
        int st;
        int i;

        dev_info(CARD_TO_DEV(card),
                 "Initializing %d DMA targets\n",
                 card->n_targets);

        /* Regmap is divided up into 4K chunks. One for each DMA channel */
        for (i = 0; i < card->n_targets; i++)
                card->ctrl[i].regmap = card->regmap + (i * 4096);

        card->dma_fault = 0;

        /* Reset the DMA queues */
        rsxx_dma_queue_reset(card);

        /************* Setup DMA Control *************/
        for (i = 0; i < card->n_targets; i++) {
                st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
                if (st)
                        goto failed_dma_setup;

                card->ctrl[i].card = card;
                card->ctrl[i].id = i;
        }

        card->scrub_hard = 1;

        if (card->config_valid)
                rsxx_dma_configure(card);

        /* Enable the interrupts after all setup has completed. */
        for (i = 0; i < card->n_targets; i++) {
                spin_lock_irqsave(&card->irq_lock, flags);
                rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
                spin_unlock_irqrestore(&card->irq_lock, flags);
        }

        return 0;

failed_dma_setup:
        for (i = 0; i < card->n_targets; i++) {
                struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;
                }

                if (ctrl->done_wq) {
                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;
                }

                if (ctrl->trackers)
                        vfree(ctrl->trackers);

                if (ctrl->status.buf)
                        pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                            ctrl->status.buf,
                                            ctrl->status.dma_addr);
                if (ctrl->cmd.buf)
                        pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                            ctrl->cmd.buf, ctrl->cmd.dma_addr);
        }

        return st;
}

int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
        struct rsxx_dma *dma;
        int i;
        int cnt = 0;

        /* Clean up issued DMAs */
        for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
                dma = get_tracker_dma(ctrl->trackers, i);
                if (dma) {
                        atomic_dec(&ctrl->stats.hw_q_depth);
                        rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        push_tracker(ctrl->trackers, i);
                        cnt++;
                }
        }

        return cnt;
}

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
        struct rsxx_dma_ctrl *ctrl;
        int i;

        for (i = 0; i < card->n_targets; i++) {
                ctrl = &card->ctrl[i];

                if (ctrl->issue_wq) {
                        destroy_workqueue(ctrl->issue_wq);
                        ctrl->issue_wq = NULL;
                }

                if (ctrl->done_wq) {
                        destroy_workqueue(ctrl->done_wq);
                        ctrl->done_wq = NULL;
                }

                if (timer_pending(&ctrl->activity_timer))
                        del_timer_sync(&ctrl->activity_timer);

                /* Clean up the DMA queue */
                spin_lock_bh(&ctrl->queue_lock);
                rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
                spin_unlock_bh(&ctrl->queue_lock);

                rsxx_dma_cancel(ctrl);

                vfree(ctrl->trackers);

                pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
                                    ctrl->status.buf, ctrl->status.dma_addr);
                pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
                                    ctrl->cmd.buf, ctrl->cmd.dma_addr);
        }
}

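/*
 * EEH (PowerPC Enhanced Error Handling) recovery: every DMA already
 * handed to the hardware is pulled back off the trackers, unmapped, and
 * spliced back onto the software queue so the whole batch can be
 * reissued once the adapter recovers.
 */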
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
        int i;
        int j;
        int cnt;
        struct rsxx_dma *dma;
        struct list_head *issued_dmas;

        issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
                              GFP_KERNEL);
        if (!issued_dmas)
                return -ENOMEM;

        for (i = 0; i < card->n_targets; i++) {
                INIT_LIST_HEAD(&issued_dmas[i]);
                cnt = 0;
                for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
                        dma = get_tracker_dma(card->ctrl[i].trackers, j);
                        if (dma == NULL)
                                continue;

                        if (dma->cmd == HW_CMD_BLK_WRITE)
                                card->ctrl[i].stats.writes_issued--;
                        else if (dma->cmd == HW_CMD_BLK_DISCARD)
                                card->ctrl[i].stats.discards_issued--;
                        else
                                card->ctrl[i].stats.reads_issued--;

                        pci_unmap_page(card->dev, dma->dma_addr,
                                       get_dma_size(dma),
                                       dma->cmd == HW_CMD_BLK_WRITE ?
                                                   PCI_DMA_TODEVICE :
                                                   PCI_DMA_FROMDEVICE);
                        list_add_tail(&dma->list, &issued_dmas[i]);
                        push_tracker(card->ctrl[i].trackers, j);
                        cnt++;
                }

                spin_lock_bh(&card->ctrl[i].queue_lock);
                list_splice(&issued_dmas[i], &card->ctrl[i].queue);

                atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
                card->ctrl[i].stats.sw_q_depth += cnt;
                card->ctrl[i].e_cnt = 0;
                spin_unlock_bh(&card->ctrl[i].queue_lock);
        }

        kfree(issued_dmas);

        return 0;
}

int rsxx_dma_init(void)
{
        rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
        if (!rsxx_dma_pool)
                return -ENOMEM;

        return 0;
}


void rsxx_dma_cleanup(void)
{
        kmem_cache_destroy(rsxx_dma_pool);
}