Commit | Line | Data |
---|---|---|
6b7c5b94 SP |
1 | /* |
2 | * Copyright (C) 2005 - 2009 ServerEngines | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License version 2 | |
7 | * as published by the Free Software Foundation. The full GNU General | |
8 | * Public License is included in this distribution in the file called COPYING. | |
9 | * | |
10 | * Contact Information: | |
11 | * linux-drivers@serverengines.com | |
12 | * | |
13 | * ServerEngines | |
14 | * 209 N. Fair Oaks Ave | |
15 | * Sunnyvale, CA 94085 | |
16 | */ | |
17 | ||
18 | #include "be.h" | |
19 | ||
6ac7b687 | 20 | static void be_mcc_notify(struct be_ctrl_info *ctrl) |
5fb379ee SP |
21 | { |
22 | struct be_queue_info *mccq = &ctrl->mcc_obj.q; | |
23 | u32 val = 0; | |
24 | ||
25 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | |
26 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | |
27 | iowrite32(val, ctrl->db + DB_MCCQ_OFFSET); | |
28 | } | |
29 | ||
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
	if (compl->flags != 0) {
		/* New entry: convert flags to host endian in place so later
		 * readers see a host-endian entry */
		compl->flags = le32_to_cpu(compl->flags);
		/* A non-zero flags word must carry the valid bit */
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}
43 | ||
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
	/* Clearing flags marks the entry consumed, so be_mcc_compl_is_new()
	 * will not report it as new again */
	compl->flags = 0;
}
49 | ||
/* Process one MCC completion entry: swap the first words to host endian
 * and log any non-success status. Returns 0 on success, -1 on cmd failure.
 */
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
	struct be_mcc_cq_entry *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		printk(KERN_WARNING DRV_NAME
			" error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}
71 | ||
72 | ||
/* Return the next new (valid) completion from the MCC CQ and advance the
 * tail, or NULL if no new completion is present. Caller holds mcc_cq_lock.
 */
static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
	struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
84 | ||
/* Drain all new completions from the MCC CQ under mcc_cq_lock, then re-arm
 * the CQ with the number of entries consumed.
 */
void be_process_mcc(struct be_ctrl_info *ctrl)
{
	struct be_mcc_cq_entry *compl;
	int num = 0;

	spin_lock_bh(&ctrl->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(ctrl))) {
		if (!(compl->flags & CQE_FLAGS_ASYNC_MASK)) {
			/* Real command completion: process it and release
			 * the WRB slot it occupied in the MCC queue.
			 * Async events don't consume a posted WRB. */
			be_mcc_compl_process(ctrl, compl);
			atomic_dec(&ctrl->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	if (num)
		be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
	spin_unlock_bh(&ctrl->mcc_cq_lock);
}
103 | ||
6ac7b687 SP |
104 | /* Wait till no more pending mcc requests are present */ |
105 | static void be_mcc_wait_compl(struct be_ctrl_info *ctrl) | |
106 | { | |
107 | #define mcc_timeout 50000 /* 5s timeout */ | |
108 | int i; | |
109 | for (i = 0; i < mcc_timeout; i++) { | |
110 | be_process_mcc(ctrl); | |
111 | if (atomic_read(&ctrl->mcc_obj.q.used) == 0) | |
112 | break; | |
113 | udelay(100); | |
114 | } | |
115 | if (i == mcc_timeout) | |
116 | printk(KERN_WARNING DRV_NAME "mcc poll timed out\n"); | |
117 | } | |
118 | ||
/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
{
	/* Ring the doorbell for all posted WRBs, then poll until the MCC
	 * queue is drained */
	be_mcc_notify(ctrl);
	be_mcc_wait_compl(ctrl);
}
125 | ||
6b7c5b94 SP |
126 | static int be_mbox_db_ready_wait(void __iomem *db) |
127 | { | |
128 | int cnt = 0, wait = 5; | |
129 | u32 ready; | |
130 | ||
131 | do { | |
132 | ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; | |
133 | if (ready) | |
134 | break; | |
135 | ||
136 | if (cnt > 200000) { | |
137 | printk(KERN_WARNING DRV_NAME | |
138 | ": mbox_db poll timed out\n"); | |
139 | return -1; | |
140 | } | |
141 | ||
142 | if (cnt > 50) | |
143 | wait = 200; | |
144 | cnt += wait; | |
145 | udelay(wait); | |
146 | } while (true); | |
147 | ||
148 | return 0; | |
149 | } | |
150 | ||
151 | /* | |
152 | * Insert the mailbox address into the doorbell in two steps | |
5fb379ee | 153 | * Polls on the mbox doorbell till a command completion (or a timeout) occurs |
6b7c5b94 SP |
154 | */ |
155 | static int be_mbox_db_ring(struct be_ctrl_info *ctrl) | |
156 | { | |
157 | int status; | |
6b7c5b94 SP |
158 | u32 val = 0; |
159 | void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; | |
160 | struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; | |
161 | struct be_mcc_mailbox *mbox = mbox_mem->va; | |
162 | struct be_mcc_cq_entry *cqe = &mbox->cqe; | |
163 | ||
164 | memset(cqe, 0, sizeof(*cqe)); | |
165 | ||
166 | val &= ~MPU_MAILBOX_DB_RDY_MASK; | |
167 | val |= MPU_MAILBOX_DB_HI_MASK; | |
168 | /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ | |
169 | val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; | |
170 | iowrite32(val, db); | |
171 | ||
172 | /* wait for ready to be set */ | |
173 | status = be_mbox_db_ready_wait(db); | |
174 | if (status != 0) | |
175 | return status; | |
176 | ||
177 | val = 0; | |
178 | val &= ~MPU_MAILBOX_DB_RDY_MASK; | |
179 | val &= ~MPU_MAILBOX_DB_HI_MASK; | |
180 | /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ | |
181 | val |= (u32)(mbox_mem->dma >> 4) << 2; | |
182 | iowrite32(val, db); | |
183 | ||
184 | status = be_mbox_db_ready_wait(db); | |
185 | if (status != 0) | |
186 | return status; | |
187 | ||
5fb379ee SP |
188 | /* A cq entry has been made now */ |
189 | if (be_mcc_compl_is_new(cqe)) { | |
190 | status = be_mcc_compl_process(ctrl, &mbox->cqe); | |
191 | be_mcc_compl_use(cqe); | |
192 | if (status) | |
193 | return status; | |
194 | } else { | |
195 | printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n"); | |
6b7c5b94 SP |
196 | return -1; |
197 | } | |
5fb379ee | 198 | return 0; |
6b7c5b94 SP |
199 | } |
200 | ||
201 | static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage) | |
202 | { | |
203 | u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET); | |
204 | ||
205 | *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; | |
206 | if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) | |
207 | return -1; | |
208 | else | |
209 | return 0; | |
210 | } | |
211 | ||
212 | static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage) | |
213 | { | |
214 | u16 stage, cnt, error; | |
215 | for (cnt = 0; cnt < 5000; cnt++) { | |
216 | error = be_POST_stage_get(ctrl, &stage); | |
217 | if (error) | |
218 | return -1; | |
219 | ||
220 | if (stage == poll_stage) | |
221 | break; | |
222 | udelay(1000); | |
223 | } | |
224 | if (stage != poll_stage) | |
225 | return -1; | |
226 | return 0; | |
227 | } | |
228 | ||
229 | ||
230 | int be_cmd_POST(struct be_ctrl_info *ctrl) | |
231 | { | |
232 | u16 stage, error; | |
233 | ||
234 | error = be_POST_stage_get(ctrl, &stage); | |
235 | if (error) | |
236 | goto err; | |
237 | ||
238 | if (stage == POST_STAGE_ARMFW_RDY) | |
239 | return 0; | |
240 | ||
241 | if (stage != POST_STAGE_AWAITING_HOST_RDY) | |
242 | goto err; | |
243 | ||
244 | /* On awaiting host rdy, reset and again poll on awaiting host rdy */ | |
245 | iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET); | |
246 | error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY); | |
247 | if (error) | |
248 | goto err; | |
249 | ||
250 | /* Now kickoff POST and poll on armfw ready */ | |
251 | iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET); | |
252 | error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY); | |
253 | if (error) | |
254 | goto err; | |
255 | ||
256 | return 0; | |
257 | err: | |
258 | printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage); | |
259 | return -1; | |
260 | } | |
261 | ||
/* Return the embedded (in-WRB) payload area of a WRB */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
266 | ||
/* Return the first scatter-gather entry of a non-embedded WRB */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
271 | ||
/* Don't touch the hdr after it's prepared */
/* Fill in the WRB header (embedded flag or SGE count, payload length) and
 * convert the first 20 bytes to little endian for the hw.
 */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* swap to LE last: the hdr must not be modified afterwards */
	be_dws_cpu_to_le(wrb, 20);
}
284 | ||
/* Don't touch the hdr after it's prepared */
/* Fill in the common command request header: opcode, subsystem and the
 * request length (total cmd length minus this header).
 */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
293 | ||
294 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, | |
295 | struct be_dma_mem *mem) | |
296 | { | |
297 | int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); | |
298 | u64 dma = (u64)mem->dma; | |
299 | ||
300 | for (i = 0; i < buf_pages; i++) { | |
301 | pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); | |
302 | pages[i].hi = cpu_to_le32(upper_32_bits(dma)); | |
303 | dma += PAGE_SIZE_4K; | |
304 | } | |
305 | } | |
306 | ||
307 | /* Converts interrupt delay in microseconds to multiplier value */ | |
308 | static u32 eq_delay_to_mult(u32 usec_delay) | |
309 | { | |
310 | #define MAX_INTR_RATE 651042 | |
311 | const u32 round = 10; | |
312 | u32 multiplier; | |
313 | ||
314 | if (usec_delay == 0) | |
315 | multiplier = 0; | |
316 | else { | |
317 | u32 interrupt_rate = 1000000 / usec_delay; | |
318 | /* Max delay, corresponding to the lowest interrupt rate */ | |
319 | if (interrupt_rate == 0) | |
320 | multiplier = 1023; | |
321 | else { | |
322 | multiplier = (MAX_INTR_RATE - interrupt_rate) * round; | |
323 | multiplier /= interrupt_rate; | |
324 | /* Round the multiplier to the closest value.*/ | |
325 | multiplier = (multiplier + round/2) / round; | |
326 | multiplier = min(multiplier, (u32)1023); | |
327 | } | |
328 | } | |
329 | return multiplier; | |
330 | } | |
331 | ||
/* Return the WRB embedded in the mailbox memory */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
336 | ||
/* Allocate (and zero) the next free WRB slot from the MCC queue, advancing
 * the head and the used count. Returns NULL when the queue is full.
 * Caller must hold mcc_lock.
 */
static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
	struct be_mcc_wrb *wrb = NULL;
	if (atomic_read(&mccq->used) < mccq->len) {
		wrb = queue_head_node(mccq);
		queue_head_inc(mccq);
		atomic_inc(&mccq->used);
		memset(wrb, 0, sizeof(*wrb));
	}
	return wrb;
}
348 | ||
/* Create an event queue via the mailbox. On success fills in eq->id and
 * marks it created. Returns the mailbox/completion status (0 on success).
 */
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	/* resp aliases req: hw overwrites the request with the response */
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	/* mbox_lock serializes use of the single shared mailbox */
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
389 | ||
/* Query a MAC address via the mailbox: either the permanent (factory) MAC
 * or the one currently programmed on interface if_handle.
 * On success copies ETH_ALEN bytes into mac_addr. Returns mailbox status.
 */
int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mac_query *req = embedded_payload(wrb);
	/* resp aliases req: hw overwrites the request with the response */
	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* if_id is only meaningful for the non-permanent query */
		req->if_id = cpu_to_le16((u16)if_handle);
		req->permanent = 0;
	}

	status = be_mbox_db_ring(ctrl);
	if (!status)
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
421 | ||
/* Add a MAC address to interface if_id via the mailbox. On success the
 * hw-assigned pmac id is returned through *pmac_id. Returns mailbox status.
 */
int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
449 | ||
/* Delete MAC address pmac_id from interface if_id via the mailbox.
 * Returns mailbox status.
 */
int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mbox_db_ring(ctrl);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
472 | ||
/* Create a completion queue bound to event queue eq via the mailbox.
 * On success fills in cq->id and marks it created. Returns mailbox status.
 */
int be_cmd_cq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	/* resp aliases req: hw overwrites the request with the response */
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	/* CQ is created armed so the first completion raises an event */
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
517 | ||
518 | static u32 be_encoded_q_len(int q_len) | |
519 | { | |
520 | u32 len_encoded = fls(q_len); /* log2(len) + 1 */ | |
521 | if (len_encoded == 16) | |
522 | len_encoded = 0; | |
523 | return len_encoded; | |
524 | } | |
525 | ||
/* Create the MCC queue, bound to completion queue cq, via the mailbox.
 * On success fills in mccq->id and marks it created. Returns mailbox status.
 */
int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
566 | ||
/* Create an ethernet TX queue, bound to completion queue cq, via the mailbox.
 * On success fills in txq->id and marks it created. Returns mailbox status.
 */
int be_cmd_txq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt = &req->context;
	int status;
	u32 len_encoded;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	/* same hw encoding as be_encoded_q_len(): 16 wraps to 0 */
	len_encoded = fls(txq->len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
613 | ||
/* Create an ethernet RX queue bound to CQ cq_id via the mailbox.
 * On success fills in rxq->id and marks it created. Returns mailbox status.
 */
int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	/* hw takes log2 of the fragment size */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
649 | ||
/* Generic destroyer function for all types of queues */
/* queue_type selects the subsystem/opcode pair; returns -1 for an unknown
 * type, otherwise the mailbox status.
 */
int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
		status = -1;
		goto err;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_db_ring(ctrl);
err:
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
699 | ||
/* Create an rx filtering policy configuration on an i/f */
/* On success returns the interface handle through *if_handle and, when a
 * valid mac was supplied (pmac_invalid == false), the assigned pmac id
 * through *pmac_id. Returns mailbox status.
 */
int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_create *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
732 | ||
/* Destroy the network interface identified by interface_id via the mailbox.
 * Returns mailbox status.
 */
int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);

	return status;
}
754 | ||
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 */
/* nonemb_cmd supplies both the request buffer and, after completion, holds
 * the response; on success the hw_stats words are swapped to host endian
 * in place. Returns mailbox status.
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	memset(req, 0, sizeof(*req));

	/* non-embedded: one SGE pointing at the external dma buffer */
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
787 | ||
/* Query link speed/duplex/fault state into *link via the mailbox.
 * On failure link->speed is set to PHY_LINK_SPEED_ZERO and the other
 * fields are left untouched. Returns mailbox status.
 */
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
			struct be_link_info *link)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_link_status *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		link->speed = resp->mac_speed;
		link->duplex = resp->mac_duplex;
		link->fault = resp->mac_fault;
	} else {
		link->speed = PHY_LINK_SPEED_ZERO;
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
816 | ||
/* Copy the firmware version string into fw_ver (up to FW_VER_LEN bytes)
 * via the mailbox. Returns mailbox status.
 * NOTE(review): strncpy does not guarantee NUL-termination if the fw
 * string fills FW_VER_LEN -- confirm the caller's buffer expectations.
 */
int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		/* response overwrites the request in the embedded payload */
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
840 | ||
/* set the EQ delay interval of an EQ to specified value */
/* eqd is the delay multiplier as produced by eq_delay_to_mult().
 * Returns mailbox status.
 */
int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* single-entry update for this one EQ */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
866 | ||
/* Configure VLAN filtering on interface if_id via the mailbox: num tags
 * from vtag_array, plus untagged/promiscuous policy. The vlan table is
 * only copied when not in vlan-promiscuous mode. Returns mailbox status.
 */
int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
896 | ||
6ac7b687 | 897 | /* Use MCC for this command as it may be called in BH context */ |
6b7c5b94 SP |
898 | int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en) |
899 | { | |
6ac7b687 SP |
900 | struct be_mcc_wrb *wrb; |
901 | struct be_cmd_req_promiscuous_config *req; | |
6b7c5b94 | 902 | |
6ac7b687 SP |
903 | spin_lock_bh(&ctrl->mcc_lock); |
904 | ||
905 | wrb = wrb_from_mcc(&ctrl->mcc_obj.q); | |
906 | BUG_ON(!wrb); | |
907 | ||
908 | req = embedded_payload(wrb); | |
6b7c5b94 SP |
909 | |
910 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
911 | ||
912 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
913 | OPCODE_ETH_PROMISCUOUS, sizeof(*req)); | |
914 | ||
915 | if (port_num) | |
916 | req->port1_promiscuous = en; | |
917 | else | |
918 | req->port0_promiscuous = en; | |
919 | ||
6ac7b687 | 920 | be_mcc_notify_wait(ctrl); |
6b7c5b94 | 921 | |
6ac7b687 SP |
922 | spin_unlock_bh(&ctrl->mcc_lock); |
923 | return 0; | |
6b7c5b94 SP |
924 | } |
925 | ||
6ac7b687 SP |
926 | /* |
927 | * Use MCC for this command as it may be called in BH context | |
928 | * (mc == NULL) => multicast promiscous | |
929 | */ | |
6b7c5b94 SP |
930 | int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table, |
931 | u32 num, bool promiscuous) | |
932 | { | |
6ac7b687 SP |
933 | #define BE_MAX_MC 32 /* set mcast promisc if > 32 */ |
934 | struct be_mcc_wrb *wrb; | |
935 | struct be_cmd_req_mcast_mac_config *req; | |
6b7c5b94 | 936 | |
6ac7b687 SP |
937 | spin_lock_bh(&ctrl->mcc_lock); |
938 | ||
939 | wrb = wrb_from_mcc(&ctrl->mcc_obj.q); | |
940 | BUG_ON(!wrb); | |
941 | ||
942 | req = embedded_payload(wrb); | |
6b7c5b94 SP |
943 | |
944 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
945 | ||
946 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
947 | OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); | |
948 | ||
949 | req->interface_id = if_id; | |
950 | req->promiscuous = promiscuous; | |
951 | if (!promiscuous) { | |
952 | req->num_mac = cpu_to_le16(num); | |
953 | if (num) | |
954 | memcpy(req->mac, mac_table, ETH_ALEN * num); | |
955 | } | |
956 | ||
6ac7b687 | 957 | be_mcc_notify_wait(ctrl); |
6b7c5b94 | 958 | |
6ac7b687 SP |
959 | spin_unlock_bh(&ctrl->mcc_lock); |
960 | ||
961 | return 0; | |
6b7c5b94 SP |
962 | } |
963 | ||
964 | int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc) | |
965 | { | |
966 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | |
967 | struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); | |
968 | int status; | |
969 | ||
5fb379ee | 970 | spin_lock(&ctrl->mbox_lock); |
6b7c5b94 SP |
971 | |
972 | memset(wrb, 0, sizeof(*wrb)); | |
973 | ||
974 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
975 | ||
976 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
977 | OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req)); | |
978 | ||
979 | req->tx_flow_control = cpu_to_le16((u16)tx_fc); | |
980 | req->rx_flow_control = cpu_to_le16((u16)rx_fc); | |
981 | ||
982 | status = be_mbox_db_ring(ctrl); | |
983 | ||
5fb379ee | 984 | spin_unlock(&ctrl->mbox_lock); |
6b7c5b94 SP |
985 | return status; |
986 | } | |
987 | ||
988 | int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc) | |
989 | { | |
990 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | |
991 | struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); | |
992 | int status; | |
993 | ||
5fb379ee | 994 | spin_lock(&ctrl->mbox_lock); |
6b7c5b94 SP |
995 | |
996 | memset(wrb, 0, sizeof(*wrb)); | |
997 | ||
998 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
999 | ||
1000 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1001 | OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req)); | |
1002 | ||
1003 | status = be_mbox_db_ring(ctrl); | |
1004 | if (!status) { | |
1005 | struct be_cmd_resp_get_flow_control *resp = | |
1006 | embedded_payload(wrb); | |
1007 | *tx_fc = le16_to_cpu(resp->tx_flow_control); | |
1008 | *rx_fc = le16_to_cpu(resp->rx_flow_control); | |
1009 | } | |
1010 | ||
5fb379ee | 1011 | spin_unlock(&ctrl->mbox_lock); |
6b7c5b94 SP |
1012 | return status; |
1013 | } | |
1014 | ||
1015 | int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num) | |
1016 | { | |
1017 | struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); | |
1018 | struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); | |
1019 | int status; | |
1020 | ||
5fb379ee | 1021 | spin_lock(&ctrl->mbox_lock); |
6b7c5b94 SP |
1022 | |
1023 | memset(wrb, 0, sizeof(*wrb)); | |
1024 | ||
1025 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
1026 | ||
1027 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1028 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); | |
1029 | ||
1030 | status = be_mbox_db_ring(ctrl); | |
1031 | if (!status) { | |
1032 | struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); | |
1033 | *port_num = le32_to_cpu(resp->phys_port); | |
1034 | } | |
1035 | ||
5fb379ee | 1036 | spin_unlock(&ctrl->mbox_lock); |
6b7c5b94 SP |
1037 | return status; |
1038 | } |