drivers/net/ethernet/cavium/liquidio/octeon_network.h
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/

/*! \file octeon_network.h
 *  \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/version.h>
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE 68

struct oct_nic_stats_resp {
        u64 rh;
        struct oct_link_stats stats;
        u64 status;
};

struct oct_nic_stats_ctrl {
        struct completion complete;
        struct net_device *netdev;
};

/** LiquidIO per-interface network private data */
struct lio {
        /** State of the interface. Rx/Tx happens only in the RUNNING state. */
        atomic_t ifstate;

        /** Octeon Interface index number. This device will be represented as
         * oct<ifidx> in the system.
         */
        int ifidx;

        /** Octeon Input queue to use to transmit for this network interface. */
        int txq;

        /** Octeon Output queue from which pkts arrive
         * for this network interface.
         */
        int rxq;

        /** Guards each glist */
        spinlock_t *glist_lock;

        /** Array of gather component linked lists */
        struct list_head *glist;

        /** Pointer to the NIC properties for the Octeon device this network
         * interface is associated with.
         */
        struct octdev_props *octprops;

        /** Pointer to the octeon device structure. */
        struct octeon_device *oct_dev;

        struct net_device *netdev;

        /** Link information sent by the core application for this interface. */
        struct oct_link_info linfo;

        /** counter of link changes */
        u64 link_changes;

        /** Size of Tx queue for this octeon device. */
        u32 tx_qsize;

        /** Size of Rx queue for this octeon device. */
        u32 rx_qsize;

        /** Size of MTU of this octeon device. */
        u32 mtu;

        /** msg level flag per interface. */
        u32 msg_enable;

        /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
        u64 dev_capability;

        /* Copy of transmit encapsulation capabilities:
         * TSO, TSO6, Checksums for this device for Kernel
         * 3.10.0 onwards
         */
        u64 enc_dev_capability;

        /** Copy of beacon reg in phy */
        u32 phy_beacon_val;

        /** Copy of ctrl reg in phy */
        u32 led_ctrl_val;

        /* PTP clock information */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        s64 ptp_adjust;

        /* for atomic access to Octeon PTP reg and data struct */
        spinlock_t ptp_lock;

        /* Interface info */
        u32 intf_open;

        /* work queue for txq status */
        struct cavium_wq txq_status_wq;
};

#define LIO_SIZE (sizeof(struct lio))
#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))

/**
 * \brief Enable or disable feature
 * @param netdev pointer to network device
 * @param cmd Command that just requires acknowledgment
 * @param param1 Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

/**
 * \brief Register ethtool operations
 * @param netdev pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);

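/* The receive buffer helpers below use these to align skb->data to the
 * next 64-byte boundary.
 */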
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ 2048

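/**
 * \brief Allocate a receive buffer: an skb plus a DMA-mapped page for packet data
 * @param oct pointer to the octeon device
 * @param pg_info filled in with the page, its offset and its DMA address
 * @returns pointer to the skb, or NULL if the allocation or DMA mapping fails
 */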
static inline void
*recv_buffer_alloc(struct octeon_device *oct,
                   struct octeon_skb_page_info *pg_info)
{
        struct page *page;
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        page = alloc_page(GFP_ATOMIC | __GFP_COLD);
        if (unlikely(!page))
                return NULL;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                __free_page(page);
                pg_info->page = NULL;
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        /* Get DMA info */
        pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                    PAGE_SIZE, DMA_FROM_DEVICE);

        /* Mapping failed!! */
        if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
                __free_page(page);
                dev_kfree_skb_any(skb);
                pg_info->page = NULL;
                return NULL;
        }

        pg_info->page = page;
        pg_info->page_offset = 0;
        skb_pg_info->page = page;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = pg_info->dma;

        return (void *)skb;
}

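/**
 * \brief Allocate a receive skb of the given size with no backing page
 * @param size number of data bytes to reserve in the skb
 * @returns pointer to the skb, or NULL on failure
 */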
static inline void
*recv_buffer_fast_alloc(u32 size)
{
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        skb = dev_alloc_skb(size + SKB_ADJ);
        if (unlikely(!skb))
                return NULL;

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        skb_pg_info->page = NULL;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = 0;

        return skb;
}

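/**
 * \brief Try to recycle the page of a receive buffer
 * @param oct pointer to the octeon device
 * @param buf pointer to the buffer's octeon_skb_page_info
 * @returns 0 if the page was reused (offset flipped to the other half and a
 *          page reference taken); -ENOMEM if the page is missing, or if it is
 *          still in use or off-node (in which case it is also unmapped)
 */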
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf;

        if (!pg_info->page) {
                dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
                        __func__);
                return -ENOMEM;
        }

        if (unlikely(page_count(pg_info->page) != 1) ||
            unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, (PAGE_SIZE << 0),
                               DMA_FROM_DEVICE);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
                return -ENOMEM;
        }

        /* Flip to other half of the buffer */
        if (pg_info->page_offset == 0)
                pg_info->page_offset = LIO_RXBUFFER_SZ;
        else
                pg_info->page_offset = 0;
        page_ref_inc(pg_info->page);

        return 0;
}

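/**
 * \brief Attach a recycled page to a newly allocated skb
 * @param oct pointer to the octeon device
 * @param buf pointer to the octeon_skb_page_info holding the recycled page
 * @returns pointer to the new skb, or NULL if skb allocation fails
 *          (the page is unmapped in that case)
 */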
static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
        struct sk_buff *skb;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, (PAGE_SIZE << 0),
                               DMA_FROM_DEVICE);
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        skb_pg_info->page = pg_info->page;
        skb_pg_info->page_offset = pg_info->page_offset;
        skb_pg_info->dma = pg_info->dma;

        return skb;
}

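/**
 * \brief Drop the page reference of a receive buffer and free its skb
 * @param buffer skb to free
 * @param pg_info page info whose page reference is released
 */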
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;

        put_page(pg_info->page);
        pg_info->dma = 0;
        pg_info->page = NULL;
        pg_info->page_offset = 0;

        if (skb)
                dev_kfree_skb_any(skb);
}

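/**
 * \brief Free a receive buffer, releasing its page reference if one is attached
 * @param buffer skb to free; its page info is taken from skb->cb
 */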
static inline void recv_buffer_free(void *buffer)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;
        struct octeon_skb_page_info *pg_info;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));

        if (pg_info->page) {
                put_page(pg_info->page);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
        }

        dev_kfree_skb_any(skb);
}

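/**
 * \brief Free an skb obtained from recv_buffer_fast_alloc
 * @param buffer skb to free
 */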
static inline void
recv_buffer_fast_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

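/**
 * \brief Free a transmit buffer
 * @param buffer skb to free
 */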
static inline void tx_buffer_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

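/* Coherent DMA alloc/free helpers tied to the octeon device's PCI function */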
#define lio_dma_alloc(oct, size, dma_addr) \
        dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)

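/**
 * \brief Get the virtual address of a receive buffer's data
 * @param skb skb whose cb holds the page info
 * @returns kernel virtual address of the page data at the stored offset
 */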
static inline
void *get_rbd(struct sk_buff *skb)
{
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        va = page_address(pg_info->page) + pg_info->page_offset;

        return va;
}

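/**
 * \brief DMA-map a DROQ info entry for the device to write into
 * @param droq output queue that owns the info list
 * @param i index of the info entry to map
 * @returns DMA address of the info entry
 */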
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
        dma_addr_t dma_addr;
        struct octeon_device *oct = droq->oct_dev;

        dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
                                  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);

        WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));

        return (u64)dma_addr;
}

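/**
 * \brief Unmap a DROQ info entry mapped by lio_map_ring_info
 * @param pci_dev PCI device used for the mapping
 * @param info_ptr DMA address returned by lio_map_ring_info
 * @param size size of the mapping
 */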
static inline void
lio_unmap_ring_info(struct pci_dev *pci_dev,
                    u64 info_ptr, u32 size)
{
        dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
}

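/**
 * \brief Get the DMA address of a receive buffer's data
 * @param buf skb whose cb holds the page info
 * @returns DMA address of the page plus the current page offset
 */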
static inline u64
lio_map_ring(void *buf)
{
        dma_addr_t dma_addr;

        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octeon_skb_page_info *pg_info;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        if (!pg_info->page) {
                pr_err("%s: pg_info->page NULL\n", __func__);
                WARN_ON(1);
        }

        /* Get DMA info */
        dma_addr = pg_info->dma;
        if (!pg_info->dma) {
                pr_err("%s: ERROR it should be already available\n",
                       __func__);
                WARN_ON(1);
        }
        dma_addr += pg_info->page_offset;

        return (u64)dma_addr;
}

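/**
 * \brief Unmap a receive buffer page that was mapped for DMA
 * @param pci_dev PCI device used for the mapping
 * @param buf_ptr DMA address of the page
 */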
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
               u64 buf_ptr)
{
        dma_unmap_page(&pci_dev->dev,
                       buf_ptr, (PAGE_SIZE << 0),
                       DMA_FROM_DEVICE);
}

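/**
 * \brief Allocate a packet skb with no backing page
 * @param size number of data bytes
 * @returns pointer to the skb, or NULL on failure
 */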
static inline void *octeon_fast_packet_alloc(u32 size)
{
        return recv_buffer_fast_alloc(size);
}

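/**
 * \brief Copy received data from a receive buffer into an skb
 * @param droq output queue the data arrived on
 * @param nicbuf skb to append the data to
 * @param copy_len number of bytes to copy
 * @param idx index of the receive buffer to copy from
 */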
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
                                           struct sk_buff *nicbuf,
                                           int copy_len,
                                           int idx)
{
        memcpy(skb_put(nicbuf, copy_len),
               get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
}

#endif