/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/

/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

#define DRV_NAME "LiquidIO"

/** This structure is used by the NIC driver to store information required
 *  to free the sk_buff when the packet has been fetched by Octeon.
 *  Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8.  Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16.  Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24.  Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32.  Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40.  Piggybacked soft command, if any. */
	struct octeon_soft_command *sc;
};
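
/* Example: a transmit path might stash this info in the skb control
 * buffer before posting the packet to Octeon (a sketch based on typical
 * usage; the surrounding lio/oct variables and the DMA mapping call are
 * assumptions, not definitions from this file):
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 *	finfo->g = NULL;
 *	finfo->sc = NULL;
 *	finfo->dptr = dma_map_single(&oct->pci_dev->dev, skb->data,
 *				     skb->len, DMA_TO_DEVICE);
 */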

/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
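
/* Example: a TX completion loop might accumulate totals per queue and
 * report them to BQL once at the end (a sketch; next_completed_buf() is
 * a hypothetical helper standing in for the driver's completion walk):
 *
 *	unsigned int pkts_compl = 0, bytes_compl = 0;
 *	void *buf;
 *
 *	while ((buf = next_completed_buf(iq)) != NULL)
 *		octeon_update_tx_completion_counters(buf, reqtype,
 *						     &pkts_compl,
 *						     &bytes_compl);
 *	octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
 */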

/** Swap each 8-byte block of \a data to big-endian byte order in place.
 *  On big-endian hosts cpu_to_be64s() is a no-op.
 */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
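
/* Example: byte-swap a two-word command header in place before handing
 * it to the device (values are illustrative):
 *
 *	u64 hdr[2] = { 0x0102030405060708ULL, 0x1122334455667788ULL };
 *
 *	octeon_swap_8B_data(hdr, 2);
 *
 * On a little-endian host hdr[0] is now 0x0807060504030201; on a
 * big-endian host the contents are unchanged.
 */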

/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	/* Do not leak the PCI region on the error paths. */
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
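
/* Example: a probe routine might map BAR0 in full (max_map_len == 0
 * means "map the whole BAR") and unmap it on teardown (a sketch; the
 * error value is illustrative):
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return -ENOMEM;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);
 *
 * Note the baridx * 2 when indexing PCI resources above, presumably
 * because each BAR on this device is 64-bit and so occupies two
 * consecutive 32-bit BAR registers.
 */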

static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES 1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		if (!page)
			break;	/* out of memory; return NULL below */
		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increment the size required if the first
			 * attempt failed.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
	free_pages(orig_ptr, get_order(size))
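
/* Example pairing of the allocator and the free macro (a sketch; the
 * pci_dev and dma_addr arguments are accepted for symmetry but ignored
 * by the macro):
 *
 *	u32 alloc_size;
 *	size_t orig_ptr;
 *	void *buf;
 *
 *	buf = cnnic_numa_alloc_aligned_dma(1024, &alloc_size,
 *					   &orig_ptr, numa_node);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cnnic_free_aligned_dma(oct->pci_dev, buf, alloc_size,
 *			       orig_ptr, 0);
 */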

static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			errno = -EINTR;
			goto out;
		}
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}
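
/* Example: block until a completion flag is set, bailing out on a
 * signal (a sketch; the wait queue and flag are assumptions). Submit
 * the request, arrange for the completion handler to set done = 1 and
 * call wake_up(&wq), then:
 *
 *	int done = 0;
 *	...
 *	if (sleep_cond(&wq, &done) == -EINTR)
 *		return -EINTR;
 */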

static inline void
sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(waitq, &we);
	while (!atomic_read(pcond)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &we);
}

/* Gives up the CPU for a timeout period.
 * Checks the condition first and sleeps for the timeout period only if
 * the condition is still false. Returns with no indication of whether
 * the condition became true or the timeout expired; the caller must
 * recheck the condition.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}
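
/* Example: wait up to 100 jiffies for a response flag; because the
 * function returns void, the caller rechecks the condition to
 * distinguish success from timeout (a sketch; the flag and error value
 * are illustrative):
 *
 *	sleep_timeout_cond(&wq, &resp_arrived, 100);
 *	if (!resp_arrived)
 *		return -ETIMEDOUT;
 */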

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & ~0x3)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & ~0x7)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & ~0xf)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & ~0x7f)
#endif
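
/* Example arithmetic: ROUNDUP8(13) == 16 and ROUNDUP8(16) == 16;
 * values already at a multiple of the alignment pass through unchanged.
 */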

#endif /* _OCTEON_MAIN_H_ */