/*
 *  linux/drivers/net/ehea/ehea.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_H__
#define __EHEA_H__

#include <linux/module.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>

#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
#include <asm/io.h>

#define DRV_NAME	"ehea"
#define DRV_VERSION	"EHEA_0095"

/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD      2
#define DLPAR_MEM_REM      4
#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)

#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EHEA_MAX_ENTRIES_RQ1 32767
#define EHEA_MAX_ENTRIES_RQ2 16383
#define EHEA_MAX_ENTRIES_RQ3 16383
#define EHEA_MAX_ENTRIES_SQ  32767
#define EHEA_MIN_ENTRIES_QP  127

#define EHEA_SMALL_QUEUES
#define EHEA_NUM_TX_QP 1
#define EHEA_LRO_MAX_AGGR 64

#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT   1023
#define EHEA_DEF_ENTRIES_SQ  1023
#define EHEA_DEF_ENTRIES_RQ1 4095
#define EHEA_DEF_ENTRIES_RQ2 1023
#define EHEA_DEF_ENTRIES_RQ3 1023
#else
#define EHEA_MAX_CQE_COUNT   4080
#define EHEA_DEF_ENTRIES_SQ  4080
#define EHEA_DEF_ENTRIES_RQ1 8160
#define EHEA_DEF_ENTRIES_RQ2 2040
#define EHEA_DEF_ENTRIES_RQ3 2040
#endif

#define EHEA_MAX_ENTRIES_EQ 20

#define EHEA_SG_SQ  2
#define EHEA_SG_RQ1 1
#define EHEA_SG_RQ2 0
#define EHEA_SG_RQ3 0

#define EHEA_MAX_PACKET_SIZE 9022	/* for jumbo frames */
#define EHEA_RQ2_PKT_SIZE    1522
#define EHEA_L_PKT_SIZE      256	/* low latency */

#define MAX_LRO_DESCRIPTORS 8

/* Send completion signaling */

/* Protection Domain Identifier */
#define EHEA_PD_ID 0xaabcdeff

#define EHEA_RQ2_THRESHOLD 1
#define EHEA_RQ3_THRESHOLD 9	/* use RQ3 threshold of 1522 bytes */
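/*
 * Illustrative note (not part of the original header): received frames are
 * steered into one of three receive queues by size -- RQ1 for low-latency
 * packets up to EHEA_L_PKT_SIZE, RQ2 for packets up to EHEA_RQ2_PKT_SIZE and
 * RQ3 for larger (jumbo) frames up to EHEA_MAX_PACKET_SIZE. The threshold
 * values above are the encoded boundaries passed to the hardware in the QP
 * init attributes; per the original comment, the RQ3 encoding of 9
 * corresponds to the 1522-byte boundary.
 */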

#define EHEA_SPEED_10G     10000
#define EHEA_SPEED_1G       1000
#define EHEA_SPEED_100M      100
#define EHEA_SPEED_10M        10
#define EHEA_SPEED_AUTONEG     0

/* Broadcast/Multicast registration types */
#define EHEA_BCMC_SCOPE_ALL     0x08
#define EHEA_BCMC_SCOPE_SINGLE  0x00
#define EHEA_BCMC_MULTICAST     0x04
#define EHEA_BCMC_BROADCAST     0x00
#define EHEA_BCMC_UNTAGGED      0x02
#define EHEA_BCMC_TAGGED        0x00
#define EHEA_BCMC_VLANID_ALL    0x01
#define EHEA_BCMC_VLANID_SINGLE 0x00
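/*
 * Illustrative note (not part of the original header): a registration type is
 * built by OR'ing one flag from each pair above (scope, broadcast/multicast,
 * tagged/untagged, VLAN id scope) and handing the result to the firmware's
 * broadcast/multicast registration call; e.g. a multicast registration valid
 * for all VLANs might combine EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST |
 * EHEA_BCMC_VLANID_ALL (hypothetical example combination).
 */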

#define EHEA_CACHE_LINE 128

/* Memory Regions */
#define EHEA_MR_ACC_CTRL     0x00800000

#define EHEA_BUSMAP_START    0x8000000000000000ULL
#define EHEA_INVAL_ADDR      0xFFFFFFFFFFFFFFFFULL
#define EHEA_DIR_INDEX_SHIFT 13	/* 8k Entries in 64k block */
#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES     (1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_MAP_SIZE        (0x10000)	/* currently fixed map size */
#define EHEA_INDEX_MASK      (EHEA_MAP_ENTRIES - 1)


#define EHEA_WATCH_DOG_TIMEOUT 10*HZ

/* utility functions */

#define ehea_info(fmt, args...) \
	printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)

#define ehea_error(fmt, args...) \
	printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)

#ifdef DEBUG
#define ehea_debug(fmt, args...) \
	printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
#else
#define ehea_debug(fmt, args...) do {} while (0)
#endif

void ehea_dump(void *adr, int len, char *msg);

#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))

#define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))

#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)

#define EHEA_BMASK_MASK(mask) \
	(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

#define EHEA_BMASK_SET(mask, value) \
	((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

#define EHEA_BMASK_GET(mask, value) \
	(EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
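
/*
 * Illustrative note (not part of the original header): the BMASK helpers pack
 * a shift position and a field width into one descriptor (position in the
 * upper 16 bits, length in the lower 16 bits). EHEA_BMASK_IBM() takes the
 * field boundaries in IBM bit numbering, where bit 0 is the most significant
 * bit of a 64-bit word. For example, EHEA_BMASK_IBM(48, 63) describes the low
 * 16 bits of a u64: its SHIFTPOS is 0 and its MASK is 0xffff, so
 * EHEA_BMASK_SET(EHEA_BMASK_IBM(48, 63), 0x1234) evaluates to 0x1234 and
 * EHEA_BMASK_GET() extracts the same field again.
 */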

/*
 * Generic ehea page
 */
struct ehea_page {
	u8 entries[PAGE_SIZE];
};

/*
 * Generic queue in linux kernel virtual memory
 */
struct hw_queue {
	u64 current_q_offset;		/* current queue entry */
	struct ehea_page **queue_pages;	/* array of pages belonging to queue */
	u32 qe_size;			/* queue entry size */
	u32 queue_length;		/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;		/* toggle flag - per page */
	u32 reserved;			/* 64 bit alignment */
};
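
/*
 * Illustrative note (not part of the original header): a hw_queue is a ring
 * of fixed-size entries spread across an array of individual pages rather
 * than one contiguous buffer; current_q_offset advances through the ring and
 * wraps, and toggle_state flips on every wrap so software can distinguish
 * freshly written entries from stale ones left over from the previous pass.
 */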

/*
 * For pSeries this is a 64bit memory address where
 * I/O memory is mapped into CPU address space
 */
struct h_epa {
	void __iomem *addr;
};

struct h_epa_user {
	u64 addr;
};

struct h_epas {
	struct h_epa kernel;	/* kernel space accessible resource,
				   set to 0 if unused */
	struct h_epa_user user;	/* user space accessible resource,
				   set to 0 if unused */
};

/*
 * Memory map data structures
 */
struct ehea_dir_bmap
{
	u64 ent[EHEA_MAP_ENTRIES];
};
struct ehea_top_bmap
{
	struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
};
struct ehea_bmap
{
	struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
};
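
/*
 * Illustrative note (not part of the original header): the bmap structures
 * above form a sparse three-level table (top -> dir -> ent), each level
 * indexed by EHEA_DIR_INDEX_SHIFT (13) bits, used to translate registered
 * kernel memory sections into bus addresses in the eHEA address space
 * starting at EHEA_BUSMAP_START. Directory levels are only allocated for
 * sections that are actually mapped, which keeps the table small and allows
 * memory to be added or removed at runtime (DLPAR_MEM_ADD / DLPAR_MEM_REM).
 */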

struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
struct ehea_port;
struct ehea_av;

/*
 * Queue attributes passed to ehea_create_qp()
 */
struct ehea_qp_init_attr {
	/* input parameter */
	u32 qp_token;		/* queue token */
	u8 low_lat_rq1;
	u8 signalingtype;	/* cqe generation flag */
	u8 rq_count;		/* num of receive queues */
	u8 eqe_gen;		/* eqe generation flag */
	u16 max_nr_send_wqes;	/* max number of send wqes */
	u16 max_nr_rwqes_rq1;	/* max number of receive wqes */
	u16 max_nr_rwqes_rq2;
	u16 max_nr_rwqes_rq3;
	u8 wqe_size_enc_sq;
	u8 wqe_size_enc_rq1;
	u8 wqe_size_enc_rq2;
	u8 wqe_size_enc_rq3;
	u8 swqe_imm_data_len;	/* immediate data length for swqes */
	u16 port_nr;
	u16 rq2_threshold;
	u16 rq3_threshold;
	u64 send_cq_handle;
	u64 recv_cq_handle;
	u64 aff_eq_handle;

	/* output parameter */
	u32 qp_nr;
	u16 act_nr_send_wqes;
	u16 act_nr_rwqes_rq1;
	u16 act_nr_rwqes_rq2;
	u16 act_nr_rwqes_rq3;
	u8 act_wqe_size_enc_sq;
	u8 act_wqe_size_enc_rq1;
	u8 act_wqe_size_enc_rq2;
	u8 act_wqe_size_enc_rq3;
	u32 nr_sq_pages;
	u32 nr_rq1_pages;
	u32 nr_rq2_pages;
	u32 nr_rq3_pages;
	u32 liobn_sq;
	u32 liobn_rq1;
	u32 liobn_rq2;
	u32 liobn_rq3;
};

/*
 * Event Queue attributes, passed as parameter
 */
struct ehea_eq_attr {
	u32 type;
	u32 max_nr_of_eqes;
	u8 eqe_gen;		/* generate eqe flag */
	u64 eq_handle;
	u32 act_nr_of_eqes;
	u32 nr_pages;
	u32 ist1;		/* Interrupt service token */
	u32 ist2;
	u32 ist3;
	u32 ist4;
};


/*
 * Event Queue
 */
struct ehea_eq {
	struct ehea_adapter *adapter;
	struct hw_queue hw_queue;
	u64 fw_handle;
	struct h_epas epas;
	spinlock_t spinlock;
	struct ehea_eq_attr attr;
};

/*
 * HEA Queues
 */
struct ehea_qp {
	struct ehea_adapter *adapter;
	u64 fw_handle;			/* QP handle for firmware calls */
	struct hw_queue hw_squeue;
	struct hw_queue hw_rqueue1;
	struct hw_queue hw_rqueue2;
	struct hw_queue hw_rqueue3;
	struct h_epas epas;
	struct ehea_qp_init_attr init_attr;
};

/*
 * Completion Queue attributes
 */
struct ehea_cq_attr {
	/* input parameter */
	u32 max_nr_of_cqes;
	u32 cq_token;
	u64 eq_handle;

	/* output parameter */
	u32 act_nr_of_cqes;
	u32 nr_pages;
};

/*
 * Completion Queue
 */
struct ehea_cq {
	struct ehea_adapter *adapter;
	u64 fw_handle;
	struct hw_queue hw_queue;
	struct h_epas epas;
	struct ehea_cq_attr attr;
};

/*
 * Memory Region
 */
struct ehea_mr {
	struct ehea_adapter *adapter;
	u64 handle;
	u64 vaddr;
	u32 lkey;
};

/*
 * Port state information
 */
struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

#define EHEA_IRQ_NAME_SIZE 20

/*
 * Queue SKB Array
 */
struct ehea_q_skb_arr {
	struct sk_buff **arr;		/* skb array for queue */
	int len;			/* array length */
	int index;			/* array index */
	int os_skbs;			/* rq2/rq3 only: outstanding skbs */
};

/*
 * Port resources
 */
struct ehea_port_res {
	struct napi_struct napi;
	struct port_stats p_stats;
	struct ehea_mr send_mr;		/* send memory region */
	struct ehea_mr recv_mr;		/* receive memory region */
	spinlock_t xmit_lock;
	struct ehea_port *port;
	char int_recv_name[EHEA_IRQ_NAME_SIZE];
	char int_send_name[EHEA_IRQ_NAME_SIZE];
	struct ehea_qp *qp;
	struct ehea_cq *send_cq;
	struct ehea_cq *recv_cq;
	struct ehea_eq *eq;
	struct ehea_q_skb_arr rq1_skba;
	struct ehea_q_skb_arr rq2_skba;
	struct ehea_q_skb_arr rq3_skba;
	struct ehea_q_skb_arr sq_skba;
	int sq_skba_size;
	spinlock_t netif_queue;
	int queue_stopped;
	int swqe_refill_th;
	atomic_t swqe_avail;
	int swqe_ll_count;
	u32 swqe_id_counter;
	u64 tx_packets;
	u64 rx_packets;
	u32 poll_counter;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
};


#define EHEA_MAX_PORTS 16

#define EHEA_NUM_PORTRES_FW_HANDLES 6	/* QP handle, SendCQ handle,
					   RecvCQ handle, EQ handle,
					   SendMR handle, RecvMR handle */
#define EHEA_NUM_PORT_FW_HANDLES    1	/* EQ handle */
#define EHEA_NUM_ADAPTER_FW_HANDLES 2	/* MR handle, NEQ handle */

struct ehea_adapter {
	u64 handle;
	struct of_device *ofdev;
	struct ehea_port *port[EHEA_MAX_PORTS];
	struct ehea_eq *neq;		/* notification event queue */
	struct tasklet_struct neq_tasklet;
	struct ehea_mr mr;
	u32 pd;				/* protection domain */
	u64 max_mc_mac;			/* max number of multicast mac addresses */
	int active_ports;
	struct list_head list;
};


struct ehea_mc_list {
	struct list_head list;
	u64 macaddr;
};

/* kdump support */
struct ehea_fw_handle_entry {
	u64 adh;		/* Adapter Handle */
	u64 fwh;		/* Firmware Handle */
};

struct ehea_fw_handle_array {
	struct ehea_fw_handle_entry *arr;
	int num_entries;
	struct mutex lock;
};

struct ehea_bcmc_reg_entry {
	u64 adh;		/* Adapter Handle */
	u32 port_id;		/* Logical Port Id */
	u8 reg_type;		/* Registration Type */
	u64 macaddr;
};

struct ehea_bcmc_reg_array {
	struct ehea_bcmc_reg_entry *arr;
	int num_entries;
	spinlock_t lock;
};
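
/*
 * Illustrative note (not part of the original header): the two arrays above
 * keep a shadow copy of every firmware handle and every broadcast/multicast
 * registration the driver currently owns, so that on a crash or kexec
 * transition (the kdump case named above) the resources can be released and
 * deregistered cleanly before the capture kernel takes over the adapter.
 */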

#define EHEA_PORT_UP      1
#define EHEA_PORT_DOWN    0
#define EHEA_PHY_LINK_UP   1
#define EHEA_PHY_LINK_DOWN 0
#define EHEA_MAX_PORT_RES 16
struct ehea_port {
	struct ehea_adapter *adapter;	/* adapter that owns this port */
	struct net_device *netdev;
	struct net_device_stats stats;
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
	struct of_device ofdev;		/* Open Firmware Device */
	struct ehea_mc_list *mc_list;	/* Multicast MAC addresses */
	struct vlan_group *vgrp;
	struct ehea_eq *qp_eq;
	struct work_struct reset_task;
	struct mutex port_lock;
	char int_aff_name[EHEA_IRQ_NAME_SIZE];
	int allmulti;			/* Indicates IFF_ALLMULTI state */
	int promisc;			/* Indicates IFF_PROMISC state */
	int num_tx_qps;
	int num_add_tx_qps;
	int num_mcs;
	int resets;
	u64 flags;
	u64 mac_addr;
	u32 logical_port_id;
	u32 port_speed;
	u32 msg_enable;
	u32 sig_comp_iv;
	u32 state;
	u32 lro_max_aggr;
	u8 phy_link;
	u8 full_duplex;
	u8 autoneg;
	u8 num_def_qps;
};

struct port_res_cfg {
	int max_entries_rcq;
	int max_entries_scq;
	int max_entries_sq;
	int max_entries_rq1;
	int max_entries_rq2;
	int max_entries_rq3;
};

enum ehea_flag_bits {
	__EHEA_STOP_XFER,
	__EHEA_DISABLE_PORT_RESET
};

void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);

extern u64 ehea_driver_flags;
extern struct work_struct ehea_rereg_mr_task;

#endif	/* __EHEA_H__ */