1 /******************************************************************************
2 iphase.c: Device driver for Interphase ATM PCI adapter cards
3 Author: Peter Wang <pwang@iphase.com>
4 Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 Interphase Corporation <www.iphase.com>
7 *******************************************************************************
9 This software may be used and distributed according to the terms
10 of the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on this skeleton fall under the GPL and must retain
12 the authorship (implicit copyright) notice.
14 This program is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 General Public License for more details.
19 Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20 was originally written by Monalisa Agrawal at UNH. Now this driver
21 supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22 card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23 in terms of PHY type, the size of control memory and the size of
24 packet memory. The following is the change log and history:
26 Bugfix the Mona's UBR driver.
27 Modify the basic memory allocation and dma logic.
28 Port the driver to the latest kernel from 2.0.46.
29 Complete the ABR logic of the driver, and added the ABR work-
30 around for the hardware anomalies.
32 Add the flow control logic to the driver to allow rate-limit VC.
33 Add 4K VC support to the board with 512K control memory.
34 Add the support of all the variants of the Interphase ATM PCI
35 (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36 (25M UTP25) and x531 (DS3 and E3).
39 Support and updates available at: ftp://ftp.iphase.com/pub/atm
41 *******************************************************************************/
43 #include <linux/module.h>
44 #include <linux/kernel.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/sonet.h>
51 #include <linux/skbuff.h>
52 #include <linux/time.h>
53 #include <linux/delay.h>
54 #include <linux/uio.h>
55 #include <linux/init.h>
56 #include <linux/interrupt.h>
57 #include <linux/wait.h>
58 #include <linux/slab.h>
60 #include <linux/atomic.h>
61 #include <asm/uaccess.h>
62 #include <asm/string.h>
63 #include <asm/byteorder.h>
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
/*
 * Swap the two bytes of a 16-bit value.
 * Every use of the argument is parenthesized so that expression
 * arguments (e.g. swap_byte_order(a | b)) expand correctly; the
 * original expansion applied & to the raw token stream, which
 * silently mis-parses for arguments built from low-precedence
 * operators.
 */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
/*
 * Per-PHY (SUNI) private state hangs off the ATM device's phy_data
 * pointer.  The argument is parenthesized so pointer-arithmetic
 * arguments (e.g. PRIV(devs + i)) expand correctly.
 */
#define PRIV(dev) ((struct suni_priv *) (dev)->phy_data)
72 static unsigned char ia_phy_get(struct atm_dev
*dev
, unsigned long addr
);
73 static void desc_dbg(IADEV
*iadev
);
75 static IADEV
*ia_dev
[8];
76 static struct atm_dev
*_ia_dev
[8];
77 static int iadev_count
;
78 static void ia_led_timer(unsigned long arg
);
79 static DEFINE_TIMER(ia_timer
, ia_led_timer
, 0, 0);
80 static int IA_TX_BUF
= DFL_TX_BUFFERS
, IA_TX_BUF_SZ
= DFL_TX_BUF_SZ
;
81 static int IA_RX_BUF
= DFL_RX_BUFFERS
, IA_RX_BUF_SZ
= DFL_RX_BUF_SZ
;
82 static uint IADebugFlag
= /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83 |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
85 module_param(IA_TX_BUF
, int, 0);
86 module_param(IA_TX_BUF_SZ
, int, 0);
87 module_param(IA_RX_BUF
, int, 0);
88 module_param(IA_RX_BUF_SZ
, int, 0);
89 module_param(IADebugFlag
, uint
, 0644);
91 MODULE_LICENSE("GPL");
93 /**************************** IA_LIB **********************************/
95 static void ia_init_rtn_q (IARTN_Q
*que
)
101 static void ia_enque_head_rtn_q (IARTN_Q
*que
, IARTN_Q
* data
)
104 if (que
->next
== NULL
)
105 que
->next
= que
->tail
= data
;
107 data
->next
= que
->next
;
113 static int ia_enque_rtn_q (IARTN_Q
*que
, struct desc_tbl_t data
) {
114 IARTN_Q
*entry
= kmalloc(sizeof(*entry
), GFP_ATOMIC
);
115 if (!entry
) return -1;
118 if (que
->next
== NULL
)
119 que
->next
= que
->tail
= entry
;
121 que
->tail
->next
= entry
;
122 que
->tail
= que
->tail
->next
;
127 static IARTN_Q
* ia_deque_rtn_q (IARTN_Q
*que
) {
129 if (que
->next
== NULL
)
132 if ( que
->next
== que
->tail
)
133 que
->next
= que
->tail
= NULL
;
135 que
->next
= que
->next
->next
;
139 static void ia_hack_tcq(IADEV
*dev
) {
143 struct ia_vcc
*iavcc_r
= NULL
;
145 tcq_wr
= readl(dev
->seg_reg
+TCQ_WR_PTR
) & 0xffff;
146 while (dev
->host_tcq_wr
!= tcq_wr
) {
147 desc1
= *(u_short
*)(dev
->seg_ram
+ dev
->host_tcq_wr
);
149 else if (!dev
->desc_tbl
[desc1
-1].timestamp
) {
150 IF_ABR(printk(" Desc %d is reset at %ld\n", desc1
-1, jiffies
);)
151 *(u_short
*) (dev
->seg_ram
+ dev
->host_tcq_wr
) = 0;
153 else if (dev
->desc_tbl
[desc1
-1].timestamp
) {
154 if (!(iavcc_r
= dev
->desc_tbl
[desc1
-1].iavcc
)) {
155 printk("IA: Fatal err in get_desc\n");
158 iavcc_r
->vc_desc_cnt
--;
159 dev
->desc_tbl
[desc1
-1].timestamp
= 0;
160 IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
161 dev
->desc_tbl
[desc1
-1].txskb
, desc1
);)
162 if (iavcc_r
->pcr
< dev
->rate_limit
) {
163 IA_SKB_STATE (dev
->desc_tbl
[desc1
-1].txskb
) |= IA_TX_DONE
;
164 if (ia_enque_rtn_q(&dev
->tx_return_q
, dev
->desc_tbl
[desc1
-1]) < 0)
165 printk("ia_hack_tcq: No memory available\n");
167 dev
->desc_tbl
[desc1
-1].iavcc
= NULL
;
168 dev
->desc_tbl
[desc1
-1].txskb
= NULL
;
170 dev
->host_tcq_wr
+= 2;
171 if (dev
->host_tcq_wr
> dev
->ffL
.tcq_ed
)
172 dev
->host_tcq_wr
= dev
->ffL
.tcq_st
;
176 static u16
get_desc (IADEV
*dev
, struct ia_vcc
*iavcc
) {
179 struct ia_vcc
*iavcc_r
= NULL
;
181 static unsigned long timer
= 0;
185 if((time_after(jiffies
,timer
+50)) || ((dev
->ffL
.tcq_rd
==dev
->host_tcq_wr
))) {
188 while (i
< dev
->num_tx_desc
) {
189 if (!dev
->desc_tbl
[i
].timestamp
) {
193 ltimeout
= dev
->desc_tbl
[i
].iavcc
->ltimeout
;
194 delta
= jiffies
- dev
->desc_tbl
[i
].timestamp
;
195 if (delta
>= ltimeout
) {
196 IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i
,dev
->desc_tbl
[i
].timestamp
, delta
, jiffies
);)
197 if (dev
->ffL
.tcq_rd
== dev
->ffL
.tcq_st
)
198 dev
->ffL
.tcq_rd
= dev
->ffL
.tcq_ed
;
200 dev
->ffL
.tcq_rd
-= 2;
201 *(u_short
*)(dev
->seg_ram
+ dev
->ffL
.tcq_rd
) = i
+1;
202 if (!(skb
= dev
->desc_tbl
[i
].txskb
) ||
203 !(iavcc_r
= dev
->desc_tbl
[i
].iavcc
))
204 printk("Fatal err, desc table vcc or skb is NULL\n");
206 iavcc_r
->vc_desc_cnt
--;
207 dev
->desc_tbl
[i
].timestamp
= 0;
208 dev
->desc_tbl
[i
].iavcc
= NULL
;
209 dev
->desc_tbl
[i
].txskb
= NULL
;
214 if (dev
->ffL
.tcq_rd
== dev
->host_tcq_wr
)
217 /* Get the next available descriptor number from TCQ */
218 desc_num
= *(u_short
*)(dev
->seg_ram
+ dev
->ffL
.tcq_rd
);
220 while (!desc_num
|| (dev
->desc_tbl
[desc_num
-1]).timestamp
) {
221 dev
->ffL
.tcq_rd
+= 2;
222 if (dev
->ffL
.tcq_rd
> dev
->ffL
.tcq_ed
)
223 dev
->ffL
.tcq_rd
= dev
->ffL
.tcq_st
;
224 if (dev
->ffL
.tcq_rd
== dev
->host_tcq_wr
)
226 desc_num
= *(u_short
*)(dev
->seg_ram
+ dev
->ffL
.tcq_rd
);
229 /* get system time */
230 dev
->desc_tbl
[desc_num
-1].timestamp
= jiffies
;
234 static void clear_lockup (struct atm_vcc
*vcc
, IADEV
*dev
) {
236 vcstatus_t
*vcstatus
;
238 u_short tempCellSlot
, tempFract
;
239 struct main_vc
*abr_vc
= (struct main_vc
*)dev
->MAIN_VC_TABLE_ADDR
;
240 struct ext_vc
*eabr_vc
= (struct ext_vc
*)dev
->EXT_VC_TABLE_ADDR
;
243 if (vcc
->qos
.txtp
.traffic_class
== ATM_ABR
) {
244 vcstatus
= (vcstatus_t
*) &(dev
->testTable
[vcc
->vci
]->vc_status
);
247 if( vcstatus
->cnt
== 0x05 ) {
250 if( eabr_vc
->last_desc
) {
251 if( (abr_vc
->status
& 0x07) == ABR_STATE
/* 0x2 */ ) {
252 /* Wait for 10 Micro sec */
254 if ((eabr_vc
->last_desc
)&&((abr_vc
->status
& 0x07)==ABR_STATE
))
258 tempCellSlot
= abr_vc
->last_cell_slot
;
259 tempFract
= abr_vc
->fraction
;
260 if((tempCellSlot
== dev
->testTable
[vcc
->vci
]->lastTime
)
261 && (tempFract
== dev
->testTable
[vcc
->vci
]->fract
))
263 dev
->testTable
[vcc
->vci
]->lastTime
= tempCellSlot
;
264 dev
->testTable
[vcc
->vci
]->fract
= tempFract
;
266 } /* last descriptor */
268 } /* vcstatus->cnt */
271 IF_ABR(printk("LOCK UP found\n");)
272 writew(0xFFFD, dev
->seg_reg
+MODE_REG_0
);
273 /* Wait for 10 Micro sec */
275 abr_vc
->status
&= 0xFFF8;
276 abr_vc
->status
|= 0x0001; /* state is idle */
277 shd_tbl
= (u_short
*)dev
->ABR_SCHED_TABLE_ADDR
;
278 for( i
= 0; ((i
< dev
->num_vc
) && (shd_tbl
[i
])); i
++ );
280 shd_tbl
[i
] = vcc
->vci
;
282 IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc
->vci
);)
283 writew(T_ONLINE
, dev
->seg_reg
+MODE_REG_0
);
284 writew(~(TRANSMIT_DONE
|TCQ_NOT_EMPTY
), dev
->seg_reg
+SEG_MASK_REG
);
285 writew(TRANSMIT_DONE
, dev
->seg_reg
+SEG_INTR_STATUS_REG
);
295 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
297 ** +----+----+------------------+-------------------------------+
298 ** | R | NZ | 5-bit exponent | 9-bit mantissa |
299 ** +----+----+------------------+-------------------------------+
301 ** R = reserved (written as 0)
302 ** NZ = 0 if 0 cells/sec; 1 otherwise
304 ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
307 cellrate_to_float(u32 cr
)
311 #define M_BITS 9 /* Number of bits in mantissa */
312 #define E_BITS 5 /* Number of bits in exponent */
316 u32 tmp
= cr
& 0x00ffffff;
325 flot
= NZ
| (i
<< M_BITS
) | (cr
& M_MASK
);
327 flot
= NZ
| (i
<< M_BITS
) | ((cr
<< (M_BITS
- i
)) & M_MASK
);
329 flot
= NZ
| (i
<< M_BITS
) | ((cr
>> (i
- M_BITS
)) & M_MASK
);
335 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
338 float_to_cellrate(u16 rate
)
340 u32 exp
, mantissa
, cps
;
341 if ((rate
& NZ
) == 0)
343 exp
= (rate
>> M_BITS
) & E_MASK
;
344 mantissa
= rate
& M_MASK
;
347 cps
= (1 << M_BITS
) | mantissa
;
350 else if (exp
> M_BITS
)
351 cps
<<= (exp
- M_BITS
);
353 cps
>>= (M_BITS
- exp
);
358 static void init_abr_vc (IADEV
*dev
, srv_cls_param_t
*srv_p
) {
359 srv_p
->class_type
= ATM_ABR
;
360 srv_p
->pcr
= dev
->LineRate
;
362 srv_p
->icr
= 0x055cb7;
363 srv_p
->tbe
= 0xffffff;
374 ia_open_abr_vc(IADEV
*dev
, srv_cls_param_t
*srv_p
,
375 struct atm_vcc
*vcc
, u8 flag
)
377 f_vc_abr_entry
*f_abr_vc
;
378 r_vc_abr_entry
*r_abr_vc
;
381 u16 adtf
, air
, *ptr16
;
382 f_abr_vc
=(f_vc_abr_entry
*)dev
->MAIN_VC_TABLE_ADDR
;
383 f_abr_vc
+= vcc
->vci
;
385 case 1: /* FFRED initialization */
386 #if 0 /* sanity check */
389 if (srv_p
->pcr
> dev
->LineRate
)
390 srv_p
->pcr
= dev
->LineRate
;
391 if ((srv_p
->mcr
+ dev
->sum_mcr
) > dev
->LineRate
)
392 return MCR_UNAVAILABLE
;
393 if (srv_p
->mcr
> srv_p
->pcr
)
396 srv_p
->icr
= srv_p
->pcr
;
397 if ((srv_p
->icr
< srv_p
->mcr
) || (srv_p
->icr
> srv_p
->pcr
))
399 if ((srv_p
->tbe
< MIN_TBE
) || (srv_p
->tbe
> MAX_TBE
))
401 if ((srv_p
->frtt
< MIN_FRTT
) || (srv_p
->frtt
> MAX_FRTT
))
403 if (srv_p
->nrm
> MAX_NRM
)
405 if (srv_p
->trm
> MAX_TRM
)
407 if (srv_p
->adtf
> MAX_ADTF
)
409 else if (srv_p
->adtf
== 0)
411 if (srv_p
->cdf
> MAX_CDF
)
413 if (srv_p
->rif
> MAX_RIF
)
415 if (srv_p
->rdf
> MAX_RDF
)
418 memset ((caddr_t
)f_abr_vc
, 0, sizeof(*f_abr_vc
));
419 f_abr_vc
->f_vc_type
= ABR
;
420 nrm
= 2 << srv_p
->nrm
; /* (2 ** (srv_p->nrm +1)) */
421 /* i.e 2**n = 2 << (n-1) */
422 f_abr_vc
->f_nrm
= nrm
<< 8 | nrm
;
423 trm
= 100000/(2 << (16 - srv_p
->trm
));
424 if ( trm
== 0) trm
= 1;
425 f_abr_vc
->f_nrmexp
=(((srv_p
->nrm
+1) & 0x0f) << 12)|(MRM
<< 8) | trm
;
426 crm
= srv_p
->tbe
/ nrm
;
427 if (crm
== 0) crm
= 1;
428 f_abr_vc
->f_crm
= crm
& 0xff;
429 f_abr_vc
->f_pcr
= cellrate_to_float(srv_p
->pcr
);
430 icr
= min( srv_p
->icr
, (srv_p
->tbe
> srv_p
->frtt
) ?
431 ((srv_p
->tbe
/srv_p
->frtt
)*1000000) :
432 (1000000/(srv_p
->frtt
/srv_p
->tbe
)));
433 f_abr_vc
->f_icr
= cellrate_to_float(icr
);
434 adtf
= (10000 * srv_p
->adtf
)/8192;
435 if (adtf
== 0) adtf
= 1;
436 f_abr_vc
->f_cdf
= ((7 - srv_p
->cdf
) << 12 | adtf
) & 0xfff;
437 f_abr_vc
->f_mcr
= cellrate_to_float(srv_p
->mcr
);
438 f_abr_vc
->f_acr
= f_abr_vc
->f_icr
;
439 f_abr_vc
->f_status
= 0x0042;
441 case 0: /* RFRED initialization */
442 ptr16
= (u_short
*)(dev
->reass_ram
+ REASS_TABLE
*dev
->memSize
);
443 *(ptr16
+ vcc
->vci
) = NO_AAL5_PKT
| REASS_ABR
;
444 r_abr_vc
= (r_vc_abr_entry
*)(dev
->reass_ram
+ABR_VC_TABLE
*dev
->memSize
);
445 r_abr_vc
+= vcc
->vci
;
446 r_abr_vc
->r_status_rdf
= (15 - srv_p
->rdf
) & 0x000f;
447 air
= srv_p
->pcr
<< (15 - srv_p
->rif
);
448 if (air
== 0) air
= 1;
449 r_abr_vc
->r_air
= cellrate_to_float(air
);
450 dev
->testTable
[vcc
->vci
]->vc_status
= VC_ACTIVE
| VC_ABR
;
451 dev
->sum_mcr
+= srv_p
->mcr
;
459 static int ia_cbr_setup (IADEV
*dev
, struct atm_vcc
*vcc
) {
460 u32 rateLow
=0, rateHigh
, rate
;
462 struct ia_vcc
*ia_vcc
;
464 int idealSlot
=0, testSlot
, toBeAssigned
, inc
;
466 u16
*SchedTbl
, *TstSchedTbl
;
472 /* IpAdjustTrafficParams */
473 if (vcc
->qos
.txtp
.max_pcr
<= 0) {
474 IF_ERR(printk("PCR for CBR not defined\n");)
477 rate
= vcc
->qos
.txtp
.max_pcr
;
478 entries
= rate
/ dev
->Granularity
;
479 IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
480 entries
, rate
, dev
->Granularity
);)
482 IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
483 rateLow
= entries
* dev
->Granularity
;
484 rateHigh
= (entries
+ 1) * dev
->Granularity
;
485 if (3*(rate
- rateLow
) > (rateHigh
- rate
))
487 if (entries
> dev
->CbrRemEntries
) {
488 IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
489 IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
490 entries
, dev
->CbrRemEntries
);)
494 ia_vcc
= INPH_IA_VCC(vcc
);
495 ia_vcc
->NumCbrEntry
= entries
;
496 dev
->sum_mcr
+= entries
* dev
->Granularity
;
497 /* IaFFrednInsertCbrSched */
498 // Starting at an arbitrary location, place the entries into the table
499 // as smoothly as possible
501 spacing
= dev
->CbrTotEntries
/ entries
;
502 sp_mod
= dev
->CbrTotEntries
% entries
; // get modulo
503 toBeAssigned
= entries
;
506 IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex
,spacing
,sp_mod
);)
509 // If this is the first time, start the table loading for this connection
510 // as close to entryPoint as possible.
511 if (toBeAssigned
== entries
)
513 idealSlot
= dev
->CbrEntryPt
;
514 dev
->CbrEntryPt
+= 2; // Adding 2 helps to prevent clumping
515 if (dev
->CbrEntryPt
>= dev
->CbrTotEntries
)
516 dev
->CbrEntryPt
-= dev
->CbrTotEntries
;// Wrap if necessary
518 idealSlot
+= (u32
)(spacing
+ fracSlot
); // Point to the next location
519 // in the table that would be smoothest
520 fracSlot
= ((sp_mod
+ sp_mod2
) / entries
); // get new integer part
521 sp_mod2
= ((sp_mod
+ sp_mod2
) % entries
); // calc new fractional part
523 if (idealSlot
>= (int)dev
->CbrTotEntries
)
524 idealSlot
-= dev
->CbrTotEntries
;
525 // Continuously check around this ideal value until a null
526 // location is encountered.
527 SchedTbl
= (u16
*)(dev
->seg_ram
+CBR_SCHED_TABLE
*dev
->memSize
);
529 testSlot
= idealSlot
;
530 TstSchedTbl
= (u16
*)(SchedTbl
+testSlot
); //set index and read in value
531 IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
532 testSlot
, TstSchedTbl
,toBeAssigned
);)
533 memcpy((caddr_t
)&cbrVC
,(caddr_t
)TstSchedTbl
,sizeof(cbrVC
));
534 while (cbrVC
) // If another VC at this location, we have to keep looking
537 testSlot
= idealSlot
- inc
;
538 if (testSlot
< 0) { // Wrap if necessary
539 testSlot
+= dev
->CbrTotEntries
;
540 IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
543 TstSchedTbl
= (u16
*)(SchedTbl
+ testSlot
); // set table index
544 memcpy((caddr_t
)&cbrVC
,(caddr_t
)TstSchedTbl
,sizeof(cbrVC
));
547 testSlot
= idealSlot
+ inc
;
548 if (testSlot
>= (int)dev
->CbrTotEntries
) { // Wrap if necessary
549 testSlot
-= dev
->CbrTotEntries
;
550 IF_CBR(printk("TotCbrEntries=%d",dev
->CbrTotEntries
);)
551 IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
552 testSlot
, toBeAssigned
);)
554 // set table index and read in value
555 TstSchedTbl
= (u16
*)(SchedTbl
+ testSlot
);
556 IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
557 TstSchedTbl
,cbrVC
,inc
);)
558 memcpy((caddr_t
)&cbrVC
,(caddr_t
)TstSchedTbl
,sizeof(cbrVC
));
560 // Move this VCI number into this location of the CBR Sched table.
561 memcpy((caddr_t
)TstSchedTbl
, (caddr_t
)&vcIndex
, sizeof(*TstSchedTbl
));
562 dev
->CbrRemEntries
--;
566 /* IaFFrednCbrEnable */
567 dev
->NumEnabledCBR
++;
568 if (dev
->NumEnabledCBR
== 1) {
569 writew((CBR_EN
| UBR_EN
| ABR_EN
| (0x23 << 2)), dev
->seg_reg
+STPARMS
);
570 IF_CBR(printk("CBR is enabled\n");)
574 static void ia_cbrVc_close (struct atm_vcc
*vcc
) {
576 u16
*SchedTbl
, NullVci
= 0;
579 iadev
= INPH_IA_DEV(vcc
->dev
);
580 iadev
->NumEnabledCBR
--;
581 SchedTbl
= (u16
*)(iadev
->seg_ram
+CBR_SCHED_TABLE
*iadev
->memSize
);
582 if (iadev
->NumEnabledCBR
== 0) {
583 writew((UBR_EN
| ABR_EN
| (0x23 << 2)), iadev
->seg_reg
+STPARMS
);
584 IF_CBR (printk("CBR support disabled\n");)
587 for (i
=0; i
< iadev
->CbrTotEntries
; i
++)
589 if (*SchedTbl
== vcc
->vci
) {
590 iadev
->CbrRemEntries
++;
596 IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound
);)
599 static int ia_avail_descs(IADEV
*iadev
) {
602 if (iadev
->host_tcq_wr
>= iadev
->ffL
.tcq_rd
)
603 tmp
= (iadev
->host_tcq_wr
- iadev
->ffL
.tcq_rd
) / 2;
605 tmp
= (iadev
->ffL
.tcq_ed
- iadev
->ffL
.tcq_rd
+ 2 + iadev
->host_tcq_wr
-
606 iadev
->ffL
.tcq_st
) / 2;
610 static int ia_pkt_tx (struct atm_vcc
*vcc
, struct sk_buff
*skb
);
612 static int ia_que_tx (IADEV
*iadev
) {
616 num_desc
= ia_avail_descs(iadev
);
618 while (num_desc
&& (skb
= skb_dequeue(&iadev
->tx_backlog
))) {
619 if (!(vcc
= ATM_SKB(skb
)->vcc
)) {
620 dev_kfree_skb_any(skb
);
621 printk("ia_que_tx: Null vcc\n");
624 if (!test_bit(ATM_VF_READY
,&vcc
->flags
)) {
625 dev_kfree_skb_any(skb
);
626 printk("Free the SKB on closed vci %d \n", vcc
->vci
);
629 if (ia_pkt_tx (vcc
, skb
)) {
630 skb_queue_head(&iadev
->tx_backlog
, skb
);
637 static void ia_tx_poll (IADEV
*iadev
) {
638 struct atm_vcc
*vcc
= NULL
;
639 struct sk_buff
*skb
= NULL
, *skb1
= NULL
;
640 struct ia_vcc
*iavcc
;
644 while ( (rtne
= ia_deque_rtn_q(&iadev
->tx_return_q
))) {
645 skb
= rtne
->data
.txskb
;
647 printk("ia_tx_poll: skb is null\n");
650 vcc
= ATM_SKB(skb
)->vcc
;
652 printk("ia_tx_poll: vcc is null\n");
653 dev_kfree_skb_any(skb
);
657 iavcc
= INPH_IA_VCC(vcc
);
659 printk("ia_tx_poll: iavcc is null\n");
660 dev_kfree_skb_any(skb
);
664 skb1
= skb_dequeue(&iavcc
->txing_skb
);
665 while (skb1
&& (skb1
!= skb
)) {
666 if (!(IA_SKB_STATE(skb1
) & IA_TX_DONE
)) {
667 printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc
->vci
);
669 IF_ERR(printk("Release the SKB not match\n");)
670 if ((vcc
->pop
) && (skb1
->len
!= 0))
673 IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
677 dev_kfree_skb_any(skb1
);
678 skb1
= skb_dequeue(&iavcc
->txing_skb
);
681 IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc
->vci
);)
682 ia_enque_head_rtn_q (&iadev
->tx_return_q
, rtne
);
685 if ((vcc
->pop
) && (skb
->len
!= 0))
688 IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb
);)
691 dev_kfree_skb_any(skb
);
699 static void ia_eeprom_put (IADEV
*iadev
, u32 addr
, u_short val
)
704 * Issue a command to enable writes to the NOVRAM
706 NVRAM_CMD (EXTEND
+ EWEN
);
709 * issue the write command
711 NVRAM_CMD(IAWRITE
+ addr
);
713 * Send the data, starting with D15, then D14, and so on for 16 bits
715 for (i
=15; i
>=0; i
--) {
716 NVRAM_CLKOUT (val
& 0x8000);
721 t
= readl(iadev
->reg
+IPHASE5575_EEPROM_ACCESS
);
723 t
= readl(iadev
->reg
+IPHASE5575_EEPROM_ACCESS
);
727 * disable writes again
729 NVRAM_CMD(EXTEND
+ EWDS
)
735 static u16
ia_eeprom_get (IADEV
*iadev
, u32 addr
)
741 * Read the first bit that was clocked with the falling edge of the
742 * the last command data clock
744 NVRAM_CMD(IAREAD
+ addr
);
746 * Now read the rest of the bits, the next bit read is D14, then D13,
750 for (i
=15; i
>=0; i
--) {
759 static void ia_hw_type(IADEV
*iadev
) {
760 u_short memType
= ia_eeprom_get(iadev
, 25);
761 iadev
->memType
= memType
;
762 if ((memType
& MEM_SIZE_MASK
) == MEM_SIZE_1M
) {
763 iadev
->num_tx_desc
= IA_TX_BUF
;
764 iadev
->tx_buf_sz
= IA_TX_BUF_SZ
;
765 iadev
->num_rx_desc
= IA_RX_BUF
;
766 iadev
->rx_buf_sz
= IA_RX_BUF_SZ
;
767 } else if ((memType
& MEM_SIZE_MASK
) == MEM_SIZE_512K
) {
768 if (IA_TX_BUF
== DFL_TX_BUFFERS
)
769 iadev
->num_tx_desc
= IA_TX_BUF
/ 2;
771 iadev
->num_tx_desc
= IA_TX_BUF
;
772 iadev
->tx_buf_sz
= IA_TX_BUF_SZ
;
773 if (IA_RX_BUF
== DFL_RX_BUFFERS
)
774 iadev
->num_rx_desc
= IA_RX_BUF
/ 2;
776 iadev
->num_rx_desc
= IA_RX_BUF
;
777 iadev
->rx_buf_sz
= IA_RX_BUF_SZ
;
780 if (IA_TX_BUF
== DFL_TX_BUFFERS
)
781 iadev
->num_tx_desc
= IA_TX_BUF
/ 8;
783 iadev
->num_tx_desc
= IA_TX_BUF
;
784 iadev
->tx_buf_sz
= IA_TX_BUF_SZ
;
785 if (IA_RX_BUF
== DFL_RX_BUFFERS
)
786 iadev
->num_rx_desc
= IA_RX_BUF
/ 8;
788 iadev
->num_rx_desc
= IA_RX_BUF
;
789 iadev
->rx_buf_sz
= IA_RX_BUF_SZ
;
791 iadev
->rx_pkt_ram
= TX_PACKET_RAM
+ (iadev
->num_tx_desc
* iadev
->tx_buf_sz
);
792 IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
793 iadev
->num_tx_desc
, iadev
->tx_buf_sz
, iadev
->num_rx_desc
,
794 iadev
->rx_buf_sz
, iadev
->rx_pkt_ram
);)
797 if ((memType
& FE_MASK
) == FE_SINGLE_MODE
) {
798 iadev
->phy_type
= PHY_OC3C_S
;
799 else if ((memType
& FE_MASK
) == FE_UTP_OPTION
)
800 iadev
->phy_type
= PHY_UTP155
;
802 iadev
->phy_type
= PHY_OC3C_M
;
805 iadev
->phy_type
= memType
& FE_MASK
;
806 IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
807 memType
,iadev
->phy_type
);)
808 if (iadev
->phy_type
== FE_25MBIT_PHY
)
809 iadev
->LineRate
= (u32
)(((25600000/8)*26)/(27*53));
810 else if (iadev
->phy_type
== FE_DS3_PHY
)
811 iadev
->LineRate
= (u32
)(((44736000/8)*26)/(27*53));
812 else if (iadev
->phy_type
== FE_E3_PHY
)
813 iadev
->LineRate
= (u32
)(((34368000/8)*26)/(27*53));
815 iadev
->LineRate
= (u32
)(ATM_OC3_PCR
);
816 IF_INIT(printk("iadev->LineRate = %d \n", iadev
->LineRate
);)
820 static u32
ia_phy_read32(struct iadev_priv
*ia
, unsigned int reg
)
822 return readl(ia
->phy
+ (reg
>> 2));
825 static void ia_phy_write32(struct iadev_priv
*ia
, unsigned int reg
, u32 val
)
827 writel(val
, ia
->phy
+ (reg
>> 2));
830 static void ia_frontend_intr(struct iadev_priv
*iadev
)
834 if (iadev
->phy_type
& FE_25MBIT_PHY
) {
835 status
= ia_phy_read32(iadev
, MB25_INTR_STATUS
);
836 iadev
->carrier_detect
= (status
& MB25_IS_GSB
) ? 1 : 0;
837 } else if (iadev
->phy_type
& FE_DS3_PHY
) {
838 ia_phy_read32(iadev
, SUNI_DS3_FRM_INTR_STAT
);
839 status
= ia_phy_read32(iadev
, SUNI_DS3_FRM_STAT
);
840 iadev
->carrier_detect
= (status
& SUNI_DS3_LOSV
) ? 0 : 1;
841 } else if (iadev
->phy_type
& FE_E3_PHY
) {
842 ia_phy_read32(iadev
, SUNI_E3_FRM_MAINT_INTR_IND
);
843 status
= ia_phy_read32(iadev
, SUNI_E3_FRM_FRAM_INTR_IND_STAT
);
844 iadev
->carrier_detect
= (status
& SUNI_E3_LOS
) ? 0 : 1;
846 status
= ia_phy_read32(iadev
, SUNI_RSOP_STATUS
);
847 iadev
->carrier_detect
= (status
& SUNI_LOSV
) ? 0 : 1;
850 printk(KERN_INFO
"IA: SUNI carrier %s\n",
851 iadev
->carrier_detect
? "detected" : "lost signal");
854 static void ia_mb25_init(struct iadev_priv
*iadev
)
857 mb25
->mb25_master_ctrl
= MB25_MC_DRIC
| MB25_MC_DREC
| MB25_MC_ENABLED
;
859 ia_phy_write32(iadev
, MB25_MASTER_CTRL
, MB25_MC_DRIC
| MB25_MC_DREC
);
860 ia_phy_write32(iadev
, MB25_DIAG_CONTROL
, 0);
862 iadev
->carrier_detect
=
863 (ia_phy_read32(iadev
, MB25_INTR_STATUS
) & MB25_IS_GSB
) ? 1 : 0;
871 static void ia_phy_write(struct iadev_priv
*iadev
,
872 const struct ia_reg
*regs
, int len
)
875 ia_phy_write32(iadev
, regs
->reg
, regs
->val
);
880 static void ia_suni_pm7345_init_ds3(struct iadev_priv
*iadev
)
882 static const struct ia_reg suni_ds3_init
[] = {
883 { SUNI_DS3_FRM_INTR_ENBL
, 0x17 },
884 { SUNI_DS3_FRM_CFG
, 0x01 },
885 { SUNI_DS3_TRAN_CFG
, 0x01 },
887 { SUNI_SPLR_CFG
, 0 },
892 status
= ia_phy_read32(iadev
, SUNI_DS3_FRM_STAT
);
893 iadev
->carrier_detect
= (status
& SUNI_DS3_LOSV
) ? 0 : 1;
895 ia_phy_write(iadev
, suni_ds3_init
, ARRAY_SIZE(suni_ds3_init
));
898 static void ia_suni_pm7345_init_e3(struct iadev_priv
*iadev
)
900 static const struct ia_reg suni_e3_init
[] = {
901 { SUNI_E3_FRM_FRAM_OPTIONS
, 0x04 },
902 { SUNI_E3_FRM_MAINT_OPTIONS
, 0x20 },
903 { SUNI_E3_FRM_FRAM_INTR_ENBL
, 0x1d },
904 { SUNI_E3_FRM_MAINT_INTR_ENBL
, 0x30 },
905 { SUNI_E3_TRAN_STAT_DIAG_OPTIONS
, 0 },
906 { SUNI_E3_TRAN_FRAM_OPTIONS
, 0x01 },
907 { SUNI_CONFIG
, SUNI_PM7345_E3ENBL
},
908 { SUNI_SPLR_CFG
, 0x41 },
909 { SUNI_SPLT_CFG
, 0x41 }
913 status
= ia_phy_read32(iadev
, SUNI_E3_FRM_FRAM_INTR_IND_STAT
);
914 iadev
->carrier_detect
= (status
& SUNI_E3_LOS
) ? 0 : 1;
915 ia_phy_write(iadev
, suni_e3_init
, ARRAY_SIZE(suni_e3_init
));
918 static void ia_suni_pm7345_init(struct iadev_priv
*iadev
)
920 static const struct ia_reg suni_init
[] = {
921 /* Enable RSOP loss of signal interrupt. */
922 { SUNI_INTR_ENBL
, 0x28 },
923 /* Clear error counters. */
924 { SUNI_ID_RESET
, 0 },
925 /* Clear "PMCTST" in master test register. */
926 { SUNI_MASTER_TEST
, 0 },
928 { SUNI_RXCP_CTRL
, 0x2c },
929 { SUNI_RXCP_FCTRL
, 0x81 },
931 { SUNI_RXCP_IDLE_PAT_H1
, 0 },
932 { SUNI_RXCP_IDLE_PAT_H2
, 0 },
933 { SUNI_RXCP_IDLE_PAT_H3
, 0 },
934 { SUNI_RXCP_IDLE_PAT_H4
, 0x01 },
936 { SUNI_RXCP_IDLE_MASK_H1
, 0xff },
937 { SUNI_RXCP_IDLE_MASK_H2
, 0xff },
938 { SUNI_RXCP_IDLE_MASK_H3
, 0xff },
939 { SUNI_RXCP_IDLE_MASK_H4
, 0xfe },
941 { SUNI_RXCP_CELL_PAT_H1
, 0 },
942 { SUNI_RXCP_CELL_PAT_H2
, 0 },
943 { SUNI_RXCP_CELL_PAT_H3
, 0 },
944 { SUNI_RXCP_CELL_PAT_H4
, 0x01 },
946 { SUNI_RXCP_CELL_MASK_H1
, 0xff },
947 { SUNI_RXCP_CELL_MASK_H2
, 0xff },
948 { SUNI_RXCP_CELL_MASK_H3
, 0xff },
949 { SUNI_RXCP_CELL_MASK_H4
, 0xff },
951 { SUNI_TXCP_CTRL
, 0xa4 },
952 { SUNI_TXCP_INTR_EN_STS
, 0x10 },
953 { SUNI_TXCP_IDLE_PAT_H5
, 0x55 }
956 if (iadev
->phy_type
& FE_DS3_PHY
)
957 ia_suni_pm7345_init_ds3(iadev
);
959 ia_suni_pm7345_init_e3(iadev
);
961 ia_phy_write(iadev
, suni_init
, ARRAY_SIZE(suni_init
));
963 ia_phy_write32(iadev
, SUNI_CONFIG
, ia_phy_read32(iadev
, SUNI_CONFIG
) &
964 ~(SUNI_PM7345_LLB
| SUNI_PM7345_CLB
|
965 SUNI_PM7345_DLB
| SUNI_PM7345_PLB
));
967 suni_pm7345
->suni_rxcp_intr_en_sts
|= SUNI_OOCDE
;
968 #endif /* __SNMP__ */
973 /***************************** IA_LIB END *****************************/
975 #ifdef CONFIG_ATM_IA_DEBUG
976 static int tcnter
= 0;
977 static void xdump( u_char
* cp
, int length
, char* prefix
)
981 u_char
* pBuf
= prntBuf
;
983 while(count
< length
){
984 pBuf
+= sprintf( pBuf
, "%s", prefix
);
985 for(col
= 0;count
+ col
< length
&& col
< 16; col
++){
986 if (col
!= 0 && (col
% 4) == 0)
987 pBuf
+= sprintf( pBuf
, " " );
988 pBuf
+= sprintf( pBuf
, "%02X ", cp
[count
+ col
] );
990 while(col
++ < 16){ /* pad end of buffer with blanks */
992 sprintf( pBuf
, " " );
993 pBuf
+= sprintf( pBuf
, " " );
995 pBuf
+= sprintf( pBuf
, " " );
996 for(col
= 0;count
+ col
< length
&& col
< 16; col
++){
997 if (isprint((int)cp
[count
+ col
]))
998 pBuf
+= sprintf( pBuf
, "%c", cp
[count
+ col
] );
1000 pBuf
+= sprintf( pBuf
, "." );
1002 printk("%s\n", prntBuf
);
1007 } /* close xdump(... */
1008 #endif /* CONFIG_ATM_IA_DEBUG */
1011 static struct atm_dev
*ia_boards
= NULL
;
1013 #define ACTUAL_RAM_BASE \
1014 RAM_BASE*((iadev->mem)/(128 * 1024))
1015 #define ACTUAL_SEG_RAM_BASE \
1016 IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1017 #define ACTUAL_REASS_RAM_BASE \
1018 IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1021 /*-- some utilities and memory allocation stuff will come here -------------*/
1023 static void desc_dbg(IADEV
*iadev
) {
1025 u_short tcq_wr_ptr
, tcq_st_ptr
, tcq_ed_ptr
;
1028 // regval = readl((u32)ia_cmds->maddr);
1029 tcq_wr_ptr
= readw(iadev
->seg_reg
+TCQ_WR_PTR
);
1030 printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1031 tcq_wr_ptr
, readw(iadev
->seg_ram
+tcq_wr_ptr
),
1032 readw(iadev
->seg_ram
+tcq_wr_ptr
-2));
1033 printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev
->host_tcq_wr
,
1035 tcq_st_ptr
= readw(iadev
->seg_reg
+TCQ_ST_ADR
);
1036 tcq_ed_ptr
= readw(iadev
->seg_reg
+TCQ_ED_ADR
);
1037 printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr
, tcq_ed_ptr
);
1039 while (tcq_st_ptr
!= tcq_ed_ptr
) {
1040 tmp
= iadev
->seg_ram
+tcq_st_ptr
;
1041 printk("TCQ slot %d desc = %d Addr = %p\n", i
++, readw(tmp
), tmp
);
1044 for(i
=0; i
<iadev
->num_tx_desc
; i
++)
1045 printk("Desc_tbl[%d] = %d \n", i
, iadev
->desc_tbl
[i
].timestamp
);
1049 /*----------------------------- Receiving side stuff --------------------------*/
1051 static void rx_excp_rcvd(struct atm_dev
*dev
)
1053 #if 0 /* closing the receiving size will cause too many excp int */
1056 u_short excpq_rd_ptr
;
1059 iadev
= INPH_IA_DEV(dev
);
1060 state
= readl(iadev
->reass_reg
+ STATE_REG
) & 0xffff;
1061 while((state
& EXCPQ_EMPTY
) != EXCPQ_EMPTY
)
1062 { printk("state = %x \n", state
);
1063 excpq_rd_ptr
= readw(iadev
->reass_reg
+ EXCP_Q_RD_PTR
) & 0xffff;
1064 printk("state = %x excpq_rd_ptr = %x \n", state
, excpq_rd_ptr
);
1065 if (excpq_rd_ptr
== *(u16
*)(iadev
->reass_reg
+ EXCP_Q_WR_PTR
))
1066 IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1067 // TODO: update exception stat
1068 vci
= readw(iadev
->reass_ram
+excpq_rd_ptr
);
1069 error
= readw(iadev
->reass_ram
+excpq_rd_ptr
+2) & 0x0007;
1072 if (excpq_rd_ptr
> (readw(iadev
->reass_reg
+ EXCP_Q_ED_ADR
)& 0xffff))
1073 excpq_rd_ptr
= readw(iadev
->reass_reg
+ EXCP_Q_ST_ADR
)& 0xffff;
1074 writew( excpq_rd_ptr
, iadev
->reass_reg
+ EXCP_Q_RD_PTR
);
1075 state
= readl(iadev
->reass_reg
+ STATE_REG
) & 0xffff;
1080 static void free_desc(struct atm_dev
*dev
, int desc
)
1083 iadev
= INPH_IA_DEV(dev
);
1084 writew(desc
, iadev
->reass_ram
+iadev
->rfL
.fdq_wr
);
1085 iadev
->rfL
.fdq_wr
+=2;
1086 if (iadev
->rfL
.fdq_wr
> iadev
->rfL
.fdq_ed
)
1087 iadev
->rfL
.fdq_wr
= iadev
->rfL
.fdq_st
;
1088 writew(iadev
->rfL
.fdq_wr
, iadev
->reass_reg
+FREEQ_WR_PTR
);
1092 static int rx_pkt(struct atm_dev
*dev
)
1095 struct atm_vcc
*vcc
;
1096 unsigned short status
;
1097 struct rx_buf_desc __iomem
*buf_desc_ptr
;
1101 struct sk_buff
*skb
;
1102 u_int buf_addr
, dma_addr
;
1104 iadev
= INPH_IA_DEV(dev
);
1105 if (iadev
->rfL
.pcq_rd
== (readw(iadev
->reass_reg
+PCQ_WR_PTR
)&0xffff))
1107 printk(KERN_ERR DEV_LABEL
"(itf %d) Receive queue empty\n", dev
->number
);
1110 /* mask 1st 3 bits to get the actual descno. */
1111 desc
= readw(iadev
->reass_ram
+iadev
->rfL
.pcq_rd
) & 0x1fff;
1112 IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1113 iadev
->reass_ram
, iadev
->rfL
.pcq_rd
, desc
);
1114 printk(" pcq_wr_ptr = 0x%x\n",
1115 readw(iadev
->reass_reg
+PCQ_WR_PTR
)&0xffff);)
1116 /* update the read pointer - maybe we should do this at the end */
1117 if ( iadev
->rfL
.pcq_rd
== iadev
->rfL
.pcq_ed
)
1118 iadev
->rfL
.pcq_rd
= iadev
->rfL
.pcq_st
;
1120 iadev
->rfL
.pcq_rd
+= 2;
1121 writew(iadev
->rfL
.pcq_rd
, iadev
->reass_reg
+PCQ_RD_PTR
);
1123 /* get the buffer desc entry.
1124 update stuff. - doesn't seem to be any update necessary
1126 buf_desc_ptr
= iadev
->RX_DESC_BASE_ADDR
;
1127 /* make the ptr point to the corresponding buffer desc entry */
1128 buf_desc_ptr
+= desc
;
1129 if (!desc
|| (desc
> iadev
->num_rx_desc
) ||
1130 ((buf_desc_ptr
->vc_index
& 0xffff) > iadev
->num_vc
)) {
1131 free_desc(dev
, desc
);
1132 IF_ERR(printk("IA: bad descriptor desc = %d \n", desc
);)
1135 vcc
= iadev
->rx_open
[buf_desc_ptr
->vc_index
& 0xffff];
1138 free_desc(dev
, desc
);
1139 printk("IA: null vcc, drop PDU\n");
1144 /* might want to check the status bits for errors */
1145 status
= (u_short
) (buf_desc_ptr
->desc_mode
);
1146 if (status
& (RX_CER
| RX_PTE
| RX_OFL
))
1148 atomic_inc(&vcc
->stats
->rx_err
);
1149 IF_ERR(printk("IA: bad packet, dropping it");)
1150 if (status
& RX_CER
) {
1151 IF_ERR(printk(" cause: packet CRC error\n");)
1153 else if (status
& RX_PTE
) {
1154 IF_ERR(printk(" cause: packet time out\n");)
1157 IF_ERR(printk(" cause: buffer overflow\n");)
1166 buf_addr
= (buf_desc_ptr
->buf_start_hi
<< 16) | buf_desc_ptr
->buf_start_lo
;
1167 dma_addr
= (buf_desc_ptr
->dma_start_hi
<< 16) | buf_desc_ptr
->dma_start_lo
;
1168 len
= dma_addr
- buf_addr
;
1169 if (len
> iadev
->rx_buf_sz
) {
1170 printk("Over %d bytes sdu received, dropped!!!\n", iadev
->rx_buf_sz
);
1171 atomic_inc(&vcc
->stats
->rx_err
);
1175 if (!(skb
= atm_alloc_charge(vcc
, len
, GFP_ATOMIC
))) {
1177 printk("Drop control packets\n");
1182 ATM_SKB(skb
)->vcc
= vcc
;
1183 ATM_DESC(skb
) = desc
;
1184 skb_queue_tail(&iadev
->rx_dma_q
, skb
);
1186 /* Build the DLE structure */
1187 wr_ptr
= iadev
->rx_dle_q
.write
;
1188 wr_ptr
->sys_pkt_addr
= pci_map_single(iadev
->pci
, skb
->data
,
1189 len
, PCI_DMA_FROMDEVICE
);
1190 wr_ptr
->local_pkt_addr
= buf_addr
;
1191 wr_ptr
->bytes
= len
; /* We don't know this do we ?? */
1192 wr_ptr
->mode
= DMA_INT_ENABLE
;
1194 /* shud take care of wrap around here too. */
1195 if(++wr_ptr
== iadev
->rx_dle_q
.end
)
1196 wr_ptr
= iadev
->rx_dle_q
.start
;
1197 iadev
->rx_dle_q
.write
= wr_ptr
;
1199 /* Increment transaction counter */
1200 writel(1, iadev
->dma
+IPHASE5575_RX_COUNTER
);
1203 free_desc(dev
, desc
);
1207 static void rx_intr(struct atm_dev
*dev
)
1213 iadev
= INPH_IA_DEV(dev
);
1214 status
= readl(iadev
->reass_reg
+REASS_INTR_STATUS_REG
) & 0xffff;
1215 IF_EVENT(printk("rx_intr: status = 0x%x\n", status
);)
1216 if (status
& RX_PKT_RCVD
)
1219 /* Basically recvd an interrupt for receiving a packet.
1220 A descriptor would have been written to the packet complete
1221 queue. Get all the descriptors and set up dma to move the
1222 packets till the packet complete queue is empty..
1224 state
= readl(iadev
->reass_reg
+ STATE_REG
) & 0xffff;
1225 IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status
);)
1226 while(!(state
& PCQ_EMPTY
))
1229 state
= readl(iadev
->reass_reg
+ STATE_REG
) & 0xffff;
1233 if (status
& RX_FREEQ_EMPT
)
1236 iadev
->rx_tmp_cnt
= iadev
->rx_pkt_cnt
;
1237 iadev
->rx_tmp_jif
= jiffies
;
1240 else if ((time_after(jiffies
, iadev
->rx_tmp_jif
+ 50)) &&
1241 ((iadev
->rx_pkt_cnt
- iadev
->rx_tmp_cnt
) == 0)) {
1242 for (i
= 1; i
<= iadev
->num_rx_desc
; i
++)
1244 printk("Test logic RUN!!!!\n");
1245 writew( ~(RX_FREEQ_EMPT
|RX_EXCP_RCVD
),iadev
->reass_reg
+REASS_MASK_REG
);
1248 IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status
);)
1251 if (status
& RX_EXCP_RCVD
)
1253 /* probably need to handle the exception queue also. */
1254 IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status
);)
1259 if (status
& RX_RAW_RCVD
)
1261 /* need to handle the raw incoming cells. This deepnds on
1262 whether we have programmed to receive the raw cells or not.
1264 IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status
);)
1269 static void rx_dle_intr(struct atm_dev
*dev
)
1272 struct atm_vcc
*vcc
;
1273 struct sk_buff
*skb
;
1276 struct dle
*dle
, *cur_dle
;
1279 iadev
= INPH_IA_DEV(dev
);
1281 /* free all the dles done, that is just update our own dle read pointer
1282 - do we really need to do this. Think not. */
1283 /* DMA is done, just get all the recevie buffers from the rx dma queue
1284 and push them up to the higher layer protocol. Also free the desc
1285 associated with the buffer. */
1286 dle
= iadev
->rx_dle_q
.read
;
1287 dle_lp
= readl(iadev
->dma
+IPHASE5575_RX_LIST_ADDR
) & (sizeof(struct dle
)*DLE_ENTRIES
- 1);
1288 cur_dle
= (struct dle
*)(iadev
->rx_dle_q
.start
+ (dle_lp
>> 4));
1289 while(dle
!= cur_dle
)
1291 /* free the DMAed skb */
1292 skb
= skb_dequeue(&iadev
->rx_dma_q
);
1295 desc
= ATM_DESC(skb
);
1296 free_desc(dev
, desc
);
1298 if (!(len
= skb
->len
))
1300 printk("rx_dle_intr: skb len 0\n");
1301 dev_kfree_skb_any(skb
);
1305 struct cpcs_trailer
*trailer
;
1307 struct ia_vcc
*ia_vcc
;
1309 pci_unmap_single(iadev
->pci
, iadev
->rx_dle_q
.write
->sys_pkt_addr
,
1310 len
, PCI_DMA_FROMDEVICE
);
1311 /* no VCC related housekeeping done as yet. lets see */
1312 vcc
= ATM_SKB(skb
)->vcc
;
1314 printk("IA: null vcc\n");
1315 dev_kfree_skb_any(skb
);
1318 ia_vcc
= INPH_IA_VCC(vcc
);
1321 atomic_inc(&vcc
->stats
->rx_err
);
1322 atm_return(vcc
, skb
->truesize
);
1323 dev_kfree_skb_any(skb
);
1326 // get real pkt length pwang_test
1327 trailer
= (struct cpcs_trailer
*)((u_char
*)skb
->data
+
1328 skb
->len
- sizeof(*trailer
));
1329 length
= swap_byte_order(trailer
->length
);
1330 if ((length
> iadev
->rx_buf_sz
) || (length
>
1331 (skb
->len
- sizeof(struct cpcs_trailer
))))
1333 atomic_inc(&vcc
->stats
->rx_err
);
1334 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
1336 atm_return(vcc
, skb
->truesize
);
1337 dev_kfree_skb_any(skb
);
1340 skb_trim(skb
, length
);
1342 /* Display the packet */
1343 IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb
->len
);
1344 xdump(skb
->data
, skb
->len
, "RX: ");
1347 IF_RX(printk("rx_dle_intr: skb push");)
1349 atomic_inc(&vcc
->stats
->rx
);
1350 iadev
->rx_pkt_cnt
++;
1353 if (++dle
== iadev
->rx_dle_q
.end
)
1354 dle
= iadev
->rx_dle_q
.start
;
1356 iadev
->rx_dle_q
.read
= dle
;
1358 /* if the interrupts are masked because there were no free desc available,
1360 if (!iadev
->rxing
) {
1361 state
= readl(iadev
->reass_reg
+ STATE_REG
) & 0xffff;
1362 if (!(state
& FREEQ_EMPTY
)) {
1363 state
= readl(iadev
->reass_reg
+ REASS_MASK_REG
) & 0xffff;
1364 writel(state
& ~(RX_FREEQ_EMPT
|/* RX_EXCP_RCVD |*/ RX_PKT_RCVD
),
1365 iadev
->reass_reg
+REASS_MASK_REG
);
1372 static int open_rx(struct atm_vcc
*vcc
)
1375 u_short __iomem
*vc_table
;
1376 u_short __iomem
*reass_ptr
;
1377 IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc
->vpi
, vcc
->vci
);)
1379 if (vcc
->qos
.rxtp
.traffic_class
== ATM_NONE
) return 0;
1380 iadev
= INPH_IA_DEV(vcc
->dev
);
1381 if (vcc
->qos
.rxtp
.traffic_class
== ATM_ABR
) {
1382 if (iadev
->phy_type
& FE_25MBIT_PHY
) {
1383 printk("IA: ABR not support\n");
1387 /* Make only this VCI in the vc table valid and let all
1388 others be invalid entries */
1389 vc_table
= iadev
->reass_ram
+RX_VC_TABLE
*iadev
->memSize
;
1390 vc_table
+= vcc
->vci
;
1391 /* mask the last 6 bits and OR it with 3 for 1K VCs */
1393 *vc_table
= vcc
->vci
<< 6;
1394 /* Also keep a list of open rx vcs so that we can attach them with
1395 incoming PDUs later. */
1396 if ((vcc
->qos
.rxtp
.traffic_class
== ATM_ABR
) ||
1397 (vcc
->qos
.txtp
.traffic_class
== ATM_ABR
))
1399 srv_cls_param_t srv_p
;
1400 init_abr_vc(iadev
, &srv_p
);
1401 ia_open_abr_vc(iadev
, &srv_p
, vcc
, 0);
1403 else { /* for UBR later may need to add CBR logic */
1404 reass_ptr
= iadev
->reass_ram
+REASS_TABLE
*iadev
->memSize
;
1405 reass_ptr
+= vcc
->vci
;
1406 *reass_ptr
= NO_AAL5_PKT
;
1409 if (iadev
->rx_open
[vcc
->vci
])
1410 printk(KERN_CRIT DEV_LABEL
"(itf %d): VCI %d already open\n",
1411 vcc
->dev
->number
, vcc
->vci
);
1412 iadev
->rx_open
[vcc
->vci
] = vcc
;
1416 static int rx_init(struct atm_dev
*dev
)
1419 struct rx_buf_desc __iomem
*buf_desc_ptr
;
1420 unsigned long rx_pkt_start
= 0;
1422 struct abr_vc_table
*abr_vc_table
;
1425 int i
,j
, vcsize_sel
;
1426 u_short freeq_st_adr
;
1427 u_short
*freeq_start
;
1429 iadev
= INPH_IA_DEV(dev
);
1430 // spin_lock_init(&iadev->rx_lock);
1432 /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1433 dle_addr
= pci_alloc_consistent(iadev
->pci
, DLE_TOTAL_SIZE
,
1434 &iadev
->rx_dle_dma
);
1436 printk(KERN_ERR DEV_LABEL
"can't allocate DLEs\n");
1439 iadev
->rx_dle_q
.start
= (struct dle
*)dle_addr
;
1440 iadev
->rx_dle_q
.read
= iadev
->rx_dle_q
.start
;
1441 iadev
->rx_dle_q
.write
= iadev
->rx_dle_q
.start
;
1442 iadev
->rx_dle_q
.end
= (struct dle
*)((unsigned long)dle_addr
+sizeof(struct dle
)*DLE_ENTRIES
);
1443 /* the end of the dle q points to the entry after the last
1444 DLE that can be used. */
1446 /* write the upper 20 bits of the start address to rx list address register */
1447 /* We know this is 32bit bus addressed so the following is safe */
1448 writel(iadev
->rx_dle_dma
& 0xfffff000,
1449 iadev
->dma
+ IPHASE5575_RX_LIST_ADDR
);
1450 IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1451 iadev
->dma
+IPHASE5575_TX_LIST_ADDR
,
1452 readl(iadev
->dma
+ IPHASE5575_TX_LIST_ADDR
));
1453 printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1454 iadev
->dma
+IPHASE5575_RX_LIST_ADDR
,
1455 readl(iadev
->dma
+ IPHASE5575_RX_LIST_ADDR
));)
1457 writew(0xffff, iadev
->reass_reg
+REASS_MASK_REG
);
1458 writew(0, iadev
->reass_reg
+MODE_REG
);
1459 writew(RESET_REASS
, iadev
->reass_reg
+REASS_COMMAND_REG
);
1461 /* Receive side control memory map
1462 -------------------------------
1464 Buffer descr 0x0000 (736 - 23K)
1465 VP Table 0x5c00 (256 - 512)
1466 Except q 0x5e00 (128 - 512)
1467 Free buffer q 0x6000 (1K - 2K)
1468 Packet comp q 0x6800 (1K - 2K)
1469 Reass Table 0x7000 (1K - 2K)
1470 VC Table 0x7800 (1K - 2K)
1471 ABR VC Table 0x8000 (1K - 32K)
1474 /* Base address for Buffer Descriptor Table */
1475 writew(RX_DESC_BASE
>> 16, iadev
->reass_reg
+REASS_DESC_BASE
);
1476 /* Set the buffer size register */
1477 writew(iadev
->rx_buf_sz
, iadev
->reass_reg
+BUF_SIZE
);
1479 /* Initialize each entry in the Buffer Descriptor Table */
1480 iadev
->RX_DESC_BASE_ADDR
= iadev
->reass_ram
+RX_DESC_BASE
*iadev
->memSize
;
1481 buf_desc_ptr
= iadev
->RX_DESC_BASE_ADDR
;
1482 memset_io(buf_desc_ptr
, 0, sizeof(*buf_desc_ptr
));
1484 rx_pkt_start
= iadev
->rx_pkt_ram
;
1485 for(i
=1; i
<=iadev
->num_rx_desc
; i
++)
1487 memset_io(buf_desc_ptr
, 0, sizeof(*buf_desc_ptr
));
1488 buf_desc_ptr
->buf_start_hi
= rx_pkt_start
>> 16;
1489 buf_desc_ptr
->buf_start_lo
= rx_pkt_start
& 0x0000ffff;
1491 rx_pkt_start
+= iadev
->rx_buf_sz
;
1493 IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr
);)
1494 i
= FREE_BUF_DESC_Q
*iadev
->memSize
;
1495 writew(i
>> 16, iadev
->reass_reg
+REASS_QUEUE_BASE
);
1496 writew(i
, iadev
->reass_reg
+FREEQ_ST_ADR
);
1497 writew(i
+iadev
->num_rx_desc
*sizeof(u_short
),
1498 iadev
->reass_reg
+FREEQ_ED_ADR
);
1499 writew(i
, iadev
->reass_reg
+FREEQ_RD_PTR
);
1500 writew(i
+iadev
->num_rx_desc
*sizeof(u_short
),
1501 iadev
->reass_reg
+FREEQ_WR_PTR
);
1502 /* Fill the FREEQ with all the free descriptors. */
1503 freeq_st_adr
= readw(iadev
->reass_reg
+FREEQ_ST_ADR
);
1504 freeq_start
= (u_short
*)(iadev
->reass_ram
+freeq_st_adr
);
1505 for(i
=1; i
<=iadev
->num_rx_desc
; i
++)
1507 *freeq_start
= (u_short
)i
;
1510 IF_INIT(printk("freeq_start: 0x%p\n", freeq_start
);)
1511 /* Packet Complete Queue */
1512 i
= (PKT_COMP_Q
* iadev
->memSize
) & 0xffff;
1513 writew(i
, iadev
->reass_reg
+PCQ_ST_ADR
);
1514 writew(i
+iadev
->num_vc
*sizeof(u_short
), iadev
->reass_reg
+PCQ_ED_ADR
);
1515 writew(i
, iadev
->reass_reg
+PCQ_RD_PTR
);
1516 writew(i
, iadev
->reass_reg
+PCQ_WR_PTR
);
1518 /* Exception Queue */
1519 i
= (EXCEPTION_Q
* iadev
->memSize
) & 0xffff;
1520 writew(i
, iadev
->reass_reg
+EXCP_Q_ST_ADR
);
1521 writew(i
+ NUM_RX_EXCP
* sizeof(RX_ERROR_Q
),
1522 iadev
->reass_reg
+EXCP_Q_ED_ADR
);
1523 writew(i
, iadev
->reass_reg
+EXCP_Q_RD_PTR
);
1524 writew(i
, iadev
->reass_reg
+EXCP_Q_WR_PTR
);
1526 /* Load local copy of FREEQ and PCQ ptrs */
1527 iadev
->rfL
.fdq_st
= readw(iadev
->reass_reg
+FREEQ_ST_ADR
) & 0xffff;
1528 iadev
->rfL
.fdq_ed
= readw(iadev
->reass_reg
+FREEQ_ED_ADR
) & 0xffff ;
1529 iadev
->rfL
.fdq_rd
= readw(iadev
->reass_reg
+FREEQ_RD_PTR
) & 0xffff;
1530 iadev
->rfL
.fdq_wr
= readw(iadev
->reass_reg
+FREEQ_WR_PTR
) & 0xffff;
1531 iadev
->rfL
.pcq_st
= readw(iadev
->reass_reg
+PCQ_ST_ADR
) & 0xffff;
1532 iadev
->rfL
.pcq_ed
= readw(iadev
->reass_reg
+PCQ_ED_ADR
) & 0xffff;
1533 iadev
->rfL
.pcq_rd
= readw(iadev
->reass_reg
+PCQ_RD_PTR
) & 0xffff;
1534 iadev
->rfL
.pcq_wr
= readw(iadev
->reass_reg
+PCQ_WR_PTR
) & 0xffff;
1536 IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1537 iadev
->rfL
.pcq_st
, iadev
->rfL
.pcq_ed
, iadev
->rfL
.pcq_rd
,
1538 iadev
->rfL
.pcq_wr
);)
1539 /* just for check - no VP TBL */
1541 /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1542 /* initialize VP Table for invalid VPIs
1543 - I guess we can write all 1s or 0x000f in the entire memory
1544 space or something similar.
1547 /* This seems to work and looks right to me too !!! */
1548 i
= REASS_TABLE
* iadev
->memSize
;
1549 writew((i
>> 3), iadev
->reass_reg
+REASS_TABLE_BASE
);
1550 /* initialize Reassembly table to I don't know what ???? */
1551 reass_table
= (u16
*)(iadev
->reass_ram
+i
);
1552 j
= REASS_TABLE_SZ
* iadev
->memSize
;
1553 for(i
=0; i
< j
; i
++)
1554 *reass_table
++ = NO_AAL5_PKT
;
1557 while (i
!= iadev
->num_vc
) {
1561 i
= RX_VC_TABLE
* iadev
->memSize
;
1562 writew(((i
>>3) & 0xfff8) | vcsize_sel
, iadev
->reass_reg
+VC_LKUP_BASE
);
1563 vc_table
= (u16
*)(iadev
->reass_ram
+RX_VC_TABLE
*iadev
->memSize
);
1564 j
= RX_VC_TABLE_SZ
* iadev
->memSize
;
1565 for(i
= 0; i
< j
; i
++)
1567 /* shift the reassembly pointer by 3 + lower 3 bits of
1568 vc_lkup_base register (=3 for 1K VCs) and the last byte
1569 is those low 3 bits.
1570 Shall program this later.
1572 *vc_table
= (i
<< 6) | 15; /* for invalid VCI */
1576 i
= ABR_VC_TABLE
* iadev
->memSize
;
1577 writew(i
>> 3, iadev
->reass_reg
+ABR_LKUP_BASE
);
1579 i
= ABR_VC_TABLE
* iadev
->memSize
;
1580 abr_vc_table
= (struct abr_vc_table
*)(iadev
->reass_ram
+i
);
1581 j
= REASS_TABLE_SZ
* iadev
->memSize
;
1582 memset ((char*)abr_vc_table
, 0, j
* sizeof(*abr_vc_table
));
1583 for(i
= 0; i
< j
; i
++) {
1584 abr_vc_table
->rdf
= 0x0003;
1585 abr_vc_table
->air
= 0x5eb1;
1589 /* Initialize other registers */
1591 /* VP Filter Register set for VC Reassembly only */
1592 writew(0xff00, iadev
->reass_reg
+VP_FILTER
);
1593 writew(0, iadev
->reass_reg
+XTRA_RM_OFFSET
);
1594 writew(0x1, iadev
->reass_reg
+PROTOCOL_ID
);
1596 /* Packet Timeout Count related Registers :
1597 Set packet timeout to occur in about 3 seconds
1598 Set Packet Aging Interval count register to overflow in about 4 us
1600 writew(0xF6F8, iadev
->reass_reg
+PKT_TM_CNT
);
1602 i
= (j
>> 6) & 0xFF;
1604 i
|= ((j
<< 2) & 0xFF00);
1605 writew(i
, iadev
->reass_reg
+TMOUT_RANGE
);
1607 /* initiate the desc_tble */
1608 for(i
=0; i
<iadev
->num_tx_desc
;i
++)
1609 iadev
->desc_tbl
[i
].timestamp
= 0;
1611 /* to clear the interrupt status register - read it */
1612 readw(iadev
->reass_reg
+REASS_INTR_STATUS_REG
);
1614 /* Mask Register - clear it */
1615 writew(~(RX_FREEQ_EMPT
|RX_PKT_RCVD
), iadev
->reass_reg
+REASS_MASK_REG
);
1617 skb_queue_head_init(&iadev
->rx_dma_q
);
1618 iadev
->rx_free_desc_qhead
= NULL
;
1620 iadev
->rx_open
= kzalloc(4 * iadev
->num_vc
, GFP_KERNEL
);
1621 if (!iadev
->rx_open
) {
1622 printk(KERN_ERR DEV_LABEL
"itf %d couldn't get free page\n",
1628 iadev
->rx_pkt_cnt
= 0;
1630 writew(R_ONLINE
, iadev
->reass_reg
+MODE_REG
);
1634 pci_free_consistent(iadev
->pci
, DLE_TOTAL_SIZE
, iadev
->rx_dle_q
.start
,
/*
   The memory map suggested in appendix A and the coding for it.
   Keeping it around just in case we change our mind later.

   Buffer descr	0x0000 (128 - 4K)
   UBR sched	0x1000 (1K - 4K)
   UBR Wait q	0x2000 (1K - 4K)
   Commn queues	0x3000 Packet Ready, Trasmit comp(0x3100)
   extended VC	0x4000 (1K - 8K)
   ABR sched	0x6000 and ABR wait queue (1K - 2K) each
   CBR sched	0x7000 (as needed)
   VC table	0x8000 (1K - 32K)
*/
1656 static void tx_intr(struct atm_dev
*dev
)
1659 unsigned short status
;
1660 unsigned long flags
;
1662 iadev
= INPH_IA_DEV(dev
);
1664 status
= readl(iadev
->seg_reg
+SEG_INTR_STATUS_REG
);
1665 if (status
& TRANSMIT_DONE
){
1667 IF_EVENT(printk("Tansmit Done Intr logic run\n");)
1668 spin_lock_irqsave(&iadev
->tx_lock
, flags
);
1670 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
1671 writew(TRANSMIT_DONE
, iadev
->seg_reg
+SEG_INTR_STATUS_REG
);
1672 if (iadev
->close_pending
)
1673 wake_up(&iadev
->close_wait
);
1675 if (status
& TCQ_NOT_EMPTY
)
1677 IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1681 static void tx_dle_intr(struct atm_dev
*dev
)
1684 struct dle
*dle
, *cur_dle
;
1685 struct sk_buff
*skb
;
1686 struct atm_vcc
*vcc
;
1687 struct ia_vcc
*iavcc
;
1689 unsigned long flags
;
1691 iadev
= INPH_IA_DEV(dev
);
1692 spin_lock_irqsave(&iadev
->tx_lock
, flags
);
1693 dle
= iadev
->tx_dle_q
.read
;
1694 dle_lp
= readl(iadev
->dma
+IPHASE5575_TX_LIST_ADDR
) &
1695 (sizeof(struct dle
)*DLE_ENTRIES
- 1);
1696 cur_dle
= (struct dle
*)(iadev
->tx_dle_q
.start
+ (dle_lp
>> 4));
1697 while (dle
!= cur_dle
)
1699 /* free the DMAed skb */
1700 skb
= skb_dequeue(&iadev
->tx_dma_q
);
1703 /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1704 if (!((dle
- iadev
->tx_dle_q
.start
)%(2*sizeof(struct dle
)))) {
1705 pci_unmap_single(iadev
->pci
, dle
->sys_pkt_addr
, skb
->len
,
1708 vcc
= ATM_SKB(skb
)->vcc
;
1710 printk("tx_dle_intr: vcc is null\n");
1711 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
1712 dev_kfree_skb_any(skb
);
1716 iavcc
= INPH_IA_VCC(vcc
);
1718 printk("tx_dle_intr: iavcc is null\n");
1719 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
1720 dev_kfree_skb_any(skb
);
1723 if (vcc
->qos
.txtp
.pcr
>= iadev
->rate_limit
) {
1724 if ((vcc
->pop
) && (skb
->len
!= 0))
1729 dev_kfree_skb_any(skb
);
1732 else { /* Hold the rate-limited skb for flow control */
1733 IA_SKB_STATE(skb
) |= IA_DLED
;
1734 skb_queue_tail(&iavcc
->txing_skb
, skb
);
1736 IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb
);)
1737 if (++dle
== iadev
->tx_dle_q
.end
)
1738 dle
= iadev
->tx_dle_q
.start
;
1740 iadev
->tx_dle_q
.read
= dle
;
1741 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
1744 static int open_tx(struct atm_vcc
*vcc
)
1746 struct ia_vcc
*ia_vcc
;
1751 IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc
->vci
);)
1752 if (vcc
->qos
.txtp
.traffic_class
== ATM_NONE
) return 0;
1753 iadev
= INPH_IA_DEV(vcc
->dev
);
1755 if (iadev
->phy_type
& FE_25MBIT_PHY
) {
1756 if (vcc
->qos
.txtp
.traffic_class
== ATM_ABR
) {
1757 printk("IA: ABR not support\n");
1760 if (vcc
->qos
.txtp
.traffic_class
== ATM_CBR
) {
1761 printk("IA: CBR not support\n");
1765 ia_vcc
= INPH_IA_VCC(vcc
);
1766 memset((caddr_t
)ia_vcc
, 0, sizeof(*ia_vcc
));
1767 if (vcc
->qos
.txtp
.max_sdu
>
1768 (iadev
->tx_buf_sz
- sizeof(struct cpcs_trailer
))){
1769 printk("IA: SDU size over (%d) the configured SDU size %d\n",
1770 vcc
->qos
.txtp
.max_sdu
,iadev
->tx_buf_sz
);
1771 vcc
->dev_data
= NULL
;
1775 ia_vcc
->vc_desc_cnt
= 0;
1779 if (vcc
->qos
.txtp
.max_pcr
== ATM_MAX_PCR
)
1780 vcc
->qos
.txtp
.pcr
= iadev
->LineRate
;
1781 else if ((vcc
->qos
.txtp
.max_pcr
== 0)&&( vcc
->qos
.txtp
.pcr
<= 0))
1782 vcc
->qos
.txtp
.pcr
= iadev
->LineRate
;
1783 else if ((vcc
->qos
.txtp
.max_pcr
> vcc
->qos
.txtp
.pcr
) && (vcc
->qos
.txtp
.max_pcr
> 0))
1784 vcc
->qos
.txtp
.pcr
= vcc
->qos
.txtp
.max_pcr
;
1785 if (vcc
->qos
.txtp
.pcr
> iadev
->LineRate
)
1786 vcc
->qos
.txtp
.pcr
= iadev
->LineRate
;
1787 ia_vcc
->pcr
= vcc
->qos
.txtp
.pcr
;
1789 if (ia_vcc
->pcr
> (iadev
->LineRate
/ 6) ) ia_vcc
->ltimeout
= HZ
/ 10;
1790 else if (ia_vcc
->pcr
> (iadev
->LineRate
/ 130)) ia_vcc
->ltimeout
= HZ
;
1791 else if (ia_vcc
->pcr
<= 170) ia_vcc
->ltimeout
= 16 * HZ
;
1792 else ia_vcc
->ltimeout
= 2700 * HZ
/ ia_vcc
->pcr
;
1793 if (ia_vcc
->pcr
< iadev
->rate_limit
)
1794 skb_queue_head_init (&ia_vcc
->txing_skb
);
1795 if (ia_vcc
->pcr
< iadev
->rate_limit
) {
1796 struct sock
*sk
= sk_atm(vcc
);
1798 if (vcc
->qos
.txtp
.max_sdu
!= 0) {
1799 if (ia_vcc
->pcr
> 60000)
1800 sk
->sk_sndbuf
= vcc
->qos
.txtp
.max_sdu
* 5;
1801 else if (ia_vcc
->pcr
> 2000)
1802 sk
->sk_sndbuf
= vcc
->qos
.txtp
.max_sdu
* 4;
1804 sk
->sk_sndbuf
= vcc
->qos
.txtp
.max_sdu
* 3;
1807 sk
->sk_sndbuf
= 24576;
1810 vc
= (struct main_vc
*)iadev
->MAIN_VC_TABLE_ADDR
;
1811 evc
= (struct ext_vc
*)iadev
->EXT_VC_TABLE_ADDR
;
1814 memset((caddr_t
)vc
, 0, sizeof(*vc
));
1815 memset((caddr_t
)evc
, 0, sizeof(*evc
));
1817 /* store the most significant 4 bits of vci as the last 4 bits
1818 of first part of atm header.
1819 store the last 12 bits of vci as first 12 bits of the second
1820 part of the atm header.
1822 evc
->atm_hdr1
= (vcc
->vci
>> 12) & 0x000f;
1823 evc
->atm_hdr2
= (vcc
->vci
& 0x0fff) << 4;
1825 /* check the following for different traffic classes */
1826 if (vcc
->qos
.txtp
.traffic_class
== ATM_UBR
)
1829 vc
->status
= CRC_APPEND
;
1830 vc
->acr
= cellrate_to_float(iadev
->LineRate
);
1831 if (vcc
->qos
.txtp
.pcr
> 0)
1832 vc
->acr
= cellrate_to_float(vcc
->qos
.txtp
.pcr
);
1833 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1834 vcc
->qos
.txtp
.max_pcr
,vc
->acr
);)
1836 else if (vcc
->qos
.txtp
.traffic_class
== ATM_ABR
)
1837 { srv_cls_param_t srv_p
;
1838 IF_ABR(printk("Tx ABR VCC\n");)
1839 init_abr_vc(iadev
, &srv_p
);
1840 if (vcc
->qos
.txtp
.pcr
> 0)
1841 srv_p
.pcr
= vcc
->qos
.txtp
.pcr
;
1842 if (vcc
->qos
.txtp
.min_pcr
> 0) {
1843 int tmpsum
= iadev
->sum_mcr
+iadev
->sum_cbr
+vcc
->qos
.txtp
.min_pcr
;
1844 if (tmpsum
> iadev
->LineRate
)
1846 srv_p
.mcr
= vcc
->qos
.txtp
.min_pcr
;
1847 iadev
->sum_mcr
+= vcc
->qos
.txtp
.min_pcr
;
1850 if (vcc
->qos
.txtp
.icr
)
1851 srv_p
.icr
= vcc
->qos
.txtp
.icr
;
1852 if (vcc
->qos
.txtp
.tbe
)
1853 srv_p
.tbe
= vcc
->qos
.txtp
.tbe
;
1854 if (vcc
->qos
.txtp
.frtt
)
1855 srv_p
.frtt
= vcc
->qos
.txtp
.frtt
;
1856 if (vcc
->qos
.txtp
.rif
)
1857 srv_p
.rif
= vcc
->qos
.txtp
.rif
;
1858 if (vcc
->qos
.txtp
.rdf
)
1859 srv_p
.rdf
= vcc
->qos
.txtp
.rdf
;
1860 if (vcc
->qos
.txtp
.nrm_pres
)
1861 srv_p
.nrm
= vcc
->qos
.txtp
.nrm
;
1862 if (vcc
->qos
.txtp
.trm_pres
)
1863 srv_p
.trm
= vcc
->qos
.txtp
.trm
;
1864 if (vcc
->qos
.txtp
.adtf_pres
)
1865 srv_p
.adtf
= vcc
->qos
.txtp
.adtf
;
1866 if (vcc
->qos
.txtp
.cdf_pres
)
1867 srv_p
.cdf
= vcc
->qos
.txtp
.cdf
;
1868 if (srv_p
.icr
> srv_p
.pcr
)
1869 srv_p
.icr
= srv_p
.pcr
;
1870 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
1871 srv_p
.pcr
, srv_p
.mcr
);)
1872 ia_open_abr_vc(iadev
, &srv_p
, vcc
, 1);
1873 } else if (vcc
->qos
.txtp
.traffic_class
== ATM_CBR
) {
1874 if (iadev
->phy_type
& FE_25MBIT_PHY
) {
1875 printk("IA: CBR not support\n");
1878 if (vcc
->qos
.txtp
.max_pcr
> iadev
->LineRate
) {
1879 IF_CBR(printk("PCR is not available\n");)
1883 vc
->status
= CRC_APPEND
;
1884 if ((ret
= ia_cbr_setup (iadev
, vcc
)) < 0) {
1889 printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
1891 iadev
->testTable
[vcc
->vci
]->vc_status
|= VC_ACTIVE
;
1892 IF_EVENT(printk("ia open_tx returning \n");)
1897 static int tx_init(struct atm_dev
*dev
)
1900 struct tx_buf_desc
*buf_desc_ptr
;
1901 unsigned int tx_pkt_start
;
1913 iadev
= INPH_IA_DEV(dev
);
1914 spin_lock_init(&iadev
->tx_lock
);
1916 IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1917 readw(iadev
->seg_reg
+SEG_MASK_REG
));)
1919 /* Allocate 4k (boundary aligned) bytes */
1920 dle_addr
= pci_alloc_consistent(iadev
->pci
, DLE_TOTAL_SIZE
,
1921 &iadev
->tx_dle_dma
);
1923 printk(KERN_ERR DEV_LABEL
"can't allocate DLEs\n");
1926 iadev
->tx_dle_q
.start
= (struct dle
*)dle_addr
;
1927 iadev
->tx_dle_q
.read
= iadev
->tx_dle_q
.start
;
1928 iadev
->tx_dle_q
.write
= iadev
->tx_dle_q
.start
;
1929 iadev
->tx_dle_q
.end
= (struct dle
*)((unsigned long)dle_addr
+sizeof(struct dle
)*DLE_ENTRIES
);
1931 /* write the upper 20 bits of the start address to tx list address register */
1932 writel(iadev
->tx_dle_dma
& 0xfffff000,
1933 iadev
->dma
+ IPHASE5575_TX_LIST_ADDR
);
1934 writew(0xffff, iadev
->seg_reg
+SEG_MASK_REG
);
1935 writew(0, iadev
->seg_reg
+MODE_REG_0
);
1936 writew(RESET_SEG
, iadev
->seg_reg
+SEG_COMMAND_REG
);
1937 iadev
->MAIN_VC_TABLE_ADDR
= iadev
->seg_ram
+MAIN_VC_TABLE
*iadev
->memSize
;
1938 iadev
->EXT_VC_TABLE_ADDR
= iadev
->seg_ram
+EXT_VC_TABLE
*iadev
->memSize
;
1939 iadev
->ABR_SCHED_TABLE_ADDR
=iadev
->seg_ram
+ABR_SCHED_TABLE
*iadev
->memSize
;
1942 Transmit side control memory map
1943 --------------------------------
1944 Buffer descr 0x0000 (128 - 4K)
1945 Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
1948 CBR Table 0x1800 (as needed) - 6K
1949 UBR Table 0x3000 (1K - 4K) - 12K
1950 UBR Wait queue 0x4000 (1K - 4K) - 16K
1951 ABR sched 0x5000 and ABR wait queue (1K - 2K) each
1952 ABR Tbl - 20K, ABR Wq - 22K
1953 extended VC 0x6000 (1K - 8K) - 24K
1954 VC Table 0x8000 (1K - 32K) - 32K
1956 Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1957 and Wait q, which can be allotted later.
1960 /* Buffer Descriptor Table Base address */
1961 writew(TX_DESC_BASE
, iadev
->seg_reg
+SEG_DESC_BASE
);
1963 /* initialize each entry in the buffer descriptor table */
1964 buf_desc_ptr
=(struct tx_buf_desc
*)(iadev
->seg_ram
+TX_DESC_BASE
);
1965 memset((caddr_t
)buf_desc_ptr
, 0, sizeof(*buf_desc_ptr
));
1967 tx_pkt_start
= TX_PACKET_RAM
;
1968 for(i
=1; i
<=iadev
->num_tx_desc
; i
++)
1970 memset((caddr_t
)buf_desc_ptr
, 0, sizeof(*buf_desc_ptr
));
1971 buf_desc_ptr
->desc_mode
= AAL5
;
1972 buf_desc_ptr
->buf_start_hi
= tx_pkt_start
>> 16;
1973 buf_desc_ptr
->buf_start_lo
= tx_pkt_start
& 0x0000ffff;
1975 tx_pkt_start
+= iadev
->tx_buf_sz
;
1977 iadev
->tx_buf
= kmalloc(iadev
->num_tx_desc
*sizeof(struct cpcs_trailer_desc
), GFP_KERNEL
);
1978 if (!iadev
->tx_buf
) {
1979 printk(KERN_ERR DEV_LABEL
" couldn't get mem\n");
1982 for (i
= 0; i
< iadev
->num_tx_desc
; i
++)
1984 struct cpcs_trailer
*cpcs
;
1986 cpcs
= kmalloc(sizeof(*cpcs
), GFP_KERNEL
|GFP_DMA
);
1988 printk(KERN_ERR DEV_LABEL
" couldn't get freepage\n");
1989 goto err_free_tx_bufs
;
1991 iadev
->tx_buf
[i
].cpcs
= cpcs
;
1992 iadev
->tx_buf
[i
].dma_addr
= pci_map_single(iadev
->pci
,
1993 cpcs
, sizeof(*cpcs
), PCI_DMA_TODEVICE
);
1995 iadev
->desc_tbl
= kmalloc(iadev
->num_tx_desc
*
1996 sizeof(struct desc_tbl_t
), GFP_KERNEL
);
1997 if (!iadev
->desc_tbl
) {
1998 printk(KERN_ERR DEV_LABEL
" couldn't get mem\n");
1999 goto err_free_all_tx_bufs
;
2002 /* Communication Queues base address */
2003 i
= TX_COMP_Q
* iadev
->memSize
;
2004 writew(i
>> 16, iadev
->seg_reg
+SEG_QUEUE_BASE
);
2006 /* Transmit Complete Queue */
2007 writew(i
, iadev
->seg_reg
+TCQ_ST_ADR
);
2008 writew(i
, iadev
->seg_reg
+TCQ_RD_PTR
);
2009 writew(i
+iadev
->num_tx_desc
*sizeof(u_short
),iadev
->seg_reg
+TCQ_WR_PTR
);
2010 iadev
->host_tcq_wr
= i
+ iadev
->num_tx_desc
*sizeof(u_short
);
2011 writew(i
+2 * iadev
->num_tx_desc
* sizeof(u_short
),
2012 iadev
->seg_reg
+TCQ_ED_ADR
);
2013 /* Fill the TCQ with all the free descriptors. */
2014 tcq_st_adr
= readw(iadev
->seg_reg
+TCQ_ST_ADR
);
2015 tcq_start
= (u_short
*)(iadev
->seg_ram
+tcq_st_adr
);
2016 for(i
=1; i
<=iadev
->num_tx_desc
; i
++)
2018 *tcq_start
= (u_short
)i
;
2022 /* Packet Ready Queue */
2023 i
= PKT_RDY_Q
* iadev
->memSize
;
2024 writew(i
, iadev
->seg_reg
+PRQ_ST_ADR
);
2025 writew(i
+2 * iadev
->num_tx_desc
* sizeof(u_short
),
2026 iadev
->seg_reg
+PRQ_ED_ADR
);
2027 writew(i
, iadev
->seg_reg
+PRQ_RD_PTR
);
2028 writew(i
, iadev
->seg_reg
+PRQ_WR_PTR
);
2030 /* Load local copy of PRQ and TCQ ptrs */
2031 iadev
->ffL
.prq_st
= readw(iadev
->seg_reg
+PRQ_ST_ADR
) & 0xffff;
2032 iadev
->ffL
.prq_ed
= readw(iadev
->seg_reg
+PRQ_ED_ADR
) & 0xffff;
2033 iadev
->ffL
.prq_wr
= readw(iadev
->seg_reg
+PRQ_WR_PTR
) & 0xffff;
2035 iadev
->ffL
.tcq_st
= readw(iadev
->seg_reg
+TCQ_ST_ADR
) & 0xffff;
2036 iadev
->ffL
.tcq_ed
= readw(iadev
->seg_reg
+TCQ_ED_ADR
) & 0xffff;
2037 iadev
->ffL
.tcq_rd
= readw(iadev
->seg_reg
+TCQ_RD_PTR
) & 0xffff;
2039 /* Just for safety initializing the queue to have desc 1 always */
2040 /* Fill the PRQ with all the free descriptors. */
2041 prq_st_adr
= readw(iadev
->seg_reg
+PRQ_ST_ADR
);
2042 prq_start
= (u_short
*)(iadev
->seg_ram
+prq_st_adr
);
2043 for(i
=1; i
<=iadev
->num_tx_desc
; i
++)
2045 *prq_start
= (u_short
)0; /* desc 1 in all entries */
2049 IF_INIT(printk("Start CBR Init\n");)
2050 #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
2051 writew(0,iadev
->seg_reg
+CBR_PTR_BASE
);
2052 #else /* Charlie's logic is wrong ? */
2053 tmp16
= (iadev
->seg_ram
+CBR_SCHED_TABLE
*iadev
->memSize
)>>17;
2054 IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16
);)
2055 writew(tmp16
,iadev
->seg_reg
+CBR_PTR_BASE
);
2058 IF_INIT(printk("value in register = 0x%x\n",
2059 readw(iadev
->seg_reg
+CBR_PTR_BASE
));)
2060 tmp16
= (CBR_SCHED_TABLE
*iadev
->memSize
) >> 1;
2061 writew(tmp16
, iadev
->seg_reg
+CBR_TAB_BEG
);
2062 IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16
,
2063 readw(iadev
->seg_reg
+CBR_TAB_BEG
));)
2064 writew(tmp16
, iadev
->seg_reg
+CBR_TAB_END
+1); // CBR_PTR;
2065 tmp16
= (CBR_SCHED_TABLE
*iadev
->memSize
+ iadev
->num_vc
*6 - 2) >> 1;
2066 writew(tmp16
, iadev
->seg_reg
+CBR_TAB_END
);
2067 IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2068 iadev
->seg_reg
, readw(iadev
->seg_reg
+CBR_PTR_BASE
));)
2069 IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2070 readw(iadev
->seg_reg
+CBR_TAB_BEG
), readw(iadev
->seg_reg
+CBR_TAB_END
),
2071 readw(iadev
->seg_reg
+CBR_TAB_END
+1));)
2073 /* Initialize the CBR Schedualing Table */
2074 memset_io(iadev
->seg_ram
+CBR_SCHED_TABLE
*iadev
->memSize
,
2075 0, iadev
->num_vc
*6);
2076 iadev
->CbrRemEntries
= iadev
->CbrTotEntries
= iadev
->num_vc
*3;
2077 iadev
->CbrEntryPt
= 0;
2078 iadev
->Granularity
= MAX_ATM_155
/ iadev
->CbrTotEntries
;
2079 iadev
->NumEnabledCBR
= 0;
2081 /* UBR scheduling Table and wait queue */
2082 /* initialize all bytes of UBR scheduler table and wait queue to 0
2083 - SCHEDSZ is 1K (# of entries).
2084 - UBR Table size is 4K
2085 - UBR wait queue is 4K
2086 since the table and wait queues are contiguous, all the bytes
2087 can be initialized by one memeset.
2092 while (i
!= iadev
->num_vc
) {
2097 i
= MAIN_VC_TABLE
* iadev
->memSize
;
2098 writew(vcsize_sel
| ((i
>> 8) & 0xfff8),iadev
->seg_reg
+VCT_BASE
);
2099 i
= EXT_VC_TABLE
* iadev
->memSize
;
2100 writew((i
>> 8) & 0xfffe, iadev
->seg_reg
+VCTE_BASE
);
2101 i
= UBR_SCHED_TABLE
* iadev
->memSize
;
2102 writew((i
& 0xffff) >> 11, iadev
->seg_reg
+UBR_SBPTR_BASE
);
2103 i
= UBR_WAIT_Q
* iadev
->memSize
;
2104 writew((i
>> 7) & 0xffff, iadev
->seg_reg
+UBRWQ_BASE
);
2105 memset((caddr_t
)(iadev
->seg_ram
+UBR_SCHED_TABLE
*iadev
->memSize
),
2106 0, iadev
->num_vc
*8);
2107 /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2108 /* initialize all bytes of ABR scheduler table and wait queue to 0
2109 - SCHEDSZ is 1K (# of entries).
2110 - ABR Table size is 2K
2111 - ABR wait queue is 2K
2112 since the table and wait queues are contiguous, all the bytes
2113 can be initialized by one memeset.
2115 i
= ABR_SCHED_TABLE
* iadev
->memSize
;
2116 writew((i
>> 11) & 0xffff, iadev
->seg_reg
+ABR_SBPTR_BASE
);
2117 i
= ABR_WAIT_Q
* iadev
->memSize
;
2118 writew((i
>> 7) & 0xffff, iadev
->seg_reg
+ABRWQ_BASE
);
2120 i
= ABR_SCHED_TABLE
*iadev
->memSize
;
2121 memset((caddr_t
)(iadev
->seg_ram
+i
), 0, iadev
->num_vc
*4);
2122 vc
= (struct main_vc
*)iadev
->MAIN_VC_TABLE_ADDR
;
2123 evc
= (struct ext_vc
*)iadev
->EXT_VC_TABLE_ADDR
;
2124 iadev
->testTable
= kmalloc(sizeof(long)*iadev
->num_vc
, GFP_KERNEL
);
2125 if (!iadev
->testTable
) {
2126 printk("Get freepage failed\n");
2127 goto err_free_desc_tbl
;
2129 for(i
=0; i
<iadev
->num_vc
; i
++)
2131 memset((caddr_t
)vc
, 0, sizeof(*vc
));
2132 memset((caddr_t
)evc
, 0, sizeof(*evc
));
2133 iadev
->testTable
[i
] = kmalloc(sizeof(struct testTable_t
),
2135 if (!iadev
->testTable
[i
])
2136 goto err_free_test_tables
;
2137 iadev
->testTable
[i
]->lastTime
= 0;
2138 iadev
->testTable
[i
]->fract
= 0;
2139 iadev
->testTable
[i
]->vc_status
= VC_UBR
;
2144 /* Other Initialization */
2146 /* Max Rate Register */
2147 if (iadev
->phy_type
& FE_25MBIT_PHY
) {
2148 writew(RATE25
, iadev
->seg_reg
+MAXRATE
);
2149 writew((UBR_EN
| (0x23 << 2)), iadev
->seg_reg
+STPARMS
);
2152 writew(cellrate_to_float(iadev
->LineRate
),iadev
->seg_reg
+MAXRATE
);
2153 writew((UBR_EN
| ABR_EN
| (0x23 << 2)), iadev
->seg_reg
+STPARMS
);
2155 /* Set Idle Header Reigisters to be sure */
2156 writew(0, iadev
->seg_reg
+IDLEHEADHI
);
2157 writew(0, iadev
->seg_reg
+IDLEHEADLO
);
2159 /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
2160 writew(0xaa00, iadev
->seg_reg
+ABRUBR_ARB
);
2162 iadev
->close_pending
= 0;
2163 init_waitqueue_head(&iadev
->close_wait
);
2164 init_waitqueue_head(&iadev
->timeout_wait
);
2165 skb_queue_head_init(&iadev
->tx_dma_q
);
2166 ia_init_rtn_q(&iadev
->tx_return_q
);
2168 /* RM Cell Protocol ID and Message Type */
2169 writew(RM_TYPE_4_0
, iadev
->seg_reg
+RM_TYPE
);
2170 skb_queue_head_init (&iadev
->tx_backlog
);
2172 /* Mode Register 1 */
2173 writew(MODE_REG_1_VAL
, iadev
->seg_reg
+MODE_REG_1
);
2175 /* Mode Register 0 */
2176 writew(T_ONLINE
, iadev
->seg_reg
+MODE_REG_0
);
2178 /* Interrupt Status Register - read to clear */
2179 readw(iadev
->seg_reg
+SEG_INTR_STATUS_REG
);
2181 /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2182 writew(~(TRANSMIT_DONE
| TCQ_NOT_EMPTY
), iadev
->seg_reg
+SEG_MASK_REG
);
2183 writew(TRANSMIT_DONE
, iadev
->seg_reg
+SEG_INTR_STATUS_REG
);
2184 iadev
->tx_pkt_cnt
= 0;
2185 iadev
->rate_limit
= iadev
->LineRate
/ 3;
2189 err_free_test_tables
:
2191 kfree(iadev
->testTable
[i
]);
2192 kfree(iadev
->testTable
);
2194 kfree(iadev
->desc_tbl
);
2195 err_free_all_tx_bufs
:
2196 i
= iadev
->num_tx_desc
;
2199 struct cpcs_trailer_desc
*desc
= iadev
->tx_buf
+ i
;
2201 pci_unmap_single(iadev
->pci
, desc
->dma_addr
,
2202 sizeof(*desc
->cpcs
), PCI_DMA_TODEVICE
);
2205 kfree(iadev
->tx_buf
);
2207 pci_free_consistent(iadev
->pci
, DLE_TOTAL_SIZE
, iadev
->tx_dle_q
.start
,
2213 static irqreturn_t
ia_int(int irq
, void *dev_id
)
2215 struct atm_dev
*dev
;
2217 unsigned int status
;
2221 iadev
= INPH_IA_DEV(dev
);
2222 while( (status
= readl(iadev
->reg
+IPHASE5575_BUS_STATUS_REG
) & 0x7f))
2225 IF_EVENT(printk("ia_int: status = 0x%x\n", status
);)
2226 if (status
& STAT_REASSINT
)
2229 IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status
);)
2232 if (status
& STAT_DLERINT
)
2234 /* Clear this bit by writing a 1 to it. */
2235 writel(STAT_DLERINT
, iadev
->reg
+ IPHASE5575_BUS_STATUS_REG
);
2238 if (status
& STAT_SEGINT
)
2241 IF_EVENT(printk("IA: tx_intr \n");)
2244 if (status
& STAT_DLETINT
)
2246 writel(STAT_DLETINT
, iadev
->reg
+ IPHASE5575_BUS_STATUS_REG
);
2249 if (status
& (STAT_FEINT
| STAT_ERRINT
| STAT_MARKINT
))
2251 if (status
& STAT_FEINT
)
2252 ia_frontend_intr(iadev
);
2255 return IRQ_RETVAL(handled
);
2260 /*----------------------------- entries --------------------------------*/
2261 static int get_esi(struct atm_dev
*dev
)
2268 iadev
= INPH_IA_DEV(dev
);
2269 mac1
= cpu_to_be32(le32_to_cpu(readl(
2270 iadev
->reg
+IPHASE5575_MAC1
)));
2271 mac2
= cpu_to_be16(le16_to_cpu(readl(iadev
->reg
+IPHASE5575_MAC2
)));
2272 IF_INIT(printk("ESI: 0x%08x%04x\n", mac1
, mac2
);)
2273 for (i
=0; i
<MAC1_LEN
; i
++)
2274 dev
->esi
[i
] = mac1
>>(8*(MAC1_LEN
-1-i
));
2276 for (i
=0; i
<MAC2_LEN
; i
++)
2277 dev
->esi
[i
+MAC1_LEN
] = mac2
>>(8*(MAC2_LEN
- 1 -i
));
2281 static int reset_sar(struct atm_dev
*dev
)
2285 unsigned int pci
[64];
2287 iadev
= INPH_IA_DEV(dev
);
2289 if ((error
= pci_read_config_dword(iadev
->pci
,
2290 i
*4, &pci
[i
])) != PCIBIOS_SUCCESSFUL
)
2292 writel(0, iadev
->reg
+IPHASE5575_EXT_RESET
);
2294 if ((error
= pci_write_config_dword(iadev
->pci
,
2295 i
*4, pci
[i
])) != PCIBIOS_SUCCESSFUL
)
2302 static int __devinit
ia_init(struct atm_dev
*dev
)
2305 unsigned long real_base
;
2307 unsigned short command
;
2310 /* The device has been identified and registered. Now we read
2311 necessary configuration info like memory base address,
2312 interrupt number etc */
2314 IF_INIT(printk(">ia_init\n");)
2315 dev
->ci_range
.vpi_bits
= 0;
2316 dev
->ci_range
.vci_bits
= NR_VCI_LD
;
2318 iadev
= INPH_IA_DEV(dev
);
2319 real_base
= pci_resource_start (iadev
->pci
, 0);
2320 iadev
->irq
= iadev
->pci
->irq
;
2322 error
= pci_read_config_word(iadev
->pci
, PCI_COMMAND
, &command
);
2324 printk(KERN_ERR DEV_LABEL
"(itf %d): init error 0x%x\n",
2328 IF_INIT(printk(DEV_LABEL
"(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2329 dev
->number
, iadev
->pci
->revision
, real_base
, iadev
->irq
);)
2331 /* find mapping size of board */
2333 iadev
->pci_map_size
= pci_resource_len(iadev
->pci
, 0);
2335 if (iadev
->pci_map_size
== 0x100000){
2336 iadev
->num_vc
= 4096;
2337 dev
->ci_range
.vci_bits
= NR_VCI_4K_LD
;
2340 else if (iadev
->pci_map_size
== 0x40000) {
2341 iadev
->num_vc
= 1024;
2345 printk("Unknown pci_map_size = 0x%x\n", iadev
->pci_map_size
);
2348 IF_INIT(printk (DEV_LABEL
"map size: %i\n", iadev
->pci_map_size
);)
2350 /* enable bus mastering */
2351 pci_set_master(iadev
->pci
);
2354 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2358 /* mapping the physical address to a virtual address in address space */
2359 base
= ioremap(real_base
,iadev
->pci_map_size
); /* ioremap is not resolved ??? */
2363 printk(DEV_LABEL
" (itf %d): can't set up page mapping\n",
2367 IF_INIT(printk(DEV_LABEL
" (itf %d): rev.%d,base=%p,irq=%d\n",
2368 dev
->number
, iadev
->pci
->revision
, base
, iadev
->irq
);)
2370 /* filling the iphase dev structure */
2371 iadev
->mem
= iadev
->pci_map_size
/2;
2372 iadev
->real_base
= real_base
;
2375 /* Bus Interface Control Registers */
2376 iadev
->reg
= base
+ REG_BASE
;
2377 /* Segmentation Control Registers */
2378 iadev
->seg_reg
= base
+ SEG_BASE
;
2379 /* Reassembly Control Registers */
2380 iadev
->reass_reg
= base
+ REASS_BASE
;
2381 /* Front end/ DMA control registers */
2382 iadev
->phy
= base
+ PHY_BASE
;
2383 iadev
->dma
= base
+ PHY_BASE
;
2384 /* RAM - Segmentation RAM and Reassembly RAM */
2385 iadev
->ram
= base
+ ACTUAL_RAM_BASE
;
2386 iadev
->seg_ram
= base
+ ACTUAL_SEG_RAM_BASE
;
2387 iadev
->reass_ram
= base
+ ACTUAL_REASS_RAM_BASE
;
2389 /* lets print out the above */
2390 IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
2391 iadev
->reg
,iadev
->seg_reg
,iadev
->reass_reg
,
2392 iadev
->phy
, iadev
->ram
, iadev
->seg_ram
,
2395 /* lets try reading the MAC address */
2396 error
= get_esi(dev
);
2398 iounmap(iadev
->base
);
2402 for (i
=0; i
< ESI_LEN
; i
++)
2403 printk("%s%02X",i
? "-" : "",dev
->esi
[i
]);
2407 if (reset_sar(dev
)) {
2408 iounmap(iadev
->base
);
2409 printk("IA: reset SAR fail, please try again\n");
2415 static void ia_update_stats(IADEV
*iadev
) {
2416 if (!iadev
->carrier_detect
)
2418 iadev
->rx_cell_cnt
+= readw(iadev
->reass_reg
+CELL_CTR0
)&0xffff;
2419 iadev
->rx_cell_cnt
+= (readw(iadev
->reass_reg
+CELL_CTR1
) & 0xffff) << 16;
2420 iadev
->drop_rxpkt
+= readw(iadev
->reass_reg
+ DRP_PKT_CNTR
) & 0xffff;
2421 iadev
->drop_rxcell
+= readw(iadev
->reass_reg
+ ERR_CNTR
) & 0xffff;
2422 iadev
->tx_cell_cnt
+= readw(iadev
->seg_reg
+ CELL_CTR_LO_AUTO
)&0xffff;
2423 iadev
->tx_cell_cnt
+= (readw(iadev
->seg_reg
+CELL_CTR_HIGH_AUTO
)&0xffff)<<16;
2427 static void ia_led_timer(unsigned long arg
) {
2428 unsigned long flags
;
2429 static u_char blinking
[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2431 static u32 ctrl_reg
;
2432 for (i
= 0; i
< iadev_count
; i
++) {
2434 ctrl_reg
= readl(ia_dev
[i
]->reg
+IPHASE5575_BUS_CONTROL_REG
);
2435 if (blinking
[i
] == 0) {
2437 ctrl_reg
&= (~CTRL_LED
);
2438 writel(ctrl_reg
, ia_dev
[i
]->reg
+IPHASE5575_BUS_CONTROL_REG
);
2439 ia_update_stats(ia_dev
[i
]);
2443 ctrl_reg
|= CTRL_LED
;
2444 writel(ctrl_reg
, ia_dev
[i
]->reg
+IPHASE5575_BUS_CONTROL_REG
);
2445 spin_lock_irqsave(&ia_dev
[i
]->tx_lock
, flags
);
2446 if (ia_dev
[i
]->close_pending
)
2447 wake_up(&ia_dev
[i
]->close_wait
);
2448 ia_tx_poll(ia_dev
[i
]);
2449 spin_unlock_irqrestore(&ia_dev
[i
]->tx_lock
, flags
);
2453 mod_timer(&ia_timer
, jiffies
+ HZ
/ 4);
2457 static void ia_phy_put(struct atm_dev
*dev
, unsigned char value
,
2460 writel(value
, INPH_IA_DEV(dev
)->phy
+addr
);
2463 static unsigned char ia_phy_get(struct atm_dev
*dev
, unsigned long addr
)
2465 return readl(INPH_IA_DEV(dev
)->phy
+addr
);
2468 static void ia_free_tx(IADEV
*iadev
)
2472 kfree(iadev
->desc_tbl
);
2473 for (i
= 0; i
< iadev
->num_vc
; i
++)
2474 kfree(iadev
->testTable
[i
]);
2475 kfree(iadev
->testTable
);
2476 for (i
= 0; i
< iadev
->num_tx_desc
; i
++) {
2477 struct cpcs_trailer_desc
*desc
= iadev
->tx_buf
+ i
;
2479 pci_unmap_single(iadev
->pci
, desc
->dma_addr
,
2480 sizeof(*desc
->cpcs
), PCI_DMA_TODEVICE
);
2483 kfree(iadev
->tx_buf
);
2484 pci_free_consistent(iadev
->pci
, DLE_TOTAL_SIZE
, iadev
->tx_dle_q
.start
,
2488 static void ia_free_rx(IADEV
*iadev
)
2490 kfree(iadev
->rx_open
);
2491 pci_free_consistent(iadev
->pci
, DLE_TOTAL_SIZE
, iadev
->rx_dle_q
.start
,
2495 static int __devinit
ia_start(struct atm_dev
*dev
)
2501 IF_EVENT(printk(">ia_start\n");)
2502 iadev
= INPH_IA_DEV(dev
);
2503 if (request_irq(iadev
->irq
, &ia_int
, IRQF_SHARED
, DEV_LABEL
, dev
)) {
2504 printk(KERN_ERR DEV_LABEL
"(itf %d): IRQ%d is already in use\n",
2505 dev
->number
, iadev
->irq
);
2509 /* @@@ should release IRQ on error */
2510 /* enabling memory + master */
2511 if ((error
= pci_write_config_word(iadev
->pci
,
2513 PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER
)))
2515 printk(KERN_ERR DEV_LABEL
"(itf %d): can't enable memory+"
2516 "master (0x%x)\n",dev
->number
, error
);
2522 /* Maybe we should reset the front end, initialize Bus Interface Control
2523 Registers and see. */
2525 IF_INIT(printk("Bus ctrl reg: %08x\n",
2526 readl(iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
));)
2527 ctrl_reg
= readl(iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
);
2528 ctrl_reg
= (ctrl_reg
& (CTRL_LED
| CTRL_FE_RST
))
2536 | CTRL_DLETMASK
/* should be removed later */
2543 writel(ctrl_reg
, iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
);
2545 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2546 readl(iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
));
2547 printk("Bus status reg after init: %08x\n",
2548 readl(iadev
->reg
+IPHASE5575_BUS_STATUS_REG
));)
2551 error
= tx_init(dev
);
2554 error
= rx_init(dev
);
2558 ctrl_reg
= readl(iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
);
2559 writel(ctrl_reg
| CTRL_FE_RST
, iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
);
2560 IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2561 readl(iadev
->reg
+IPHASE5575_BUS_CONTROL_REG
));)
2562 phy
= 0; /* resolve compiler complaint */
2564 if ((phy
=ia_phy_get(dev
,0)) == 0x30)
2565 printk("IA: pm5346,rev.%d\n",phy
&0x0f);
2567 printk("IA: utopia,rev.%0x\n",phy
);)
2569 if (iadev
->phy_type
& FE_25MBIT_PHY
)
2570 ia_mb25_init(iadev
);
2571 else if (iadev
->phy_type
& (FE_DS3_PHY
| FE_E3_PHY
))
2572 ia_suni_pm7345_init(iadev
);
2574 error
= suni_init(dev
);
2577 if (dev
->phy
->start
) {
2578 error
= dev
->phy
->start(dev
);
2582 /* Get iadev->carrier_detect status */
2583 ia_frontend_intr(iadev
);
2592 free_irq(iadev
->irq
, dev
);
2597 static void ia_close(struct atm_vcc
*vcc
)
2602 struct ia_vcc
*ia_vcc
;
2603 struct sk_buff
*skb
= NULL
;
2604 struct sk_buff_head tmp_tx_backlog
, tmp_vcc_backlog
;
2605 unsigned long closetime
, flags
;
2607 iadev
= INPH_IA_DEV(vcc
->dev
);
2608 ia_vcc
= INPH_IA_VCC(vcc
);
2609 if (!ia_vcc
) return;
2611 IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
2612 ia_vcc
->vc_desc_cnt
,vcc
->vci
);)
2613 clear_bit(ATM_VF_READY
,&vcc
->flags
);
2614 skb_queue_head_init (&tmp_tx_backlog
);
2615 skb_queue_head_init (&tmp_vcc_backlog
);
2616 if (vcc
->qos
.txtp
.traffic_class
!= ATM_NONE
) {
2617 iadev
->close_pending
++;
2618 prepare_to_wait(&iadev
->timeout_wait
, &wait
, TASK_UNINTERRUPTIBLE
);
2619 schedule_timeout(50);
2620 finish_wait(&iadev
->timeout_wait
, &wait
);
2621 spin_lock_irqsave(&iadev
->tx_lock
, flags
);
2622 while((skb
= skb_dequeue(&iadev
->tx_backlog
))) {
2623 if (ATM_SKB(skb
)->vcc
== vcc
){
2624 if (vcc
->pop
) vcc
->pop(vcc
, skb
);
2625 else dev_kfree_skb_any(skb
);
2628 skb_queue_tail(&tmp_tx_backlog
, skb
);
2630 while((skb
= skb_dequeue(&tmp_tx_backlog
)))
2631 skb_queue_tail(&iadev
->tx_backlog
, skb
);
2632 IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc
->vc_desc_cnt
);)
2633 closetime
= 300000 / ia_vcc
->pcr
;
2636 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
2637 wait_event_timeout(iadev
->close_wait
, (ia_vcc
->vc_desc_cnt
<= 0), closetime
);
2638 spin_lock_irqsave(&iadev
->tx_lock
, flags
);
2639 iadev
->close_pending
--;
2640 iadev
->testTable
[vcc
->vci
]->lastTime
= 0;
2641 iadev
->testTable
[vcc
->vci
]->fract
= 0;
2642 iadev
->testTable
[vcc
->vci
]->vc_status
= VC_UBR
;
2643 if (vcc
->qos
.txtp
.traffic_class
== ATM_ABR
) {
2644 if (vcc
->qos
.txtp
.min_pcr
> 0)
2645 iadev
->sum_mcr
-= vcc
->qos
.txtp
.min_pcr
;
2647 if (vcc
->qos
.txtp
.traffic_class
== ATM_CBR
) {
2648 ia_vcc
= INPH_IA_VCC(vcc
);
2649 iadev
->sum_mcr
-= ia_vcc
->NumCbrEntry
*iadev
->Granularity
;
2650 ia_cbrVc_close (vcc
);
2652 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
2655 if (vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
2656 // reset reass table
2657 vc_table
= (u16
*)(iadev
->reass_ram
+REASS_TABLE
*iadev
->memSize
);
2658 vc_table
+= vcc
->vci
;
2659 *vc_table
= NO_AAL5_PKT
;
2661 vc_table
= (u16
*)(iadev
->reass_ram
+RX_VC_TABLE
*iadev
->memSize
);
2662 vc_table
+= vcc
->vci
;
2663 *vc_table
= (vcc
->vci
<< 6) | 15;
2664 if (vcc
->qos
.rxtp
.traffic_class
== ATM_ABR
) {
2665 struct abr_vc_table __iomem
*abr_vc_table
=
2666 (iadev
->reass_ram
+ABR_VC_TABLE
*iadev
->memSize
);
2667 abr_vc_table
+= vcc
->vci
;
2668 abr_vc_table
->rdf
= 0x0003;
2669 abr_vc_table
->air
= 0x5eb1;
2671 // Drain the packets
2672 rx_dle_intr(vcc
->dev
);
2673 iadev
->rx_open
[vcc
->vci
] = NULL
;
2675 kfree(INPH_IA_VCC(vcc
));
2677 vcc
->dev_data
= NULL
;
2678 clear_bit(ATM_VF_ADDR
,&vcc
->flags
);
2682 static int ia_open(struct atm_vcc
*vcc
)
2684 struct ia_vcc
*ia_vcc
;
2686 if (!test_bit(ATM_VF_PARTIAL
,&vcc
->flags
))
2688 IF_EVENT(printk("ia: not partially allocated resources\n");)
2689 vcc
->dev_data
= NULL
;
2691 if (vcc
->vci
!= ATM_VPI_UNSPEC
&& vcc
->vpi
!= ATM_VCI_UNSPEC
)
2693 IF_EVENT(printk("iphase open: unspec part\n");)
2694 set_bit(ATM_VF_ADDR
,&vcc
->flags
);
2696 if (vcc
->qos
.aal
!= ATM_AAL5
)
2698 IF_EVENT(printk(DEV_LABEL
"(itf %d): open %d.%d\n",
2699 vcc
->dev
->number
, vcc
->vpi
, vcc
->vci
);)
2701 /* Device dependent initialization */
2702 ia_vcc
= kmalloc(sizeof(*ia_vcc
), GFP_KERNEL
);
2703 if (!ia_vcc
) return -ENOMEM
;
2704 vcc
->dev_data
= ia_vcc
;
2706 if ((error
= open_rx(vcc
)))
2708 IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2713 if ((error
= open_tx(vcc
)))
2715 IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2720 set_bit(ATM_VF_READY
,&vcc
->flags
);
2724 static u8 first
= 1;
2726 ia_timer
.expires
= jiffies
+ 3*HZ
;
2727 add_timer(&ia_timer
);
2732 IF_EVENT(printk("ia open returning\n");)
2736 static int ia_change_qos(struct atm_vcc
*vcc
, struct atm_qos
*qos
, int flags
)
2738 IF_EVENT(printk(">ia_change_qos\n");)
2742 static int ia_ioctl(struct atm_dev
*dev
, unsigned int cmd
, void __user
*arg
)
2748 IF_EVENT(printk(">ia_ioctl\n");)
2749 if (cmd
!= IA_CMD
) {
2750 if (!dev
->phy
->ioctl
) return -EINVAL
;
2751 return dev
->phy
->ioctl(dev
,cmd
,arg
);
2753 if (copy_from_user(&ia_cmds
, arg
, sizeof ia_cmds
)) return -EFAULT
;
2754 board
= ia_cmds
.status
;
2755 if ((board
< 0) || (board
> iadev_count
))
2757 iadev
= ia_dev
[board
];
2758 switch (ia_cmds
.cmd
) {
2761 switch (ia_cmds
.sub_cmd
) {
2763 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2764 if (copy_to_user(ia_cmds
.buf
, iadev
, sizeof(IADEV
)))
2768 case MEMDUMP_SEGREG
:
2769 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2770 tmps
= (u16 __user
*)ia_cmds
.buf
;
2771 for(i
=0; i
<0x80; i
+=2, tmps
++)
2772 if(put_user((u16
)(readl(iadev
->seg_reg
+i
) & 0xffff), tmps
)) return -EFAULT
;
2776 case MEMDUMP_REASSREG
:
2777 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2778 tmps
= (u16 __user
*)ia_cmds
.buf
;
2779 for(i
=0; i
<0x80; i
+=2, tmps
++)
2780 if(put_user((u16
)(readl(iadev
->reass_reg
+i
) & 0xffff), tmps
)) return -EFAULT
;
2786 ia_regs_t
*regs_local
;
2790 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2791 regs_local
= kmalloc(sizeof(*regs_local
), GFP_KERNEL
);
2792 if (!regs_local
) return -ENOMEM
;
2793 ffL
= ®s_local
->ffredn
;
2794 rfL
= ®s_local
->rfredn
;
2795 /* Copy real rfred registers into the local copy */
2796 for (i
=0; i
<(sizeof (rfredn_t
))/4; i
++)
2797 ((u_int
*)rfL
)[i
] = readl(iadev
->reass_reg
+ i
) & 0xffff;
2798 /* Copy real ffred registers into the local copy */
2799 for (i
=0; i
<(sizeof (ffredn_t
))/4; i
++)
2800 ((u_int
*)ffL
)[i
] = readl(iadev
->seg_reg
+ i
) & 0xffff;
2802 if (copy_to_user(ia_cmds
.buf
, regs_local
,sizeof(ia_regs_t
))) {
2807 printk("Board %d registers dumped\n", board
);
2813 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2821 printk("skb = 0x%lx\n", (long)skb_peek(&iadev
->tx_backlog
));
2822 printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev
->tx_return_q
));
2827 struct k_sonet_stats
*stats
;
2828 stats
= &PRIV(_ia_dev
[board
])->sonet_stats
;
2829 printk("section_bip: %d\n", atomic_read(&stats
->section_bip
));
2830 printk("line_bip : %d\n", atomic_read(&stats
->line_bip
));
2831 printk("path_bip : %d\n", atomic_read(&stats
->path_bip
));
2832 printk("line_febe : %d\n", atomic_read(&stats
->line_febe
));
2833 printk("path_febe : %d\n", atomic_read(&stats
->path_febe
));
2834 printk("corr_hcs : %d\n", atomic_read(&stats
->corr_hcs
));
2835 printk("uncorr_hcs : %d\n", atomic_read(&stats
->uncorr_hcs
));
2836 printk("tx_cells : %d\n", atomic_read(&stats
->tx_cells
));
2837 printk("rx_cells : %d\n", atomic_read(&stats
->rx_cells
));
2842 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2843 for (i
= 1; i
<= iadev
->num_rx_desc
; i
++)
2844 free_desc(_ia_dev
[board
], i
);
2845 writew( ~(RX_FREEQ_EMPT
| RX_EXCP_RCVD
),
2846 iadev
->reass_reg
+REASS_MASK_REG
);
2853 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2854 ia_frontend_intr(iadev
);
2857 if (!capable(CAP_NET_ADMIN
)) return -EPERM
;
2860 IADebugFlag
= ia_cmds
.maddr
;
2861 printk("New debug option loaded\n");
2877 static int ia_getsockopt(struct atm_vcc
*vcc
, int level
, int optname
,
2878 void __user
*optval
, int optlen
)
2880 IF_EVENT(printk(">ia_getsockopt\n");)
2884 static int ia_setsockopt(struct atm_vcc
*vcc
, int level
, int optname
,
2885 void __user
*optval
, unsigned int optlen
)
2887 IF_EVENT(printk(">ia_setsockopt\n");)
2891 static int ia_pkt_tx (struct atm_vcc
*vcc
, struct sk_buff
*skb
) {
2894 struct tx_buf_desc __iomem
*buf_desc_ptr
;
2898 struct cpcs_trailer
*trailer
;
2899 struct ia_vcc
*iavcc
;
2901 iadev
= INPH_IA_DEV(vcc
->dev
);
2902 iavcc
= INPH_IA_VCC(vcc
);
2903 if (!iavcc
->txing
) {
2904 printk("discard packet on closed VC\n");
2908 dev_kfree_skb_any(skb
);
2912 if (skb
->len
> iadev
->tx_buf_sz
- 8) {
2913 printk("Transmit size over tx buffer size\n");
2917 dev_kfree_skb_any(skb
);
2920 if ((unsigned long)skb
->data
& 3) {
2921 printk("Misaligned SKB\n");
2925 dev_kfree_skb_any(skb
);
2928 /* Get a descriptor number from our free descriptor queue
2929 We get the descr number from the TCQ now, since I am using
2930 the TCQ as a free buffer queue. Initially TCQ will be
2931 initialized with all the descriptors and is hence full.
2933 desc
= get_desc (iadev
, iavcc
);
2936 comp_code
= desc
>> 13;
2939 if ((desc
== 0) || (desc
> iadev
->num_tx_desc
))
2941 IF_ERR(printk(DEV_LABEL
"invalid desc for send: %d\n", desc
);)
2942 atomic_inc(&vcc
->stats
->tx
);
2946 dev_kfree_skb_any(skb
);
2947 return 0; /* return SUCCESS */
2952 IF_ERR(printk(DEV_LABEL
"send desc:%d completion code %d error\n",
2956 /* remember the desc and vcc mapping */
2957 iavcc
->vc_desc_cnt
++;
2958 iadev
->desc_tbl
[desc
-1].iavcc
= iavcc
;
2959 iadev
->desc_tbl
[desc
-1].txskb
= skb
;
2960 IA_SKB_STATE(skb
) = 0;
2962 iadev
->ffL
.tcq_rd
+= 2;
2963 if (iadev
->ffL
.tcq_rd
> iadev
->ffL
.tcq_ed
)
2964 iadev
->ffL
.tcq_rd
= iadev
->ffL
.tcq_st
;
2965 writew(iadev
->ffL
.tcq_rd
, iadev
->seg_reg
+TCQ_RD_PTR
);
2967 /* Put the descriptor number in the packet ready queue
2968 and put the updated write pointer in the DLE field
2970 *(u16
*)(iadev
->seg_ram
+iadev
->ffL
.prq_wr
) = desc
;
2972 iadev
->ffL
.prq_wr
+= 2;
2973 if (iadev
->ffL
.prq_wr
> iadev
->ffL
.prq_ed
)
2974 iadev
->ffL
.prq_wr
= iadev
->ffL
.prq_st
;
2976 /* Figure out the exact length of the packet and padding required to
2977 make it aligned on a 48 byte boundary. */
2978 total_len
= skb
->len
+ sizeof(struct cpcs_trailer
);
2979 total_len
= ((total_len
+ 47) / 48) * 48;
2980 IF_TX(printk("ia packet len:%d padding:%d\n", total_len
, total_len
- skb
->len
);)
2982 /* Put the packet in a tx buffer */
2983 trailer
= iadev
->tx_buf
[desc
-1].cpcs
;
2984 IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2985 skb
, skb
->data
, skb
->len
, desc
);)
2986 trailer
->control
= 0;
2988 trailer
->length
= ((skb
->len
& 0xff) << 8) | ((skb
->len
& 0xff00) >> 8);
2989 trailer
->crc32
= 0; /* not needed - dummy bytes */
2991 /* Display the packet */
2992 IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2993 skb
->len
, tcnter
++);
2994 xdump(skb
->data
, skb
->len
, "TX: ");
2997 /* Build the buffer descriptor */
2998 buf_desc_ptr
= iadev
->seg_ram
+TX_DESC_BASE
;
2999 buf_desc_ptr
+= desc
; /* points to the corresponding entry */
3000 buf_desc_ptr
->desc_mode
= AAL5
| EOM_EN
| APP_CRC32
| CMPL_INT
;
3001 /* Huh ? p.115 of users guide describes this as a read-only register */
3002 writew(TRANSMIT_DONE
, iadev
->seg_reg
+SEG_INTR_STATUS_REG
);
3003 buf_desc_ptr
->vc_index
= vcc
->vci
;
3004 buf_desc_ptr
->bytes
= total_len
;
3006 if (vcc
->qos
.txtp
.traffic_class
== ATM_ABR
)
3007 clear_lockup (vcc
, iadev
);
3009 /* Build the DLE structure */
3010 wr_ptr
= iadev
->tx_dle_q
.write
;
3011 memset((caddr_t
)wr_ptr
, 0, sizeof(*wr_ptr
));
3012 wr_ptr
->sys_pkt_addr
= pci_map_single(iadev
->pci
, skb
->data
,
3013 skb
->len
, PCI_DMA_TODEVICE
);
3014 wr_ptr
->local_pkt_addr
= (buf_desc_ptr
->buf_start_hi
<< 16) |
3015 buf_desc_ptr
->buf_start_lo
;
3016 /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3017 wr_ptr
->bytes
= skb
->len
;
3019 /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3020 if ((wr_ptr
->bytes
>> 2) == 0xb)
3021 wr_ptr
->bytes
= 0x30;
3023 wr_ptr
->mode
= TX_DLE_PSI
;
3024 wr_ptr
->prq_wr_ptr_data
= 0;
3026 /* end is not to be used for the DLE q */
3027 if (++wr_ptr
== iadev
->tx_dle_q
.end
)
3028 wr_ptr
= iadev
->tx_dle_q
.start
;
3030 /* Build trailer dle */
3031 wr_ptr
->sys_pkt_addr
= iadev
->tx_buf
[desc
-1].dma_addr
;
3032 wr_ptr
->local_pkt_addr
= ((buf_desc_ptr
->buf_start_hi
<< 16) |
3033 buf_desc_ptr
->buf_start_lo
) + total_len
- sizeof(struct cpcs_trailer
);
3035 wr_ptr
->bytes
= sizeof(struct cpcs_trailer
);
3036 wr_ptr
->mode
= DMA_INT_ENABLE
;
3037 wr_ptr
->prq_wr_ptr_data
= iadev
->ffL
.prq_wr
;
3039 /* end is not to be used for the DLE q */
3040 if (++wr_ptr
== iadev
->tx_dle_q
.end
)
3041 wr_ptr
= iadev
->tx_dle_q
.start
;
3043 iadev
->tx_dle_q
.write
= wr_ptr
;
3044 ATM_DESC(skb
) = vcc
->vci
;
3045 skb_queue_tail(&iadev
->tx_dma_q
, skb
);
3047 atomic_inc(&vcc
->stats
->tx
);
3048 iadev
->tx_pkt_cnt
++;
3049 /* Increment transaction counter */
3050 writel(2, iadev
->dma
+IPHASE5575_TX_COUNTER
);
3053 /* add flow control logic */
3054 if (atomic_read(&vcc
->stats
->tx
) % 20 == 0) {
3055 if (iavcc
->vc_desc_cnt
> 10) {
3056 vcc
->tx_quota
= vcc
->tx_quota
* 3 / 4;
3057 printk("Tx1: vcc->tx_quota = %d \n", (u32
)vcc
->tx_quota
);
3058 iavcc
->flow_inc
= -1;
3059 iavcc
->saved_tx_quota
= vcc
->tx_quota
;
3060 } else if ((iavcc
->flow_inc
< 0) && (iavcc
->vc_desc_cnt
< 3)) {
3061 // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3062 printk("Tx2: vcc->tx_quota = %d \n", (u32
)vcc
->tx_quota
);
3063 iavcc
->flow_inc
= 0;
3067 IF_TX(printk("ia send done\n");)
3071 static int ia_send(struct atm_vcc
*vcc
, struct sk_buff
*skb
)
3074 unsigned long flags
;
3076 iadev
= INPH_IA_DEV(vcc
->dev
);
3077 if ((!skb
)||(skb
->len
>(iadev
->tx_buf_sz
-sizeof(struct cpcs_trailer
))))
3080 printk(KERN_CRIT
"null skb in ia_send\n");
3081 else dev_kfree_skb_any(skb
);
3084 spin_lock_irqsave(&iadev
->tx_lock
, flags
);
3085 if (!test_bit(ATM_VF_READY
,&vcc
->flags
)){
3086 dev_kfree_skb_any(skb
);
3087 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
3090 ATM_SKB(skb
)->vcc
= vcc
;
3092 if (skb_peek(&iadev
->tx_backlog
)) {
3093 skb_queue_tail(&iadev
->tx_backlog
, skb
);
3096 if (ia_pkt_tx (vcc
, skb
)) {
3097 skb_queue_tail(&iadev
->tx_backlog
, skb
);
3100 spin_unlock_irqrestore(&iadev
->tx_lock
, flags
);
3105 static int ia_proc_read(struct atm_dev
*dev
,loff_t
*pos
,char *page
)
3109 IADEV
*iadev
= INPH_IA_DEV(dev
);
3111 if (iadev
->phy_type
== FE_25MBIT_PHY
) {
3112 n
= sprintf(page
, " Board Type : Iphase5525-1KVC-128K\n");
3115 if (iadev
->phy_type
== FE_DS3_PHY
)
3116 n
= sprintf(page
, " Board Type : Iphase-ATM-DS3");
3117 else if (iadev
->phy_type
== FE_E3_PHY
)
3118 n
= sprintf(page
, " Board Type : Iphase-ATM-E3");
3119 else if (iadev
->phy_type
== FE_UTP_OPTION
)
3120 n
= sprintf(page
, " Board Type : Iphase-ATM-UTP155");
3122 n
= sprintf(page
, " Board Type : Iphase-ATM-OC3");
3124 if (iadev
->pci_map_size
== 0x40000)
3125 n
+= sprintf(tmpPtr
, "-1KVC-");
3127 n
+= sprintf(tmpPtr
, "-4KVC-");
3129 if ((iadev
->memType
& MEM_SIZE_MASK
) == MEM_SIZE_1M
)
3130 n
+= sprintf(tmpPtr
, "1M \n");
3131 else if ((iadev
->memType
& MEM_SIZE_MASK
) == MEM_SIZE_512K
)
3132 n
+= sprintf(tmpPtr
, "512K\n");
3134 n
+= sprintf(tmpPtr
, "128K\n");
3138 return sprintf(page
, " Number of Tx Buffer: %u\n"
3139 " Size of Tx Buffer : %u\n"
3140 " Number of Rx Buffer: %u\n"
3141 " Size of Rx Buffer : %u\n"
3142 " Packets Receiverd : %u\n"
3143 " Packets Transmitted: %u\n"
3144 " Cells Received : %u\n"
3145 " Cells Transmitted : %u\n"
3146 " Board Dropped Cells: %u\n"
3147 " Board Dropped Pkts : %u\n",
3148 iadev
->num_tx_desc
, iadev
->tx_buf_sz
,
3149 iadev
->num_rx_desc
, iadev
->rx_buf_sz
,
3150 iadev
->rx_pkt_cnt
, iadev
->tx_pkt_cnt
,
3151 iadev
->rx_cell_cnt
, iadev
->tx_cell_cnt
,
3152 iadev
->drop_rxcell
, iadev
->drop_rxpkt
);
3157 static const struct atmdev_ops ops
= {
3161 .getsockopt
= ia_getsockopt
,
3162 .setsockopt
= ia_setsockopt
,
3164 .phy_put
= ia_phy_put
,
3165 .phy_get
= ia_phy_get
,
3166 .change_qos
= ia_change_qos
,
3167 .proc_read
= ia_proc_read
,
3168 .owner
= THIS_MODULE
,
3171 static int __devinit
ia_init_one(struct pci_dev
*pdev
,
3172 const struct pci_device_id
*ent
)
3174 struct atm_dev
*dev
;
3178 iadev
= kzalloc(sizeof(*iadev
), GFP_KERNEL
);
3186 IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3187 pdev
->bus
->number
, PCI_SLOT(pdev
->devfn
), PCI_FUNC(pdev
->devfn
));)
3188 if (pci_enable_device(pdev
)) {
3190 goto err_out_free_iadev
;
3192 dev
= atm_dev_register(DEV_LABEL
, &pdev
->dev
, &ops
, -1, NULL
);
3195 goto err_out_disable_dev
;
3197 dev
->dev_data
= iadev
;
3198 IF_INIT(printk(DEV_LABEL
"registered at (itf :%d)\n", dev
->number
);)
3199 IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev
,
3202 pci_set_drvdata(pdev
, dev
);
3204 ia_dev
[iadev_count
] = iadev
;
3205 _ia_dev
[iadev_count
] = dev
;
3207 if (ia_init(dev
) || ia_start(dev
)) {
3208 IF_INIT(printk("IA register failed!\n");)
3210 ia_dev
[iadev_count
] = NULL
;
3211 _ia_dev
[iadev_count
] = NULL
;
3213 goto err_out_deregister_dev
;
3215 IF_EVENT(printk("iadev_count = %d\n", iadev_count
);)
3217 iadev
->next_board
= ia_boards
;
3222 err_out_deregister_dev
:
3223 atm_dev_deregister(dev
);
3224 err_out_disable_dev
:
3225 pci_disable_device(pdev
);
3232 static void __devexit
ia_remove_one(struct pci_dev
*pdev
)
3234 struct atm_dev
*dev
= pci_get_drvdata(pdev
);
3235 IADEV
*iadev
= INPH_IA_DEV(dev
);
3237 /* Disable phy interrupts */
3238 ia_phy_put(dev
, ia_phy_get(dev
, SUNI_RSOP_CIE
) & ~(SUNI_RSOP_CIE_LOSE
),
3242 if (dev
->phy
&& dev
->phy
->stop
)
3243 dev
->phy
->stop(dev
);
3245 /* De-register device */
3246 free_irq(iadev
->irq
, dev
);
3248 ia_dev
[iadev_count
] = NULL
;
3249 _ia_dev
[iadev_count
] = NULL
;
3250 IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev
->number
);)
3251 atm_dev_deregister(dev
);
3253 iounmap(iadev
->base
);
3254 pci_disable_device(pdev
);
3262 static struct pci_device_id ia_pci_tbl
[] = {
3263 { PCI_VENDOR_ID_IPHASE
, 0x0008, PCI_ANY_ID
, PCI_ANY_ID
, },
3264 { PCI_VENDOR_ID_IPHASE
, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, },
3267 MODULE_DEVICE_TABLE(pci
, ia_pci_tbl
);
3269 static struct pci_driver ia_driver
= {
3271 .id_table
= ia_pci_tbl
,
3272 .probe
= ia_init_one
,
3273 .remove
= __devexit_p(ia_remove_one
),
3276 static int __init
ia_module_init(void)
3280 ret
= pci_register_driver(&ia_driver
);
3282 ia_timer
.expires
= jiffies
+ 3*HZ
;
3283 add_timer(&ia_timer
);
3285 printk(KERN_ERR DEV_LABEL
": no adapter found\n");
3289 static void __exit
ia_module_exit(void)
3291 pci_unregister_driver(&ia_driver
);
3293 del_timer(&ia_timer
);
3296 module_init(ia_module_init
);
3297 module_exit(ia_module_exit
);