/[dynamips]/trunk/dev_i8255x.c
Revision 12 (Sat Oct 6 16:45:40 2007 UTC) by dpavlin: make working copy

/*
 * Cisco router simulation platform.
 * Copyright (c) 2007 Christophe Fillot.
 *
 * Intel i8255x (eepro100) Ethernet chip emulation.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <assert.h>

#include "utils.h"
#include "cpu.h"
#include "vm.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "net.h"
#include "net_io.h"
#include "ptask.h"
#include "dev_i8255x.h"

/* Debugging flags */
#define DEBUG_MII_REGS 0
#define DEBUG_PCI_REGS 0
#define DEBUG_ACCESS 0
#define DEBUG_TRANSMIT 0
#define DEBUG_RECEIVE 0
#define DEBUG_UNKNOWN 1

/* Intel i8255x PCI vendor/product codes */
#define I8255X_PCI_VENDOR_ID 0x8086
#define I8255X_PCI_PRODUCT_ID 0x1229

/* Maximum packet size */
#define I8255X_MAX_PKT_SIZE 4096

/* MDI Control Register */
#define I8255X_MDI_IE 0x20000000
#define I8255X_MDI_R 0x10000000
#define I8255X_MDI_OP_MASK 0x0C000000
#define I8255X_MDI_OP_SHIFT 26
#define I8255X_MDI_PHY_MASK 0x03E00000
#define I8255X_MDI_PHY_SHIFT 21
#define I8255X_MDI_REG_MASK 0x001F0000
#define I8255X_MDI_REG_SHIFT 16
#define I8255X_MDI_DATA_MASK 0x0000FFFF

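/*
 * Illustrative MDI control word encoding (a sketch, not from the original
 * source): a read (MII_OPCODE_READ = 2) of register 0 on PHY 1 would be
 * encoded as (2 << I8255X_MDI_OP_SHIFT) | (1 << I8255X_MDI_PHY_SHIFT),
 * i.e. 0x08200000. When the emulated cycle completes, the value read back
 * at register offset 0x10 has I8255X_MDI_R set and the data in the low
 * 16 bits.
 */
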
/* Microcode size (in dwords) */
#define I8255X_UCODE_SIZE 64

/* Size of configuration block (in dwords) */
#define I8255X_CONFIG_SIZE 6

/* Size of statistical counters (in dwords) */
#define I8255X_STAT_CNT_SIZE 20

/* SCB Command Register (Command byte) */
#define SCB_CMD_RUC_MASK 0x07 /* RU Command */
#define SCB_CMD_RUC_SHIFT 0
#define SCB_CMD_CUC_MASK 0xF0 /* CU Command */
#define SCB_CMD_CUC_SHIFT 4

/* SCB Command Register (Interrupt Control Byte) */
#define SCB_CMD_CX 0x80 /* CX Mask */
#define SCB_CMD_FR 0x40 /* FR Mask */
#define SCB_CMD_CNA 0x20 /* CNA Mask */
#define SCB_CMD_RNR 0x10 /* RNR Mask */
#define SCB_CMD_ER 0x08 /* ER Mask */
#define SCB_CMD_FCP 0x04 /* FCP Mask */
#define SCB_CMD_SI 0x02 /* Software generated interrupt */
#define SCB_CMD_M 0x01 /* Mask Interrupt */

/* SCB Interrupt Mask */
#define SCB_INT_MASK 0xF0

/* SCB Status Word (Stat/ACK byte) */
#define SCB_STAT_CX 0x80 /* CU finished command execution */
#define SCB_STAT_FR 0x40 /* RU finished Frame Reception */
#define SCB_STAT_CNA 0x20 /* CU left active state or entered idle state */
#define SCB_STAT_RNR 0x10 /* RU left ready state */
#define SCB_STAT_MDI 0x08 /* MDI read/write cycle completed */
#define SCB_STAT_SWI 0x04 /* Software generated interrupt */
#define SCB_STAT_FCP 0x01 /* Flow Control Pause interrupt */

/* CU states */
#define CU_STATE_IDLE 0x00 /* Idle */
#define CU_STATE_SUSPEND 0x01 /* Suspended */
#define CU_STATE_LPQ_ACT 0x02 /* LPQ Active */
#define CU_STATE_HQP_ACT 0x03 /* HQP Active */

/* RU states */
#define RU_STATE_IDLE 0x00 /* Idle */
#define RU_STATE_SUSPEND 0x01 /* Suspended */
#define RU_STATE_NO_RES 0x02 /* No RX resources available */
#define RU_STATE_READY 0x04 /* Ready */

/* CU (Command Unit) commands */
#define CU_CMD_NOP 0x00 /* No Operation */
#define CU_CMD_START 0x01 /* Start */
#define CU_CMD_RESUME 0x02 /* Resume */
#define CU_CMD_LOAD_DUMP_CNT 0x04 /* Load Dump Counters Address */
#define CU_CMD_DUMP_STAT_CNT 0x05 /* Dump Statistical Counters */
#define CU_CMD_LOAD_CU_BASE 0x06 /* Load CU Base */
#define CU_CMD_DUMP_RST_STAT_CNT 0x07 /* Dump & Reset Stat Counters */
#define CU_CMD_STAT_RESUME 0x0a /* Static Resume */

/* RU (Receive Unit) commands */
#define RU_CMD_NOP 0x00 /* No Operation */
#define RU_CMD_START 0x01 /* Start */
#define RU_CMD_RESUME 0x02 /* Resume */
#define RU_CMD_RX_DMA_REDIRECT 0x03 /* Receive DMA redirect */
#define RU_CMD_ABORT 0x04 /* Abort */
#define RU_CMD_LOAD_HDS 0x05 /* Load Header Data Size */
#define RU_CMD_LOAD_RU_BASE 0x06 /* Load RU Base */

/* CB (Command Block) commands */
#define CB_CMD_NOP 0x00 /* No Operation */
#define CB_CMD_IADDR_SETUP 0x01 /* Individual Address Setup */
#define CB_CMD_CONFIGURE 0x02 /* Configure Device Parameters */
#define CB_CMD_XCAST_SETUP 0x03 /* Multicast Address Setup */
#define CB_CMD_TRANSMIT 0x04 /* Transmit a single frame */
#define CB_CMD_LOAD_UCODE 0x05 /* Load Microcode */
#define CB_CMD_DUMP 0x06 /* Dump Internal Registers */
#define CB_CMD_DIAGNOSE 0x07 /* Diagnostics */

/* CB (Command Block) control/status word */
#define CB_CTRL_EL 0x80000000 /* Last command in CBL */
#define CB_CTRL_S 0x40000000 /* Suspend CU after completion */
#define CB_CTRL_I 0x20000000 /* Interrupt at end of exec (CX) */
#define CB_CTRL_SF 0x00080000 /* Mode: 0=simplified,1=flexible */
#define CB_CTRL_CMD_MASK 0x00070000 /* Command */
#define CB_CTRL_CMD_SHIFT 16
#define CB_CTRL_C 0x00008000 /* Execution status (1=completed) */
#define CB_CTRL_OK 0x00002000 /* Command success */

/* CB Transmit Command */
#define TXCB_NUM_MASK 0xFF000000 /* TBD Number */
#define TXCB_NUM_SHIFT 24
#define TXCB_EOF 0x00008000 /* Whole frame in TxCB */
#define TXCB_BLK_SIZE 0x00003FFF /* TxCB Byte count */

/* Receive Frame Descriptor (RxFD) control/status word */
#define RXFD_CTRL_EL 0x80000000 /* Last RXFD in RFA */
#define RXFD_CTRL_S 0x40000000 /* Suspend RU after completion */
#define RXFD_CTRL_H 0x00100000 /* Header RXFD */
#define RXFD_CTRL_SF 0x00080000 /* Mode: 0=simplified,1=flexible */
#define RXFD_CTRL_C 0x00008000 /* Execution status (1=completed) */
#define RXFD_CTRL_OK 0x00002000 /* Packet OK */
#define RXFD_CTRL_CRC_ERR 0x00000800 /* CRC Error */
#define RXFD_CTRL_CRC_AL 0x00000400 /* Alignment Error */
#define RXFD_CTRL_NO_RES 0x00000200 /* No Resources */
#define RXFD_CTRL_DMA_OV 0x00000100 /* DMA Overrun */
#define RXFD_CTRL_FTS 0x00000080 /* Frame Too Short */
#define RXFD_CTRL_TL 0x00000020 /* Type/Length */
#define RXFD_CTRL_ERR 0x00000010 /* RX Error */
#define RXFD_CTRL_NAM 0x00000004 /* No Address Match */
#define RXFD_CTRL_IAM 0x00000002 /* Individual Address Match */
#define RXFD_CTRL_COLL 0x00000001 /* RX Collision */

#define RXFD_EOF 0x00008000 /* End Of Frame */
#define RXFD_SIZE_MASK 0x00003FFF /* Size mask */

#define RXBD_CTRL_EOF 0x00008000 /* End Of Frame */
#define RXBD_CTRL_F 0x00004000 /* Buffer used */

/* Tx Buffer Descriptor */
struct i8255x_txbd {
   m_uint32_t buf_addr;
   m_uint32_t buf_size;
};

/* CU (Command Unit) Action */
struct i8255x_cu_action {
   m_uint32_t ctrl;
   m_uint32_t link_offset;
   m_uint32_t txbd_addr;
   m_uint32_t txbd_count;
};

/* RX Buffer Descriptor */
struct i8255x_rxbd {
   m_uint32_t ctrl;
   m_uint32_t rxbd_next;
   m_uint32_t buf_addr;
   m_uint32_t buf_size;
};

/* RX Frame Descriptor */
struct i8255x_rxfd {
   m_uint32_t ctrl;
   m_uint32_t link_offset;
   m_uint32_t rxbd_addr;
   m_uint32_t rxbd_size;
};

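/*
 * RX layout as used by dev_i8255x_store_rx_pkt() below: an RxFD is fetched
 * from ru_base + ru_offset. In simplified mode the packet data is copied
 * directly after the descriptor (RxFD address + 0x10); in flexible mode
 * rxfd.rxbd_addr is an offset from ru_base to a chain of i8255x_rxbd
 * buffer descriptors which receive the data.
 */
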
/* Statistical counters indexes */
#define STAT_CNT_TX_GOOD 0 /* Transmit good frames */
#define STAT_CNT_RX_GOOD 9 /* Receive good frames */
#define STAT_CNT_RX_RES_ERR 12 /* Receive resource errors */


/* Intel i8255x private data */
struct i8255x_data {
   char *name;

   /* Lock test */
   pthread_mutex_t lock;

   /* Physical (MAC) address */
   n_eth_addr_t mac_addr;

   /* Device information */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Virtual machine */
   vm_instance_t *vm;

   /* NetIO descriptor */
   netio_desc_t *nio;

   /* CU and RU current states */
   u_int cu_state,ru_state;

   /* CU/RU bases + current offsets */
   m_uint32_t cu_base,ru_base;
   m_uint32_t cu_offset,ru_offset;

   /* SCB general pointer */
   m_uint32_t scb_gptr;

   /* SCB Interrupt Control */
   m_uint8_t scb_ic;

   /* SCB Status Acknowledge (for interrupts) */
   m_uint8_t scb_stat_ack;

   /* Statistical counters address */
   m_uint32_t stat_cnt_addr;

   /* MII registers */
   m_uint32_t mii_ctrl;
   u_int mii_regs[32][32];

   /* MAC Individual Address */
   n_eth_addr_t iaddr;

   /* Configuration data */
   m_uint32_t config_data[I8255X_CONFIG_SIZE];

   /* Microcode */
   m_uint32_t microcode[I8255X_UCODE_SIZE];

   /* Statistical counters */
   m_uint32_t stat_counters[I8255X_STAT_CNT_SIZE];

   /* TX packet buffer */
   m_uint8_t tx_buffer[I8255X_MAX_PKT_SIZE];
};

#define EEPRO_LOCK(d) pthread_mutex_lock(&(d)->lock)
#define EEPRO_UNLOCK(d) pthread_mutex_unlock(&(d)->lock)

/* Log a message */
#define EEPRO_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)

enum {
   MII_OPCODE_WRITE = 1,
   MII_OPCODE_READ,
};

/* Read a MII register */
static m_uint16_t mii_reg_read(struct i8255x_data *d)
{
   u_int mii_phy,mii_reg;

   mii_phy = (d->mii_ctrl & I8255X_MDI_PHY_MASK) >> I8255X_MDI_PHY_SHIFT;
   mii_reg = (d->mii_ctrl & I8255X_MDI_REG_MASK) >> I8255X_MDI_REG_SHIFT;

#if DEBUG_MII_REGS
   EEPRO_LOG(d,"MII PHY read %d reg %d\n",mii_phy,mii_reg);
#endif

   switch(mii_reg) {
      case 0x00:
         return((d->mii_regs[mii_phy][mii_reg] & ~0x8200) | 0x2000);
      case 0x01:
         return(0x782c);
      case 0x02:
         return(0x0013);
      case 0x03:
         return(0x61d4);
      case 0x05:
         return(0x41e1);
      case 0x06:
         return(0x0001);
      case 0x11:
         return(0x4700);
      default:
         return(d->mii_regs[mii_phy][mii_reg]);
   }
}

/* Write a MII register */
static void mii_reg_write(struct i8255x_data *d)
{
   u_int mii_phy,mii_reg,mii_data;

   mii_phy = (d->mii_ctrl & I8255X_MDI_PHY_MASK) >> I8255X_MDI_PHY_SHIFT;
   mii_reg = (d->mii_ctrl & I8255X_MDI_REG_MASK) >> I8255X_MDI_REG_SHIFT;
   mii_data = d->mii_ctrl & I8255X_MDI_DATA_MASK;

#if DEBUG_MII_REGS
   EEPRO_LOG(d,"MII PHY write %d reg %d value %04x\n",
             mii_phy,mii_reg,mii_data);
#endif

   d->mii_regs[mii_phy][mii_reg] = mii_data;
}

/* Update interrupt status */
static void dev_i8255x_update_irq_status(struct i8255x_data *d)
{
   /* If interrupts are masked, clear IRQ */
   if (d->scb_ic & SCB_CMD_M) {
      pci_dev_clear_irq(d->vm,d->pci_dev);
      return;
   }

   /* Software generated interrupt ? */
   if (d->scb_ic & SCB_CMD_SI) {
      pci_dev_trigger_irq(d->vm,d->pci_dev);
      return;
   }

   /* Hardware interrupt ? */
   if (d->scb_stat_ack & (~d->scb_ic & SCB_INT_MASK))
      pci_dev_trigger_irq(d->vm,d->pci_dev);
   else
      pci_dev_clear_irq(d->vm,d->pci_dev);
}

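/*
 * Worked example of the masking above (illustrative): with
 * scb_stat_ack = SCB_STAT_FR (0x40) and scb_ic = SCB_CMD_CNA (0x20),
 * (~scb_ic & SCB_INT_MASK) = 0xD0, so 0x40 & 0xD0 is non-zero and the
 * PCI IRQ is raised; a guest write of SCB_CMD_FR to the interrupt
 * control byte would mask that source and the IRQ would be cleared.
 */
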
/* Fetch a CB (Command Block) */
static void dev_i8255x_fetch_cb(struct i8255x_data *d,m_uint32_t addr,
                                struct i8255x_cu_action *action)
{
   physmem_copy_from_vm(d->vm,action,addr,sizeof(*action));
   action->ctrl = vmtoh32(action->ctrl);
   action->link_offset = vmtoh32(action->link_offset);
   action->txbd_addr = vmtoh32(action->txbd_addr);
   action->txbd_count = vmtoh32(action->txbd_count);
}

/* Fetch a TX buffer descriptor */
static void dev_i8255x_fetch_txbd(struct i8255x_data *d,m_uint32_t addr,
                                  struct i8255x_txbd *bd)
{
   physmem_copy_from_vm(d->vm,bd,addr,sizeof(*bd));
   bd->buf_addr = vmtoh32(bd->buf_addr);
   bd->buf_size = vmtoh32(bd->buf_size);
}

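/*
 * TxCB layout assumed by dev_i8255x_send_tx_pkt() below: the control word
 * sits at offset 0x00 of the CB, the link offset at 0x04, the TBD array
 * address at 0x08 and the TBD count/byte count word at 0x0c; any inline
 * frame data starts at offset 0x10.
 */
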
/* Transmit a frame */
static int dev_i8255x_send_tx_pkt(struct i8255x_data *d,m_uint32_t cb_addr,
                                  struct i8255x_cu_action *action)
{
   m_uint32_t i,blk_size,tx_size,txbd_addr,txbd_cnt;
   struct i8255x_txbd txbd;
   m_uint8_t *tx_ptr;
   m_uint32_t norm_len;

   /* === Simplified mode: copy the data directly from the TxCB === */
   if (!(action->ctrl & CB_CTRL_SF)) {
      tx_size = action->txbd_count & TXCB_BLK_SIZE;

      norm_len = normalize_size(tx_size,4,0);
      physmem_copy_from_vm(d->vm,d->tx_buffer,cb_addr+0x10,norm_len);
      mem_bswap32(d->tx_buffer,norm_len);
      goto do_transmit;
   }

   /* === Flexible mode === */
   tx_ptr = d->tx_buffer;
   tx_size = 0;

   if (action->txbd_addr == 0xFFFFFFFF) {
      txbd_addr = cb_addr + 0x10;
   } else {
      /* copy the data directly from the TxCB if present */
      blk_size = action->txbd_count & TXCB_BLK_SIZE;

      if (blk_size > 0) {
         tx_size = action->txbd_count & TXCB_BLK_SIZE;

         norm_len = normalize_size(tx_size,4,0);
         physmem_copy_from_vm(d->vm,tx_ptr,cb_addr+0x10,norm_len);
         mem_bswap32(tx_ptr,norm_len);

         tx_ptr += tx_size;
      }

      txbd_addr = action->txbd_addr;
   }

   txbd_cnt = (action->txbd_count & TXCB_NUM_MASK) >> TXCB_NUM_SHIFT;

   /*
    * Fetch all Tx buffer descriptors and copy data from each separate buffer.
    */
   for(i=0;i<txbd_cnt;i++) {
      dev_i8255x_fetch_txbd(d,txbd_addr,&txbd);

      norm_len = normalize_size(txbd.buf_size,4,0);
      physmem_copy_from_vm(d->vm,tx_ptr,txbd.buf_addr,norm_len);
      mem_bswap32(tx_ptr,norm_len);

      tx_ptr += txbd.buf_size;
      tx_size += txbd.buf_size;

      txbd_addr += sizeof(txbd);
   }

 do_transmit:
   d->stat_counters[STAT_CNT_TX_GOOD]++;

#if DEBUG_TRANSMIT
   EEPRO_LOG(d,"sending packet of %u bytes\n",tx_size);
   mem_dump(log_file,d->tx_buffer,tx_size);
#endif
   netio_send(d->nio,d->tx_buffer,tx_size);
   return(TRUE);
}

/* Process an individual CB (Command Block) */
static void dev_i8255x_process_cb(struct i8255x_data *d,m_uint32_t cb_addr,
                                  struct i8255x_cu_action *action)
{
   m_uint32_t tmp[2];
   u_int cmd,res;

   cmd = (action->ctrl & CB_CTRL_CMD_MASK) >> CB_CTRL_CMD_SHIFT;

   switch(cmd) {
      /* No Operation */
      case CB_CMD_NOP:
         res = TRUE;
         break;

      /* Transmit a frame */
      case CB_CMD_TRANSMIT:
         res = dev_i8255x_send_tx_pkt(d,cb_addr,action);
         break;

      /* Configure */
      case CB_CMD_CONFIGURE:
         physmem_copy_from_vm(d->vm,d->config_data,cb_addr+0x08,
                              I8255X_CONFIG_SIZE * sizeof(m_uint32_t));
         mem_bswap32(d->config_data,I8255X_CONFIG_SIZE * sizeof(m_uint32_t));
         res = TRUE;
         break;

      /* Individual address setup */
      case CB_CMD_IADDR_SETUP:
         tmp[0] = physmem_copy_u32_from_vm(d->vm,cb_addr+0x08);
         tmp[1] = physmem_copy_u32_from_vm(d->vm,cb_addr+0x0c);

         d->iaddr.eth_addr_byte[0] = tmp[0];
         d->iaddr.eth_addr_byte[1] = tmp[0] >> 8;
         d->iaddr.eth_addr_byte[2] = tmp[0] >> 16;
         d->iaddr.eth_addr_byte[3] = tmp[0] >> 24;
         d->iaddr.eth_addr_byte[4] = tmp[1];
         d->iaddr.eth_addr_byte[5] = tmp[1] >> 8;

         EEPRO_LOG(d,"iaddr set to: %2.2x%2.2x.%2.2x%2.2x.%2.2x%2.2x\n",
                   d->iaddr.eth_addr_byte[0],d->iaddr.eth_addr_byte[1],
                   d->iaddr.eth_addr_byte[2],d->iaddr.eth_addr_byte[3],
                   d->iaddr.eth_addr_byte[4],d->iaddr.eth_addr_byte[5]);

         res = TRUE;
         break;

      /* Load Microcode */
      case CB_CMD_LOAD_UCODE:
         physmem_copy_from_vm(d->vm,d->microcode,cb_addr+0x08,
                              I8255X_UCODE_SIZE * sizeof(m_uint32_t));
         mem_bswap32(d->microcode,I8255X_UCODE_SIZE * sizeof(m_uint32_t));
         EEPRO_LOG(d,"microcode loaded\n");
         res = TRUE;
         break;

      /* Unsupported command */
      default:
         EEPRO_LOG(d,"unsupported CB command 0x%2.2x (cb_addr=0x%8.8x)\n",
                   cmd,cb_addr);
         res = TRUE;
   }

   /* Set the completed bit with the result */
   action->ctrl |= CB_CTRL_C;
   if (res) action->ctrl |= CB_CTRL_OK;

   /* Update control word */
   physmem_copy_u32_to_vm(d->vm,cb_addr,action->ctrl);
}

/* Process a CBL (Command Block List) */
static void dev_i8255x_process_cbl(struct i8255x_data *d)
{
   struct i8255x_cu_action action;
   m_uint32_t cb_addr;

   for(;;) {
      cb_addr = d->cu_base + d->cu_offset;
      dev_i8255x_fetch_cb(d,cb_addr,&action);

      /* Execute command */
      dev_i8255x_process_cb(d,cb_addr,&action);

      /* Interrupt at end of execution ? */
      if (action.ctrl & CB_CTRL_I)
         d->scb_stat_ack |= SCB_STAT_CX;

      /* Return to idle state ? */
      if (action.ctrl & CB_CTRL_EL) {
         d->cu_state = CU_STATE_IDLE;
         d->scb_stat_ack |= SCB_STAT_CNA;
         break;
      } else {
         /* Enter suspended state ? */
         if (action.ctrl & CB_CTRL_S) {
            d->cu_state = CU_STATE_SUSPEND;
            d->scb_stat_ack |= SCB_STAT_CNA;
            break;
         }
      }

      /* Go to next descriptor */
      d->cu_offset = action.link_offset;
   }

   /* Update interrupt status */
   dev_i8255x_update_irq_status(d);
}

/* Resume a Command Block List */
static int dev_i8255x_cu_resume(struct i8255x_data *d)
{
   struct i8255x_cu_action action;
   m_uint32_t cu_addr;

   /* If we are in idle state, ignore the command */
   if (d->cu_state == CU_STATE_IDLE)
      return(FALSE);

   cu_addr = d->cu_base + d->cu_offset;

   /* Check if the previous block still has the S bit set */
   dev_i8255x_fetch_cb(d,cu_addr,&action);

   if (action.ctrl & CB_CTRL_S)
      return(FALSE);

   d->cu_offset = action.link_offset;
   d->cu_state = CU_STATE_LPQ_ACT;
   dev_i8255x_process_cbl(d);
   return(TRUE);
}

/* Dump Statistical counters */
static void dev_i8255x_dump_stat_cnt(struct i8255x_data *d)
{
   m_uint32_t counters[I8255X_STAT_CNT_SIZE];

   memcpy(counters,d->stat_counters,sizeof(counters));
   mem_bswap32(counters,sizeof(counters));
   physmem_copy_to_vm(d->vm,counters,d->stat_cnt_addr,sizeof(counters));
}

/* Process a CU command */
static void dev_i8255x_process_cu_cmd(struct i8255x_data *d,u_int cuc)
{
   switch(cuc) {
      /* No Operation */
      case CU_CMD_NOP:
         break;

      /* Start */
      case CU_CMD_START:
         d->cu_offset = d->scb_gptr;
         d->cu_state = CU_STATE_LPQ_ACT;
         dev_i8255x_process_cbl(d);
         break;

      /* Resume */
      case CU_CMD_RESUME:
         dev_i8255x_cu_resume(d);
         break;

      /* Load CU base */
      case CU_CMD_LOAD_CU_BASE:
         d->cu_base = d->scb_gptr;
         break;

      /* Load Dump Counters Address */
      case CU_CMD_LOAD_DUMP_CNT:
         d->stat_cnt_addr = d->scb_gptr;
         break;

      /* Dump Statistical Counters */
      case CU_CMD_DUMP_STAT_CNT:
         dev_i8255x_dump_stat_cnt(d);
         break;

      /* Dump Statistical Counters and reset them */
      case CU_CMD_DUMP_RST_STAT_CNT:
         dev_i8255x_dump_stat_cnt(d);
         memset(d->stat_counters,0,sizeof(d->stat_counters));
         break;

      default:
         EEPRO_LOG(d,"unsupported CU command 0x%2.2x\n",cuc);
   }
}

/* Fetch an RxFD (RX Frame Descriptor) */
static void dev_i8255x_fetch_rxfd(struct i8255x_data *d,m_uint32_t addr,
                                  struct i8255x_rxfd *rxfd)
{
   physmem_copy_from_vm(d->vm,rxfd,addr,sizeof(*rxfd));
   rxfd->ctrl = vmtoh32(rxfd->ctrl);
   rxfd->link_offset = vmtoh32(rxfd->link_offset);
   rxfd->rxbd_addr = vmtoh32(rxfd->rxbd_addr);
   rxfd->rxbd_size = vmtoh32(rxfd->rxbd_size);
}

/* Fetch an RxBD (Rx Buffer Descriptor) */
static void dev_i8255x_fetch_rxbd(struct i8255x_data *d,m_uint32_t addr,
                                  struct i8255x_rxbd *rxbd)
{
   physmem_copy_from_vm(d->vm,rxbd,addr,sizeof(*rxbd));
   rxbd->ctrl = vmtoh32(rxbd->ctrl);
   rxbd->rxbd_next = vmtoh32(rxbd->rxbd_next);
   rxbd->buf_addr = vmtoh32(rxbd->buf_addr);
   rxbd->buf_size = vmtoh32(rxbd->buf_size);
}

/* Store a packet */
static int dev_i8255x_store_rx_pkt(struct i8255x_data *d,
                                   m_uint8_t *pkt,ssize_t pkt_len)
{
   m_uint32_t rxfd_addr,rxbd_addr;
   m_uint32_t rxfd_next,rxbd_next;
   m_uint32_t clen,buf_size,norm_len;
   struct i8255x_rxfd rxfd;
   struct i8255x_rxbd rxbd;
   m_uint8_t *pkt_ptr;
   ssize_t tot_len;

   /* Fetch the RX Frame descriptor */
   rxfd_addr = d->ru_base + d->ru_offset;
   dev_i8255x_fetch_rxfd(d,rxfd_addr,&rxfd);

   /* === Simplified mode === */
   if (!(rxfd.ctrl & RXFD_CTRL_SF)) {
      /* Copy the packet data directly into the frame descriptor */
      norm_len = normalize_size(pkt_len,4,0);
      mem_bswap32(pkt,norm_len);
      physmem_copy_to_vm(d->vm,pkt,rxfd_addr+0x10,norm_len);

      /* Update the RxFD and generate the appropriate interrupt */
      goto update_rxfd;
   }

   /* === Flexible mode === */
   rxbd_addr = d->ru_base + rxfd.rxbd_addr;
   pkt_ptr = pkt;
   tot_len = pkt_len;

   do {
      /* Fetch the RX buffer */
      dev_i8255x_fetch_rxbd(d,rxbd_addr,&rxbd);
      rxbd_next = rxbd.rxbd_next;

      /* Get the current buffer size */
      buf_size = rxbd.buf_size & RXFD_SIZE_MASK;
      clen = m_min(tot_len,buf_size);

      /* Copy the data into the buffer */
      norm_len = normalize_size(clen,4,0);
      mem_bswap32(pkt_ptr,norm_len);
      physmem_copy_to_vm(d->vm,pkt_ptr,rxbd.buf_addr,norm_len);

      pkt_ptr += clen;
      tot_len -= clen;

      /* Update RX buffer info */
      if (!tot_len) {
         rxbd.ctrl |= RXBD_CTRL_EOF;
         clen += 4; /* Add CRC */
      }

      rxbd.ctrl |= RXBD_CTRL_F | clen;
      physmem_copy_u32_to_vm(d->vm,rxbd_addr+0x00,rxbd.ctrl);

      /* Advance to the next buffer descriptor if the frame is not complete */
      rxbd_addr = d->ru_base + rxbd_next;
   }while(tot_len > 0);

   /* Set the next available RxBD in next RxFD */
   rxbd_next = d->ru_base + rxbd.rxbd_next;
   rxfd_next = d->ru_base + rxfd.link_offset;
   physmem_copy_u32_to_vm(d->vm,rxfd_next+0x08,rxbd_next);

   /* Update the RxFD */
 update_rxfd:
   rxfd.ctrl |= RXFD_CTRL_C | RXFD_CTRL_OK;
   rxfd.rxbd_size &= ~0xFFFF;
   rxfd.rxbd_size |= RXFD_EOF | (pkt_len + 4);

   physmem_copy_u32_to_vm(d->vm,rxfd_addr+0x00,rxfd.ctrl);
   physmem_copy_u32_to_vm(d->vm,rxfd_addr+0x0c,rxfd.rxbd_size);

   d->stat_counters[STAT_CNT_RX_GOOD]++;

   /* A frame has been received: generate an IRQ */
   d->scb_stat_ack |= SCB_STAT_FR;

   if (rxfd.ctrl & RXFD_CTRL_EL) {
      d->ru_state = RU_STATE_NO_RES;
      d->scb_stat_ack |= SCB_STAT_RNR;
   } else {
      if (rxfd.ctrl & RXFD_CTRL_S) {
         d->ru_state = RU_STATE_SUSPEND;
         d->scb_stat_ack |= SCB_STAT_RNR;
      } else {
         d->ru_offset = rxfd.link_offset;
      }
   }

   dev_i8255x_update_irq_status(d);
   return(TRUE);
}

/* Resume reception */
static int dev_i8255x_ru_resume(struct i8255x_data *d)
{
   struct i8255x_rxfd rxfd;
   m_uint32_t rxfd_addr;

   /* If we are not in ready state, ignore the command */
   if (d->ru_state != RU_STATE_READY)
      return(FALSE);

   /* Fetch the RX Frame descriptor */
   rxfd_addr = d->ru_base + d->ru_offset;
   dev_i8255x_fetch_rxfd(d,rxfd_addr,&rxfd);

   /* Check if the previous frame descriptor still has the S bit set */
   if (rxfd.ctrl & RXFD_CTRL_S)
      return(FALSE);

   d->ru_offset = rxfd.link_offset;
   d->ru_state = RU_STATE_READY;
   return(TRUE);
}

/* Process a RU command */
static void dev_i8255x_process_ru_cmd(struct i8255x_data *d,u_int ruc)
{
   switch(ruc) {
      /* No Operation */
      case RU_CMD_NOP:
         break;

      /* Start */
      case RU_CMD_START:
         d->ru_offset = d->scb_gptr;
         d->ru_state = RU_STATE_READY;
         break;

      /* Resume */
      case RU_CMD_RESUME:
         dev_i8255x_ru_resume(d);
         break;

      /* Load RU base */
      case RU_CMD_LOAD_RU_BASE:
         d->ru_base = d->scb_gptr;
         break;

      default:
         EEPRO_LOG(d,"unsupported RU command 0x%2.2x\n",ruc);
   }
}

/*
 * dev_i8255x_access()
 */
void *dev_i8255x_access(cpu_gen_t *cpu,struct vdevice *dev,
                        m_uint32_t offset,u_int op_size,u_int op_type,
                        m_uint64_t *data)
{
   struct i8255x_data *d = dev->priv_data;
   u_int cuc,ruc,mii_op;

   if (op_type == MTS_READ)
      *data = 0x0;

#if DEBUG_ACCESS
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
              offset,cpu_get_pc(cpu),op_size);
   } else {
      cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
              "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
   }
#endif

   EEPRO_LOCK(d);

   switch(offset) {
      /* SCB Command Word (interrupt control byte) */
      case 0x00:
         if (op_type == MTS_WRITE)
            d->scb_ic = *data;
         break;

      /* SCB Command Word (command byte) */
      case 0x01:
         if (op_type == MTS_WRITE) {
            cuc = (*data & SCB_CMD_CUC_MASK) >> SCB_CMD_CUC_SHIFT;
            ruc = (*data & SCB_CMD_RUC_MASK) >> SCB_CMD_RUC_SHIFT;

            /* Process CU and RU commands */
            dev_i8255x_process_cu_cmd(d,cuc);
            dev_i8255x_process_ru_cmd(d,ruc);
         }
         break;

      /* SCB Status Word */
      case 0x02:
         if (op_type == MTS_READ) {
            *data = d->scb_stat_ack << 8;
         } else {
            d->scb_stat_ack &= ~(*data >> 8);
            dev_i8255x_update_irq_status(d);
         }
         break;

      /* SCB General Pointer */
      case 0x04:
         if (op_type == MTS_WRITE)
            d->scb_gptr = *data;
         else
            *data = d->scb_gptr;
         break;

      /* MDI control register */
      case 0x10:
         if (op_type == MTS_READ) {
            mii_op = (d->mii_ctrl & I8255X_MDI_OP_MASK) >> I8255X_MDI_OP_SHIFT;

            if (mii_op == MII_OPCODE_READ) {
               d->mii_ctrl &= ~I8255X_MDI_DATA_MASK;
               d->mii_ctrl |= mii_reg_read(d);
            }

            *data = d->mii_ctrl | I8255X_MDI_R;
         } else {
            d->mii_ctrl = *data;

            mii_op = (d->mii_ctrl & I8255X_MDI_OP_MASK) >> I8255X_MDI_OP_SHIFT;
            if (mii_op == MII_OPCODE_WRITE)
               mii_reg_write(d);
         }
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,
                    "read access to unknown offset=0x%x, "
                    "pc=0x%llx (size=%u)\n",
                    offset,cpu_get_pc(cpu),op_size);
         } else {
            cpu_log(cpu,d->name,
                    "write access to unknown offset=0x%x, pc=0x%llx, "
                    "val=0x%llx (size=%u)\n",
                    offset,cpu_get_pc(cpu),*data,op_size);
         }
#endif
   }

   EEPRO_UNLOCK(d);
   return NULL;
}

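/*
 * Illustrative guest-side sequence handled by dev_i8255x_access() above
 * (a sketch, not taken from an actual driver): the driver writes the
 * physical address of its first command block to the SCB general pointer
 * (offset 0x04), then writes (CU_CMD_START << SCB_CMD_CUC_SHIFT) to the
 * SCB command byte (offset 0x01), which makes dev_i8255x_process_cu_cmd()
 * walk the CBL; likewise (RU_CMD_START << SCB_CMD_RUC_SHIFT) readies the
 * receive unit.
 */
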
/* Handle the RX ring */
static int dev_i8255x_handle_rxring(netio_desc_t *nio,
                                    u_char *pkt,ssize_t pkt_len,
                                    struct i8255x_data *d)
{
   int res = FALSE;

   EEPRO_LOCK(d);

   if (d->ru_state == RU_STATE_READY)
      res = dev_i8255x_store_rx_pkt(d,pkt,pkt_len);

   EEPRO_UNLOCK(d);
   return(res);
}

/*
 * pci_i8255x_read()
 *
 * Read a PCI register.
 */
static m_uint32_t pci_i8255x_read(cpu_gen_t *cpu,struct pci_device *dev,
                                  int reg)
{
   struct i8255x_data *d = dev->priv_data;

#if DEBUG_PCI_REGS
   EEPRO_LOG(d,"read PCI register 0x%x\n",reg);
#endif

   switch (reg) {
      case 0x00:
         return((I8255X_PCI_PRODUCT_ID << 16) | I8255X_PCI_VENDOR_ID);
      case PCI_REG_BAR0:
         return(d->dev->phys_addr);
      case 0x0c:
         return(0x4000);
      default:
         return(0);
   }
}

/*
 * pci_i8255x_write()
 *
 * Write a PCI register.
 */
static void pci_i8255x_write(cpu_gen_t *cpu,struct pci_device *dev,
                             int reg,m_uint32_t value)
{
   struct i8255x_data *d = dev->priv_data;

#if DEBUG_PCI_REGS
   EEPRO_LOG(d,"write PCI register 0x%x, value 0x%x\n",reg,value);
#endif

   switch(reg) {
      case PCI_REG_BAR0:
         vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
         EEPRO_LOG(d,"registers are mapped at 0x%x\n",value);
         break;
   }
}

/*
 * dev_i8255x_init()
 */
struct i8255x_data *
dev_i8255x_init(vm_instance_t *vm,char *name,int interface_type,
                struct pci_bus *pci_bus,int pci_device,int irq)
{
   struct i8255x_data *d;
   struct pci_device *pci_dev;
   struct vdevice *dev;

   /* Allocate the private data structure for I8255X */
   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"%s (i8255x): out of memory\n",name);
      return NULL;
   }

   memset(d,0,sizeof(*d));
   pthread_mutex_init(&d->lock,NULL);

   /* Add as PCI device */
   pci_dev = pci_dev_add(pci_bus,name,
                         I8255X_PCI_VENDOR_ID,I8255X_PCI_PRODUCT_ID,
                         pci_device,0,irq,
                         d,NULL,pci_i8255x_read,pci_i8255x_write);

   if (!pci_dev) {
      fprintf(stderr,"%s (i8255x): unable to create PCI device.\n",name);
      goto err_pci_dev;
   }

   /* Create the device itself */
   if (!(dev = dev_create(name))) {
      fprintf(stderr,"%s (i8255x): unable to create device.\n",name);
      goto err_dev;
   }

   d->name = name;
   d->vm = vm;
   d->pci_dev = pci_dev;
   d->dev = dev;

   dev->phys_addr = 0;
   dev->phys_len = 0x10000;
   dev->handler = dev_i8255x_access;
   dev->priv_data = d;
   return(d);

 err_dev:
   pci_dev_remove(pci_dev);
 err_pci_dev:
   free(d);
   return NULL;
}

/* Remove an Intel i8255x device */
void dev_i8255x_remove(struct i8255x_data *d)
{
   if (d != NULL) {
      pci_dev_remove(d->pci_dev);
      vm_unbind_device(d->vm,d->dev);
      cpu_group_rebuild_mts(d->vm->cpu_group);
      free(d->dev);
      free(d);
   }
}

/* Bind a NIO to an Intel i8255x device */
int dev_i8255x_set_nio(struct i8255x_data *d,netio_desc_t *nio)
{
   /* check that a NIO is not already bound */
   if (d->nio != NULL)
      return(-1);

   d->nio = nio;
   netio_rxl_add(nio,(netio_rx_handler_t)dev_i8255x_handle_rxring,d,NULL);
   return(0);
}

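/*
 * Typical instantiation from platform code (a sketch under assumed
 * parameter values; the real callers live in the per-router platform
 * files): d = dev_i8255x_init(vm,"eepro100",0,pci_bus,pci_device,irq);
 * followed by dev_i8255x_set_nio(d,nio) once a NIO has been created, and
 * dev_i8255x_unset_nio()/dev_i8255x_remove() on shutdown.
 */
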
/* Unbind a NIO from an Intel i8255x device */
void dev_i8255x_unset_nio(struct i8255x_data *d)
{
   if (d->nio != NULL) {
      netio_rxl_remove(d->nio);
      d->nio = NULL;
   }
}
