This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /trunk/dev_am79c971.c



Revision 8
Sat Oct 6 16:24:54 2007 UTC by dpavlin
Original Path: upstream/dynamips-0.2.7-RC2/dev_am79c971.c
File MIME type: text/plain
File size: 30986 byte(s)
dynamips-0.2.7-RC2

1 /*
2 * Cisco router simulation platform.
3 * Copyright (C) 2006 Christophe Fillot. All rights reserved.
4 *
5 * AMD Am79c971 FastEthernet chip emulation.
6 */
7
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <time.h>
14 #include <errno.h>
15 #include <assert.h>
16
17 #include "utils.h"
18 #include "cpu.h"
19 #include "vm.h"
20 #include "dynamips.h"
21 #include "memory.h"
22 #include "device.h"
23 #include "net.h"
24 #include "net_io.h"
25 #include "ptask.h"
26 #include "dev_am79c971.h"
27
28 /* Debugging flags */
29 #define DEBUG_CSR_REGS 0
30 #define DEBUG_BCR_REGS 0
31 #define DEBUG_PCI_REGS 0
32 #define DEBUG_ACCESS 0
33 #define DEBUG_TRANSMIT 0
34 #define DEBUG_RECEIVE 0
35 #define DEBUG_UNKNOWN 0
36
37 /* AMD Am79c971 PCI vendor/product codes */
38 #define AM79C971_PCI_VENDOR_ID 0x1022
39 #define AM79C971_PCI_PRODUCT_ID 0x2000
40
41 /* Maximum packet size */
42 #define AM79C971_MAX_PKT_SIZE 2048
43
44 /* Send up to 16 packets in a TX ring scan pass */
45 #define AM79C971_TXRING_PASS_COUNT 16
46
47 /* CSR0: Controller Status and Control Register */
48 #define AM79C971_CSR0_ERR 0x00008000 /* Error (BABL,CERR,MISS,MERR) */
49 #define AM79C971_CSR0_BABL 0x00004000 /* Transmitter Timeout Error */
50 #define AM79C971_CSR0_CERR 0x00002000 /* Collision Error */
51 #define AM79C971_CSR0_MISS 0x00001000 /* Missed Frame */
52 #define AM79C971_CSR0_MERR 0x00000800 /* Memory Error */
53 #define AM79C971_CSR0_RINT 0x00000400 /* Receive Interrupt */
54 #define AM79C971_CSR0_TINT 0x00000200 /* Transmit Interrupt */
55 #define AM79C971_CSR0_IDON 0x00000100 /* Initialization Done */
56 #define AM79C971_CSR0_INTR 0x00000080 /* Interrupt Flag */
57 #define AM79C971_CSR0_IENA 0x00000040 /* Interrupt Enable */
58 #define AM79C971_CSR0_RXON 0x00000020 /* Receive On */
59 #define AM79C971_CSR0_TXON 0x00000010 /* Transmit On */
60 #define AM79C971_CSR0_TDMD 0x00000008 /* Transmit Demand */
61 #define AM79C971_CSR0_STOP 0x00000004 /* Stop */
62 #define AM79C971_CSR0_STRT 0x00000002 /* Start */
63 #define AM79C971_CSR0_INIT 0x00000001 /* Initialization */
64
65 /* CSR3: Interrupt Masks and Deferral Control */
66 #define AM79C971_CSR3_BABLM 0x00004000 /* Transmitter Timeout Int. Mask */
67 #define AM79C971_CSR3_CERRM 0x00002000 /* Collision Error Int. Mask */
68 #define AM79C971_CSR3_MISSM 0x00001000 /* Missed Frame Interrupt Mask */
69 #define AM79C971_CSR3_MERRM 0x00000800 /* Memory Error Interrupt Mask */
70 #define AM79C971_CSR3_RINTM 0x00000400 /* Receive Interrupt Mask */
71 #define AM79C971_CSR3_TINTM 0x00000200 /* Transmit Interrupt Mask */
72 #define AM79C971_CSR3_IDONM 0x00000100 /* Initialization Done Mask */
73 #define AM79C971_CSR3_BSWP 0x00000004 /* Byte Swap */
74 #define AM79C971_CSR3_IM_MASK 0x00007F00 /* Interrupt Masks for CSR3 */
75
76 /* CSR5: Extended Control and Interrupt 1 */
77 #define AM79C971_CSR5_TOKINTD 0x00008000 /* Transmit OK Interrupt Disable */
78 #define AM79C971_CSR5_SPND 0x00000001 /* Suspend */
79
80 /* CSR15: Mode */
81 #define AM79C971_CSR15_PROM 0x00008000 /* Promiscuous Mode */
82 #define AM79C971_CSR15_DRCVBC 0x00004000 /* Disable Receive Broadcast */
83 #define AM79C971_CSR15_DRCVPA 0x00002000 /* Disable Receive Physical Address */
84 #define AM79C971_CSR15_DTX 0x00000002 /* Disable Transmit */
85 #define AM79C971_CSR15_DRX 0x00000001 /* Disable Receive */
86
87 /* AMD 79C971 Initialization block length */
88 #define AM79C971_INIT_BLOCK_LEN 0x1c
89
90 /* RX descriptors */
91 #define AM79C971_RMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
92 #define AM79C971_RMD1_ERR 0x40000000 /* Error */
93 #define AM79C971_RMD1_FRAM 0x20000000 /* Framing Error */
94 #define AM79C971_RMD1_OFLO 0x10000000 /* Overflow Error */
95 #define AM79C971_RMD1_CRC 0x08000000 /* Invalid CRC */
96 #define AM79C971_RMD1_BUFF 0x04000000 /* Buffer Error (chaining) */
97 #define AM79C971_RMD1_STP 0x02000000 /* Start of Packet */
98 #define AM79C971_RMD1_ENP 0x01000000 /* End of Packet */
99 #define AM79C971_RMD1_BPE 0x00800000 /* Bus Parity Error */
100 #define AM79C971_RMD1_PAM 0x00400000 /* Physical Address Match */
101 #define AM79C971_RMD1_LAFM 0x00200000 /* Logical Addr. Filter Match */
102 #define AM79C971_RMD1_BAM 0x00100000 /* Broadcast Address Match */
103 #define AM79C971_RMD1_LEN 0x00000FFF /* Buffer Length */
104
105 #define AM79C971_RMD2_LEN 0x00000FFF /* Received byte count */
106
107 /* TX descriptors */
108 #define AM79C971_TMD1_OWN 0x80000000 /* OWN=1: owned by Am79c971 */
109 #define AM79C971_TMD1_ERR 0x40000000 /* Error */
110 #define AM79C971_TMD1_ADD_FCS 0x20000000 /* FCS generation */
111 #define AM79C971_TMD1_STP 0x02000000 /* Start of Packet */
112 #define AM79C971_TMD1_ENP 0x01000000 /* End of Packet */
113 #define AM79C971_TMD1_LEN 0x00000FFF /* Buffer Length */
114
115 /* RX Descriptor */
116 struct rx_desc {
117 m_uint32_t rmd[4];
118 };
119
120 /* TX Descriptor */
121 struct tx_desc {
122 m_uint32_t tmd[4];
123 };
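/*
 * Descriptor layout in guest memory depends on the software style selected
 * via BCR20: styles 2 and 3 both use 16-byte descriptors, but style 3 swaps
 * the first and third 32-bit words. rxdesc_read()/txdesc_read() below
 * normalize both layouts into the rmd[]/tmd[] arrays used internally.
 */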
124
125 /* AMD 79C971 Data */
126 struct am79c971_data {
127 char *name;
128
129 /* Lock */
130 pthread_mutex_t lock;
131
132 /* Interface type (10baseT or 100baseTX) */
133 int type;
134
135 /* RX/TX clearing count */
136 int rx_tx_clear_count;
137
138 /* Current RAP (Register Address Pointer) value */
139 m_uint8_t rap;
140
141 /* CSR and BCR registers */
142 m_uint32_t csr[256],bcr[256];
143
144 /* RX/TX rings start addresses */
145 m_uint32_t rx_start,tx_start;
146
147 /* RX/TX number of descriptors (log2) */
148 m_uint32_t rx_l2len,tx_l2len;
149
150 /* RX/TX number of descriptors */
151 m_uint32_t rx_len,tx_len;
152
153 /* RX/TX ring positions */
154 m_uint32_t rx_pos,tx_pos;
155
156 /* MII registers */
157 m_uint16_t mii_regs[32][32];
158
159 /* Physical (MAC) address */
160 n_eth_addr_t mac_addr;
161
162 /* Device information */
163 struct vdevice *dev;
164
165 /* PCI device information */
166 struct pci_device *pci_dev;
167
168 /* Virtual machine */
169 vm_instance_t *vm;
170
171 /* NetIO descriptor */
172 netio_desc_t *nio;
173
174 /* TX ring scanner task id */
175 ptask_id_t tx_tid;
176 };
177
178 /* Log an am79c971 message */
179 #define AM79C971_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
180
181 /* Lock/Unlock primitives */
182 #define AM79C971_LOCK(d) pthread_mutex_lock(&(d)->lock)
183 #define AM79C971_UNLOCK(d) pthread_mutex_unlock(&(d)->lock)
184
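/*
 * Default values presented for the MII register set of PHY 0, following the
 * standard IEEE 802.3 layout: reg 0 = control, 1 = status, 2-3 = PHY
 * identifier, 4 = auto-negotiation advertisement, 5 = link partner ability.
 * The status/advertisement values report an established 10/100 capable link
 * to the guest driver.
 */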
185 static m_uint16_t mii_reg_values[32] = {
186 0x1000, 0x782D, 0x0013, 0x78E2, 0x01E1, 0xC9E1, 0x000F, 0x2001,
187 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
188 0x0104, 0x4780, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
189 0x0000, 0x0000, 0x00C8, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000,
190
191 #if 0
192 0x1000, 0x782D, 0x0013, 0x78e2, 0x01E1, 0xC9E1, 0x0000, 0x0000,
193 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
194 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8060,
195 0x8023, 0x0820, 0x0000, 0x3800, 0xA3B9, 0x0000, 0x0000, 0x0000,
196 #endif
197 };
198
199 /* Read a MII register */
200 static m_uint16_t mii_reg_read(struct am79c971_data *d,u_int phy,u_int reg)
201 {
202 if ((phy >= 32) || (reg >= 32))
203 return(0);
204
205 return(d->mii_regs[phy][reg]);
206 }
207
208 /* Write a MII register */
209 static void mii_reg_write(struct am79c971_data *d,u_int phy,u_int reg,
210 m_uint16_t value)
211 {
212 if ((phy < 32) && (reg < 32))
213 d->mii_regs[phy][reg] = value;
214 }
215
216 /* Check if a packet must be delivered to the emulated chip */
217 static inline int am79c971_handle_mac_addr(struct am79c971_data *d,
218 m_uint8_t *pkt)
219 {
220 n_eth_hdr_t *hdr = (n_eth_hdr_t *)pkt;
221
222 /* Systematically accept frames if we are running in promiscuous mode */
223 if (d->csr[15] & AM79C971_CSR15_PROM)
224 return(TRUE);
225
226 /* Systematically accept all multicast frames */
227 if (eth_addr_is_mcast(&hdr->daddr))
228 return(TRUE);
229
230 /* Accept frames directed to us, discard others */
231 if (!memcmp(&d->mac_addr,&hdr->daddr,N_ETH_ALEN))
232 return(TRUE);
233
234 return(FALSE);
235 }
236
237 /* Update the Interrupt Flag bit of csr0 */
238 static void am79c971_update_irq_status(struct am79c971_data *d)
239 {
240 m_uint32_t mask;
241
242 /* Bits set in CSR3 disable the specified interrupts */
243 mask = AM79C971_CSR3_IM_MASK & ~(d->csr[3] & AM79C971_CSR3_IM_MASK);
244
245 if (d->csr[0] & mask)
246 d->csr[0] |= AM79C971_CSR0_INTR;
247 else
248 d->csr[0] &= ~AM79C971_CSR0_INTR;
249
250 if ((d->csr[0] & (AM79C971_CSR0_INTR|AM79C971_CSR0_IENA)) ==
251 (AM79C971_CSR0_INTR|AM79C971_CSR0_IENA))
252 {
253 pci_dev_trigger_irq(d->vm,d->pci_dev);
254 } else {
255 pci_dev_clear_irq(d->vm,d->pci_dev);
256 }
257 }
258
259 /* Update RX/TX ON bits of csr0 */
260 static void am79c971_update_rx_tx_on_bits(struct am79c971_data *d)
261 {
262 /*
263 * Set RX ON if DRX in csr15 is cleared, and set TX ON if DTX
264 * in csr15 is cleared. In both cases the STRT bit must also be set.
265 */
266 d->csr[0] &= ~(AM79C971_CSR0_RXON|AM79C971_CSR0_TXON);
267
268 if (d->csr[0] & AM79C971_CSR0_STRT) {
269 if (!(d->csr[15] & AM79C971_CSR15_DRX))
270 d->csr[0] |= AM79C971_CSR0_RXON;
271
272 if (!(d->csr[15] & AM79C971_CSR15_DTX))
273 d->csr[0] |= AM79C971_CSR0_TXON;
274 }
275 }
276
277 /* Update RX/TX descriptor lengths */
278 static void am79c971_update_rx_tx_len(struct am79c971_data *d)
279 {
280 d->rx_len = 1 << d->rx_l2len;
281 d->tx_len = 1 << d->tx_l2len;
282
283 /* Normalize ring sizes */
284 if (d->rx_len > 512) d->rx_len = 512;
285 if (d->tx_len > 512) d->tx_len = 512;
286 }
287
288 /* Fetch the initialization block from memory */
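/*
 * SSIZE32 initialization block layout, as consumed below (32-bit words):
 *   ib[0] = MODE (bits 15:0), RLEN (bits 23:20), TLEN (bits 31:28)
 *   ib[1] = PADR[31:0], ib[2] = PADR[47:32]
 *   ib[3..4] = LADRF (logical address filter, not used here)
 *   ib[5] = RDRA (RX ring base), ib[6] = TDRA (TX ring base)
 */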
289 static int am79c971_fetch_init_block(struct am79c971_data *d)
290 {
291 m_uint32_t ib[AM79C971_INIT_BLOCK_LEN];
292 m_uint32_t ib_addr,ib_tmp;
293
294 /* The init block address is contained in csr1 (low) and csr2 (high) */
295 ib_addr = (d->csr[2] << 16) | d->csr[1];
296
297 if (!ib_addr) {
298 AM79C971_LOG(d,"trying to fetch init block at address 0...\n");
299 return(-1);
300 }
301
302 AM79C971_LOG(d,"fetching init block at address 0x%8.8x\n",ib_addr);
303 physmem_copy_from_vm(d->vm,ib,ib_addr,sizeof(ib));
304
305 /* Extract RX/TX ring addresses */
306 d->rx_start = vmtoh32(ib[5]);
307 d->tx_start = vmtoh32(ib[6]);
308
309 /* Set csr15 from mode field */
310 ib_tmp = vmtoh32(ib[0]);
311 d->csr[15] = ib_tmp & 0xffff;
312
313 /* Extract RX/TX ring sizes */
314 d->rx_l2len = (ib_tmp >> 20) & 0x0F;
315 d->tx_l2len = (ib_tmp >> 28) & 0x0F;
316 am79c971_update_rx_tx_len(d);
317
318 AM79C971_LOG(d,"rx_ring = 0x%8.8x (%u), tx_ring = 0x%8.8x (%u)\n",
319 d->rx_start,d->rx_len,d->tx_start,d->tx_len);
320
321 /* Get the physical MAC address */
322 ib_tmp = vmtoh32(ib[1]);
323 d->csr[12] = ib_tmp & 0xFFFF;
324 d->csr[13] = ib_tmp >> 16;
325
326 d->mac_addr.eth_addr_byte[3] = (ib_tmp >> 24) & 0xFF;
327 d->mac_addr.eth_addr_byte[2] = (ib_tmp >> 16) & 0xFF;
328 d->mac_addr.eth_addr_byte[1] = (ib_tmp >> 8) & 0xFF;
329 d->mac_addr.eth_addr_byte[0] = ib_tmp & 0xFF;
330
331 ib_tmp = vmtoh32(ib[2]);
332 d->csr[14] = ib_tmp & 0xFFFF;
333 d->mac_addr.eth_addr_byte[5] = (ib_tmp >> 8) & 0xFF;
334 d->mac_addr.eth_addr_byte[4] = ib_tmp & 0xFF;
335
336 /*
337 * Mark the initialization as done in csr0.
338 */
339 d->csr[0] |= AM79C971_CSR0_IDON;
340
341 /* Update RX/TX ON bits of csr0 since csr15 has been modified */
342 am79c971_update_rx_tx_on_bits(d);
343 AM79C971_LOG(d,"CSR0 = 0x%4.4x\n",d->csr[0]);
344 return(0);
345 }
346
347 /* RDP (Register Data Port) access */
348 static void am79c971_rdp_access(cpu_gen_t *cpu,struct am79c971_data *d,
349 u_int op_type,m_uint64_t *data)
350 {
351 m_uint32_t mask;
352
353 #if DEBUG_CSR_REGS
354 if (op_type == MTS_READ) {
355 cpu_log(cpu,d->name,"read access to CSR %d\n",d->rap);
356 } else {
357 cpu_log(cpu,d->name,"write access to CSR %d, value=0x%x\n",d->rap,*data);
358 }
359 #endif
360
361 switch(d->rap) {
362 case 0: /* CSR0: Controller Status and Control Register */
363 if (op_type == MTS_READ) {
364 //AM79C971_LOG(d,"reading CSR0 (val=0x%4.4x)\n",d->csr[0]);
365 *data = d->csr[0];
366 } else {
367 /*
368 * The STOP bit clears other bits.
369 * It has precedence over INIT and START bits.
370 */
371 if (*data & AM79C971_CSR0_STOP) {
372 //AM79C971_LOG(d,"stopping interface!\n");
373 d->csr[0] = AM79C971_CSR0_STOP;
374 d->tx_pos = d->rx_pos = 0;
375 am79c971_update_irq_status(d);
376 break;
377 }
378
379 /* These bits are cleared when set to 1 */
380 mask = AM79C971_CSR0_BABL | AM79C971_CSR0_CERR;
381 mask |= AM79C971_CSR0_MISS | AM79C971_CSR0_MERR;
382 mask |= AM79C971_CSR0_IDON;
383
384 if (++d->rx_tx_clear_count == 3) {
385 mask |= AM79C971_CSR0_RINT | AM79C971_CSR0_TINT;
386 d->rx_tx_clear_count = 0;
387 }
388
389 d->csr[0] &= ~(*data & mask);
390
391 /* Save the Interrupt Enable bit */
392 d->csr[0] |= *data & AM79C971_CSR0_IENA;
393
394 /* If INIT bit is set, fetch the initialization block */
395 if (*data & AM79C971_CSR0_INIT) {
396 d->csr[0] |= AM79C971_CSR0_INIT;
397 d->csr[0] &= ~AM79C971_CSR0_STOP;
398 am79c971_fetch_init_block(d);
399 }
400
401 /* If STRT bit is set, clear the stop bit */
402 if (*data & AM79C971_CSR0_STRT) {
403 //AM79C971_LOG(d,"enabling interface!\n");
404 d->csr[0] |= AM79C971_CSR0_STRT;
405 d->csr[0] &= ~AM79C971_CSR0_STOP;
406 am79c971_update_rx_tx_on_bits(d);
407 }
408
409 /* Update IRQ status */
410 am79c971_update_irq_status(d);
411 }
412 break;
413
414 case 6: /* CSR6: RX/TX Descriptor Table Length */
415 if (op_type == MTS_WRITE) {
416 d->rx_l2len = (*data >> 8) & 0x0F;
417 d->tx_l2len = (*data >> 12) & 0x0F;
418 am79c971_update_rx_tx_len(d);
419 } else {
420 *data = (d->tx_l2len << 12) | (d->rx_l2len << 8);
421 }
422 break;
423
424 case 15: /* CSR15: Mode */
425 if (op_type == MTS_WRITE) {
426 d->csr[15] = *data;
427 am79c971_update_rx_tx_on_bits(d);
428 } else {
429 *data = d->csr[15];
430 }
431 break;
432
433 case 88: /* CSR88: Chip ID Register (lower) */
434 if (op_type == MTS_READ) {
435 switch(d->type) {
436 case AM79C971_TYPE_100BASE_TX:
437 *data = 0x2623003;
438 break;
439 default:
440 *data = 0;
441 break;
442 }
443 }
444 break;
445
446 default:
447 if (op_type == MTS_READ) {
448 *data = d->csr[d->rap];
449 } else {
450 d->csr[d->rap] = *data;
451 }
452
453 #if DEBUG_UNKNOWN
454 if (op_type == MTS_READ) {
455 cpu_log(cpu,d->name,"read access to unknown CSR %d\n",d->rap);
456 } else {
457 cpu_log(cpu,d->name,"write access to unknown CSR %d, value=0x%x\n",
458 d->rap,*data);
459 }
460 #endif
461 }
462 }
463
464 /* BDP (BCR Data Port) access */
465 static void am79c971_bdp_access(cpu_gen_t *cpu,struct am79c971_data *d,
466 u_int op_type,m_uint64_t *data)
467 {
468 u_int mii_phy,mii_reg;
469
470 #if DEBUG_BCR_REGS
471 if (op_type == MTS_READ) {
472 cpu_log(cpu,d->name,"read access to BCR %d\n",d->rap);
473 } else {
474 cpu_log(cpu,d->name,"write access to BCR %d, value=0x%x\n",d->rap,*data);
475 }
476 #endif
477
478 switch(d->rap) {
479 case 9: /* BCR9: Full-Duplex Control */
480 if (op_type == MTS_READ)
481 *data = 1;
482 break;
483
484 case 34: /* BCR34: MII Management Data Register */
485 mii_phy = (d->bcr[33] >> 5) & 0x1F;
486 mii_reg = (d->bcr[33] >> 0) & 0x1F;
487
488 if (op_type == MTS_READ)
489 *data = mii_reg_read(d,mii_phy,mii_reg);
490 //else
491 //mii_reg_write(d,mii_phy,mii_reg,*data);
492 break;
493
494 default:
495 if (op_type == MTS_READ) {
496 *data = d->bcr[d->rap];
497 } else {
498 d->bcr[d->rap] = *data;
499 }
500
501 #if DEBUG_UNKNOWN
502 if (op_type == MTS_READ) {
503 cpu_log(cpu,d->name,"read access to unknown BCR %d\n",d->rap);
504 } else {
505 cpu_log(cpu,d->name,"write access to unknown BCR %d, value=0x%x\n",
506 d->rap,*data);
507 }
508 #endif
509 }
510 }
511
512 /*
513 * dev_am79c971_access(): RAP (offset 0x14) selects a register number, RDP (offset 0x10) accesses the selected CSR, BDP (offset 0x1c) accesses the selected BCR.
514 */
515 void *dev_am79c971_access(cpu_gen_t *cpu,struct vdevice *dev,
516 m_uint32_t offset,u_int op_size,u_int op_type,
517 m_uint64_t *data)
518 {
519 struct am79c971_data *d = dev->priv_data;
520
521 if (op_type == MTS_READ)
522 *data = 0;
523
524 #if DEBUG_ACCESS
525 if (op_type == MTS_READ) {
526 cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
527 offset,cpu_get_pc(cpu),op_size);
528 } else {
529 cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
530 "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
531 }
532 #endif
533
534 AM79C971_LOCK(d);
535
536 switch(offset) {
537 case 0x14: /* RAP (Register Address Pointer) */
538 if (op_type == MTS_WRITE) {
539 d->rap = *data & 0xFF;
540 } else {
541 *data = d->rap;
542 }
543 break;
544
545 case 0x10: /* RDP (Register Data Port) */
546 am79c971_rdp_access(cpu,d,op_type,data);
547 break;
548
549 case 0x1c: /* BDP (BCR Data Port) */
550 am79c971_bdp_access(cpu,d,op_type,data);
551 break;
552 }
553
554 AM79C971_UNLOCK(d);
555 return NULL;
556 }
557
558 /* Read a RX descriptor */
559 static int rxdesc_read(struct am79c971_data *d,m_uint32_t rxd_addr,
560 struct rx_desc *rxd)
561 {
562 m_uint32_t buf[4];
563 m_uint8_t sw_style;
564
565 /* Get the software style */
566 sw_style = d->bcr[20];
567
568 /* Read the descriptor from VM physical RAM */
569 physmem_copy_from_vm(d->vm,&buf,rxd_addr,sizeof(struct rx_desc));
570
571 switch(sw_style) {
572 case 2:
573 rxd->rmd[0] = vmtoh32(buf[0]); /* rb addr */
574 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
575 rxd->rmd[2] = vmtoh32(buf[2]); /* rfrtag, mcnt, ... */
576 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
577 break;
578
579 case 3:
580 rxd->rmd[0] = vmtoh32(buf[2]); /* rb addr */
581 rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */
582 rxd->rmd[2] = vmtoh32(buf[0]); /* rfrtag, mcnt, ... */
583 rxd->rmd[3] = vmtoh32(buf[3]); /* user */
584 break;
585
586 default:
587 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
588 return(-1);
589 }
590
591 return(0);
592 }
593
594 /* Set the address of the next RX descriptor */
595 static inline void rxdesc_set_next(struct am79c971_data *d)
596 {
597 d->rx_pos++;
598
599 if (d->rx_pos == d->rx_len)
600 d->rx_pos = 0;
601 }
602
603 /* Compute the address of the current RX descriptor */
604 static inline m_uint32_t rxdesc_get_current(struct am79c971_data *d)
605 {
606 return(d->rx_start + (d->rx_pos * sizeof(struct rx_desc)));
607 }
608
609 /* Put a packet in buffer of a descriptor */
610 static void rxdesc_put_pkt(struct am79c971_data *d,struct rx_desc *rxd,
611 u_char **pkt,ssize_t *pkt_len)
612 {
613 ssize_t len,cp_len;
614
615 /* Compute the data length to copy (BCNT is stored as a negative two's-complement value, so negate it) */
616 len = ~((rxd->rmd[1] & AM79C971_RMD1_LEN) - 1);
617 len &= AM79C971_RMD1_LEN;
618 cp_len = m_min(len,*pkt_len);
619
620 /* Copy packet data to the VM physical RAM */
621 #if DEBUG_RECEIVE
622 AM79C971_LOG(d,"am79c971_handle_rxring: storing %u bytes at 0x%8.8x\n",
623 cp_len, rxd->rmd[0]);
624 #endif
625 physmem_copy_to_vm(d->vm,*pkt,rxd->rmd[0],cp_len);
626
627 *pkt += cp_len;
628 *pkt_len -= cp_len;
629 }
630
631 /*
632 * Put a packet in the RX ring, chaining it across descriptors (STP..ENP) and returning each filled buffer to the host by clearing its OWN bit.
633 */
634 static int am79c971_receive_pkt(struct am79c971_data *d,
635 u_char *pkt,ssize_t pkt_len)
636 {
637 m_uint32_t rx_start,rx_current,rx_next,rxdn_rmd1;
638 struct rx_desc rxd0,rxdn,*rxdc;
639 ssize_t tot_len = pkt_len;
640 u_char *pkt_ptr = pkt;
641 m_uint8_t sw_style;
642 int i;
643
644 /* Truncate the packet if it is too big */
645 pkt_len = m_min(pkt_len,AM79C971_MAX_PKT_SIZE);
646
647 /* Copy the current rxring descriptor */
648 rx_start = rx_current = rxdesc_get_current(d);
649 rxdesc_read(d,rx_start,&rxd0);
650
651 /* We must have the first descriptor... */
652 if (!(rxd0.rmd[1] & AM79C971_RMD1_OWN))
653 return(FALSE);
654
655 for(i=0,rxdc=&rxd0;;i++)
656 {
657 #if DEBUG_RECEIVE
658 AM79C971_LOG(d,"am79c971_handle_rxring: i=%d, addr=0x%8.8x: "
659 "rmd[0]=0x%x, rmd[1]=0x%x, rmd[2]=0x%x, rmd[3]=0x%x\n",
660 i,rx_current,
661 rxdc->rmd[0],rxdc->rmd[1],rxdc->rmd[2],rxdc->rmd[3]);
662 #endif
663 /* Put data into the descriptor buffer */
664 rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);
665
666 /* Go to the next descriptor */
667 rxdesc_set_next(d);
668
669 /* If this is not the first descriptor, clear the OWN bit */
670 if (i != 0)
671 rxdc->rmd[1] &= ~AM79C971_RMD1_OWN;
672
673 /* If we have finished, mark the descriptor as end of packet */
674 if (tot_len == 0) {
675 rxdc->rmd[1] |= AM79C971_RMD1_ENP;
676 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
677
678 /* Get the software style */
679 sw_style = d->bcr[20];
680
681 /* Update the message byte count field */
682 rxdc->rmd[2] &= ~AM79C971_RMD2_LEN;
683 rxdc->rmd[2] |= pkt_len + 4; /* +4 for the FCS */
684
685 switch(sw_style) {
686 case 2:
687 physmem_copy_u32_to_vm(d->vm,rx_current+8,rxdc->rmd[2]);
688 break;
689 case 3:
690 physmem_copy_u32_to_vm(d->vm,rx_current,rxdc->rmd[2]);
691 break;
692 default:
693 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
694 }
695
696 break;
697 }
698
699 /* Try to acquire the next descriptor */
700 rx_next = rxdesc_get_current(d);
701 rxdn_rmd1 = physmem_copy_u32_from_vm(d->vm,rx_next+4);
702
703 if (!(rxdn_rmd1 & AM79C971_RMD1_OWN)) {
704 rxdc->rmd[1] |= AM79C971_RMD1_ERR | AM79C971_RMD1_BUFF;
705 rxdc->rmd[1] |= AM79C971_RMD1_ENP;
706 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
707 break;
708 }
709
710 /* Update rmd1 to store change of OWN bit */
711 physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]);
712
713 /* Read the next descriptor from VM physical RAM */
714 rxdesc_read(d,rx_next,&rxdn);
715 rxdc = &rxdn;
716 rx_current = rx_next;
717 }
718
719 /* Update the first RX descriptor */
720 rxd0.rmd[1] &= ~AM79C971_RMD1_OWN;
721 rxd0.rmd[1] |= AM79C971_RMD1_STP;
722 physmem_copy_u32_to_vm(d->vm,rx_start+4,rxd0.rmd[1]);
723
724 d->csr[0] |= AM79C971_CSR0_RINT;
725 am79c971_update_irq_status(d);
726 return(TRUE);
727 }
728
729 /* Handle the RX ring */
730 static int am79c971_handle_rxring(netio_desc_t *nio,
731 u_char *pkt,ssize_t pkt_len,
732 struct am79c971_data *d)
733 {
734 n_eth_hdr_t *hdr;
735
736 /*
737 * Don't start receive if the RX ring address has not been set
738 * or if RX ON is not set.
739 */
740 if ((d->rx_start == 0) || !(d->csr[0] & AM79C971_CSR0_RXON))
741 return(FALSE);
742
743 #if DEBUG_RECEIVE
744 AM79C971_LOG(d,"receiving a packet of %d bytes\n",pkt_len);
745 mem_dump(log_file,pkt,pkt_len);
746 #endif
747
748 AM79C971_LOCK(d);
749
750 /*
751 * Receive only multicast/broadcast traffic + unicast traffic
752 * for this virtual machine.
753 */
754 hdr = (n_eth_hdr_t *)pkt;
755 if (am79c971_handle_mac_addr(d,pkt))
756 am79c971_receive_pkt(d,pkt,pkt_len);
757
758 AM79C971_UNLOCK(d);
759 return(TRUE);
760 }
761
762 /* Read a TX descriptor */
763 static int txdesc_read(struct am79c971_data *d,m_uint32_t txd_addr,
764 struct tx_desc *txd)
765 {
766 m_uint32_t buf[4];
767 m_uint8_t sw_style;
768
769 /* Get the software style */
770 sw_style = d->bcr[20];
771
772 /* Read the descriptor from VM physical RAM */
773 physmem_copy_from_vm(d->vm,&buf,txd_addr,sizeof(struct tx_desc));
774
775 switch(sw_style) {
776 case 2:
777 txd->tmd[0] = vmtoh32(buf[0]); /* tb addr */
778 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
779 txd->tmd[2] = vmtoh32(buf[2]); /* buff, uflo, ... */
780 txd->tmd[3] = vmtoh32(buf[3]); /* user */
781 break;
782
783 case 3:
784 txd->tmd[0] = vmtoh32(buf[2]); /* tb addr */
785 txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */
786 txd->tmd[2] = vmtoh32(buf[0]); /* buff, uflo, ... */
787 txd->tmd[3] = vmtoh32(buf[3]); /* user */
788 break;
789
790 default:
791 AM79C971_LOG(d,"invalid software style %u!\n",sw_style);
792 return(-1);
793 }
794
795 return(0);
796 }
797
798 /* Set the address of the next TX descriptor */
799 static inline void txdesc_set_next(struct am79c971_data *d)
800 {
801 d->tx_pos++;
802
803 if (d->tx_pos == d->tx_len)
804 d->tx_pos = 0;
805 }
806
807 /* Compute the address of the current TX descriptor */
808 static inline m_uint32_t txdesc_get_current(struct am79c971_data *d)
809 {
810 return(d->tx_start + (d->tx_pos * sizeof(struct tx_desc)));
811 }
812
813 /* Handle the TX ring (transmit a single packet, gathering its STP..ENP descriptor chain) */
814 static int am79c971_handle_txring_single(struct am79c971_data *d)
815 {
816 u_char pkt[AM79C971_MAX_PKT_SIZE],*pkt_ptr;
817 struct tx_desc txd0,ctxd,ntxd,*ptxd;
818 m_uint32_t tx_start,tx_current;
819 m_uint32_t clen,tot_len;
820
821 if ((d->tx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON))
822 return(FALSE);
823
824 /* Copy the current txring descriptor */
825 tx_start = tx_current = txdesc_get_current(d);
826 ptxd = &txd0;
827 txdesc_read(d,tx_start,ptxd);
828
829 /* If we don't own the first descriptor, we cannot transmit */
830 if (!(ptxd->tmd[1] & AM79C971_TMD1_OWN))
831 return(FALSE);
832
833 #if DEBUG_TRANSMIT
834 AM79C971_LOG(d,"am79c971_handle_txring: 1st desc: "
835 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
836 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
837 #endif
838
839 /* Empty packet for now */
840 pkt_ptr = pkt;
841 tot_len = 0;
842
843 for(;;) {
844 #if DEBUG_TRANSMIT
845 AM79C971_LOG(d,"am79c971_handle_txring: loop: "
846 "tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n",
847 ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]);
848 #endif
849 /* Copy packet data (BCNT is stored in two's complement, negate it to get the byte count) */
850 clen = ~((ptxd->tmd[1] & AM79C971_TMD1_LEN) - 1);
851 clen &= AM79C971_TMD1_LEN;
852
853 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tmd[0],clen);
854
855 pkt_ptr += clen;
856 tot_len += clen;
857
858 /* Clear the OWN bit if this is not the first descriptor */
859 if (!(ptxd->tmd[1] & AM79C971_TMD1_STP)) {
860 ptxd->tmd[1] &= ~AM79C971_TMD1_OWN;
861 physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->tmd[1]);
862 }
863
864 /* Set the next descriptor */
865 txdesc_set_next(d);
866
867 /* Stop now if end of packet has been reached */
868 if (ptxd->tmd[1] & AM79C971_TMD1_ENP)
869 break;
870
871 /* Read the next descriptor and try to acquire it */
872 tx_current = txdesc_get_current(d);
873 txdesc_read(d,tx_current,&ntxd);
874
875 if (!(ntxd.tmd[1] & AM79C971_TMD1_OWN)) {
876 AM79C971_LOG(d,"am79c971_handle_txring: UNDERFLOW!\n");
877 return(FALSE);
878 }
879
880 memcpy(&ctxd,&ntxd,sizeof(struct tx_desc));
881 ptxd = &ctxd;
882 }
883
884 if (tot_len != 0) {
885 #if DEBUG_TRANSMIT
886 AM79C971_LOG(d,"sending packet of %u bytes\n",tot_len);
887 mem_dump(log_file,pkt,tot_len);
888 #endif
889 /* send it on wire */
890 netio_send(d->nio,pkt,tot_len);
891 }
892
893 /* Clear the OWN flag of the first descriptor */
894 txd0.tmd[1] &= ~AM79C971_TMD1_OWN;
895 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.tmd[1]);
896
897 /* Generate TX interrupt */
898 d->csr[0] |= AM79C971_CSR0_TINT;
899 am79c971_update_irq_status(d);
900 return(TRUE);
901 }
902
903 /* Handle the TX ring */
904 static int am79c971_handle_txring(struct am79c971_data *d)
905 {
906 int i;
907
908 AM79C971_LOCK(d);
909
910 for(i=0;i<AM79C971_TXRING_PASS_COUNT;i++)
911 if (!am79c971_handle_txring_single(d))
912 break;
913
914 AM79C971_UNLOCK(d);
915 return(TRUE);
916 }
917
918 /*
919 * pci_am79c971_read()
920 *
921 * Read a PCI register.
922 */
923 static m_uint32_t pci_am79c971_read(cpu_gen_t *cpu,struct pci_device *dev,
924 int reg)
925 {
926 struct am79c971_data *d = dev->priv_data;
927
928 #if DEBUG_PCI_REGS
929 AM79C971_LOG(d,"read PCI register 0x%x\n",reg);
930 #endif
931
932 switch (reg) {
933 case 0x00:
934 return((AM79C971_PCI_PRODUCT_ID << 16) | AM79C971_PCI_VENDOR_ID);
935 case 0x08: /* class code (0x020000: Ethernet controller) + revision ID */
936 return(0x02000002);
937 case PCI_REG_BAR1:
938 return(d->dev->phys_addr);
939 default:
940 return(0);
941 }
942 }
943
944 /*
945 * pci_am79c971_write()
946 *
947 * Write a PCI register.
948 */
949 static void pci_am79c971_write(cpu_gen_t *cpu,struct pci_device *dev,
950 int reg,m_uint32_t value)
951 {
952 struct am79c971_data *d = dev->priv_data;
953
954 #if DEBUG_PCI_REGS
955 AM79C971_LOG(d,"write PCI register 0x%x, value 0x%x\n",reg,value);
956 #endif
957
958 switch(reg) {
959 case PCI_REG_BAR1:
960 vm_map_device(cpu->vm,d->dev,(m_uint64_t)value);
961 AM79C971_LOG(d,"registers are mapped at 0x%x\n",value);
962 break;
963 }
964 }
965
966 /*
967 * dev_am79c971_init()
968 *
969 * Generic AMD Am79c971 initialization code.
970 */
971 struct am79c971_data *
972 dev_am79c971_init(vm_instance_t *vm,char *name,int interface_type,
973 struct pci_bus *pci_bus,int pci_device,int irq)
974 {
975 struct am79c971_data *d;
976 struct pci_device *pci_dev;
977 struct vdevice *dev;
978
979 /* Allocate the private data structure for AM79C971 */
980 if (!(d = malloc(sizeof(*d)))) {
981 fprintf(stderr,"%s (AM79C971): out of memory\n",name);
982 return NULL;
983 }
984
985 memset(d,0,sizeof(*d));
986 memcpy(d->mii_regs[0],mii_reg_values,sizeof(mii_reg_values));
987 pthread_mutex_init(&d->lock,NULL);
988
989 /* Add as PCI device */
990 pci_dev = pci_dev_add(pci_bus,name,
991 AM79C971_PCI_VENDOR_ID,AM79C971_PCI_PRODUCT_ID,
992 pci_device,0,irq,
993 d,NULL,pci_am79c971_read,pci_am79c971_write);
994
995 if (!pci_dev) {
996 fprintf(stderr,"%s (AM79C971): unable to create PCI device.\n",name);
997 goto err_pci_dev;
998 }
999
1000 /* Create the device itself */
1001 if (!(dev = dev_create(name))) {
1002 fprintf(stderr,"%s (AM79C971): unable to create device.\n",name);
1003 goto err_dev;
1004 }
1005
1006 d->name = name;
1007 d->vm = vm;
1008 d->type = interface_type;
1009 d->pci_dev = pci_dev;
1010 d->dev = dev;
1011
1012 dev->phys_addr = 0;
1013 dev->phys_len = 0x4000;
1014 dev->handler = dev_am79c971_access;
1015 dev->priv_data = d;
1016 return(d);
1017
1018 err_dev:
1019 pci_dev_remove(pci_dev);
1020 err_pci_dev:
1021 free(d);
1022 return NULL;
1023 }
1024
1025 /* Remove an AMD Am79c971 device */
1026 void dev_am79c971_remove(struct am79c971_data *d)
1027 {
1028 if (d != NULL) {
1029 pci_dev_remove(d->pci_dev);
1030 vm_unbind_device(d->vm,d->dev);
1031 cpu_group_rebuild_mts(d->vm->cpu_group);
1032 free(d->dev);
1033 free(d);
1034 }
1035 }
1036
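/*
 * Illustrative usage sketch only -- the real call sites live in the
 * per-platform code; the interface name, pci_bus, pci_device, irq and nio
 * below are placeholders:
 *
 *   struct am79c971_data *d;
 *
 *   d = dev_am79c971_init(vm,"FastEthernet0/0",AM79C971_TYPE_100BASE_TX,
 *                         pci_bus,pci_device,irq);
 *   if (d != NULL)
 *      dev_am79c971_set_nio(d,nio);
 *   ...
 *   dev_am79c971_unset_nio(d);
 *   dev_am79c971_remove(d);
 */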
1037 /* Bind a NIO to an AMD Am79c971 device */
1038 int dev_am79c971_set_nio(struct am79c971_data *d,netio_desc_t *nio)
1039 {
1040 /* check that a NIO is not already bound */
1041 if (d->nio != NULL)
1042 return(-1);
1043
1044 d->nio = nio;
1045 d->tx_tid = ptask_add((ptask_callback)am79c971_handle_txring,d,NULL);
1046 netio_rxl_add(nio,(netio_rx_handler_t)am79c971_handle_rxring,d,NULL);
1047 return(0);
1048 }
1049
1050 /* Unbind a NIO from an AMD Am79c971 device */
1051 void dev_am79c971_unset_nio(struct am79c971_data *d)
1052 {
1053 if (d->nio != NULL) {
1054 ptask_remove(d->tx_tid);
1055 netio_rxl_remove(d->nio);
1056 d->nio = NULL;
1057 }
1058 }
