1 |
/* |
2 |
 * Cisco router simulation platform.
3 |
* Copyright (C) 2005,2006 Christophe Fillot. All rights reserved. |
4 |
* |
5 |
* DEC21140 FastEthernet chip emulation. |
6 |
* |
7 |
* It allows to emulate a C7200-IO-FE card with 1 port and PA-FE-TX cards. |
8 |
* |
9 |
* Many many thanks to mtve (aka "Mtv Europe") for his great work on |
10 |
* this stuff. |
11 |
* |
12 |
* Manuals: |
13 |
* |
14 |
* DECchip 21140 PCI fast Ethernet LAN controller Hardware reference manual |
15 |
* http://ftp.nluug.nl/NetBSD/misc/dec-docs/ec-qc0cb-te.ps.gz |
16 |
* |
17 |
* National DP83840 PHY |
18 |
* http://www.rezrov.net/docs/DP83840A.pdf |
19 |
* |
20 |
* Remark: only Big-endian mode is supported. |
21 |
*/ |
22 |
|
23 |
#include <stdio.h> |
24 |
#include <stdlib.h> |
25 |
#include <string.h> |
26 |
#include <stdarg.h> |
27 |
#include <unistd.h> |
28 |
#include <time.h> |
29 |
#include <errno.h> |
30 |
#include <assert.h> |
31 |
|
32 |
#include "crc.h" |
33 |
#include "utils.h" |
34 |
#include "cpu.h" |
35 |
#include "vm.h" |
36 |
#include "dynamips.h" |
37 |
#include "memory.h" |
38 |
#include "device.h" |
39 |
#include "net.h" |
40 |
#include "net_io.h" |
41 |
#include "ptask.h" |
42 |
#include "dev_dec21140.h" |
43 |
|
44 |
/* Debugging flags */ |
45 |
#define DEBUG_MII_REGS 0 |
46 |
#define DEBUG_CSR_REGS 0 |
47 |
#define DEBUG_PCI_REGS 0 |
48 |
#define DEBUG_TRANSMIT 0 |
49 |
#define DEBUG_RECEIVE 0 |
50 |
|
51 |
/* DEC21140 PCI vendor/product codes */ |
52 |
#define DEC21140_PCI_VENDOR_ID 0x1011 |
53 |
#define DEC21140_PCI_PRODUCT_ID 0x0009 |
54 |
|
55 |
/* DEC21140 PCI registers */ |
56 |
#define DEC21140_PCI_CFID_REG_OFFSET 0x00 |
57 |
#define DEC21140_PCI_CFCS_REG_OFFSET 0x04 |
58 |
#define DEC21140_PCI_CFRV_REG_OFFSET 0x08 |
59 |
#define DEC21140_PCI_CFLT_REG_OFFSET 0x0C |
60 |
#define DEC21140_PCI_CBIO_REG_OFFSET 0x10 |
61 |
#define DEC21140_PCI_CBMA_REG_OFFSET 0x14 |
62 |
#define DEC21140_PCI_CFIT_REG_OFFSET 0x3C |
63 |
#define DEC21140_PCI_CFDA_REG_OFFSET 0x40 |
64 |
|
65 |
/* Number of CSR registers */ |
66 |
#define DEC21140_CSR_NR 16 |
67 |
|
68 |
/* CSR5: Status Register */ |
69 |
#define DEC21140_CSR5_TI 0x00000001 /* TX Interrupt */ |
70 |
#define DEC21140_CSR5_TPS 0x00000002 /* TX Process Stopped */ |
71 |
#define DEC21140_CSR5_TU 0x00000004 /* TX Buffer Unavailable */ |
72 |
#define DEC21140_CSR5_TJT 0x00000008 /* TX Jabber Timeout */ |
73 |
#define DEC21140_CSR5_UNF 0x00000020 /* TX Underflow */ |
74 |
#define DEC21140_CSR5_RI 0x00000040 /* RX Interrupt */ |
75 |
#define DEC21140_CSR5_RU 0x00000080 /* RX Buffer Unavailable */ |
76 |
#define DEC21140_CSR5_RPS 0x00000100 /* RX Process Stopped */ |
77 |
#define DEC21140_CSR5_RWT 0x00000200 /* RX Watchdog Timeout */ |
78 |
#define DEC21140_CSR5_GTE 0x00000800 /* Gen Purpose Timer Expired */ |
79 |
#define DEC21140_CSR5_FBE 0x00002000 /* Fatal Bus Error */ |
80 |
#define DEC21140_CSR5_AIS 0x00008000 /* Abnormal Interrupt Summary */ |
81 |
#define DEC21140_CSR5_NIS 0x00010000 /* Normal Interrupt Summary */ |
82 |
|
83 |
#define DEC21140_NIS_BITS \ |
84 |
(DEC21140_CSR5_TI|DEC21140_CSR5_RI|DEC21140_CSR5_TU) |
85 |
|
86 |
#define DEC21140_AIS_BITS \ |
87 |
(DEC21140_CSR5_TPS|DEC21140_CSR5_TJT|DEC21140_CSR5_UNF| \ |
88 |
DEC21140_CSR5_RU|DEC21140_CSR5_RPS|DEC21140_CSR5_RWT| \ |
89 |
DEC21140_CSR5_GTE|DEC21140_CSR5_FBE) |
90 |
|
91 |
#define DEC21140_CSR5_RS_SHIFT 17 |
92 |
#define DEC21140_CSR5_TS_SHIFT 20 |
93 |
|
94 |
/* CSR6: Operating Mode Register */ |
95 |
#define DEC21140_CSR6_START_RX 0x00000002 |
96 |
#define DEC21140_CSR6_START_TX 0x00002000 |
97 |
#define DEC21140_CSR6_PROMISC 0x00000040 |
98 |
|
99 |
/* CSR9: Serial EEPROM and MII */ |
100 |
#define DEC21140_CSR9_RX_BIT 0x00080000 |
101 |
#define DEC21140_CSR9_MII_READ 0x00040000 |
102 |
#define DEC21140_CSR9_TX_BIT 0x00020000 |
103 |
#define DEC21140_CSR9_MDC_CLOCK 0x00010000 |
104 |
#define DEC21140_CSR9_READ 0x00004000 |
105 |
#define DEC21140_CSR9_WRITE 0x00002000 |
106 |
|
107 |
/* Maximum packet size */ |
108 |
#define DEC21140_MAX_PKT_SIZE 2048 |
109 |
|
110 |
/* Send up to 32 packets in a TX ring scan pass */ |
111 |
#define DEC21140_TXRING_PASS_COUNT 32 |
112 |
|
113 |
/* Setup frame size */ |
114 |
#define DEC21140_SETUP_FRAME_SIZE 192 |
115 |
|
116 |
/* RX descriptors */ |
117 |
#define DEC21140_RXDESC_OWN 0x80000000 /* Ownership */ |
118 |
#define DEC21140_RXDESC_LS 0x00000100 /* Last Segment */ |
119 |
#define DEC21140_RXDESC_FS 0x00000200 /* First Segment */ |
120 |
#define DEC21140_RXDESC_MF 0x00000400 /* Multicast Frame */ |
121 |
#define DEC21140_RXDESC_DE 0x00004000 /* Descriptor Error */ |
122 |
#define DEC21140_RXDESC_RCH 0x01000000 /* Sec. Addr. Chained */ |
123 |
#define DEC21140_RXDESC_RER 0x02000000 /* Receive End of Ring */ |
124 |
#define DEC21140_RXDESC_FL_SHIFT 16 |
125 |
#define DEC21140_RXDESC_LEN_MASK 0x7ff |
126 |
|
127 |
/* TX descriptors */ |
128 |
#define DEC21140_TXDESC_OWN 0x80000000 /* Ownership */ |
129 |
#define DEC21140_TXDESC_TCH 0x01000000 /* Sec. Addr. Chained */ |
130 |
#define DEC21140_TXDESC_TER 0x02000000 /* Transmit End of Ring */ |
131 |
#define DEC21140_TXDESC_SET 0x08000000 /* Setup frame */ |
132 |
#define DEC21140_TXDESC_FS 0x20000000 /* First Segment */ |
133 |
#define DEC21140_TXDESC_LS 0x40000000 /* Last Segment */ |
134 |
#define DEC21140_TXDESC_IC 0x80000000 /* IRQ on completion */ |
135 |
|
136 |
#define DEC21140_TXDESC_LEN_MASK 0x7ff |
137 |
|
138 |
/* RX Descriptor */
struct rx_desc {
   /* RDES0..RDES3: status word, control/buffer sizes, buffer 1 address,
      buffer 2 (or chain) address — stored in guest order, see rxdesc_read() */
   m_uint32_t rdes[4];
};
142 |
|
143 |
/* TX Descriptor */
struct tx_desc {
   /* TDES0..TDES3: status word, control/buffer sizes, buffer 1 address,
      buffer 2 (or chain) address — stored in guest order, see txdesc_read() */
   m_uint32_t tdes[4];
};
147 |
|
148 |
/* DEC21140 Data */
struct dec21140_data {
   /* Instance name, used for logging */
   char *name;

   /* Physical addresses of current RX and TX descriptors */
   m_uint32_t rx_current;
   m_uint32_t tx_current;

   /* CSR registers (one 32-bit value per CSR) */
   m_uint32_t csr[DEC21140_CSR_NR];

   /* MII registers (bit-banged through CSR9, see mii_newbit()) */
   m_uint32_t mii_state;          /* position in the serial protocol state machine */
   m_uint32_t mii_phy;            /* 5-bit PHY address being shifted in */
   m_uint32_t mii_reg;            /* 5-bit register address being shifted in */
   m_uint32_t mii_data;           /* 16-bit data word being shifted in (writes) */
   m_uint32_t mii_outbits;        /* bits shifted out to the guest (reads) */
   m_uint16_t mii_regs[32][32];   /* register file: 32 PHYs x 32 registers */

   /* Ethernet unicast addresses, extracted from setup frames */
   n_eth_addr_t mac_addr[16];
   u_int mac_addr_count;

   /* Device information */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Virtual machine */
   vm_instance_t *vm;

   /* NetIO descriptor */
   netio_desc_t *nio;

   /* TX ring scanner task id */
   ptask_id_t tx_tid;
};
186 |
|
187 |
/* Log a dec21140 message */ |
188 |
#define DEC21140_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg) |
189 |
|
190 |
/* |
191 |
* ISL rewrite. |
192 |
* |
193 |
* See: http://www.cisco.com/en/US/tech/tk389/tk390/technologies_tech_note09186a0080094665.shtml |
194 |
*/ |
195 |
static void dec21140_isl_rewrite(m_uint8_t *pkt,m_uint32_t tot_len) |
196 |
{ |
197 |
static m_uint8_t isl_xaddr[N_ETH_ALEN] = { 0x01,0x00,0x0c,0x00,0x10,0x00 }; |
198 |
u_int real_offset,real_len; |
199 |
n_eth_hdr_t *hdr; |
200 |
m_uint32_t ifcs; |
201 |
|
202 |
hdr = (n_eth_hdr_t *)pkt; |
203 |
if (!memcmp(&hdr->daddr,isl_xaddr,N_ETH_ALEN)) { |
204 |
real_offset = N_ETH_HLEN + N_ISL_HDR_SIZE; |
205 |
real_len = ntohs(hdr->type); |
206 |
real_len -= (N_ISL_HDR_SIZE + 4); |
207 |
|
208 |
if ((real_offset+real_len) > tot_len) |
209 |
return; |
210 |
|
211 |
/* Rewrite the destination MAC address */ |
212 |
hdr->daddr.eth_addr_byte[4] = 0x00; |
213 |
|
214 |
/* Compute the internal FCS on the encapsulated packet */ |
215 |
ifcs = crc32_compute(0xFFFFFFFF,pkt+real_offset,real_len); |
216 |
pkt[tot_len-4] = ifcs & 0xff; |
217 |
pkt[tot_len-3] = (ifcs >> 8) & 0xff; |
218 |
pkt[tot_len-2] = (ifcs >> 16) & 0xff; |
219 |
pkt[tot_len-1] = ifcs >> 24; |
220 |
} |
221 |
} |
222 |
|
223 |
/* Check if a packet must be delivered to the emulated chip */ |
224 |
static inline int dec21140_handle_mac_addr(struct dec21140_data *d, |
225 |
m_uint8_t *pkt) |
226 |
{ |
227 |
n_eth_hdr_t *hdr = (n_eth_hdr_t *)pkt; |
228 |
int i; |
229 |
|
230 |
/* Accept systematically frames if we are running is promiscuous mode */ |
231 |
if (d->csr[6] & DEC21140_CSR6_PROMISC) |
232 |
return(TRUE); |
233 |
|
234 |
/* Accept systematically all multicast frames */ |
235 |
if (eth_addr_is_mcast(&hdr->daddr)) |
236 |
return(TRUE); |
237 |
|
238 |
/* Accept frames directly for us, discard others */ |
239 |
for(i=0;i<d->mac_addr_count;i++) |
240 |
if (!memcmp(&d->mac_addr[i],&hdr->daddr,N_ETH_ALEN)) |
241 |
return(TRUE); |
242 |
|
243 |
return(FALSE); |
244 |
} |
245 |
|
246 |
/* Update MAC addresses */ |
247 |
static void dec21140_update_mac_addr(struct dec21140_data *d, |
248 |
u_char *setup_frame) |
249 |
{ |
250 |
n_eth_addr_t addr; |
251 |
int i,nb_addr,addr_size; |
252 |
|
253 |
d->mac_addr_count = 0; |
254 |
|
255 |
addr_size = N_ETH_ALEN * 2; |
256 |
nb_addr = DEC21140_SETUP_FRAME_SIZE / addr_size; |
257 |
|
258 |
for(i=0;i<nb_addr;i++) { |
259 |
addr.eth_addr_byte[0] = setup_frame[(i * addr_size) + 0]; |
260 |
addr.eth_addr_byte[1] = setup_frame[(i * addr_size) + 1]; |
261 |
addr.eth_addr_byte[2] = setup_frame[(i * addr_size) + 4]; |
262 |
addr.eth_addr_byte[3] = setup_frame[(i * addr_size) + 5]; |
263 |
addr.eth_addr_byte[4] = setup_frame[(i * addr_size) + 8]; |
264 |
addr.eth_addr_byte[5] = setup_frame[(i * addr_size) + 9]; |
265 |
|
266 |
if (!eth_addr_is_mcast(&addr)) { |
267 |
memcpy(&d->mac_addr[d->mac_addr_count],&addr,N_ETH_ALEN); |
268 |
DEC21140_LOG(d,"unicast MAC address: " |
269 |
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", |
270 |
addr.eth_addr_byte[0],addr.eth_addr_byte[1], |
271 |
addr.eth_addr_byte[2],addr.eth_addr_byte[3], |
272 |
addr.eth_addr_byte[4],addr.eth_addr_byte[5]); |
273 |
d->mac_addr_count++; |
274 |
} |
275 |
} |
276 |
} |
277 |
|
278 |
/* Get a PCI register name */ |
279 |
static char *pci_cfgreg_name(int reg) |
280 |
{ |
281 |
static char *name[] = { |
282 |
"FID", "FCS", "FRV", "FLT", "BIO", "BMA", "?", "?", |
283 |
"?", "?", "?", "?", "?", "?", "?", "FIT", "FDA" |
284 |
}; |
285 |
|
286 |
return((reg>=0) && (reg<=DEC21140_CSR_NR*4) && ((reg&3)==0) ? |
287 |
name[reg>>2] : "?"); |
288 |
} |
289 |
|
290 |
/* |
291 |
* read from register of DP83840A PHY |
292 |
*/ |
293 |
static m_uint16_t mii_reg_read(struct dec21140_data *d) |
294 |
{ |
295 |
#if DEBUG_MII_REGS |
296 |
DEC21140_LOG(d,"MII PHY read %d reg %d\n",d->mii_phy,d->mii_reg); |
297 |
#endif |
298 |
|
299 |
/* |
300 |
* if it's BASIC MODE STATUS REGISTER (BMSR) at address 0x1 |
301 |
* then tell them that "Link Status" is up and no troubles. |
302 |
*/ |
303 |
if (d->mii_reg == 1) { |
304 |
if (d->nio != NULL) |
305 |
return(0x04); |
306 |
else |
307 |
return(0x00); |
308 |
} |
309 |
|
310 |
return(d->mii_regs[d->mii_phy][d->mii_reg]); |
311 |
} |
312 |
|
313 |
/*
 * write to register of DP83840A PHY.
 *
 * Stores the 16-bit word accumulated by mii_newbit() into the emulated
 * PHY register file.
 */
static void mii_reg_write(struct dec21140_data *d)
{
#if DEBUG_MII_REGS
   DEC21140_LOG(d,"MII PHY write %d reg %d value %04x\n",
                d->mii_phy,d->mii_reg,d->mii_data);
#endif
   /* mii_phy/mii_reg are built from 5 shifted-in bits each, so they are
      always < 32; assert to guard against state-machine regressions */
   assert(d->mii_phy < 32);
   assert(d->mii_reg < 32);
   d->mii_regs[d->mii_phy][d->mii_reg] = d->mii_data;
}
326 |
|
327 |
/*
 * process new bit sent by IOS to PHY.
 *
 * Implements the MII serial management protocol, one bit per call:
 * start/opcode bits, then 5 bits of PHY address, 5 bits of register
 * address, and either 16 bits shifted out (read, via mii_outbits) or
 * 16 bits shifted in (write, via mii_data). d->mii_state encodes the
 * current position in this sequence:
 *   0-4   : start + opcode detection (read goes to 10, write to 20)
 *   10-14 : read,  PHY address bits     15-19 : read,  register bits
 *   20-24 : write, PHY address bits     25-29 : write, register bits
 *   30-31 : write path, two extra bits (presumably the MII turnaround
 *           bits — TODO confirm against IEEE 802.3 clause 22)
 *   32-47 : write, 16 data bits
 */
static void mii_newbit(struct dec21140_data *d,int newbit)
{
#if DEBUG_MII_REGS
   DEC21140_LOG(d,"MII state was %d\n",d->mii_state);
#endif

   switch (d->mii_state) {
      case 0: /* init */
         d->mii_state = newbit ? 0 : 1;
         d->mii_phy = 0;
         d->mii_reg = 0;
         d->mii_data = 0;
         break;

      case 1: /* already got 0 */
         d->mii_state = newbit ? 2 : 0;
         break;

      case 2: /* already got attention */
         d->mii_state = newbit ? 3 : 4;
         break;

      case 3: /* probably it's read */
         d->mii_state = newbit ? 0 : 10;
         break;

      case 4: /* probably it's write */
         d->mii_state = newbit ? 20 : 0;
         break;

      case 10: case 11: case 12: case 13: case 14:
      case 20: case 21: case 22: case 23: case 24:
         /* read or write state, read 5 bits of phy */
         d->mii_phy <<= 1;
         d->mii_phy |= newbit;
         d->mii_state++;
         break;

      case 15: case 16: case 17: case 18: case 19:
      case 25: case 26: case 27: case 28: case 29:
         /* read or write state, read 5 bits of reg */
         d->mii_reg <<= 1;
         d->mii_reg |= newbit;
         d->mii_state++;

         /* state 20 is only reached here by incrementing from 19,
            i.e. the read path has now collected phy + reg */
         if (d->mii_state == 20) {
            /* read state, got everything */
            d->mii_outbits = mii_reg_read (d) << 15; /* first bit will
                                                      * be thrown away!
                                                      */
            d->mii_state = 0;
         }

         break;

      case 30: /* write state, read first waiting bit */
         d->mii_state = newbit ? 31 : 0;
         break;

      case 31: /* write state, read second waiting bit */
         d->mii_state = newbit ? 0 : 32;
         break;

      case 32: case 33: case 34: case 35: case 36: case 37: case 38: case 39:
      case 40: case 41: case 42: case 43: case 44: case 45: case 46: case 47:
         /* write state, read 16 bits of data */
         d->mii_data <<= 1;
         d->mii_data |= newbit;
         d->mii_state++;

         if (d->mii_state == 48) {
            /* write state, got everything */
            mii_reg_write (d);
            d->mii_state = 0;
         }

         break;
      default:
         DEC21140_LOG(d,"MII impossible state\n");
   }

#if DEBUG_MII_REGS
   DEC21140_LOG(d,"MII state now %d\n",d->mii_state);
#endif
}
415 |
|
416 |
/* Update the interrupt status */ |
417 |
static inline void dev_dec21140_update_irq_status(struct dec21140_data *d) |
418 |
{ |
419 |
int trigger = FALSE; |
420 |
m_uint32_t csr5; |
421 |
|
422 |
/* Work on a temporary copy of csr5 */ |
423 |
csr5 = d->csr[5]; |
424 |
|
425 |
/* Compute Interrupt Summary */ |
426 |
csr5 &= ~(DEC21140_CSR5_AIS|DEC21140_CSR5_NIS); |
427 |
|
428 |
if (csr5 & DEC21140_NIS_BITS) { |
429 |
csr5 |= DEC21140_CSR5_NIS; |
430 |
trigger = TRUE; |
431 |
} |
432 |
|
433 |
if (csr5 & DEC21140_AIS_BITS) { |
434 |
csr5 |= DEC21140_CSR5_AIS; |
435 |
trigger = TRUE; |
436 |
} |
437 |
|
438 |
d->csr[5] = csr5; |
439 |
|
440 |
if (trigger) |
441 |
pci_dev_trigger_irq(d->vm,d->pci_dev); |
442 |
else |
443 |
pci_dev_clear_irq(d->vm,d->pci_dev); |
444 |
} |
445 |
|
446 |
/*
 * dev_dec21140_access()
 *
 * Memory-mapped access handler for the chip CSRs. Each CSR occupies an
 * 8-byte slot in the mapped window (offset = CSR number * 8); unaligned
 * or out-of-range accesses are logged and ignored.
 */
void *dev_dec21140_access(cpu_gen_t *cpu,struct vdevice *dev,
                          m_uint32_t offset,u_int op_size,u_int op_type,
                          m_uint64_t *data)
{
   struct dec21140_data *d = dev->priv_data;
   u_int reg;

   /* which CSR register ? */
   reg = offset / 8;

   if ((reg >= DEC21140_CSR_NR) || (offset % 8) != 0) {
      cpu_log(cpu,d->name,"invalid access to offset 0x%x\n",offset);
      return NULL;
   }

   if (op_type == MTS_READ) {
#if DEBUG_CSR_REGS
      cpu_log(cpu,d->name,"read CSR%u value 0x%x\n",reg,d->csr[reg]);
#endif
      switch(reg) {
         case 5:
            /* Dynamically construct CSR5 (status register) */
            *data = 0;

            /* report the RX/TX process state fields as "running" when
               the corresponding start bit is set in CSR6 */
            if (d->csr[6] & DEC21140_CSR6_START_RX)
               *data |= 0x03 << DEC21140_CSR5_RS_SHIFT;

            if (d->csr[6] & DEC21140_CSR6_START_TX)
               *data |= 0x03 << DEC21140_CSR5_TS_SHIFT;

            *data |= d->csr[5];
            break;

         case 8:
            /* CSR8 is cleared when read (missed frame counter) */
            d->csr[reg] = 0;
            *data = 0;
            break;

         default:
            *data = d->csr[reg];
      }
   } else {
#if DEBUG_CSR_REGS
      cpu_log(cpu,d->name,"write CSR%u value 0x%x\n",reg,(m_uint32_t)*data);
#endif
      switch(reg) {
         case 3:
            /* CSR3: RX ring base; also resets the current RX pointer */
            d->csr[reg] = *data;
            d->rx_current = d->csr[reg];
            break;
         case 4:
            /* CSR4: TX ring base; also resets the current TX pointer */
            d->csr[reg] = *data;
            d->tx_current = d->csr[reg];
            break;
         case 5:
            /* CSR5 status bits are write-1-to-clear; recompute the IRQ
               line after clearing */
            d->csr[reg] &= ~(*data);
            dev_dec21140_update_irq_status(d);
            break;
         case 9:
            /*
             * CSR9, probably they want to mess with MII PHY
             * The protocol to PHY is like serial over one bit.
             * We will ignore clock 0 of read or write.
             *
             * This whole code is needed only to tell IOS that "Link Status"
             * bit in BMSR register of DP83840A PHY is set.
             *
             * Also it makes "sh contr f0/0" happy.
             */
            d->csr[reg] = *data;

            if ((*data & ~DEC21140_CSR9_TX_BIT) == (DEC21140_CSR9_MII_READ|
                DEC21140_CSR9_READ|DEC21140_CSR9_MDC_CLOCK)) {
               /*
                * read, pop one bit from mii_outbits
                */
               if (d->mii_outbits & (1<<31))
                  d->csr[9] |= DEC21140_CSR9_RX_BIT;
               else
                  d->csr[9] &= ~DEC21140_CSR9_RX_BIT;
               d->mii_outbits <<= 1;
            } else if((*data&~DEC21140_CSR9_TX_BIT) ==
                      (DEC21140_CSR9_WRITE|DEC21140_CSR9_MDC_CLOCK)) {
               /*
                * write, we've got input, do state machine
                */
               mii_newbit(d,(*data&DEC21140_CSR9_TX_BIT) ? 1 : 0);
            }
            break;

         default:
            d->csr[reg] = *data;
      }
   }

   return NULL;
}
547 |
|
548 |
/* |
549 |
* Get the address of the next RX descriptor. |
550 |
*/ |
551 |
static m_uint32_t rxdesc_get_next(struct dec21140_data *d,m_uint32_t rxd_addr, |
552 |
struct rx_desc *rxd) |
553 |
{ |
554 |
m_uint32_t nrxd_addr; |
555 |
|
556 |
/* go to the next descriptor */ |
557 |
if (rxd->rdes[1] & DEC21140_RXDESC_RER) |
558 |
nrxd_addr = d->csr[3]; |
559 |
else { |
560 |
if (rxd->rdes[1] & DEC21140_RXDESC_RCH) |
561 |
nrxd_addr = rxd->rdes[3]; |
562 |
else |
563 |
nrxd_addr = rxd_addr + sizeof(struct rx_desc); |
564 |
} |
565 |
|
566 |
return(nrxd_addr); |
567 |
} |
568 |
|
569 |
/* Read a RX descriptor */ |
570 |
static void rxdesc_read(struct dec21140_data *d,m_uint32_t rxd_addr, |
571 |
struct rx_desc *rxd) |
572 |
{ |
573 |
/* get the next descriptor from VM physical RAM */ |
574 |
physmem_copy_from_vm(d->vm,rxd,rxd_addr,sizeof(struct rx_desc)); |
575 |
|
576 |
/* byte-swapping */ |
577 |
rxd->rdes[0] = vmtoh32(rxd->rdes[0]); |
578 |
rxd->rdes[1] = vmtoh32(rxd->rdes[1]); |
579 |
rxd->rdes[2] = vmtoh32(rxd->rdes[2]); |
580 |
rxd->rdes[3] = vmtoh32(rxd->rdes[3]); |
581 |
} |
582 |
|
583 |
/*
 * Try to acquire the specified RX descriptor. Returns TRUE if we have it.
 * It assumes that the byte-swapping is done.
 */
static inline int rxdesc_acquire(m_uint32_t rdes0)
{
   /* OWN bit set means the chip (i.e. the emulation) owns the descriptor */
   return(rdes0 & DEC21140_RXDESC_OWN);
}
591 |
|
592 |
/* Put a packet in buffer(s) of a descriptor */ |
593 |
static void rxdesc_put_pkt(struct dec21140_data *d,struct rx_desc *rxd, |
594 |
u_char **pkt,ssize_t *pkt_len) |
595 |
{ |
596 |
ssize_t len1,len2,cp_len; |
597 |
|
598 |
/* get rbs1 and rbs2 */ |
599 |
len1 = rxd->rdes[1] & DEC21140_RXDESC_LEN_MASK; |
600 |
len2 = (rxd->rdes[1] >> 10) & DEC21140_RXDESC_LEN_MASK; |
601 |
|
602 |
/* try with buffer #1 */ |
603 |
if (len1 != 0) |
604 |
{ |
605 |
/* compute the data length to copy */ |
606 |
cp_len = m_min(len1,*pkt_len); |
607 |
|
608 |
/* copy packet data to the VM physical RAM */ |
609 |
physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[2],cp_len); |
610 |
|
611 |
*pkt += cp_len; |
612 |
*pkt_len -= cp_len; |
613 |
} |
614 |
|
615 |
/* try with buffer #2 */ |
616 |
if ((len2 != 0) && !(rxd->rdes[1] & DEC21140_RXDESC_RCH)) |
617 |
{ |
618 |
/* compute the data length to copy */ |
619 |
cp_len = m_min(len2,*pkt_len); |
620 |
|
621 |
/* copy packet data to the VM physical RAM */ |
622 |
physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[3],cp_len); |
623 |
|
624 |
*pkt += cp_len; |
625 |
*pkt_len -= cp_len; |
626 |
} |
627 |
} |
628 |
|
629 |
/*
 * Put a packet in the RX ring of the DEC21140.
 *
 * Walks the RX descriptor ring, spreading the packet over as many
 * descriptors as needed, then marks the first descriptor (FS) and raises
 * the RX interrupt. Returns FALSE if the first descriptor is not owned
 * by the chip.
 *
 * NOTE(review): tot_len is captured before pkt_len is truncated to
 * DEC21140_MAX_PKT_SIZE, so an oversized packet still walks the ring
 * with its full length while the frame-length field uses the truncated
 * value — verify this is intentional.
 */
static int dev_dec21140_receive_pkt(struct dec21140_data *d,
                                    u_char *pkt,ssize_t pkt_len)
{
   m_uint32_t rx_start,rxdn_addr,rxdn_rdes0;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   n_eth_hdr_t *hdr;
   int i;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,DEC21140_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rxdesc_read(d,d->rx_current,&rxd0);

   /* We must have the first descriptor... */
   if (!rxdesc_acquire(rxd0.rdes[0]))
      return(FALSE);

   /* Remember the first RX descriptor address */
   rx_start = d->rx_current;

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* Put data into the descriptor buffers */
      rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Get address of the next descriptor */
      rxdn_addr = rxdesc_get_next(d,d->rx_current,rxdc);

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         /* last segment: store the frame length in RDES0
            (+4 presumably accounts for the FCS — TODO confirm against
            the 21140 manual) */
         rxdc->rdes[0] = DEC21140_RXDESC_LS;
         rxdc->rdes[0] |= (pkt_len + 4) << DEC21140_RXDESC_FL_SHIFT;

         /* if this is a multicast frame, set the appropriate bit */
         hdr = (n_eth_hdr_t *)pkt;
         if (eth_addr_is_mcast(&hdr->daddr))
            rxdc->rdes[0] |= DEC21140_RXDESC_MF;

         /* the first descriptor is written back at the end instead */
         if (i != 0)
            physmem_copy_u32_to_vm(d->vm,d->rx_current,rxdc->rdes[0]);

         d->rx_current = rxdn_addr;
         break;
      }

      /* Get status of the next descriptor to see if we can acquire it */
      rxdn_rdes0 = physmem_copy_u32_from_vm(d->vm,rxdn_addr);

      /* flag a Descriptor Error if the guest has no more buffers */
      if (!rxdesc_acquire(rxdn_rdes0))
         rxdc->rdes[0] = DEC21140_RXDESC_LS | DEC21140_RXDESC_DE;
      else
         rxdc->rdes[0] = 0; /* ok, no special flag */

      /* Update the new status (only if we are not on the first desc) */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,d->rx_current,rxdc->rdes[0]);

      /* Update the RX pointer */
      d->rx_current = rxdn_addr;

      /* stop here if we flagged an error above */
      if (rxdc->rdes[0] != 0)
         break;

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rxdn_addr,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the first RX descriptor (clears its OWN bit as a side effect
      of overwriting RDES0) */
   rxd0.rdes[0] |= DEC21140_RXDESC_FS;
   physmem_copy_u32_to_vm(d->vm,rx_start,rxd0.rdes[0]);

   /* Indicate that we have a frame ready */
   d->csr[5] |= DEC21140_CSR5_RI;
   dev_dec21140_update_irq_status(d);
   return(TRUE);
}
712 |
|
713 |
/* Handle the DEC21140 RX ring */ |
714 |
static int dev_dec21140_handle_rxring(netio_desc_t *nio, |
715 |
u_char *pkt,ssize_t pkt_len, |
716 |
struct dec21140_data *d) |
717 |
{ |
718 |
/* |
719 |
* Don't start receive if the RX ring address has not been set |
720 |
* and if the SR bit in CSR6 is not set yet. |
721 |
*/ |
722 |
if ((d->csr[3] == 0) || !(d->csr[6] & DEC21140_CSR6_START_RX)) |
723 |
return(FALSE); |
724 |
|
725 |
#if DEBUG_RECEIVE |
726 |
DEC21140_LOG(d,"receiving a packet of %d bytes\n",pkt_len); |
727 |
mem_dump(log_file,pkt,pkt_len); |
728 |
#endif |
729 |
|
730 |
/* |
731 |
* Receive only multicast/broadcast trafic + unicast traffic |
732 |
* for this virtual machine. |
733 |
*/ |
734 |
if (dec21140_handle_mac_addr(d,pkt)) |
735 |
return(dev_dec21140_receive_pkt(d,pkt,pkt_len)); |
736 |
|
737 |
return(FALSE); |
738 |
} |
739 |
|
740 |
/* Read a TX descriptor */ |
741 |
static void txdesc_read(struct dec21140_data *d,m_uint32_t txd_addr, |
742 |
struct tx_desc *txd) |
743 |
{ |
744 |
/* get the descriptor from VM physical RAM */ |
745 |
physmem_copy_from_vm(d->vm,txd,txd_addr,sizeof(struct tx_desc)); |
746 |
|
747 |
/* byte-swapping */ |
748 |
txd->tdes[0] = vmtoh32(txd->tdes[0]); |
749 |
txd->tdes[1] = vmtoh32(txd->tdes[1]); |
750 |
txd->tdes[2] = vmtoh32(txd->tdes[2]); |
751 |
txd->tdes[3] = vmtoh32(txd->tdes[3]); |
752 |
} |
753 |
|
754 |
/* Set the address of the next TX descriptor */ |
755 |
static void txdesc_set_next(struct dec21140_data *d,struct tx_desc *txd) |
756 |
{ |
757 |
if (txd->tdes[1] & DEC21140_TXDESC_TER) |
758 |
d->tx_current = d->csr[4]; |
759 |
else { |
760 |
if (txd->tdes[1] & DEC21140_TXDESC_TCH) |
761 |
d->tx_current = txd->tdes[3]; |
762 |
else |
763 |
d->tx_current += sizeof(struct tx_desc); |
764 |
} |
765 |
} |
766 |
|
767 |
/*
 * Handle the TX ring (single packet).
 *
 * Gathers one frame (possibly spread over several descriptors) from the
 * TX ring into a local buffer, performs the optional ISL rewrite and
 * sends it on the NIO. Setup frames are intercepted to extract the
 * unicast MAC address filter. Returns FALSE when there is nothing to
 * transmit (ring not configured, ST bit clear, or descriptor not owned).
 *
 * NOTE(review): clen is accumulated into pkt[] without bounding tot_len
 * against sizeof(pkt); a guest programming descriptor lengths summing
 * past DEC21140_MAX_PKT_SIZE could overflow this stack buffer — verify.
 */
static int dev_dec21140_handle_txring_single(struct dec21140_data *d)
{
   u_char pkt[DEC21140_MAX_PKT_SIZE],*pkt_ptr;
   u_char setup_frame[DEC21140_SETUP_FRAME_SIZE];
   m_uint32_t tx_start,len1,len2,clen,tot_len;
   struct tx_desc txd0,ctxd,*ptxd;
   int done = FALSE;

   /*
    * Don't start transmit if the txring address has not been set
    * and if the ST bit in CSR6 is not set yet.
    */
   if ((d->csr[4] == 0) || (!(d->csr[6] & DEC21140_CSR6_START_TX)))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = d->tx_current;
   ptxd = &txd0;
   txdesc_read(d,tx_start,ptxd);

   /* If we don't own the first descriptor, we cannot transmit */
   if (!(txd0.tdes[0] & DEC21140_TXDESC_OWN))
      return(FALSE);

   /*
    * Ignore setup frames (clear the own bit and skip).
    * We extract unicast MAC addresses to allow only appropriate traffic
    * to pass.
    */
   if (!(txd0.tdes[1] & (DEC21140_TXDESC_FS|DEC21140_TXDESC_LS)))
   {
      /* NOTE(review): len1/len2 are dead stores in this branch */
      len1 = ptxd->tdes[1] & DEC21140_TXDESC_LEN_MASK;
      len2 = (ptxd->tdes[1] >> 11) & DEC21140_TXDESC_LEN_MASK;

      if (txd0.tdes[1] & DEC21140_TXDESC_SET) {
         /* copy the setup frame and rebuild the MAC address filter */
         physmem_copy_from_vm(d->vm,setup_frame,ptxd->tdes[2],
                              sizeof(setup_frame));
         dec21140_update_mac_addr(d,setup_frame);
      }

      txdesc_set_next(d,ptxd);
      goto clear_txd0_own_bit;
   }

#if DEBUG_TRANSMIT
   DEC21140_LOG(d,"dec21140_handle_txring: 1st desc: "
                "tdes[0]=0x%x, tdes[1]=0x%x, tdes[2]=0x%x, tdes[3]=0x%x\n",
                ptxd->tdes[0],ptxd->tdes[1],ptxd->tdes[2],ptxd->tdes[3]);
#endif

   /* Empty packet for now */
   pkt_ptr = pkt;
   tot_len = 0;

   do {
#if DEBUG_TRANSMIT
      DEC21140_LOG(d,"dec21140_handle_txring: loop: "
                   "tdes[0]=0x%x, tdes[1]=0x%x, tdes[2]=0x%x, tdes[3]=0x%x\n",
                   ptxd->tdes[0],ptxd->tdes[1],ptxd->tdes[2],ptxd->tdes[3]);
#endif

      /* a mid-frame descriptor that the guest still owns aborts the pass */
      if (!(ptxd->tdes[0] & DEC21140_TXDESC_OWN)) {
         DEC21140_LOG(d,"dec21140_handle_txring: descriptor not owned!\n");
         return(FALSE);
      }

      /* TBS1 (bits 10:0) and TBS2 (bits 21:11) of TDES1 */
      len1 = ptxd->tdes[1] & DEC21140_TXDESC_LEN_MASK;
      len2 = (ptxd->tdes[1] >> 11) & DEC21140_TXDESC_LEN_MASK;
      clen = len1 + len2;

      /* Be sure that we have either len1 or len2 not null */
      if (clen != 0)
      {
         if (len1 != 0)
            physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tdes[2],len1);

         /* TDES3 is a chain pointer, not a buffer, when TCH is set */
         if ((len2 != 0) && !(ptxd->tdes[1] & DEC21140_TXDESC_TCH))
            physmem_copy_from_vm(d->vm,pkt_ptr+len1,ptxd->tdes[3],len2);
      }

      pkt_ptr += clen;
      tot_len += clen;

      /* Clear the OWN bit if this is not the first descriptor */
      if (!(ptxd->tdes[1] & DEC21140_TXDESC_FS))
         physmem_copy_u32_to_vm(d->vm,d->tx_current,0);

      /* Go to the next descriptor */
      txdesc_set_next(d,ptxd);

      /*
       * Copy the next txring descriptor (ignore setup frames that
       * have both FS and LS bit cleared).
       */
      if (!(ptxd->tdes[1] & (DEC21140_TXDESC_LS|DEC21140_TXDESC_SET))) {
         txdesc_read(d,d->tx_current,&ctxd);
         ptxd = &ctxd;
      } else
         done = TRUE;
   }while(!done);

   if (tot_len != 0) {
#if DEBUG_TRANSMIT
      DEC21140_LOG(d,"sending packet of %u bytes\n",tot_len);
      mem_dump(log_file,pkt,tot_len);
#endif
      /* rewrite ISL header if required */
      dec21140_isl_rewrite(pkt,tot_len);

      /* send it on wire */
      netio_send(d->nio,pkt,tot_len);
   }

 clear_txd0_own_bit:
   /* Clear the OWN flag of the first descriptor */
   physmem_copy_u32_to_vm(d->vm,tx_start,0);

   /* Interrupt on completion ? */
   if (txd0.tdes[1] & DEC21140_TXDESC_IC) {
      d->csr[5] |= DEC21140_CSR5_TI;
      dev_dec21140_update_irq_status(d);
   }

   return(TRUE);
}
893 |
|
894 |
/* Handle the TX ring */ |
895 |
static int dev_dec21140_handle_txring(struct dec21140_data *d) |
896 |
{ |
897 |
int i; |
898 |
|
899 |
for(i=0;i<DEC21140_TXRING_PASS_COUNT;i++) |
900 |
if (!dev_dec21140_handle_txring_single(d)) |
901 |
break; |
902 |
|
903 |
return(TRUE); |
904 |
} |
905 |
|
906 |
/* |
907 |
* pci_dec21140_read() |
908 |
* |
909 |
* Read a PCI register. |
910 |
*/ |
911 |
static m_uint32_t pci_dec21140_read(cpu_gen_t *cpu,struct pci_device *dev, |
912 |
int reg) |
913 |
{ |
914 |
struct dec21140_data *d = dev->priv_data; |
915 |
|
916 |
#if DEBUG_PCI_REGS |
917 |
DEC21140_LOG(d,"read C%s(%u)\n",pci_cfgreg_name(reg),reg); |
918 |
#endif |
919 |
|
920 |
switch (reg) { |
921 |
case DEC21140_PCI_CFID_REG_OFFSET: |
922 |
return(0x00091011); |
923 |
case DEC21140_PCI_CFRV_REG_OFFSET: |
924 |
return(0x02000011); |
925 |
case DEC21140_PCI_CBMA_REG_OFFSET: |
926 |
return(d->dev->phys_addr); |
927 |
default: |
928 |
return(0); |
929 |
} |
930 |
} |
931 |
|
932 |
/* |
933 |
* pci_dec21140_write() |
934 |
* |
935 |
* Write a PCI register. |
936 |
*/ |
937 |
static void pci_dec21140_write(cpu_gen_t *cpu,struct pci_device *dev, |
938 |
int reg,m_uint32_t value) |
939 |
{ |
940 |
struct dec21140_data *d = dev->priv_data; |
941 |
|
942 |
#if DEBUG_PCI_REGS |
943 |
DEC21140_LOG(d,"write C%s(%u) value 0x%x\n",pci_cfgreg_name(reg),reg,value); |
944 |
#endif |
945 |
|
946 |
switch(reg) { |
947 |
case DEC21140_PCI_CBMA_REG_OFFSET: |
948 |
vm_map_device(cpu->vm,d->dev,(m_uint64_t)value); |
949 |
DEC21140_LOG(d,"registers are mapped at 0x%x\n",value); |
950 |
break; |
951 |
} |
952 |
} |
953 |
|
954 |
/*
 * dev_dec21140_init()
 *
 * Generic DEC21140 initialization code.
 *
 * Allocates the device state, registers the chip on the given PCI bus and
 * creates the memory-mapped device for the CSRs. Returns NULL on failure;
 * the goto-based cleanup releases whatever was acquired before the error.
 */
struct dec21140_data *dev_dec21140_init(vm_instance_t *vm,char *name,
                                        struct pci_bus *pci_bus,int pci_device,
                                        int irq)
{
   struct dec21140_data *d;
   struct pci_device *pci_dev;
   struct vdevice *dev;

   /* Allocate the private data structure for DEC21140 */
   if (!(d = malloc(sizeof(*d)))) {
      fprintf(stderr,"%s (DEC21140): out of memory\n",name);
      return NULL;
   }

   memset(d,0,sizeof(*d));

   /* Add as PCI device */
   pci_dev = pci_dev_add(pci_bus,name,
                         DEC21140_PCI_VENDOR_ID,DEC21140_PCI_PRODUCT_ID,
                         pci_device,0,irq,
                         d,NULL,pci_dec21140_read,pci_dec21140_write);

   if (!pci_dev) {
      fprintf(stderr,"%s (DEC21140): unable to create PCI device.\n",name);
      goto err_pci_dev;
   }

   /* Create the device itself */
   if (!(dev = dev_create(name))) {
      fprintf(stderr,"%s (DEC21140): unable to create device.\n",name);
      goto err_dev;
   }

   d->name = name;
   d->vm = vm;
   d->pci_dev = pci_dev;
   d->dev = dev;

   /* Basic register setup */
   d->csr[0] = 0xfff80000;
   d->csr[5] = 0xfc000000;
   d->csr[8] = 0xfffe0000;

   /* phys_addr stays 0 until the guest writes the PCI CBMA register
      (see pci_dec21140_write) */
   dev->phys_addr = 0;
   dev->phys_len = 0x20000;
   dev->handler = dev_dec21140_access;
   dev->priv_data = d;
   return(d);

 err_dev:
   pci_dev_remove(pci_dev);
 err_pci_dev:
   free(d);
   return NULL;
}
1014 |
|
1015 |
/* Remove a DEC21140 device */ |
1016 |
void dev_dec21140_remove(struct dec21140_data *d) |
1017 |
{ |
1018 |
if (d != NULL) { |
1019 |
pci_dev_remove(d->pci_dev); |
1020 |
vm_unbind_device(d->vm,d->dev); |
1021 |
cpu_group_rebuild_mts(d->vm->cpu_group); |
1022 |
free(d->dev); |
1023 |
free(d); |
1024 |
} |
1025 |
} |
1026 |
|
1027 |
/* Bind a NIO to DEC21140 device */ |
1028 |
int dev_dec21140_set_nio(struct dec21140_data *d,netio_desc_t *nio) |
1029 |
{ |
1030 |
/* check that a NIO is not already bound */ |
1031 |
if (d->nio != NULL) |
1032 |
return(-1); |
1033 |
|
1034 |
d->nio = nio; |
1035 |
d->tx_tid = ptask_add((ptask_callback)dev_dec21140_handle_txring,d,NULL); |
1036 |
netio_rxl_add(nio,(netio_rx_handler_t)dev_dec21140_handle_rxring,d,NULL); |
1037 |
return(0); |
1038 |
} |
1039 |
|
1040 |
/* Unbind a NIO from a DEC21140 device */ |
1041 |
void dev_dec21140_unset_nio(struct dec21140_data *d) |
1042 |
{ |
1043 |
if (d->nio != NULL) { |
1044 |
ptask_remove(d->tx_tid); |
1045 |
netio_rxl_remove(d->nio); |
1046 |
d->nio = NULL; |
1047 |
} |
1048 |
} |