1 |
/* |
2 |
* Cisco C7200 (Predator) AMD Am79c971 Module. |
3 |
* Copyright (C) 2006 Christophe Fillot. All rights reserved. |
4 |
* |
5 |
* AMD Am79c971 FastEthernet chip emulation. |
6 |
*/ |
7 |
|
8 |
#include <stdio.h> |
9 |
#include <stdlib.h> |
10 |
#include <string.h> |
11 |
#include <stdarg.h> |
12 |
#include <unistd.h> |
13 |
#include <time.h> |
14 |
#include <errno.h> |
15 |
#include <assert.h> |
16 |
|
17 |
#include "utils.h" |
18 |
#include "mips64.h" |
19 |
#include "dynamips.h" |
20 |
#include "memory.h" |
21 |
#include "device.h" |
22 |
#include "net.h" |
23 |
#include "net_io.h" |
24 |
#include "ptask.h" |
25 |
#include "dev_am79c971.h" |
26 |
|
27 |
/* Debugging flags (set to 1 to enable the corresponding traces) */
#define DEBUG_CSR_REGS   0
#define DEBUG_BCR_REGS   0
#define DEBUG_PCI_REGS   0
#define DEBUG_ACCESS     0
#define DEBUG_TRANSMIT   0
#define DEBUG_RECEIVE    0
#define DEBUG_UNKNOWN    0

/* AMD Am79c971 PCI vendor/product codes */
#define AM79C971_PCI_VENDOR_ID    0x1022
#define AM79C971_PCI_PRODUCT_ID   0x2000

/* Maximum packet size */
#define AM79C971_MAX_PKT_SIZE     2048

/* Send up to 16 packets in a TX ring scan pass */
#define AM79C971_TXRING_PASS_COUNT  16
45 |
|
46 |
/* CSR0: Controller Status and Control Register */
#define AM79C971_CSR0_ERR     0x00008000  /* Error (BABL,CERR,MISS,MERR) */
#define AM79C971_CSR0_BABL    0x00004000  /* Transmitter Timeout Error */
#define AM79C971_CSR0_CERR    0x00002000  /* Collision Error */
#define AM79C971_CSR0_MISS    0x00001000  /* Missed Frame */
#define AM79C971_CSR0_MERR    0x00000800  /* Memory Error */
#define AM79C971_CSR0_RINT    0x00000400  /* Receive Interrupt */
#define AM79C971_CSR0_TINT    0x00000200  /* Transmit Interrupt */
#define AM79C971_CSR0_IDON    0x00000100  /* Initialization Done */
#define AM79C971_CSR0_INTR    0x00000080  /* Interrupt Flag */
#define AM79C971_CSR0_IENA    0x00000040  /* Interrupt Enable */
#define AM79C971_CSR0_RXON    0x00000020  /* Receive On */
#define AM79C971_CSR0_TXON    0x00000010  /* Transmit On */
#define AM79C971_CSR0_TDMD    0x00000008  /* Transmit Demand */
#define AM79C971_CSR0_STOP    0x00000004  /* Stop */
#define AM79C971_CSR0_STRT    0x00000002  /* Start */
#define AM79C971_CSR0_INIT    0x00000001  /* Initialization */

/* CSR3: Interrupt Masks and Deferral Control */
#define AM79C971_CSR3_BABLM   0x00004000  /* Transmit. Timeout Int. Mask */
#define AM79C971_CSR3_CERRM   0x00002000  /* Collision Error Int. Mask */
#define AM79C971_CSR3_MISSM   0x00001000  /* Missed Frame Interrupt Mask */
#define AM79C971_CSR3_MERRM   0x00000800  /* Memory Error Interrupt Mask */
#define AM79C971_CSR3_RINTM   0x00000400  /* Receive Interrupt Mask */
#define AM79C971_CSR3_TINTM   0x00000200  /* Transmit Interrupt Mask */
#define AM79C971_CSR3_IDONM   0x00000100  /* Initialization Done Mask */
#define AM79C971_CSR3_BSWP    0x00000004  /* Byte Swap */
#define AM79C971_CSR3_IM_MASK 0x00007F00  /* All interrupt masks of CSR3 */
74 |
|
75 |
/* CSR5: Extended Control and Interrupt 1 */
#define AM79C971_CSR5_TOKINTD 0x00008000  /* Transmit OK Interrupt Disable */
#define AM79C971_CSR5_SPND    0x00000001  /* Suspend */

/* CSR15: Mode */
#define AM79C971_CSR15_PROM   0x00008000  /* Promiscuous Mode */
#define AM79C971_CSR15_DRCVBC 0x00004000  /* Disable Receive Broadcast */
#define AM79C971_CSR15_DRCVPA 0x00002000  /* Disable Receive PHY address */
#define AM79C971_CSR15_DTX    0x00000002  /* Disable Transmit */
#define AM79C971_CSR15_DRX    0x00000001  /* Disable Receive */

/* AMD 79C971 Initialization block length (in 32-bit words) */
#define AM79C971_INIT_BLOCK_LEN  0x1c
88 |
|
89 |
/*
 * RX descriptors (RMD1 status/control bits).
 * Note: BUFF is bit 26 (0x04000000); the previous value 0x08000000
 * collided with the CRC bit (bit 27), so the RX "no next descriptor"
 * error path flagged a CRC error instead of a buffer error.
 */
#define AM79C971_RMD1_OWN     0x80000000  /* OWN=1: owned by Am79c971 */
#define AM79C971_RMD1_ERR     0x40000000  /* Error */
#define AM79C971_RMD1_FRAM    0x20000000  /* Framing Error */
#define AM79C971_RMD1_OFLO    0x10000000  /* Overflow Error */
#define AM79C971_RMD1_CRC     0x08000000  /* Invalid CRC */
#define AM79C971_RMD1_BUFF    0x04000000  /* Buffer Error (chaining) */
#define AM79C971_RMD1_STP     0x02000000  /* Start of Packet */
#define AM79C971_RMD1_ENP     0x01000000  /* End of Packet */
#define AM79C971_RMD1_BPE     0x00800000  /* Bus Parity Error */
#define AM79C971_RMD1_PAM     0x00400000  /* Physical Address Match */
#define AM79C971_RMD1_LAFM    0x00200000  /* Logical Addr. Filter Match */
#define AM79C971_RMD1_BAM     0x00100000  /* Broadcast Address Match */
#define AM79C971_RMD1_LEN     0x00000FFF  /* Buffer Length */

#define AM79C971_RMD2_LEN     0x00000FFF  /* Received byte count */

/* TX descriptors (TMD1 status/control bits) */
#define AM79C971_TMD1_OWN     0x80000000  /* OWN=1: owned by Am79c971 */
#define AM79C971_TMD1_ERR     0x40000000  /* Error */
#define AM79C971_TMD1_ADD_FCS 0x20000000  /* FCS generation */
#define AM79C971_TMD1_STP     0x02000000  /* Start of Packet */
#define AM79C971_TMD1_ENP     0x01000000  /* End of Packet */
#define AM79C971_TMD1_LEN     0x00000FFF  /* Buffer Length */
113 |
|
114 |
/* RX Descriptor */ |
115 |
struct rx_desc { |
116 |
m_uint32_t rmd[4]; |
117 |
}; |
118 |
|
119 |
/* TX Descriptor */ |
120 |
struct tx_desc { |
121 |
m_uint32_t tmd[4]; |
122 |
}; |
123 |
|
124 |
/* AMD 79C971 Data */ |
125 |
struct am79c971_data { |
126 |
char *name; |
127 |
|
128 |
/* Interface type (10baseT or 100baseTX) */ |
129 |
int type; |
130 |
|
131 |
/* Current RAP (Register Address Pointer) value */ |
132 |
m_uint8_t rap; |
133 |
|
134 |
/* CSR and BCR registers */ |
135 |
m_uint32_t csr[256],bcr[256]; |
136 |
|
137 |
/* RX/TX rings start addresses */ |
138 |
m_uint32_t rx_start,tx_start; |
139 |
|
140 |
/* RX/TX number of descriptors (log2) */ |
141 |
m_uint32_t rx_l2len,tx_l2len; |
142 |
|
143 |
/* RX/TX number of descriptors */ |
144 |
m_uint32_t rx_len,tx_len; |
145 |
|
146 |
/* RX/TX ring positions */ |
147 |
m_uint32_t rx_pos,tx_pos; |
148 |
|
149 |
/* MII registers */ |
150 |
m_uint16_t mii_regs[32][32]; |
151 |
|
152 |
/* Physical (MAC) address */ |
153 |
n_eth_addr_t mac_addr; |
154 |
|
155 |
/* Device information */ |
156 |
struct vdevice *dev; |
157 |
|
158 |
/* PCI device information */ |
159 |
struct pci_device *pci_dev; |
160 |
|
161 |
/* Virtual machine */ |
162 |
vm_instance_t *vm; |
163 |
|
164 |
/* NetIO descriptor */ |
165 |
netio_desc_t *nio; |
166 |
|
167 |
/* TX ring scanner task id */ |
168 |
ptask_id_t tx_tid; |
169 |
}; |
170 |
|
171 |
/* Log an am79c971 message */ |
172 |
#define AM79C971_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg) |
173 |
|
174 |
|
175 |
/* Default MII register values exposed by the emulated PHY */
static m_uint16_t mii_reg_values[32] = {
   0x1000, 0x782D, 0x2000, 0x5C01, 0x01E1, 0x0000, 0x0000, 0x0000,
   0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
   0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x8060,
   0x8020, 0x0820, 0x0000, 0x3800, 0xA3B9, 0x0000, 0x0000, 0x0000,
};
181 |
|
182 |
/* Read a MII register */ |
183 |
static m_uint16_t mii_reg_read(struct am79c971_data *d,u_int phy,u_int reg) |
184 |
{ |
185 |
if ((phy >= 32) || (reg >= 32)) |
186 |
return(0); |
187 |
|
188 |
return(d->mii_regs[phy][reg]); |
189 |
} |
190 |
|
191 |
/* Write a MII register */ |
192 |
static void mii_reg_write(struct am79c971_data *d,u_int phy,u_int reg, |
193 |
m_uint16_t value) |
194 |
{ |
195 |
if ((phy < 32) && (reg < 32)) |
196 |
d->mii_regs[phy][reg] = value; |
197 |
} |
198 |
|
199 |
/* Check if a packet must be delivered to the emulated chip */ |
200 |
static inline int am79c971_handle_mac_addr(struct am79c971_data *d, |
201 |
m_uint8_t *pkt) |
202 |
{ |
203 |
n_eth_hdr_t *hdr = (n_eth_hdr_t *)pkt; |
204 |
|
205 |
/* Ignore traffic sent by us */ |
206 |
if (!memcmp(&d->mac_addr,&hdr->saddr,N_ETH_ALEN)) |
207 |
return(FALSE); |
208 |
|
209 |
/* Accept systematically frames if we are running is promiscuous mode */ |
210 |
if (d->csr[15] & AM79C971_CSR15_PROM) |
211 |
return(TRUE); |
212 |
|
213 |
/* Accept systematically all multicast frames */ |
214 |
if (eth_addr_is_mcast(&hdr->daddr)) |
215 |
return(TRUE); |
216 |
|
217 |
/* Accept frames directly for us, discard others */ |
218 |
if (!memcmp(&d->mac_addr,&hdr->daddr,N_ETH_ALEN)) |
219 |
return(TRUE); |
220 |
|
221 |
return(FALSE); |
222 |
} |
223 |
|
224 |
/* Update the Interrupt Flag bit of csr0 */ |
225 |
static void am79c971_update_intr_flag(struct am79c971_data *d) |
226 |
{ |
227 |
m_uint32_t mask; |
228 |
|
229 |
mask = d->csr[3] & AM79C971_CSR3_IM_MASK; |
230 |
|
231 |
if (d->csr[0] & mask) |
232 |
d->csr[0] |= AM79C971_CSR0_INTR; |
233 |
} |
234 |
|
235 |
/* Trigger an interrupt */ |
236 |
static int am79c971_trigger_irq(struct am79c971_data *d) |
237 |
{ |
238 |
if (d->csr[0] & (AM79C971_CSR0_INTR|AM79C971_CSR0_IENA)) { |
239 |
pci_dev_trigger_irq(d->vm,d->pci_dev); |
240 |
return(TRUE); |
241 |
} |
242 |
|
243 |
return(FALSE); |
244 |
} |
245 |
|
246 |
/* Update RX/TX ON bits of csr0 */ |
247 |
static void am79c971_update_rx_tx_on_bits(struct am79c971_data *d) |
248 |
{ |
249 |
/* |
250 |
* Set RX ON if DRX in csr15 is cleared, and set TX on if DTX |
251 |
* in csr15 is cleared. The START bit must be set. |
252 |
*/ |
253 |
d->csr[0] &= ~(AM79C971_CSR0_RXON|AM79C971_CSR0_TXON); |
254 |
|
255 |
if (d->csr[0] & AM79C971_CSR0_STRT) { |
256 |
if (!(d->csr[15] & AM79C971_CSR15_DRX)) |
257 |
d->csr[0] |= AM79C971_CSR0_RXON; |
258 |
|
259 |
if (!(d->csr[15] & AM79C971_CSR15_DTX)) |
260 |
d->csr[0] |= AM79C971_CSR0_TXON; |
261 |
} |
262 |
} |
263 |
|
264 |
/* Update RX/TX descriptor lengths */ |
265 |
static void am79c971_update_rx_tx_len(struct am79c971_data *d) |
266 |
{ |
267 |
d->rx_len = 1 << d->rx_l2len; |
268 |
d->tx_len = 1 << d->tx_l2len; |
269 |
|
270 |
/* Normalize ring sizes */ |
271 |
if (d->rx_len > 512) d->rx_len = 512; |
272 |
if (d->tx_len > 512) d->tx_len = 512; |
273 |
} |
274 |
|
275 |
/* Fetch the initialization block from memory */ |
276 |
static int am79c971_fetch_init_block(struct am79c971_data *d) |
277 |
{ |
278 |
m_uint32_t ib[AM79C971_INIT_BLOCK_LEN]; |
279 |
m_uint32_t ib_addr,ib_tmp; |
280 |
|
281 |
/* The init block address is contained in csr1 (low) and csr2 (high) */ |
282 |
ib_addr = (d->csr[2] << 16) | d->csr[1]; |
283 |
|
284 |
if (!ib_addr) { |
285 |
AM79C971_LOG(d,"trying to fetch init block at address 0...\n"); |
286 |
return(-1); |
287 |
} |
288 |
|
289 |
AM79C971_LOG(d,"fetching init block at address 0x%8.8x\n",ib_addr); |
290 |
physmem_copy_from_vm(d->vm,ib,ib_addr,sizeof(ib)); |
291 |
|
292 |
/* Extract RX/TX ring addresses */ |
293 |
d->rx_start = vmtoh32(ib[5]); |
294 |
d->tx_start = vmtoh32(ib[6]); |
295 |
|
296 |
/* Set csr15 from mode field */ |
297 |
ib_tmp = vmtoh32(ib[0]); |
298 |
d->csr[15] = ib_tmp & 0xffff; |
299 |
|
300 |
/* Extract RX/TX ring sizes */ |
301 |
d->rx_l2len = (ib_tmp >> 20) & 0x0F; |
302 |
d->tx_l2len = (ib_tmp >> 28) & 0x0F; |
303 |
am79c971_update_rx_tx_len(d); |
304 |
|
305 |
AM79C971_LOG(d,"rx_ring = 0x%8.8x (%u), tx_ring = 0x%8.8x (%u)\n", |
306 |
d->rx_start,d->rx_len,d->tx_start,d->tx_len); |
307 |
|
308 |
/* Get the physical MAC address */ |
309 |
ib_tmp = vmtoh32(ib[1]); |
310 |
d->csr[12] = ib_tmp & 0xFFFF; |
311 |
d->csr[13] = ib_tmp >> 16; |
312 |
|
313 |
d->mac_addr.eth_addr_byte[3] = (ib_tmp >> 24) & 0xFF; |
314 |
d->mac_addr.eth_addr_byte[2] = (ib_tmp >> 16) & 0xFF; |
315 |
d->mac_addr.eth_addr_byte[1] = (ib_tmp >> 8) & 0xFF; |
316 |
d->mac_addr.eth_addr_byte[0] = ib_tmp & 0xFF; |
317 |
|
318 |
ib_tmp = vmtoh32(ib[2]); |
319 |
d->csr[14] = ib_tmp & 0xFFFF; |
320 |
d->mac_addr.eth_addr_byte[5] = (ib_tmp >> 8) & 0xFF; |
321 |
d->mac_addr.eth_addr_byte[4] = ib_tmp & 0xFF; |
322 |
|
323 |
/* |
324 |
* Mark the initialization as done is csr0. |
325 |
*/ |
326 |
d->csr[0] |= AM79C971_CSR0_IDON; |
327 |
|
328 |
/* Update RX/TX ON bits of csr0 since csr15 has been modified */ |
329 |
am79c971_update_rx_tx_on_bits(d); |
330 |
AM79C971_LOG(d,"CSR0 = 0x%4.4x\n",d->csr[0]); |
331 |
|
332 |
am79c971_update_intr_flag(d); |
333 |
|
334 |
if (am79c971_trigger_irq(d)) |
335 |
AM79C971_LOG(d,"triggering IDON interrupt\n"); |
336 |
|
337 |
return(0); |
338 |
} |
339 |
|
340 |
/* RDP (Register Data Port) access */ |
341 |
static void am79c971_rdp_access(cpu_mips_t *cpu,struct am79c971_data *d, |
342 |
u_int op_type,m_uint64_t *data) |
343 |
{ |
344 |
m_uint32_t mask; |
345 |
|
346 |
#if DEBUG_CSR_REGS |
347 |
if (op_type == MTS_READ) { |
348 |
cpu_log(cpu,d->name,"read access to CSR %d\n",d->rap); |
349 |
} else { |
350 |
cpu_log(cpu,d->name,"write access to CSR %d, value=0x%x\n",d->rap,*data); |
351 |
} |
352 |
#endif |
353 |
|
354 |
switch(d->rap) { |
355 |
case 0: /* CSR0: Controller Status and Control Register */ |
356 |
if (op_type == MTS_READ) { |
357 |
//AM79C971_LOG(d,"reading CSR0 (val=0x%4.4x)\n",d->csr[0]); |
358 |
*data = d->csr[0]; |
359 |
} else { |
360 |
/* |
361 |
* The STOP bit clears other bits. |
362 |
* It has precedence over INIT and START bits. |
363 |
*/ |
364 |
if (*data & AM79C971_CSR0_STOP) { |
365 |
//AM79C971_LOG(d,"stopping interface!\n"); |
366 |
d->csr[0] = AM79C971_CSR0_STOP; |
367 |
d->tx_pos = d->rx_pos = 0; |
368 |
break; |
369 |
} |
370 |
|
371 |
/* These bits are cleared when set to 1 */ |
372 |
mask = AM79C971_CSR0_BABL | AM79C971_CSR0_CERR; |
373 |
mask |= AM79C971_CSR0_MISS | AM79C971_CSR0_MERR; |
374 |
mask |= AM79C971_CSR0_RINT | AM79C971_CSR0_TINT; |
375 |
mask |= AM79C971_CSR0_IDON; |
376 |
d->csr[0] &= ~(*data & mask); |
377 |
|
378 |
/* Save the Interrupt Enable bit */ |
379 |
d->csr[0] |= *data & AM79C971_CSR0_IENA; |
380 |
|
381 |
/* If INIT bit is set, fetch the initialization block */ |
382 |
if (*data & AM79C971_CSR0_INIT) { |
383 |
d->csr[0] |= AM79C971_CSR0_INIT; |
384 |
d->csr[0] &= ~AM79C971_CSR0_STOP; |
385 |
am79c971_fetch_init_block(d); |
386 |
} |
387 |
|
388 |
/* If STRT bit is set, clear the stop bit */ |
389 |
if (*data & AM79C971_CSR0_STRT) { |
390 |
//AM79C971_LOG(d,"enabling interface!\n"); |
391 |
d->csr[0] |= AM79C971_CSR0_STRT; |
392 |
d->csr[0] &= ~AM79C971_CSR0_STOP; |
393 |
am79c971_update_rx_tx_on_bits(d); |
394 |
} |
395 |
} |
396 |
break; |
397 |
|
398 |
case 6: /* CSR6: RX/TX Descriptor Table Length */ |
399 |
if (op_type == MTS_WRITE) { |
400 |
d->rx_l2len = (*data >> 8) & 0x0F; |
401 |
d->tx_l2len = (*data >> 12) & 0x0F; |
402 |
am79c971_update_rx_tx_len(d); |
403 |
} else { |
404 |
*data = (d->tx_l2len << 12) | (d->rx_l2len << 8); |
405 |
} |
406 |
break; |
407 |
|
408 |
case 15: /* CSR15: Mode */ |
409 |
if (op_type == MTS_WRITE) { |
410 |
d->csr[15] = *data; |
411 |
am79c971_update_rx_tx_on_bits(d); |
412 |
} else { |
413 |
*data = d->csr[15]; |
414 |
} |
415 |
break; |
416 |
|
417 |
case 88: |
418 |
if (op_type == MTS_READ) { |
419 |
switch(d->type) { |
420 |
case AM79C971_TYPE_100BASE_TX: |
421 |
*data = 0x2623003; |
422 |
break; |
423 |
default: |
424 |
*data = 0; |
425 |
break; |
426 |
} |
427 |
} |
428 |
break; |
429 |
|
430 |
default: |
431 |
if (op_type == MTS_READ) { |
432 |
*data = d->csr[d->rap]; |
433 |
} else { |
434 |
d->csr[d->rap] = *data; |
435 |
} |
436 |
|
437 |
#if DEBUG_UNKNOWN |
438 |
if (op_type == MTS_READ) { |
439 |
cpu_log(cpu,d->name,"read access to unknown CSR %d\n",d->rap); |
440 |
} else { |
441 |
cpu_log(cpu,d->name,"write access to unknown CSR %d, value=0x%x\n", |
442 |
d->rap,*data); |
443 |
} |
444 |
#endif |
445 |
} |
446 |
} |
447 |
|
448 |
/* BDP (BCR Data Port) access */ |
449 |
static void am79c971_bdp_access(cpu_mips_t *cpu,struct am79c971_data *d, |
450 |
u_int op_type,m_uint64_t *data) |
451 |
{ |
452 |
u_int mii_phy,mii_reg; |
453 |
|
454 |
#if DEBUG_BCR_REGS |
455 |
if (op_type == MTS_READ) { |
456 |
cpu_log(cpu,d->name,"read access to BCR %d\n",d->rap); |
457 |
} else { |
458 |
cpu_log(cpu,d->name,"write access to BCR %d, value=0x%x\n",d->rap,*data); |
459 |
} |
460 |
#endif |
461 |
|
462 |
switch(d->rap) { |
463 |
case 9: |
464 |
if (op_type == MTS_READ) |
465 |
*data = 1; |
466 |
break; |
467 |
|
468 |
case 34: /* BCR34: MII Management Data Register */ |
469 |
mii_phy = (d->bcr[33] >> 5) & 0x1F; |
470 |
mii_reg = (d->bcr[33] >> 0) & 0x1F; |
471 |
|
472 |
if (op_type == MTS_READ) |
473 |
*data = mii_reg_read(d,mii_phy,mii_reg); |
474 |
//else |
475 |
//mii_reg_write(d,mii_phy,mii_reg,*data); |
476 |
break; |
477 |
|
478 |
default: |
479 |
if (op_type == MTS_READ) { |
480 |
*data = d->bcr[d->rap]; |
481 |
} else { |
482 |
d->bcr[d->rap] = *data; |
483 |
} |
484 |
|
485 |
#if DEBUG_UNKNOWN |
486 |
if (op_type == MTS_READ) { |
487 |
cpu_log(cpu,d->name,"read access to unknown BCR %d\n",d->rap); |
488 |
} else { |
489 |
cpu_log(cpu,d->name,"write access to unknown BCR %d, value=0x%x\n", |
490 |
d->rap,*data); |
491 |
} |
492 |
#endif |
493 |
} |
494 |
} |
495 |
|
496 |
/* |
497 |
* dev_am79c971_access() |
498 |
*/ |
499 |
void *dev_am79c971_access(cpu_mips_t *cpu,struct vdevice *dev, |
500 |
m_uint32_t offset,u_int op_size,u_int op_type, |
501 |
m_uint64_t *data) |
502 |
{ |
503 |
struct am79c971_data *d = dev->priv_data; |
504 |
|
505 |
if (op_type == MTS_READ) |
506 |
*data = 0; |
507 |
|
508 |
#if DEBUG_ACCESS |
509 |
if (op_type == MTS_READ) { |
510 |
cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n", |
511 |
offset,cpu->pc,op_size); |
512 |
} else { |
513 |
cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, " |
514 |
"val=0x%llx, size=%u\n",offset,cpu->pc,*data,op_size); |
515 |
} |
516 |
#endif |
517 |
|
518 |
switch(offset) { |
519 |
case 0x14: /* RAP (Register Address Pointer) */ |
520 |
if (op_type == MTS_WRITE) { |
521 |
d->rap = *data & 0xFF; |
522 |
} else { |
523 |
*data = d->rap; |
524 |
} |
525 |
break; |
526 |
|
527 |
case 0x10: /* RDP (Register Data Port) */ |
528 |
am79c971_rdp_access(cpu,d,op_type,data); |
529 |
break; |
530 |
|
531 |
case 0x1c: /* BDP (BCR Data Port) */ |
532 |
am79c971_bdp_access(cpu,d,op_type,data); |
533 |
break; |
534 |
} |
535 |
|
536 |
return NULL; |
537 |
} |
538 |
|
539 |
/* Read a RX descriptor */ |
540 |
static int rxdesc_read(struct am79c971_data *d,m_uint32_t rxd_addr, |
541 |
struct rx_desc *rxd) |
542 |
{ |
543 |
m_uint32_t buf[4]; |
544 |
m_uint8_t sw_style; |
545 |
|
546 |
/* Get the software style */ |
547 |
sw_style = d->bcr[20]; |
548 |
|
549 |
/* Read the descriptor from VM physical RAM */ |
550 |
physmem_copy_from_vm(d->vm,&buf,rxd_addr,sizeof(struct rx_desc)); |
551 |
|
552 |
switch(sw_style) { |
553 |
case 2: |
554 |
rxd->rmd[0] = vmtoh32(buf[0]); /* rb addr */ |
555 |
rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */ |
556 |
rxd->rmd[2] = vmtoh32(buf[2]); /* rfrtag, mcnt, ... */ |
557 |
rxd->rmd[3] = vmtoh32(buf[3]); /* user */ |
558 |
break; |
559 |
|
560 |
case 3: |
561 |
rxd->rmd[0] = vmtoh32(buf[2]); /* rb addr */ |
562 |
rxd->rmd[1] = vmtoh32(buf[1]); /* own flag, ... */ |
563 |
rxd->rmd[2] = vmtoh32(buf[0]); /* rfrtag, mcnt, ... */ |
564 |
rxd->rmd[3] = vmtoh32(buf[3]); /* user */ |
565 |
break; |
566 |
|
567 |
default: |
568 |
AM79C971_LOG(d,"invalid software style %u!\n",sw_style); |
569 |
return(-1); |
570 |
} |
571 |
|
572 |
return(0); |
573 |
} |
574 |
|
575 |
/* Set the address of the next RX descriptor */ |
576 |
static inline void rxdesc_set_next(struct am79c971_data *d) |
577 |
{ |
578 |
d->rx_pos++; |
579 |
|
580 |
if (d->rx_pos == d->rx_len) |
581 |
d->rx_pos = 0; |
582 |
} |
583 |
|
584 |
/* Compute the address of the current RX descriptor */ |
585 |
static inline m_uint32_t rxdesc_get_current(struct am79c971_data *d) |
586 |
{ |
587 |
return(d->rx_start + (d->rx_pos * sizeof(struct rx_desc))); |
588 |
} |
589 |
|
590 |
/* Put a packet in buffer of a descriptor */ |
591 |
static void rxdesc_put_pkt(struct am79c971_data *d,struct rx_desc *rxd, |
592 |
u_char **pkt,ssize_t *pkt_len) |
593 |
{ |
594 |
ssize_t len,cp_len; |
595 |
|
596 |
/* Compute the data length to copy */ |
597 |
len = ~((rxd->rmd[1] & AM79C971_RMD1_LEN) - 1); |
598 |
len &= AM79C971_RMD1_LEN; |
599 |
cp_len = m_min(len,*pkt_len); |
600 |
|
601 |
/* Copy packet data to the VM physical RAM */ |
602 |
#if DEBUG_RECEIVE |
603 |
AM79C971_LOG(d,"am79c971_handle_rxring: storing %u bytes at 0x%8.8x\n", |
604 |
cp_len, rxd->rmd[0]); |
605 |
#endif |
606 |
physmem_copy_to_vm(d->vm,*pkt,rxd->rmd[0],cp_len); |
607 |
|
608 |
*pkt += cp_len; |
609 |
*pkt_len -= cp_len; |
610 |
} |
611 |
|
612 |
/* |
613 |
* Put a packet in the RX ring. |
614 |
*/ |
615 |
static int am79c971_receive_pkt(struct am79c971_data *d, |
616 |
u_char *pkt,ssize_t pkt_len) |
617 |
{ |
618 |
m_uint32_t rx_start,rx_current,rx_next,rxdn_rmd1; |
619 |
struct rx_desc rxd0,rxdn,*rxdc; |
620 |
ssize_t tot_len = pkt_len; |
621 |
u_char *pkt_ptr = pkt; |
622 |
m_uint8_t sw_style; |
623 |
int i; |
624 |
|
625 |
/* Truncate the packet if it is too big */ |
626 |
pkt_len = m_min(pkt_len,AM79C971_MAX_PKT_SIZE); |
627 |
|
628 |
/* Copy the current rxring descriptor */ |
629 |
rx_start = rx_current = rxdesc_get_current(d); |
630 |
rxdesc_read(d,rx_start,&rxd0); |
631 |
|
632 |
/* We must have the first descriptor... */ |
633 |
if (!(rxd0.rmd[1] & AM79C971_RMD1_OWN)) |
634 |
return(FALSE); |
635 |
|
636 |
for(i=0,rxdc=&rxd0;;i++) |
637 |
{ |
638 |
#if DEBUG_RECEIVE |
639 |
AM79C971_LOG(d,"am79c971_handle_rxring: i=%d, addr=0x%8.8x: " |
640 |
"rmd[0]=0x%x, rmd[1]=0x%x, rmd[2]=0x%x, rmd[3]=0x%x\n", |
641 |
i,rx_current, |
642 |
rxdc->rmd[0],rxdc->rmd[1],rxdc->rmd[2],rxdc->rmd[3]); |
643 |
#endif |
644 |
/* Put data into the descriptor buffer */ |
645 |
rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len); |
646 |
|
647 |
/* Go to the next descriptor */ |
648 |
rxdesc_set_next(d); |
649 |
|
650 |
/* If this is not the first descriptor, clear the OWN bit */ |
651 |
if (i != 0) |
652 |
rxdc->rmd[1] &= ~AM79C971_RMD1_OWN; |
653 |
|
654 |
/* If we have finished, mark the descriptor as end of packet */ |
655 |
if (tot_len == 0) { |
656 |
rxdc->rmd[1] |= AM79C971_RMD1_ENP; |
657 |
physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]); |
658 |
|
659 |
/* Get the software style */ |
660 |
sw_style = d->bcr[20]; |
661 |
|
662 |
/* Update the message byte count field */ |
663 |
rxdc->rmd[2] &= ~AM79C971_RMD2_LEN; |
664 |
rxdc->rmd[2] |= pkt_len + 4; |
665 |
|
666 |
switch(sw_style) { |
667 |
case 2: |
668 |
physmem_copy_u32_to_vm(d->vm,rx_current+8,rxdc->rmd[2]); |
669 |
break; |
670 |
case 3: |
671 |
physmem_copy_u32_to_vm(d->vm,rx_current,rxdc->rmd[2]); |
672 |
break; |
673 |
default: |
674 |
AM79C971_LOG(d,"invalid software style %u!\n",sw_style); |
675 |
} |
676 |
|
677 |
break; |
678 |
} |
679 |
|
680 |
/* Try to acquire the next descriptor */ |
681 |
rx_next = rxdesc_get_current(d); |
682 |
rxdn_rmd1 = physmem_copy_u32_from_vm(d->vm,rx_next+4); |
683 |
|
684 |
if (!(rxdn_rmd1 & AM79C971_RMD1_OWN)) { |
685 |
rxdc->rmd[1] |= AM79C971_RMD1_ERR | AM79C971_RMD1_BUFF; |
686 |
rxdc->rmd[1] |= AM79C971_RMD1_ENP; |
687 |
physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]); |
688 |
break; |
689 |
} |
690 |
|
691 |
/* Update rmd1 to store change of OWN bit */ |
692 |
physmem_copy_u32_to_vm(d->vm,rx_current+4,rxdc->rmd[1]); |
693 |
|
694 |
/* Read the next descriptor from VM physical RAM */ |
695 |
rxdesc_read(d,rx_next,&rxdn); |
696 |
rxdc = &rxdn; |
697 |
rx_current = rx_next; |
698 |
} |
699 |
|
700 |
/* Update the first RX descriptor */ |
701 |
rxd0.rmd[1] &= ~AM79C971_RMD1_OWN; |
702 |
rxd0.rmd[1] |= AM79C971_RMD1_STP; |
703 |
physmem_copy_u32_to_vm(d->vm,rx_start+4,rxd0.rmd[1]); |
704 |
|
705 |
/* Generate RX interrupt */ |
706 |
d->csr[0] |= AM79C971_CSR0_RINT; |
707 |
am79c971_update_intr_flag(d); |
708 |
am79c971_trigger_irq(d); |
709 |
return(TRUE); |
710 |
} |
711 |
|
712 |
/* Handle the RX ring */ |
713 |
static int am79c971_handle_rxring(netio_desc_t *nio, |
714 |
u_char *pkt,ssize_t pkt_len, |
715 |
struct am79c971_data *d) |
716 |
{ |
717 |
n_eth_hdr_t *hdr; |
718 |
|
719 |
/* |
720 |
* Don't start receive if the RX ring address has not been set |
721 |
* and if RX ON is not set. |
722 |
*/ |
723 |
if ((d->rx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON)) |
724 |
return(FALSE); |
725 |
|
726 |
#if DEBUG_RECEIVE |
727 |
AM79C971_LOG(d,"receiving a packet of %d bytes\n",pkt_len); |
728 |
mem_dump(log_file,pkt,pkt_len); |
729 |
#endif |
730 |
|
731 |
/* |
732 |
* Receive only multicast/broadcast trafic + unicast traffic |
733 |
* for this virtual machine. |
734 |
*/ |
735 |
hdr = (n_eth_hdr_t *)pkt; |
736 |
if (am79c971_handle_mac_addr(d,pkt)) |
737 |
am79c971_receive_pkt(d,pkt,pkt_len); |
738 |
|
739 |
return(TRUE); |
740 |
} |
741 |
|
742 |
/* Read a TX descriptor */ |
743 |
static int txdesc_read(struct am79c971_data *d,m_uint32_t txd_addr, |
744 |
struct tx_desc *txd) |
745 |
{ |
746 |
m_uint32_t buf[4]; |
747 |
m_uint8_t sw_style; |
748 |
|
749 |
/* Get the software style */ |
750 |
sw_style = d->bcr[20]; |
751 |
|
752 |
/* Read the descriptor from VM physical RAM */ |
753 |
physmem_copy_from_vm(d->vm,&buf,txd_addr,sizeof(struct tx_desc)); |
754 |
|
755 |
switch(sw_style) { |
756 |
case 2: |
757 |
txd->tmd[0] = vmtoh32(buf[0]); /* tb addr */ |
758 |
txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */ |
759 |
txd->tmd[2] = vmtoh32(buf[2]); /* buff, uflo, ... */ |
760 |
txd->tmd[3] = vmtoh32(buf[3]); /* user */ |
761 |
break; |
762 |
|
763 |
case 3: |
764 |
txd->tmd[0] = vmtoh32(buf[2]); /* tb addr */ |
765 |
txd->tmd[1] = vmtoh32(buf[1]); /* own flag, ... */ |
766 |
txd->tmd[2] = vmtoh32(buf[0]); /* buff, uflo, ... */ |
767 |
txd->tmd[3] = vmtoh32(buf[3]); /* user */ |
768 |
break; |
769 |
|
770 |
default: |
771 |
AM79C971_LOG(d,"invalid software style %u!\n",sw_style); |
772 |
return(-1); |
773 |
} |
774 |
|
775 |
return(0); |
776 |
} |
777 |
|
778 |
/* Set the address of the next TX descriptor */ |
779 |
static inline void txdesc_set_next(struct am79c971_data *d) |
780 |
{ |
781 |
d->tx_pos++; |
782 |
|
783 |
if (d->tx_pos == d->tx_len) |
784 |
d->tx_pos = 0; |
785 |
} |
786 |
|
787 |
/* Compute the address of the current TX descriptor */ |
788 |
static inline m_uint32_t txdesc_get_current(struct am79c971_data *d) |
789 |
{ |
790 |
return(d->tx_start + (d->tx_pos * sizeof(struct tx_desc))); |
791 |
} |
792 |
|
793 |
/* Handle the TX ring (single packet) */ |
794 |
static int am79c971_handle_txring_single(struct am79c971_data *d) |
795 |
{ |
796 |
u_char pkt[AM79C971_MAX_PKT_SIZE],*pkt_ptr; |
797 |
struct tx_desc txd0,ctxd,ntxd,*ptxd; |
798 |
m_uint32_t tx_start,tx_current; |
799 |
m_uint32_t clen,tot_len; |
800 |
|
801 |
if ((d->tx_start == 0) || !(d->csr[0] & AM79C971_CSR0_TXON)) |
802 |
return(FALSE); |
803 |
|
804 |
/* Copy the current txring descriptor */ |
805 |
tx_start = tx_current = txdesc_get_current(d); |
806 |
ptxd = &txd0; |
807 |
txdesc_read(d,tx_start,ptxd); |
808 |
|
809 |
/* If we don't own the first descriptor, we cannot transmit */ |
810 |
if (!(ptxd->tmd[1] & AM79C971_TMD1_OWN)) |
811 |
return(FALSE); |
812 |
|
813 |
#if DEBUG_TRANSMIT |
814 |
AM79C971_LOG(d,"am79c971_handle_txring: 1st desc: " |
815 |
"tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n", |
816 |
ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]); |
817 |
#endif |
818 |
|
819 |
/* Empty packet for now */ |
820 |
pkt_ptr = pkt; |
821 |
tot_len = 0; |
822 |
|
823 |
for(;;) { |
824 |
#if DEBUG_TRANSMIT |
825 |
AM79C971_LOG(d,"am79c971_handle_txring: loop: " |
826 |
"tmd[0]=0x%x, tmd[1]=0x%x, tmd[2]=0x%x, tmd[3]=0x%x\n", |
827 |
ptxd->tmd[0],ptxd->tmd[1],ptxd->tmd[2],ptxd->tmd[3]); |
828 |
#endif |
829 |
/* Copy packet data */ |
830 |
clen = ~((ptxd->tmd[1] & AM79C971_TMD1_LEN) - 1); |
831 |
clen &= AM79C971_TMD1_LEN; |
832 |
physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tmd[0],clen); |
833 |
|
834 |
pkt_ptr += clen; |
835 |
tot_len += clen; |
836 |
|
837 |
/* Clear the OWN bit if this is not the first descriptor */ |
838 |
if (!(ptxd->tmd[1] & AM79C971_TMD1_STP)) { |
839 |
ptxd->tmd[1] &= ~AM79C971_TMD1_OWN; |
840 |
physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->tmd[1]); |
841 |
} |
842 |
|
843 |
/* Set the next descriptor */ |
844 |
txdesc_set_next(d); |
845 |
|
846 |
/* Stop now if end of packet has been reached */ |
847 |
if (ptxd->tmd[1] & AM79C971_TMD1_ENP) |
848 |
break; |
849 |
|
850 |
/* Read the next descriptor and try to acquire it */ |
851 |
tx_current = txdesc_get_current(d); |
852 |
txdesc_read(d,tx_current,&ntxd); |
853 |
|
854 |
if (!(ntxd.tmd[1] & AM79C971_TMD1_OWN)) { |
855 |
AM79C971_LOG(d,"am79c971_handle_txring: UNDERFLOW!\n"); |
856 |
return(FALSE); |
857 |
} |
858 |
|
859 |
memcpy(&ctxd,&ntxd,sizeof(struct tx_desc)); |
860 |
ptxd = &ctxd; |
861 |
} |
862 |
|
863 |
if (tot_len != 0) { |
864 |
#if DEBUG_TRANSMIT |
865 |
AM79C971_LOG(d,"sending packet of %u bytes\n",tot_len); |
866 |
mem_dump(log_file,pkt,tot_len); |
867 |
#endif |
868 |
/* send it on wire */ |
869 |
netio_send(d->nio,pkt,tot_len); |
870 |
} |
871 |
|
872 |
/* Clear the OWN flag of the first descriptor */ |
873 |
txd0.tmd[1] &= ~AM79C971_TMD1_OWN; |
874 |
physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.tmd[1]); |
875 |
|
876 |
/* Generate TX interrupt */ |
877 |
d->csr[0] |= AM79C971_CSR0_TINT; |
878 |
am79c971_update_intr_flag(d); |
879 |
am79c971_trigger_irq(d); |
880 |
return(TRUE); |
881 |
} |
882 |
|
883 |
/* Handle the TX ring */ |
884 |
static int am79c971_handle_txring(struct am79c971_data *d) |
885 |
{ |
886 |
int i; |
887 |
|
888 |
for(i=0;i<AM79C971_TXRING_PASS_COUNT;i++) |
889 |
if (!am79c971_handle_txring_single(d)) |
890 |
break; |
891 |
|
892 |
return(TRUE); |
893 |
} |
894 |
|
895 |
/* |
896 |
* pci_am79c971_read() |
897 |
* |
898 |
* Read a PCI register. |
899 |
*/ |
900 |
static m_uint32_t pci_am79c971_read(cpu_mips_t *cpu,struct pci_device *dev, |
901 |
int reg) |
902 |
{ |
903 |
struct am79c971_data *d = dev->priv_data; |
904 |
|
905 |
#if DEBUG_PCI_REGS |
906 |
AM79C971_LOG(d,"read PCI register 0x%x\n",reg); |
907 |
#endif |
908 |
|
909 |
switch (reg) { |
910 |
case 0x00: |
911 |
return((AM79C971_PCI_PRODUCT_ID << 16) | AM79C971_PCI_VENDOR_ID); |
912 |
case 0x08: |
913 |
return(0x02000002); |
914 |
case PCI_REG_BAR1: |
915 |
return(d->dev->phys_addr); |
916 |
default: |
917 |
return(0); |
918 |
} |
919 |
} |
920 |
|
921 |
/* |
922 |
* pci_am79c971_write() |
923 |
* |
924 |
* Write a PCI register. |
925 |
*/ |
926 |
static void pci_am79c971_write(cpu_mips_t *cpu,struct pci_device *dev, |
927 |
int reg,m_uint32_t value) |
928 |
{ |
929 |
struct am79c971_data *d = dev->priv_data; |
930 |
|
931 |
#if DEBUG_PCI_REGS |
932 |
AM79C971_LOG(d,"write PCI register 0x%x, value 0x%x\n",reg,value); |
933 |
#endif |
934 |
|
935 |
switch(reg) { |
936 |
case PCI_REG_BAR1: |
937 |
vm_map_device(cpu->vm,d->dev,(m_uint64_t)value); |
938 |
AM79C971_LOG(d,"registers are mapped at 0x%x\n",value); |
939 |
break; |
940 |
} |
941 |
} |
942 |
|
943 |
/* |
944 |
* dev_am79c971_init() |
945 |
* |
946 |
* Generic AMD Am79c971 initialization code. |
947 |
*/ |
948 |
struct am79c971_data * |
949 |
dev_am79c971_init(vm_instance_t *vm,char *name,int interface_type, |
950 |
struct pci_bus *pci_bus,int pci_device,int irq) |
951 |
{ |
952 |
struct am79c971_data *d; |
953 |
struct pci_device *pci_dev; |
954 |
struct vdevice *dev; |
955 |
|
956 |
/* Allocate the private data structure for AM79C971 */ |
957 |
if (!(d = malloc(sizeof(*d)))) { |
958 |
fprintf(stderr,"%s (AM79C971): out of memory\n",name); |
959 |
return NULL; |
960 |
} |
961 |
|
962 |
memset(d,0,sizeof(*d)); |
963 |
memcpy(d->mii_regs[0],mii_reg_values,sizeof(mii_reg_values)); |
964 |
|
965 |
/* Add as PCI device */ |
966 |
pci_dev = pci_dev_add(pci_bus,name, |
967 |
AM79C971_PCI_VENDOR_ID,AM79C971_PCI_PRODUCT_ID, |
968 |
pci_device,0,irq, |
969 |
d,NULL,pci_am79c971_read,pci_am79c971_write); |
970 |
|
971 |
if (!pci_dev) { |
972 |
fprintf(stderr,"%s (AM79C971): unable to create PCI device.\n",name); |
973 |
goto err_pci_dev; |
974 |
} |
975 |
|
976 |
/* Create the device itself */ |
977 |
if (!(dev = dev_create(name))) { |
978 |
fprintf(stderr,"%s (AM79C971): unable to create device.\n",name); |
979 |
goto err_dev; |
980 |
} |
981 |
|
982 |
d->name = name; |
983 |
d->vm = vm; |
984 |
d->type = interface_type; |
985 |
d->pci_dev = pci_dev; |
986 |
d->dev = dev; |
987 |
|
988 |
dev->phys_addr = 0; |
989 |
dev->phys_len = 0x4000; |
990 |
dev->handler = dev_am79c971_access; |
991 |
dev->priv_data = d; |
992 |
return(d); |
993 |
|
994 |
err_dev: |
995 |
pci_dev_remove(pci_dev); |
996 |
err_pci_dev: |
997 |
free(d); |
998 |
return NULL; |
999 |
} |
1000 |
|
1001 |
/* Remove an AMD Am79c971 device */ |
1002 |
void dev_am79c971_remove(struct am79c971_data *d) |
1003 |
{ |
1004 |
if (d != NULL) { |
1005 |
pci_dev_remove(d->pci_dev); |
1006 |
vm_unbind_device(d->vm,d->dev); |
1007 |
cpu_group_rebuild_mts(d->vm->cpu_group); |
1008 |
free(d->dev); |
1009 |
free(d); |
1010 |
} |
1011 |
} |
1012 |
|
1013 |
/* Bind a NIO to an AMD Am79c971 device */ |
1014 |
int dev_am79c971_set_nio(struct am79c971_data *d,netio_desc_t *nio) |
1015 |
{ |
1016 |
/* check that a NIO is not already bound */ |
1017 |
if (d->nio != NULL) |
1018 |
return(-1); |
1019 |
|
1020 |
d->nio = nio; |
1021 |
d->tx_tid = ptask_add((ptask_callback)am79c971_handle_txring,d,NULL); |
1022 |
netio_rxl_add(nio,(netio_rx_handler_t)am79c971_handle_rxring,d,NULL); |
1023 |
return(0); |
1024 |
} |
1025 |
|
1026 |
/* Unbind a NIO from an AMD Am79c971 device */ |
1027 |
void dev_am79c971_unset_nio(struct am79c971_data *d) |
1028 |
{ |
1029 |
if (d->nio != NULL) { |
1030 |
ptask_remove(d->tx_tid); |
1031 |
netio_rxl_remove(d->nio); |
1032 |
d->nio = NULL; |
1033 |
} |
1034 |
} |