/trunk/dev_gt.c (dynamips), revision 12, Sat Oct 6 16:45:40 2007 UTC, by dpavlin.
This is an archive of old source code which is no longer updated; go to git.rot13.org for current projects.

1 /*
2 * Cisco router simulation platform.
3 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
4 *
5 * Galileo GT64010/GT64120A/GT96100A system controller.
6 *
7 * The DMA support is not complete: only "normal" transfers (source and
8 * destination addresses incrementing) are working.
9 *
10 * Also, these transfers are "instantaneous" from the CPU's point of view:
11 * when a channel is enabled, the transfer completes immediately, so the
12 * timing is not very realistic.
13 */
14
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <string.h>
18
19 #include "utils.h"
20 #include "net.h"
21 #include "cpu.h"
22 #include "vm.h"
23 #include "dynamips.h"
24 #include "memory.h"
25 #include "device.h"
26 #include "net_io.h"
27 #include "ptask.h"
28 #include "dev_gt.h"
29
30 /* Debugging flags */
31 #define DEBUG_UNKNOWN 0
32 #define DEBUG_DMA 0
33 #define DEBUG_SDMA 0
34 #define DEBUG_MPSC 0
35 #define DEBUG_MII 0
36 #define DEBUG_ETH_TX 0
37 #define DEBUG_ETH_RX 0
38 #define DEBUG_ETH_HASH 0
39
40 /* PCI identification */
41 #define PCI_VENDOR_GALILEO 0x11ab /* Galileo Technology */
42 #define PCI_PRODUCT_GALILEO_GT64010 0x0146 /* GT-64010 */
43 #define PCI_PRODUCT_GALILEO_GT64011 0x4146 /* GT-64011 */
44 #define PCI_PRODUCT_GALILEO_GT64120 0x4620 /* GT-64120 */
45 #define PCI_PRODUCT_GALILEO_GT96100 0x9653 /* GT-96100 */
46
47 /* === Global definitions ================================================= */
48
49 /* Interrupt High Cause Register */
50 #define GT_IHCR_ETH0_SUM 0x00000001
51 #define GT_IHCR_ETH1_SUM 0x00000002
52 #define GT_IHCR_SDMA_SUM 0x00000010
53
54 /* Serial Cause Register */
55 #define GT_SCR_ETH0_SUM 0x00000001
56 #define GT_SCR_ETH1_SUM 0x00000002
57 #define GT_SCR_SDMA_SUM 0x00000010
58 #define GT_SCR_SDMA0_SUM 0x00000100
59 #define GT_SCR_MPSC0_SUM 0x00000200
60
61 /* === DMA definitions ==================================================== */
62 #define GT_DMA_CHANNELS 4
63
64 #define GT_DMA_FLYBY_ENABLE 0x00000001 /* FlyBy Enable */
65 #define GT_DMA_FLYBY_RDWR 0x00000002 /* SDRAM Read/Write (FlyBy) */
66 #define GT_DMA_SRC_DIR 0x0000000c /* Source Direction */
67 #define GT_DMA_DST_DIR 0x00000030 /* Destination Direction */
68 #define GT_DMA_DATA_LIMIT 0x000001c0 /* Data Transfer Limit */
69 #define GT_DMA_CHAIN_MODE 0x00000200 /* Chained Mode */
70 #define GT_DMA_INT_MODE 0x00000400 /* Interrupt Mode */
71 #define GT_DMA_TRANS_MODE 0x00000800 /* Transfer Mode */
72 #define GT_DMA_CHAN_ENABLE 0x00001000 /* Channel Enable */
73 #define GT_DMA_FETCH_NEXT 0x00002000 /* Fetch Next Record */
74 #define GT_DMA_ACT_STATUS 0x00004000 /* DMA Activity Status */
75 #define GT_DMA_SDA 0x00008000 /* Source/Destination Alignment */
76 #define GT_DMA_MDREQ 0x00010000 /* Mask DMA Requests */
77 #define GT_DMA_CDE 0x00020000 /* Close Descriptor Enable */
78 #define GT_DMA_EOTE 0x00040000 /* End-of-Transfer (EOT) Enable */
79 #define GT_DMA_EOTIE 0x00080000 /* EOT Interrupt Enable */
80 #define GT_DMA_ABORT 0x00100000 /* Abort DMA Transfer */
81 #define GT_DMA_SLP 0x00600000 /* Override Source Address */
82 #define GT_DMA_DLP 0x01800000 /* Override Dest Address */
83 #define GT_DMA_RLP 0x06000000 /* Override Record Address */
84 #define GT_DMA_REQ_SRC 0x10000000 /* DMA Request Source */
85
86 /* Galileo DMA channel */
87 struct dma_channel {
88 m_uint32_t byte_count;
89 m_uint32_t src_addr;
90 m_uint32_t dst_addr;
91 m_uint32_t cdptr;
92 m_uint32_t nrptr;
93 m_uint32_t ctrl;
94 };
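/*
 * Illustrative guest-side sketch (added; not part of the original source,
 * write32() and the addresses are hypothetical): using the register
 * offsets decoded in gt_dma_access() below, a chained transfer on
 * channel 0 can be started by pointing the next-record register at a
 * record in RAM and setting the fetch and enable bits:
 *
 *    write32(gt_base + 0x830, record_paddr);  // next record pointer
 *    write32(gt_base + 0x840, GT_DMA_FETCH_NEXT | GT_DMA_CHAN_ENABLE);
 *
 * gt_dma_handle_ctrl() then fetches the record and performs the whole
 * copy at once (see the "instantaneous" note in the header comment).
 */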
95
96 /* === Serial DMA (SDMA) ================================================== */
97
98 /* SDMA: 2 groups of 8 channels */
99 #define GT_SDMA_CHANNELS 8
100 #define GT_SDMA_GROUPS 2
101
102 /* SDMA channel */
103 struct sdma_channel {
104 u_int id;
105
106 m_uint32_t sdc;
107 m_uint32_t sdcm;
108 m_uint32_t rx_desc;
109 m_uint32_t rx_buf_ptr;
110 m_uint32_t scrdp;
111 m_uint32_t tx_desc;
112 m_uint32_t sctdp;
113 m_uint32_t sftdp;
114 };
115
116 /* SGCR: SDMA Group Configuration Register */
117 #define GT_REG_SGC 0x101af0
118
119 /* SDMA cause register: 8 fields (1 for each channel) of 4 bits */
120 #define GT_SDMA_CAUSE_RXBUF0 0x01
121 #define GT_SDMA_CAUSE_RXERR0 0x02
122 #define GT_SDMA_CAUSE_TXBUF0 0x04
123 #define GT_SDMA_CAUSE_TXEND0 0x08
124
125 /* SDMA channel register offsets */
126 #define GT_SDMA_SDC 0x000900 /* Configuration Register */
127 #define GT_SDMA_SDCM 0x000908 /* Command Register */
128 #define GT_SDMA_RX_DESC 0x008900 /* RX descriptor */
129 #define GT_SDMA_SCRDP 0x008910 /* Current RX descriptor */
130 #define GT_SDMA_TX_DESC 0x00c900 /* TX descriptor */
131 #define GT_SDMA_SCTDP 0x00c910 /* Current TX desc. pointer */
132 #define GT_SDMA_SFTDP 0x00c914 /* First TX desc. pointer */
133
134 /* SDMA RX/TX descriptor */
135 struct sdma_desc {
136 m_uint32_t buf_size;
137 m_uint32_t cmd_stat;
138 m_uint32_t next_ptr;
139 m_uint32_t buf_ptr;
140 };
141
142 /* SDMA Descriptor Command/Status word */
143 #define GT_SDMA_CMD_O 0x80000000 /* Owner bit */
144 #define GT_SDMA_CMD_AM 0x40000000 /* Auto-mode */
145 #define GT_SDMA_CMD_EI 0x00800000 /* Enable Interrupt */
146 #define GT_SDMA_CMD_F 0x00020000 /* First buffer */
147 #define GT_SDMA_CMD_L 0x00010000 /* Last buffer */
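/*
 * Illustrative sketch (added; not part of the original source): the guest
 * hands buffers to the controller by building sdma_desc chains in its RAM
 * and setting the owner bit; the emulation clears GT_SDMA_CMD_O as it
 * consumes them. A single-buffer TX descriptor would look like:
 *
 *    desc.buf_ptr  = buf_paddr;    // hypothetical buffer address
 *    desc.buf_size = len << 16;    // TX byte count lives in the high half
 *    desc.next_ptr = 0;            // 0 terminates the chain
 *    desc.cmd_stat = GT_SDMA_CMD_O | GT_SDMA_CMD_F | GT_SDMA_CMD_L;
 */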
148
149 /* SDCR: SDMA Configuration Register */
150 #define GT_SDCR_RFT 0x00000001 /* Receive FIFO Threshold */
151 #define GT_SDCR_SFM 0x00000002 /* Single Frame Mode */
152 #define GT_SDCR_RC 0x0000003c /* Retransmit count */
153 #define GT_SDCR_BLMR 0x00000040 /* Big/Little Endian RX mode */
154 #define GT_SDCR_BLMT 0x00000080 /* Big/Little Endian TX mode */
155 #define GT_SDCR_POVR 0x00000100 /* PCI override */
156 #define GT_SDCR_RIFB 0x00000200 /* RX IRQ on frame boundary */
157 #define GT_SDCR_BSZ 0x00003000 /* Burst size */
158
159 /* SDCMR: SDMA Command Register */
160 #define GT_SDCMR_ERD 0x00000080 /* Enable RX DMA */
161 #define GT_SDCMR_AR 0x00008000 /* Abort Receive */
162 #define GT_SDCMR_STD 0x00010000 /* Stop TX */
163 #define GT_SDCMR_STDH GT_SDCMR_STD /* Stop TX High */
164 #define GT_SDCMR_STDL 0x00020000 /* Stop TX Low */
165 #define GT_SDCMR_TXD 0x00800000 /* TX Demand */
166 #define GT_SDCMR_TXDH GT_SDCMR_TXD /* Start TX High */
167 #define GT_SDCMR_TXDL 0x01000000 /* Start TX Low */
168 #define GT_SDCMR_AT 0x80000000 /* Abort Transmit */
169
170 /* === MultiProtocol Serial Controller (MPSC) ============================= */
171
172 /* 8 MPSC channels */
173 #define GT_MPSC_CHANNELS 8
174
175 /* MPSC channel */
176 struct mpsc_channel {
177 m_uint32_t mmcrl;
178 m_uint32_t mmcrh;
179 m_uint32_t mpcr;
180 m_uint32_t chr[10];
181
182 vtty_t *vtty;
183 netio_desc_t *nio;
184 };
185
186 #define GT_MPSC_MMCRL 0x000A00 /* Main Config Register Low */
187 #define GT_MPSC_MMCRH 0x000A04 /* Main Config Register High */
188 #define GT_MPSC_MPCR 0x000A08 /* Protocol Config Register */
189 #define GT_MPSC_CHR1 0x000A0C
190 #define GT_MPSC_CHR2 0x000A10
191 #define GT_MPSC_CHR3 0x000A14
192 #define GT_MPSC_CHR4 0x000A18
193 #define GT_MPSC_CHR5 0x000A1C
194 #define GT_MPSC_CHR6 0x000A20
195 #define GT_MPSC_CHR7 0x000A24
196 #define GT_MPSC_CHR8 0x000A28
197 #define GT_MPSC_CHR9 0x000A2C
198 #define GT_MPSC_CHR10 0x000A30
199
200 #define GT_MMCRL_MODE_MASK 0x00000007
201
202 #define GT_MPSC_MODE_HDLC 0
203 #define GT_MPSC_MODE_UART 4
204 #define GT_MPSC_MODE_BISYNC 5
205
206 /* === Ethernet definitions =============================================== */
207 #define GT_ETH_PORTS 2
208 #define GT_MAX_PKT_SIZE 2048
209
210 /* SMI register */
211 #define GT_SMIR_DATA_MASK 0x0000FFFF
212 #define GT_SMIR_PHYAD_MASK 0x001F0000 /* PHY Device Address */
213 #define GT_SMIR_PHYAD_SHIFT 16
214 #define GT_SMIR_REGAD_MASK 0x03e00000 /* PHY Device Register Address */
215 #define GT_SMIR_REGAD_SHIFT 21
216 #define GT_SMIR_OPCODE_MASK 0x04000000 /* Opcode (0: write, 1: read) */
217 #define GT_SMIR_OPCODE_READ 0x04000000
218 #define GT_SMIR_RVALID_FLAG 0x08000000 /* Read Valid */
219 #define GT_SMIR_BUSY_FLAG 0x10000000 /* Busy: 1=op in progress */
220
221 /* PCR: Port Configuration Register */
222 #define GT_PCR_PM 0x00000001 /* Promiscuous mode */
223 #define GT_PCR_RBM 0x00000002 /* Reject broadcast mode */
224 #define GT_PCR_PBF 0x00000004 /* Pass bad frames */
225 #define GT_PCR_EN 0x00000080 /* Port Enabled/Disabled */
226 #define GT_PCR_LPBK 0x00000300 /* Loopback mode */
227 #define GT_PCR_FC 0x00000400 /* Force collision */
228 #define GT_PCR_HS 0x00001000 /* Hash size */
229 #define GT_PCR_HM 0x00002000 /* Hash mode */
230 #define GT_PCR_HDM 0x00004000 /* Hash default mode */
231 #define GT_PCR_HD 0x00008000 /* Duplex Mode */
232 #define GT_PCR_ISL 0x70000000 /* ISL enabled (0x06) */
233 #define GT_PCR_ACCS 0x80000000 /* Accelerate Slot Time */
234
235 /* PCXR: Port Configuration Extend Register */
236 #define GT_PCXR_IGMP 0x00000001 /* IGMP packet capture */
237 #define GT_PCXR_SPAN 0x00000002 /* BPDU packet capture */
238 #define GT_PCXR_PAR 0x00000004 /* Partition Enable */
239 #define GT_PCXR_PRIOTX 0x00000038 /* Priority weight for TX */
240 #define GT_PCXR_PRIORX 0x000000C0 /* Priority weight for RX */
241 #define GT_PCXR_PRIORX_OV 0x00000100 /* Prio RX override */
242 #define GT_PCXR_DPLX_EN 0x00000200 /* Autoneg for Duplex */
243 #define GT_PCXR_FCTL_EN 0x00000400 /* Autoneg for 802.3x */
244 #define GT_PCXR_FLP 0x00000800 /* Force Link Pass */
245 #define GT_PCXR_FCTL 0x00001000 /* Flow Control Mode */
246 #define GT_PCXR_MFL 0x0000C000 /* Maximum Frame Length */
247 #define GT_PCXR_MIB_CLR_MODE 0x00010000 /* MIB counters clear mode */
248 #define GT_PCXR_SPEED 0x00040000 /* Port Speed */
249 #define GT_PCXR_SPEED_EN 0x00080000 /* Autoneg for Speed */
250 #define GT_PCXR_RMII_EN 0x00100000 /* RMII Enable */
251 #define GT_PCXR_DSCP_EN 0x00200000 /* DSCP decoding enable */
252
253 /* PCMR: Port Command Register */
254 #define GT_PCMR_FJ 0x00008000 /* Force Jam / Flow Control */
255
256 /* PSR: Port Status Register */
257 #define GT_PSR_SPEED 0x00000001 /* Speed: 10/100 Mb/s (100=>1)*/
258 #define GT_PSR_DUPLEX 0x00000002 /* Duplex (1: full) */
259 #define GT_PSR_FCTL 0x00000004 /* Flow Control Mode */
260 #define GT_PSR_LINK 0x00000008 /* Link Up/Down */
261 #define GT_PSR_PAUSE 0x00000010 /* Flow-control disabled state */
262 #define GT_PSR_TXLOW 0x00000020 /* TX Low priority status */
263 #define GT_PSR_TXHIGH 0x00000040 /* TX High priority status */
264 #define GT_PSR_TXINP 0x00000080 /* TX in Progress */
265
266 /* ICR: Interrupt Cause Register */
267 #define GT_ICR_RXBUF 0x00000001 /* RX Buffer returned to host */
268 #define GT_ICR_TXBUFH 0x00000004 /* TX Buffer High */
269 #define GT_ICR_TXBUFL 0x00000008 /* TX Buffer Low */
270 #define GT_ICR_TXENDH 0x00000040 /* TX End High */
271 #define GT_ICR_TXENDL 0x00000080 /* TX End Low */
272 #define GT_ICR_RXERR 0x00000100 /* RX Error */
273 #define GT_ICR_TXERRH 0x00000400 /* TX Error High */
274 #define GT_ICR_TXERRL 0x00000800 /* TX Error Low */
275 #define GT_ICR_RXOVR 0x00001000 /* RX Overrun */
276 #define GT_ICR_TXUDR 0x00002000 /* TX Underrun */
277 #define GT_ICR_RXBUFQ0 0x00010000 /* RX Buffer in Prio Queue 0 */
278 #define GT_ICR_RXBUFQ1 0x00020000 /* RX Buffer in Prio Queue 1 */
279 #define GT_ICR_RXBUFQ2 0x00040000 /* RX Buffer in Prio Queue 2 */
280 #define GT_ICR_RXBUFQ3 0x00080000 /* RX Buffer in Prio Queue 3 */
281 #define GT_ICR_RXERRQ0 0x00010000 /* RX Error in Prio Queue 0 */
282 #define GT_ICR_RXERRQ1 0x00020000 /* RX Error in Prio Queue 1 */
283 #define GT_ICR_RXERRQ2 0x00040000 /* RX Error in Prio Queue 2 */
284 #define GT_ICR_RXERRQ3 0x00080000 /* RX Error in Prio Queue 3 */
285 #define GT_ICR_MII_STC 0x10000000 /* MII PHY Status Change */
286 #define GT_ICR_SMI_DONE 0x20000000 /* SMI Command Done */
287 #define GT_ICR_INT_SUM 0x80000000 /* Ethernet Interrupt Summary */
288 #define GT_ICR_MASK 0x7FFFFFFF
289
290 /* Ethernet hash entry */
291 #define GT_HTE_VALID 0x00000001 /* Valid entry */
292 #define GT_HTE_SKIP 0x00000002 /* Skip entry in a chain */
293 #define GT_HTE_RD 0x00000004 /* 0: Discard, 1: Receive */
294 #define GT_HTE_ADDR_MASK 0x7fffffffffff8ULL
295
296 #define GT_HTE_HOPNUM 12 /* Hash Table Hop Number */
297
298 enum {
299 GT_HTLOOKUP_MISS,
300 GT_HTLOOKUP_MATCH,
301 GT_HTLOOKUP_HOP_EXCEEDED,
302 };
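/*
 * Illustrative (added): given the bit assignments above, a hash table
 * entry accepting frames for a 48-bit MAC address "addr" would pack the
 * address starting at bit 3 and the flags in the low bits:
 *
 *    entry = (((m_uint64_t)addr << 3) & GT_HTE_ADDR_MASK)
 *            | GT_HTE_RD | GT_HTE_VALID;
 */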
303
304 /* TX Descriptor */
305 #define GT_TXDESC_OWN 0x80000000 /* Ownership */
306 #define GT_TXDESC_AM 0x40000000 /* Auto-mode */
307 #define GT_TXDESC_EI 0x00800000 /* Enable Interrupt */
308 #define GT_TXDESC_GC 0x00400000 /* Generate CRC */
309 #define GT_TXDESC_P 0x00040000 /* Padding */
310 #define GT_TXDESC_F 0x00020000 /* First buffer of packet */
311 #define GT_TXDESC_L 0x00010000 /* Last buffer of packet */
312 #define GT_TXDESC_ES 0x00008000 /* Error Summary */
313 #define GT_TXDESC_RC 0x00003c00 /* Retransmit Count */
314 #define GT_TXDESC_COL 0x00000200 /* Collision */
315 #define GT_TXDESC_RL 0x00000100 /* Retransmit Limit Error */
316 #define GT_TXDESC_UR 0x00000040 /* Underrun Error */
317 #define GT_TXDESC_LC 0x00000020 /* Late Collision Error */
318
319 #define GT_TXDESC_BC_MASK 0xFFFF0000 /* Number of bytes to transmit */
320 #define GT_TXDESC_BC_SHIFT 16
321
322 /* RX Descriptor */
323 #define GT_RXDESC_OWN 0x80000000 /* Ownership */
324 #define GT_RXDESC_AM 0x40000000 /* Auto-mode */
325 #define GT_RXDESC_EI 0x00800000 /* Enable Interrupt */
326 #define GT_RXDESC_F 0x00020000 /* First buffer of packet */
327 #define GT_RXDESC_L 0x00010000 /* Last buffer of packet */
328 #define GT_RXDESC_ES 0x00008000 /* Error Summary */
329 #define GT_RXDESC_IGMP 0x00004000 /* IGMP packet detected */
330 #define GT_RXDESC_HE 0x00002000 /* Hash Table Expired */
331 #define GT_RXDESC_M 0x00001000 /* Missed Frame */
332 #define GT_RXDESC_FT 0x00000800 /* Frame Type (802.3/Ethernet) */
333 #define GT_RXDESC_SF 0x00000100 /* Short Frame Error */
334 #define GT_RXDESC_MFL 0x00000080 /* Maximum Frame Length Error */
335 #define GT_RXDESC_OR 0x00000040 /* Overrun Error */
336 #define GT_RXDESC_COL 0x00000010 /* Collision */
337 #define GT_RXDESC_CE 0x00000001 /* CRC Error */
338
339 #define GT_RXDESC_BC_MASK 0x0000FFFF /* Byte count */
340 #define GT_RXDESC_BS_MASK 0xFFFF0000 /* Buffer size */
341 #define GT_RXDESC_BS_SHIFT 16
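/*
 * Worked example (added): buf_size packs two 16-bit fields. An RX
 * descriptor prepared with a 1536-byte buffer holds 1536 << 16 =
 * 0x06000000; after a 100-byte frame is stored (plus 4 bytes of CRC on
 * the last descriptor, see gt_sdma_handle_rxqueue), the low half carries
 * the byte count: 0x06000068. TX descriptors keep the byte count in the
 * high half instead (GT_TXDESC_BC_SHIFT).
 */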
342
343 /* Galileo Ethernet port */
344 struct eth_port {
345 netio_desc_t *nio;
346
347 /* First and Current RX descriptors (4 queues) */
348 m_uint32_t rx_start[4],rx_current[4];
349
350 /* Current TX descriptors (2 queues) */
351 m_uint32_t tx_current[2];
352
353 /* Port registers */
354 m_uint32_t pcr,pcxr,pcmr,psr;
355
356 /* SDMA registers */
357 m_uint32_t sdcr,sdcmr;
358
359 /* Interrupt register */
360 m_uint32_t icr,imr;
361
362 /* Hash Table pointer */
363 m_uint32_t ht_addr;
364
365 /* Ethernet MIB counters */
366 m_uint32_t rx_bytes,tx_bytes,rx_frames,tx_frames;
367 };
368
369 /* ======================================================================== */
370
371 /* Galileo GT64xxx/GT96xxx system controller */
372 struct gt_data {
373 char *name;
374 vm_obj_t vm_obj;
375 struct vdevice dev;
376 struct pci_device *pci_dev;
377 vm_instance_t *vm;
378 pthread_mutex_t lock;
379
380 struct pci_bus *bus[2];
381 struct dma_channel dma[GT_DMA_CHANNELS];
382
383 /* Interrupts (common) */
384 m_uint32_t int_cause_reg;
385 m_uint32_t int_high_cause_reg;
386 m_uint32_t int_mask_reg;
387
388 /* Interrupts (GT96100) */
389 m_uint32_t int0_main_mask_reg,int0_high_mask_reg;
390 m_uint32_t int1_main_mask_reg,int1_high_mask_reg;
391 m_uint32_t ser_cause_reg;
392 m_uint32_t serint0_mask_reg,serint1_mask_reg;
393 u_int int0_irq,int1_irq,serint0_irq,serint1_irq;
394
395 /* SDMA - Serial DMA (GT96100) */
396 m_uint32_t sgcr;
397 m_uint32_t sdma_cause_reg,sdma_mask_reg;
398 struct sdma_channel sdma[GT_SDMA_GROUPS][GT_SDMA_CHANNELS];
399
400 /* MPSC - MultiProtocol Serial Controller (GT96100) */
401 struct mpsc_channel mpsc[GT_MPSC_CHANNELS];
402
403 /* Ethernet ports (GT96100) */
404 u_int eth_irq;
405 ptask_id_t eth_tx_tid;
406 struct eth_port eth_ports[GT_ETH_PORTS];
407 m_uint32_t smi_reg;
408 m_uint16_t mii_regs[32][32];
409
410 /* IRQ status update */
411 void (*gt_update_irq_status)(struct gt_data *gt_data);
412 };
413
414 #define GT_LOCK(d) pthread_mutex_lock(&(d)->lock)
415 #define GT_UNLOCK(d) pthread_mutex_unlock(&(d)->lock)
416
417 /* Log a GT message */
418 #define GT_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg)
419
420 /* Update the interrupt status */
421 static void gt64k_update_irq_status(struct gt_data *gt_data)
422 {
423 if (gt_data->pci_dev) {
424 if (gt_data->int_cause_reg & gt_data->int_mask_reg)
425 pci_dev_trigger_irq(gt_data->vm,gt_data->pci_dev);
426 else
427 pci_dev_clear_irq(gt_data->vm,gt_data->pci_dev);
428 }
429 }
430
431 /* Fetch a DMA record (chained mode) */
432 static void gt_dma_fetch_rec(vm_instance_t *vm,struct dma_channel *channel)
433 {
434 m_uint32_t ptr;
435
436 #if DEBUG_DMA
437 vm_log(vm,"GT_DMA","fetching record at address 0x%x\n",channel->nrptr);
438 #endif
439
440 /* fetch the record from RAM */
441 ptr = channel->nrptr;
442 channel->byte_count = swap32(physmem_copy_u32_from_vm(vm,ptr));
443 channel->src_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x04));
444 channel->dst_addr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x08));
445 channel->nrptr = swap32(physmem_copy_u32_from_vm(vm,ptr+0x0c));
446
447 /* clear the "fetch next record bit" */
448 channel->ctrl &= ~GT_DMA_FETCH_NEXT;
449 }
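/*
 * Added note: a record is four 32-bit words in VM memory, read above as
 *
 *    +0x00  byte count
 *    +0x04  source address
 *    +0x08  destination address
 *    +0x0c  next record pointer (0 terminates the chain)
 *
 * The swap32() calls account for the byte-order difference between the
 * record as stored by the guest and the controller's registers.
 */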
450
451 /* Handle control register of a DMA channel */
452 static void gt_dma_handle_ctrl(struct gt_data *gt_data,int chan_id)
453 {
454 struct dma_channel *channel = &gt_data->dma[chan_id];
455 vm_instance_t *vm = gt_data->vm;
456 int done;
457
458 if (channel->ctrl & GT_DMA_FETCH_NEXT) {
459 if (channel->nrptr == 0) {
460 vm_log(vm,"GT_DMA","trying to load a NULL DMA record...\n");
461 return;
462 }
463
464 gt_dma_fetch_rec(vm,channel);
465 }
466
467 if (channel->ctrl & GT_DMA_CHAN_ENABLE)
468 {
469 do {
470 done = TRUE;
471
472 #if DEBUG_DMA
473 vm_log(vm,"GT_DMA",
474 "starting transfer from 0x%x to 0x%x (size=%u bytes)\n",
475 channel->src_addr,channel->dst_addr,
476 channel->byte_count & 0xFFFF);
477 #endif
478 physmem_dma_transfer(vm,channel->src_addr,channel->dst_addr,
479 channel->byte_count & 0xFFFF);
480
481 /* chained mode */
482 if (!(channel->ctrl & GT_DMA_CHAIN_MODE)) {
483 if (channel->nrptr) {
484 gt_dma_fetch_rec(vm,channel);
485 done = FALSE;
486 }
487 }
488 } while(!done);
489
490 #if DEBUG_DMA
491 vm_log(vm,"GT_DMA","finished transfer.\n");
492 #endif
493 /* Trigger DMA interrupt */
494 gt_data->int_cause_reg |= 1 << (4 + chan_id);
495 gt_data->gt_update_irq_status(gt_data);
496 }
497 }
498
499 #define DMA_REG(ch,reg_name) \
500 if (op_type == MTS_WRITE) \
501 gt_data->dma[ch].reg_name = *data; \
502 else \
503 *data = gt_data->dma[ch].reg_name;
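/*
 * Added note: DMA_REG expands to a bare if/else, so for example
 *
 *    case 0x810: DMA_REG(0,src_addr); return(1);
 *
 * behaves as
 *
 *    case 0x810: if (op_type == MTS_WRITE)
 *                   gt_data->dma[0].src_addr = *data;
 *                else
 *                   *data = gt_data->dma[0].src_addr;
 *                return(1);
 */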
504
505 /* Handle a DMA channel */
506 static int gt_dma_access(cpu_gen_t *cpu,struct vdevice *dev,
507 m_uint32_t offset,u_int op_size,u_int op_type,
508 m_uint64_t *data)
509 {
510 struct gt_data *gt_data = dev->priv_data;
511
512 switch(offset) {
513 /* DMA Source Address */
514 case 0x810: DMA_REG(0,src_addr); return(1);
515 case 0x814: DMA_REG(1,src_addr); return(1);
516 case 0x818: DMA_REG(2,src_addr); return(1);
517 case 0x81c: DMA_REG(3,src_addr); return(1);
518
519 /* DMA Destination Address */
520 case 0x820: DMA_REG(0,dst_addr); return(1);
521 case 0x824: DMA_REG(1,dst_addr); return(1);
522 case 0x828: DMA_REG(2,dst_addr); return(1);
523 case 0x82c: DMA_REG(3,dst_addr); return(1);
524
525 /* DMA Next Record Pointer */
526 case 0x830:
527 gt_data->dma[0].cdptr = *data;
528 DMA_REG(0,nrptr);
529 return(1);
530
531 case 0x834:
532 gt_data->dma[1].cdptr = *data;
533 DMA_REG(1,nrptr);
534 return(1);
535
536 case 0x838:
537 gt_data->dma[2].cdptr = *data;
538 DMA_REG(2,nrptr);
539 return(1);
540
541 case 0x83c:
542 gt_data->dma[3].cdptr = *data;
543 DMA_REG(3,nrptr);
544 return(1);
545
546 /* DMA Channel Control */
547 case 0x840:
548 DMA_REG(0,ctrl);
549 if (op_type == MTS_WRITE)
550 gt_dma_handle_ctrl(gt_data,0);
551 return(1);
552
553 case 0x844:
554 DMA_REG(1,ctrl);
555 if (op_type == MTS_WRITE)
556 gt_dma_handle_ctrl(gt_data,1);
557 return(1);
558
559 case 0x848:
560 DMA_REG(2,ctrl);
561 if (op_type == MTS_WRITE)
562 gt_dma_handle_ctrl(gt_data,2);
563 return(1);
564
565 case 0x84c:
566 DMA_REG(3,ctrl);
567 if (op_type == MTS_WRITE)
568 gt_dma_handle_ctrl(gt_data,3);
569 return(1);
570 }
571
572 return(0);
573 }
574
575 /*
576 * dev_gt64010_access()
577 */
578 void *dev_gt64010_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
579 u_int op_size,u_int op_type,m_uint64_t *data)
580 {
581 struct gt_data *gt_data = dev->priv_data;
582
583 if (op_type == MTS_READ) {
584 *data = 0;
585 } else {
586 *data = swap32(*data);
587 }
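/*
 * Added note: writes are byte-swapped here before dispatch, and read
 * results are swapped back at the "done:" label, so the individual
 * register handlers never deal with byte ordering themselves.
 */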
588
589 if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
590 goto done;
591
592 switch(offset) {
593 /* ===== DRAM Settings (completely faked, 128 MB) ===== */
594 case 0x008: /* ras10_low */
595 if (op_type == MTS_READ)
596 *data = 0x000;
597 break;
598 case 0x010: /* ras10_high */
599 if (op_type == MTS_READ)
600 *data = 0x7F;
601 break;
602 case 0x018: /* ras32_low */
603 if (op_type == MTS_READ)
604 *data = 0x080;
605 break;
606 case 0x020: /* ras32_high */
607 if (op_type == MTS_READ)
608 *data = 0x7F;
609 break;
610 case 0x400: /* ras0_low */
611 if (op_type == MTS_READ)
612 *data = 0x00;
613 break;
614 case 0x404: /* ras0_high */
615 if (op_type == MTS_READ)
616 *data = 0xFF;
617 break;
618 case 0x408: /* ras1_low */
619 if (op_type == MTS_READ)
620 *data = 0x7F;
621 break;
622 case 0x40c: /* ras1_high */
623 if (op_type == MTS_READ)
624 *data = 0x00;
625 break;
626 case 0x410: /* ras2_low */
627 if (op_type == MTS_READ)
628 *data = 0x00;
629 break;
630 case 0x414: /* ras2_high */
631 if (op_type == MTS_READ)
632 *data = 0xFF;
633 break;
634 case 0x418: /* ras3_low */
635 if (op_type == MTS_READ)
636 *data = 0x7F;
637 break;
638 case 0x41c: /* ras3_high */
639 if (op_type == MTS_READ)
640 *data = 0x00;
641 break;
642 case 0xc08: /* pci0_cs10 */
643 if (op_type == MTS_READ)
644 *data = 0xFFF;
645 break;
646 case 0xc0c: /* pci0_cs32 */
647 if (op_type == MTS_READ)
648 *data = 0xFFF;
649 break;
650
651 case 0xc00: /* pci_cmd */
652 if (op_type == MTS_READ)
653 *data = 0x00008001;
654 break;
655
656 /* ===== Interrupt Cause Register ===== */
657 case 0xc18:
658 if (op_type == MTS_READ) {
659 *data = gt_data->int_cause_reg;
660 } else {
661 gt_data->int_cause_reg &= *data;
662 gt64k_update_irq_status(gt_data);
663 }
664 break;
665
666 /* ===== Interrupt Mask Register ===== */
667 case 0xc1c:
668 if (op_type == MTS_READ)
669 *data = gt_data->int_mask_reg;
670 else {
671 gt_data->int_mask_reg = *data;
672 gt64k_update_irq_status(gt_data);
673 }
674 break;
675
676 /* ===== PCI Configuration ===== */
677 case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */
678 pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
679 break;
680
681 case PCI_BUS_DATA: /* pci data address (0xcfc) */
682 pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
683 break;
684
685 #if DEBUG_UNKNOWN
686 default:
687 if (op_type == MTS_READ) {
688 cpu_log(cpu,"GT64010","read from addr 0x%x, pc=0x%llx\n",
689 offset,cpu_get_pc(cpu));
690 } else {
691 cpu_log(cpu,"GT64010","write to addr 0x%x, value=0x%llx, "
692 "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
693 }
694 #endif
695 }
696
697 done:
698 if (op_type == MTS_READ)
699 *data = swap32(*data);
700 return NULL;
701 }
702
703 /*
704 * dev_gt64120_access()
705 */
706 void *dev_gt64120_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
707 u_int op_size,u_int op_type,m_uint64_t *data)
708 {
709 struct gt_data *gt_data = dev->priv_data;
710
711 if (op_type == MTS_READ) {
712 *data = 0;
713 } else {
714 *data = swap32(*data);
715 }
716
717 if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
718 goto done;
719
720 switch(offset) {
721 case 0x008: /* ras10_low */
722 if (op_type == MTS_READ)
723 *data = 0x000;
724 break;
725 case 0x010: /* ras10_high */
726 if (op_type == MTS_READ)
727 *data = 0x7F;
728 break;
729 case 0x018: /* ras32_low */
730 if (op_type == MTS_READ)
731 *data = 0x100;
732 break;
733 case 0x020: /* ras32_high */
734 if (op_type == MTS_READ)
735 *data = 0x7F;
736 break;
737 case 0x400: /* ras0_low */
738 if (op_type == MTS_READ)
739 *data = 0x00;
740 break;
741 case 0x404: /* ras0_high */
742 if (op_type == MTS_READ)
743 *data = 0xFF;
744 break;
745 case 0x408: /* ras1_low */
746 if (op_type == MTS_READ)
747 *data = 0x7F;
748 break;
749 case 0x40c: /* ras1_high */
750 if (op_type == MTS_READ)
751 *data = 0x00;
752 break;
753 case 0x410: /* ras2_low */
754 if (op_type == MTS_READ)
755 *data = 0x00;
756 break;
757 case 0x414: /* ras2_high */
758 if (op_type == MTS_READ)
759 *data = 0xFF;
760 break;
761 case 0x418: /* ras3_low */
762 if (op_type == MTS_READ)
763 *data = 0x7F;
764 break;
765 case 0x41c: /* ras3_high */
766 if (op_type == MTS_READ)
767 *data = 0x00;
768 break;
769 case 0xc08: /* pci0_cs10 */
770 if (op_type == MTS_READ)
771 *data = 0xFFF;
772 break;
773 case 0xc0c: /* pci0_cs32 */
774 if (op_type == MTS_READ)
775 *data = 0xFFF;
776 break;
777
778 case 0xc00: /* pci_cmd */
779 if (op_type == MTS_READ)
780 *data = 0x00008001;
781 break;
782
783 /* ===== Interrupt Cause Register ===== */
784 case 0xc18:
785 if (op_type == MTS_READ)
786 *data = gt_data->int_cause_reg;
787 else {
788 gt_data->int_cause_reg &= *data;
789 gt64k_update_irq_status(gt_data);
790 }
791 break;
792
793 /* ===== Interrupt Mask Register ===== */
794 case 0xc1c:
795 if (op_type == MTS_READ) {
796 *data = gt_data->int_mask_reg;
797 } else {
798 gt_data->int_mask_reg = *data;
799 gt64k_update_irq_status(gt_data);
800 }
801 break;
802
803 /* ===== PCI Bus 1 ===== */
804 case 0xcf0:
805 pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,FALSE,data);
806 break;
807
808 case 0xcf4:
809 pci_dev_data_handler(cpu,gt_data->bus[1],op_type,FALSE,data);
810 break;
811
812 /* ===== PCI Bus 0 ===== */
813 case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */
814 pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
815 break;
816
817 case PCI_BUS_DATA: /* pci data address (0xcfc) */
818 pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
819 break;
820
821 #if DEBUG_UNKNOWN
822 default:
823 if (op_type == MTS_READ) {
824 cpu_log(cpu,"GT64120","read from addr 0x%x, pc=0x%llx\n",
825 offset,cpu_get_pc(cpu));
826 } else {
827 cpu_log(cpu,"GT64120","write to addr 0x%x, value=0x%llx, "
828 "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
829 }
830 #endif
831 }
832
833 done:
834 if (op_type == MTS_READ)
835 *data = swap32(*data);
836 return NULL;
837 }
838
839 /* ======================================================================== */
840 /* GT96k Interrupts */
841 /* ======================================================================== */
842 static void gt96k_update_irq_status(struct gt_data *d)
843 {
844 /* Interrupt0* active ? */
845 if ((d->int_cause_reg & d->int0_main_mask_reg) ||
846 (d->int_high_cause_reg & d->int0_high_mask_reg))
847 {
848 d->int_cause_reg |= 1 << 30;
849 vm_set_irq(d->vm,d->int0_irq);
850 }
851 else
852 {
853 d->int_cause_reg &= ~(1 << 30);
854 vm_clear_irq(d->vm,d->int0_irq);
855 }
856
857 /* Interrupt1* active ? */
858 if ((d->int_cause_reg & d->int1_main_mask_reg) ||
859 (d->int_high_cause_reg & d->int1_high_mask_reg))
860 {
861 d->int_cause_reg |= 1 << 31;
862 vm_set_irq(d->vm,d->int1_irq);
863 }
864 else
865 {
866 d->int_cause_reg &= ~(1 << 31);
867 vm_clear_irq(d->vm,d->int1_irq);
868 }
869
870 /* SerInt0* active ? */
871 if (d->ser_cause_reg & d->serint0_mask_reg) {
872 vm_set_irq(d->vm,d->serint0_irq);
873 } else {
874 vm_clear_irq(d->vm,d->serint0_irq);
875 }
876
877 /* SerInt1* active ? */
878 if (d->ser_cause_reg & d->serint1_mask_reg) {
879 vm_set_irq(d->vm,d->serint1_irq);
880 } else {
881 vm_clear_irq(d->vm,d->serint1_irq);
882 }
883 }
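/*
 * Added note: bits 30 and 31 of int_cause_reg are the Int0 and Int1
 * summary bits maintained here, which is why the write handler for
 * register 0xc18 treats them (together with bit 0) as read-only.
 */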
884
885 /* ======================================================================== */
886 /* SDMA (Serial DMA) */
887 /* ======================================================================== */
888
889 /* Update SDMA interrupt status */
890 static void gt_sdma_update_int_status(struct gt_data *d)
891 {
892 /* Update general SDMA status */
893 if (d->sdma_cause_reg & d->sdma_mask_reg) {
894 d->ser_cause_reg |= GT_SCR_SDMA_SUM;
895 d->int_high_cause_reg |= GT_IHCR_SDMA_SUM;
896 } else {
897 d->ser_cause_reg &= ~GT_SCR_SDMA_SUM;
898 d->int_high_cause_reg &= ~GT_IHCR_SDMA_SUM;
899 }
900
901 gt96k_update_irq_status(d);
902 }
903
904 /* Update SDMA interrupt status for the specified channel */
905 static void gt_sdma_update_channel_int_status(struct gt_data *d,u_int chan_id)
906 {
907 m_uint32_t ch_st;
908
909 /* Get the status of the specified SDMA channel */
910 ch_st = d->sdma_cause_reg & (0x0000000F << (chan_id << 2));
911
912 if (ch_st)
913 d->ser_cause_reg |= GT_SCR_SDMA0_SUM << (chan_id << 1);
914 else
915 d->ser_cause_reg &= ~(GT_SCR_SDMA0_SUM << (chan_id << 1));
916
917 gt_sdma_update_int_status(d);
918 }
919
920 /* Set SDMA cause register for a channel */
921 static inline void gt_sdma_set_cause(struct gt_data *d,u_int chan_id,
922 u_int value)
923 {
924 d->sdma_cause_reg |= value << (chan_id << 2);
925 }
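/*
 * Worked example (added): each channel owns a 4-bit field of the SDMA
 * cause register and a 2-bit field of the serial cause register. For
 * channel 6, GT_SDMA_CAUSE_TXBUF0 is recorded as 0x04 << (6 << 2) =
 * 0x04000000, and its summary bit is GT_SCR_SDMA0_SUM << (6 << 1) =
 * 0x00100000.
 */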
926
927 /* Read a SDMA descriptor from memory */
928 static void gt_sdma_desc_read(struct gt_data *d,m_uint32_t addr,
929 struct sdma_desc *desc)
930 {
931 physmem_copy_from_vm(d->vm,desc,addr,sizeof(struct sdma_desc));
932
933 /* byte-swapping */
934 desc->buf_size = vmtoh32(desc->buf_size);
935 desc->cmd_stat = vmtoh32(desc->cmd_stat);
936 desc->next_ptr = vmtoh32(desc->next_ptr);
937 desc->buf_ptr = vmtoh32(desc->buf_ptr);
938 }
939
940 /* Write a SDMA descriptor to memory */
941 static void gt_sdma_desc_write(struct gt_data *d,m_uint32_t addr,
942 struct sdma_desc *desc)
943 {
944 struct sdma_desc tmp;
945
946 /* byte-swapping */
947 tmp.cmd_stat = vmtoh32(desc->cmd_stat);
948 tmp.buf_size = vmtoh32(desc->buf_size);
949 tmp.next_ptr = vmtoh32(desc->next_ptr);
950 tmp.buf_ptr = vmtoh32(desc->buf_ptr);
951
952 physmem_copy_to_vm(d->vm,&tmp,addr,sizeof(struct sdma_desc));
953 }
954
955 /* Send contents of a SDMA buffer */
956 static void gt_sdma_send_buffer(struct gt_data *d,u_int chan_id,
957 u_char *buffer,m_uint32_t len)
958 {
959 struct mpsc_channel *channel;
960 u_int mode;
961
962 channel = &d->mpsc[chan_id];
963 mode = channel->mmcrl & GT_MMCRL_MODE_MASK;
964
965 switch(mode) {
966 case GT_MPSC_MODE_HDLC:
967 if (channel->nio != NULL)
968 netio_send(channel->nio,buffer,len);
969 break;
970
971 case GT_MPSC_MODE_UART:
972 if (channel->vtty != NULL)
973 vtty_put_buffer(channel->vtty,(char *)buffer,len);
974 break;
975 }
976 }
977
978 /* Start TX DMA process */
979 static int gt_sdma_tx_start(struct gt_data *d,struct sdma_channel *chan)
980 {
981 u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr;
982 struct sdma_desc txd0,ctxd,*ptxd;
983 m_uint32_t tx_start,tx_current;
984 m_uint32_t len,tot_len;
985 int abort = FALSE;
986
987 tx_start = tx_current = chan->sctdp;
988
989 if (!tx_start)
990 return(FALSE);
991
992 ptxd = &txd0;
993 gt_sdma_desc_read(d,tx_start,ptxd);
994
995 /* If we don't own the first descriptor, we cannot transmit */
996 if (!(txd0.cmd_stat & GT_TXDESC_OWN))
997 return(FALSE);
998
999 /* Empty packet for now */
1000 pkt_ptr = pkt;
1001 tot_len = 0;
1002
1003 for(;;)
1004 {
1005 /* Copy packet data to the buffer */
1006 len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT;
1007
1008 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len);
1009 pkt_ptr += len;
1010 tot_len += len;
1011
1012 /* Clear the OWN bit if this is not the first descriptor */
1013 if (!(ptxd->cmd_stat & GT_TXDESC_F)) {
1014 ptxd->cmd_stat &= ~GT_TXDESC_OWN;
1015 physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->cmd_stat);
1016 }
1017
1018 tx_current = ptxd->next_ptr;
1019
1020 /* Last descriptor or no more desc available ? */
1021 if (ptxd->cmd_stat & GT_TXDESC_L)
1022 break;
1023
1024 if (!tx_current) {
1025 abort = TRUE;
1026 break;
1027 }
1028
1029 /* Fetch the next descriptor */
1030 gt_sdma_desc_read(d,tx_current,&ctxd);
1031 ptxd = &ctxd;
1032 }
1033
1034 if ((tot_len != 0) && !abort) {
1035 #if DEBUG_SDMA
1036 GT_LOG(d,"SDMA%u: sending packet of %u bytes\n",tot_len);
1037 mem_dump(log_file,pkt,tot_len);
1038 #endif
1039 /* send it on wire */
1040 gt_sdma_send_buffer(d,chan->id,pkt,tot_len);
1041
1042 /* Signal that a TX buffer has been transmitted */
1043 gt_sdma_set_cause(d,chan->id,GT_SDMA_CAUSE_TXBUF0);
1044 }
1045
1046 /* Clear the OWN flag of the first descriptor */
1047 txd0.cmd_stat &= ~GT_TXDESC_OWN;
1048 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat);
1049
1050 chan->sctdp = tx_current;
1051
1052 if (abort || !tx_current) {
1053 gt_sdma_set_cause(d,chan->id,GT_SDMA_CAUSE_TXEND0);
1054 chan->sdcm &= ~GT_SDCMR_TXD;
1055 }
1056
1057 /* Update interrupt status */
1058 gt_sdma_update_channel_int_status(d,chan->id);
1059 return(TRUE);
1060 }
1061
1062 /* Put packet data into the buffer of a descriptor */
1063 static void gt_sdma_rxdesc_put_pkt(struct gt_data *d,struct sdma_desc *rxd,
1064 u_char **pkt,ssize_t *pkt_len)
1065 {
1066 ssize_t len,cp_len;
1067
1068 len = (rxd->buf_size & GT_RXDESC_BS_MASK) >> GT_RXDESC_BS_SHIFT;
1069
1070 /* compute the data length to copy */
1071 cp_len = m_min(len,*pkt_len);
1072
1073 /* copy packet data to the VM physical RAM */
1074 physmem_copy_to_vm(d->vm,*pkt,rxd->buf_ptr,cp_len);
1075
1076 /* set the byte count in descriptor */
1077 rxd->buf_size |= cp_len;
1078
1079 *pkt += cp_len;
1080 *pkt_len -= cp_len;
1081 }
1082
1083 /* Put a packet into SDMA buffers */
1084 static int gt_sdma_handle_rxqueue(struct gt_data *d,
1085 struct sdma_channel *channel,
1086 u_char *pkt,ssize_t pkt_len)
1087 {
1088 m_uint32_t rx_start,rx_current;
1089 struct sdma_desc rxd0,rxdn,*rxdc;
1090 ssize_t tot_len = pkt_len;
1091 u_char *pkt_ptr = pkt;
1092 int i;
1093
1094 /* Truncate the packet if it is too big */
1095 pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE);
1096
1097 /* Copy the first RX descriptor */
1098 if (!(rx_start = rx_current = channel->scrdp))
1099 goto dma_error;
1100
1101 /* Load the first RX descriptor */
1102 gt_sdma_desc_read(d,rx_start,&rxd0);
1103
1104 #if DEBUG_SDMA
1105 GT_LOG(d,"SDMA channel %u: reading desc at 0x%8.8x "
1106 "[buf_size=0x%8.8x,cmd_stat=0x%8.8x,"
1107 "next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n",
1108 channel->id,rx_start,rxd0.buf_size,rxd0.cmd_stat,
1109 rxd0.next_ptr,rxd0.buf_ptr);
1110 #endif
1111
1112 for(i=0,rxdc=&rxd0;tot_len>0;i++)
1113 {
1114 /* We must own the descriptor */
1115 if (!(rxdc->cmd_stat & GT_RXDESC_OWN))
1116 goto dma_error;
1117
1118 /* Put data into the descriptor buffer */
1119 gt_sdma_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);
1120
1121 /* Clear the OWN bit */
1122 rxdc->cmd_stat &= ~GT_RXDESC_OWN;
1123
1124 /* We have finished if the complete packet has been stored */
1125 if (tot_len == 0) {
1126 rxdc->cmd_stat |= GT_RXDESC_L;
1127 rxdc->buf_size += 4; /* Add 4 bytes for CRC */
1128 }
1129
1130 /* Update the descriptor in host memory (but not the 1st) */
1131 if (i != 0)
1132 gt_sdma_desc_write(d,rx_current,rxdc);
1133
1134 /* Get address of the next descriptor */
1135 rx_current = rxdc->next_ptr;
1136
1137 if (tot_len == 0)
1138 break;
1139
1140 if (!rx_current)
1141 goto dma_error;
1142
1143 /* Read the next descriptor from VM physical RAM */
1144 gt_sdma_desc_read(d,rx_current,&rxdn);
1145 rxdc = &rxdn;
1146 }
1147
1148 /* Update the RX pointers */
1149 channel->scrdp = rx_current;
1150
1151 /* Update the first RX descriptor */
1152 rxd0.cmd_stat |= GT_RXDESC_F;
1153 gt_sdma_desc_write(d,rx_start,&rxd0);
1154
1155 /* Indicate that we have a frame ready */
1156 gt_sdma_set_cause(d,channel->id,GT_SDMA_CAUSE_RXBUF0);
1157 gt_sdma_update_channel_int_status(d,channel->id);
1158 return(TRUE);
1159
1160 dma_error:
1161 gt_sdma_set_cause(d,channel->id,GT_SDMA_CAUSE_RXERR0);
1162 gt_sdma_update_channel_int_status(d,channel->id);
1163 return(FALSE);
1164 }
1165
1166 /* Handle RX packet for a SDMA channel */
1167 static int gt_sdma_handle_rx_pkt(netio_desc_t *nio,
1168 u_char *pkt,ssize_t pkt_len,
1169 struct gt_data *d,void *arg)
1170 {
1171 struct sdma_channel *channel;
1172 u_int chan_id = (int)arg;
1173 u_int group_id;
1174
1175 GT_LOCK(d);
1176
1177 /* Find the SDMA group associated with the MPSC channel for receiving */
1178 group_id = (d->sgcr >> chan_id) & 0x01;
1179 channel = &d->sdma[group_id][chan_id];
1180
1181 gt_sdma_handle_rxqueue(d,channel,pkt,pkt_len);
1182 GT_UNLOCK(d);
1183 return(TRUE);
1184 }
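/*
 * Added note: bit n of SGCR selects which of the two SDMA groups serves
 * MPSC channel n; e.g. with sgcr = 0x02, channel 1 is handled by group 1
 * and every other channel by group 0.
 */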
1185
1186 /* Handle a SDMA channel */
1187 static int gt_sdma_access(cpu_gen_t *cpu,struct vdevice *dev,
1188 m_uint32_t offset,u_int op_size,u_int op_type,
1189 m_uint64_t *data)
1190 {
1191 struct gt_data *gt_data = dev->priv_data;
1192 struct sdma_channel *channel;
1193 u_int group,chan_id,reg;
1194
1195 if ((offset & 0x000F00) != 0x000900)
1196 return(FALSE);
1197
1198 /* Decode group, channel and register */
1199 group = (offset >> 20) & 0x0F;
1200 chan_id = (offset >> 16) & 0x0F;
1201 reg = offset & 0xFFFF;
1202
1203 if ((group >= GT_SDMA_GROUPS) || (chan_id >= GT_SDMA_CHANNELS)) {
1204 cpu_log(cpu,"GT96100","invalid SDMA register 0x%8.8x\n",offset);
1205 return(TRUE);
1206 }
1207
1208 channel = &gt_data->sdma[group][chan_id];
1209
1210 #if 0
1211 printf("SDMA: access to reg 0x%6.6x (group=%u, channel=%u)\n",
1212 offset, group, chan_id);
1213 #endif
1214
1215 switch(reg) {
1216 /* Configuration Register */
1217 case GT_SDMA_SDC:
1218 break;
1219
1220 /* Command Register */
1221 case GT_SDMA_SDCM:
1222 if (op_type == MTS_WRITE) {
1223 channel->sdcm = *data;
1224
1225 if (channel->sdcm & GT_SDCMR_TXD) {
1226 #if DEBUG_SDMA
1227 cpu_log(cpu,"GT96100-SDMA","starting TX transfer (%u/%u)\n",
1228 group,chan_id);
1229 #endif
1230 gt_sdma_tx_start(gt_data,channel);
1231 }
1232 } else {
1233 *data = 0xFF; //0xFFFFFFFF;
1234 }
1235 break;
1236
1237 /* Current RX descriptor */
1238 case GT_SDMA_SCRDP:
1239 if (op_type == MTS_READ)
1240 *data = channel->scrdp;
1241 else
1242 channel->scrdp = *data;
1243 break;
1244
1245 /* Current TX desc. pointer */
1246 case GT_SDMA_SCTDP:
1247 if (op_type == MTS_READ)
1248 *data = channel->sctdp;
1249 else
1250 channel->sctdp = *data;
1251 break;
1252
1253 /* First TX desc. pointer */
1254 case GT_SDMA_SFTDP:
1255 if (op_type == MTS_READ)
1256 *data = channel->sftdp;
1257 else
1258 channel->sftdp = *data;
1259 break;
1260
1261 default:
1262 /* unknown/unmanaged register */
1263 return(FALSE);
1264 }
1265
1266 return(TRUE);
1267 }
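/*
 * Worked example (added): SDMA offsets decode as group = bits 20-23,
 * channel = bits 16-19, register = bits 0-15. Offset 0x11C910 passes the
 * 0x000900 filter and selects group 1, channel 1, register 0xC910
 * (GT_SDMA_SCTDP).
 */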
1268
1269 /* ======================================================================== */
1270 /* MPSC (MultiProtocol Serial Controller) */
1271 /* ======================================================================== */
1272
1273 /* Handle a MPSC channel */
1274 static int gt_mpsc_access(cpu_gen_t *cpu,struct vdevice *dev,
1275 m_uint32_t offset,u_int op_size,u_int op_type,
1276 m_uint64_t *data)
1277 {
1278 struct gt_data *gt_data = dev->priv_data;
1279 struct mpsc_channel *channel;
1280 u_int chan_id,reg,reg2;
1281
1282 if ((offset & 0x000F00) != 0x000A00)
1283 return(FALSE);
1284
1285 /* Decode channel ID and register */
1286 chan_id = offset >> 15;
1287 reg = offset & 0xFFF;
1288
1289 if (chan_id >= GT_MPSC_CHANNELS)
1290 return(FALSE);
1291
1292 channel = &gt_data->mpsc[chan_id];
1293
1294 switch(reg) {
1295 /* Main Config Register Low */
1296 case GT_MPSC_MMCRL:
1297 if (op_type == MTS_READ) {
1298 *data = channel->mmcrl;
1299 } else {
1300 #if DEBUG_MPSC
1301 GT_LOG(gt_data,"MPSC channel %u set in mode %llu\n",
1302 chan_id,*data & 0x07);
1303 #endif
1304 channel->mmcrl = *data;
1305 }
1306 break;
1307
1308 /* Main Config Register High */
1309 case GT_MPSC_MMCRH:
1310 if (op_type == MTS_READ)
1311 *data = channel->mmcrh;
1312 else
1313 channel->mmcrh = *data;
1314 break;
1315
1316 /* Protocol Config Register */
1317 case GT_MPSC_MPCR:
1318 if (op_type == MTS_READ)
1319 *data = channel->mpcr;
1320 else
1321 channel->mpcr = *data;
1322 break;
1323
1324 /* Channel registers */
1325 case GT_MPSC_CHR1:
1326 case GT_MPSC_CHR2:
1327 case GT_MPSC_CHR3:
1328 case GT_MPSC_CHR4:
1329 case GT_MPSC_CHR5:
1330 case GT_MPSC_CHR6:
1331 case GT_MPSC_CHR7:
1332 case GT_MPSC_CHR8:
1333 case GT_MPSC_CHR9:
1334 //case GT_MPSC_CHR10:
1335 reg2 = (reg - GT_MPSC_CHR1) >> 2;
1336 if (op_type == MTS_READ)
1337 *data = channel->chr[reg2];
1338 else
1339 channel->chr[reg2] = *data;
1340 break;
1341
1342 case GT_MPSC_CHR10:
1343 if (op_type == MTS_READ)
1344 *data = channel->chr[9] | 0x20;
1345 else
1346 channel->chr[9] = *data;
1347 break;
1348
1349 default:
1350 /* unknown/unmanaged register */
1351 return(FALSE);
1352 }
1353
1354 return(TRUE);
1355 }
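/*
 * Worked example (added): MPSC channels are spaced 0x8000 apart
 * (chan_id = offset >> 15), so channel 1's Main Config Register Low is
 * at offset 0x008A00: chan_id = 1, reg = 0xA00 = GT_MPSC_MMCRL.
 */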
1356
1357 /* Set NIO for a MPSC channel */
1358 int dev_gt96100_mpsc_set_nio(struct gt_data *d,u_int chan_id,netio_desc_t *nio)
1359 {
1360 struct mpsc_channel *channel;
1361
1362 if (chan_id >= GT_MPSC_CHANNELS)
1363 return(-1);
1364
1365 channel = &d->mpsc[chan_id];
1366
1367 if (channel->nio != NULL)
1368 return(-1);
1369
1370 channel->nio = nio;
1371 netio_rxl_add(nio,(netio_rx_handler_t)gt_sdma_handle_rx_pkt,
1372 d,(void *)chan_id);
1373 return(0);
1374 }
1375
1376 /* Unset NIO for a MPSC channel */
1377 int dev_gt96100_mpsc_unset_nio(struct gt_data *d,u_int chan_id)
1378 {
1379 struct mpsc_channel *channel;
1380
1381 if (chan_id >= GT_MPSC_CHANNELS)
1382 return(-1);
1383
1384 channel = &d->mpsc[chan_id];
1385
1386 if (channel->nio != NULL) {
1387 netio_rxl_remove(channel->nio);
1388 channel->nio = NULL;
1389 }
1390
1391 return(0);
1392 }
1393
1394 /* Set a VTTY for a MPSC channel */
1395 int dev_gt96100_mpsc_set_vtty(struct gt_data *d,u_int chan_id,vtty_t *vtty)
1396 {
1397 struct mpsc_channel *channel;
1398
1399 if (chan_id >= GT_MPSC_CHANNELS)
1400 return(-1);
1401
1402 channel = &d->mpsc[chan_id];
1403
1404 if (channel->vtty != NULL)
1405 return(-1);
1406
1407 channel->vtty = vtty;
1408 return(0);
1409 }
1410
1411 /* Unset a VTTY for a MPSC channel */
1412 int dev_gt96100_mpsc_unset_vtty(struct gt_data *d,u_int chan_id)
1413 {
1414 struct mpsc_channel *channel;
1415
1416 if (chan_id >= GT_MPSC_CHANNELS)
1417 return(-1);
1418
1419 channel = &d->mpsc[chan_id];
1420
1421 if (channel->vtty != NULL) {
1422 channel->vtty = NULL;
1423 }
1424
1425 return(0);
1426 }
1427
1428 /* ======================================================================== */
1429 /* Ethernet */
1430 /* ======================================================================== */
1431
1432 /* Trigger/clear Ethernet interrupt if one or both ports have pending events */
1433 static void gt_eth_set_int_status(struct gt_data *d)
1434 {
1435 /* Compute Ether0 summary */
1436 if (d->eth_ports[0].icr & GT_ICR_INT_SUM) {
1437 d->ser_cause_reg |= GT_SCR_ETH0_SUM;
1438 d->int_high_cause_reg |= GT_IHCR_ETH0_SUM;
1439 } else {
1440 d->ser_cause_reg &= ~GT_SCR_ETH0_SUM;
1441 d->int_high_cause_reg &= ~GT_IHCR_ETH0_SUM;
1442 }
1443
1444 /* Compute Ether1 summary */
1445 if (d->eth_ports[1].icr & GT_ICR_INT_SUM) {
1446 d->ser_cause_reg |= GT_SCR_ETH1_SUM;
1447 d->int_high_cause_reg |= GT_IHCR_ETH1_SUM;
1448 } else {
1449 d->ser_cause_reg &= ~GT_SCR_ETH1_SUM;
1450 d->int_high_cause_reg &= ~GT_IHCR_ETH1_SUM;
1451 }
1452
1453 gt96k_update_irq_status(d);
1454 }
1455
1456 /* Update the Ethernet port interrupt status */
1457 static void gt_eth_update_int_status(struct gt_data *d,struct eth_port *port)
1458 {
1459 if (port->icr & port->imr & GT_ICR_MASK) {
1460 port->icr |= GT_ICR_INT_SUM;
1461 } else {
1462 port->icr &= ~GT_ICR_INT_SUM;
1463 }
1464
1465 gt_eth_set_int_status(d);
1466 }
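/*
 * Added note: interrupt reporting cascades in three stages: a port event
 * sets a bit in the port ICR; if the bit passes the IMR, GT_ICR_INT_SUM
 * is raised; gt_eth_set_int_status() then folds the two port summaries
 * into the serial/high cause registers, and gt96k_update_irq_status()
 * finally drives the CPU interrupt lines.
 */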
1467
1468 /* Read a MII register */
1469 static m_uint32_t gt_mii_read(struct gt_data *d)
1470 {
1471 m_uint8_t port,reg;
1472 m_uint32_t res = 0;
1473
1474 port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
1475 reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;
1476
1477 #if DEBUG_MII
1478 GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: reading.\n",port,reg);
1479 #endif
1480
1481 if ((port < GT_ETH_PORTS) && (reg < 32)) {
1482 res = d->mii_regs[port][reg];
1483
1484 switch(reg) {
1485 case 0x00:
1486 res &= ~0x8200; /* clear reset bit and autoneg restart */
1487 break;
1488 case 0x01:
1489 #if 0
1490 if (d->ports[port].nio && bcm5600_mii_port_status(d,port))
1491 d->mii_output = 0x782C;
1492 else
1493 d->mii_output = 0;
1494 #endif
1495 res = 0x782c;
1496 break;
1497 case 0x02:
1498 res = 0x40;
1499 break;
1500 case 0x03:
1501 res = 0x61d4;
1502 break;
1503 case 0x04:
1504 res = 0x1E1;
1505 break;
1506 case 0x05:
1507 res = 0x41E1;
1508 break;
1509 default:
1510 res = 0;
1511 }
1512 }
1513
1514 /* Mark the data as ready */
1515 res |= GT_SMIR_RVALID_FLAG;
1516
1517 return(res);
1518 }
1519
1520 /* Write a MII register */
1521 static void gt_mii_write(struct gt_data *d)
1522 {
1523 m_uint8_t port,reg;
1524 m_uint16_t isolation;
1525
1526 port = (d->smi_reg & GT_SMIR_PHYAD_MASK) >> GT_SMIR_PHYAD_SHIFT;
1527 reg = (d->smi_reg & GT_SMIR_REGAD_MASK) >> GT_SMIR_REGAD_SHIFT;
1528
1529 if ((port < GT_ETH_PORTS) && (reg < 32))
1530 {
1531 #if DEBUG_MII
1532 GT_LOG(d,"MII: port 0x%4.4x, reg 0x%2.2x: writing 0x%4.4x\n",
1533 port,reg,d->smi_reg & GT_SMIR_DATA_MASK);
1534 #endif
1535
1536 /* Check if PHY isolation status is changing */
1537 if (reg == 0) {
1538 isolation = (d->smi_reg ^ d->mii_regs[port][reg]) & 0x400;
1539
1540 if (isolation) {
1541 #if DEBUG_MII
1542 GT_LOG(d,"MII: port 0x%4.4x: generating IRQ\n",port);
1543 #endif
1544 d->eth_ports[port].icr |= GT_ICR_MII_STC;
1545 gt_eth_update_int_status(d,&d->eth_ports[port]);
1546 }
1547 }
1548
1549 d->mii_regs[port][reg] = d->smi_reg & GT_SMIR_DATA_MASK;
1550 }
1551 }
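/*
 * Illustrative guest-side SMI read (added; write32()/read32() and the
 * variables are hypothetical), matching the handler for register 0x80810
 * below:
 *
 *    write32(smi, GT_SMIR_OPCODE_READ
 *                 | (phy << GT_SMIR_PHYAD_SHIFT)
 *                 | (reg << GT_SMIR_REGAD_SHIFT));
 *    do { v = read32(smi); } while (!(v & GT_SMIR_RVALID_FLAG));
 *    value = v & GT_SMIR_DATA_MASK;
 *
 * The emulation sets GT_SMIR_RVALID_FLAG on every read result, so the
 * poll loop completes on the first pass.
 */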
1552
1553 /* Handle registers of Ethernet ports */
1554 static int gt_eth_access(cpu_gen_t *cpu,struct vdevice *dev,
1555 m_uint32_t offset,u_int op_size,u_int op_type,
1556 m_uint64_t *data)
1557 {
1558 struct gt_data *d = dev->priv_data;
1559 struct eth_port *port = NULL;
1560 u_int queue;
1561
1562 if ((offset < 0x80000) || (offset >= 0x90000))
1563 return(FALSE);
1564
1565 /* Determine the Ethernet port */
1566 if ((offset >= 0x84800) && (offset < 0x88800))
1567 port = &d->eth_ports[0];
1568 else if ((offset >= 0x88800) && (offset < 0x8c800))
1569 port = &d->eth_ports[1];
1570
1571 switch(offset) {
1572 /* SMI register */
1573 case 0x80810:
1574 if (op_type == MTS_WRITE) {
1575 d->smi_reg = *data;
1576
1577 if (!(d->smi_reg & GT_SMIR_OPCODE_READ))
1578 gt_mii_write(d);
1579 } else {
1580 *data = 0;
1581
1582 if (d->smi_reg & GT_SMIR_OPCODE_READ)
1583 *data = gt_mii_read(d);
1584 }
1585 break;
1586
1587 /* ICR: Interrupt Cause Register */
1588 case 0x84850:
1589 case 0x88850:
1590 if (op_type == MTS_READ) {
1591 *data = port->icr;
1592 } else {
1593 port->icr &= *data;
1594 gt_eth_update_int_status(d,port);
1595 }
1596 break;
1597
1598 /* IMR: Interrupt Mask Register */
1599 case 0x84858:
1600 case 0x88858:
1601 if (op_type == MTS_READ) {
1602 *data = port->imr;
1603 } else {
1604 port->imr = *data;
1605 gt_eth_update_int_status(d,port);
1606 }
1607 break;
1608
1609 /* PCR: Port Configuration Register */
1610 case 0x84800:
1611 case 0x88800:
1612 if (op_type == MTS_READ)
1613 *data = port->pcr;
1614 else
1615 port->pcr = *data;
1616 break;
1617
1618 /* PCXR: Port Configuration Extend Register */
1619 case 0x84808:
1620 case 0x88808:
1621 if (op_type == MTS_READ) {
1622 *data = port->pcxr;
1623 *data |= GT_PCXR_SPEED;
1624 } else
1625 port->pcxr = *data;
1626 break;
1627
1628 /* PCMR: Port Command Register */
1629 case 0x84810:
1630 case 0x88810:
1631 if (op_type == MTS_READ)
1632 *data = port->pcmr;
1633 else
1634 port->pcmr = *data;
1635 break;
1636
1637 /* Port Status Register */
1638 case 0x84818:
1639 case 0x88818:
1640 if (op_type == MTS_READ)
1641 *data = 0x0F;
1642 break;
1643
1644 /* First RX descriptor */
1645 case 0x84880:
1646 case 0x88880:
1647 case 0x84884:
1648 case 0x88884:
1649 case 0x84888:
1650 case 0x88888:
1651 case 0x8488C:
1652 case 0x8888C:
1653 queue = (offset >> 2) & 0x03;
1654 if (op_type == MTS_READ)
1655 *data = port->rx_start[queue];
1656 else
1657 port->rx_start[queue] = *data;
1658 break;
1659
1660 /* Current RX descriptor */
1661 case 0x848A0:
1662 case 0x888A0:
1663 case 0x848A4:
1664 case 0x888A4:
1665 case 0x848A8:
1666 case 0x888A8:
1667 case 0x848AC:
1668 case 0x888AC:
1669 queue = (offset >> 2) & 0x03;
1670 if (op_type == MTS_READ)
1671 *data = port->rx_current[queue];
1672 else
1673 port->rx_current[queue] = *data;
1674 break;
1675
1676 /* Current TX descriptor */
1677 case 0x848E0:
1678 case 0x888E0:
1679 case 0x848E4:
1680 case 0x888E4:
1681 queue = (offset >> 2) & 0x01;
1682 if (op_type == MTS_READ)
1683 *data = port->tx_current[queue];
1684 else
1685 port->tx_current[queue] = *data;
1686 break;
1687
1688 /* Hash Table Pointer */
1689 case 0x84828:
1690 case 0x88828:
1691 if (op_type == MTS_READ)
1692 *data = port->ht_addr;
1693 else
1694 port->ht_addr = *data;
1695 break;
1696
1697 /* SDCR: SDMA Configuration Register */
1698 case 0x84840:
1699 case 0x88840:
1700 if (op_type == MTS_READ)
1701 *data = port->sdcr;
1702 else
1703 port->sdcr = *data;
1704 break;
1705
1706 /* SDCMR: SDMA Command Register */
1707 case 0x84848:
1708 case 0x88848:
1709 if (op_type == MTS_WRITE) {
1710 /* Start RX DMA */
1711 if (*data & GT_SDCMR_ERD) {
1712 port->sdcmr |= GT_SDCMR_ERD;
1713 port->sdcmr &= ~GT_SDCMR_AR;
1714 }
1715
1716 /* Abort RX DMA */
1717 if (*data & GT_SDCMR_AR)
1718 port->sdcmr &= ~GT_SDCMR_ERD;
1719
1720 /* Start TX High */
1721 if (*data & GT_SDCMR_TXDH) {
1722 port->sdcmr |= GT_SDCMR_TXDH;
1723 port->sdcmr &= ~GT_SDCMR_STDH;
1724 }
1725
1726 /* Start TX Low */
1727 if (*data & GT_SDCMR_TXDL) {
1728 port->sdcmr |= GT_SDCMR_TXDL;
1729 port->sdcmr &= ~GT_SDCMR_STDL;
1730 }
1731
1732 /* Stop TX High */
1733 if (*data & GT_SDCMR_STDH) {
1734 port->sdcmr &= ~GT_SDCMR_TXDH;
1735 port->sdcmr |= GT_SDCMR_STDH;
1736 }
1737
1738 /* Stop TX Low */
1739 if (*data & GT_SDCMR_STDL) {
1740 port->sdcmr &= ~GT_SDCMR_TXDL;
1741 port->sdcmr |= GT_SDCMR_STDL;
1742 }
1743 } else {
1744 *data = port->sdcmr;
1745 }
1746 break;
1747
1748 case 0x85800:
1749 case 0x89800:
1750 if (op_type == MTS_READ) {
1751 *data = port->rx_bytes;
1752 port->rx_bytes = 0;
1753 }
1754 break;
1755
1756 case 0x85804:
1757 case 0x89804:
1758 if (op_type == MTS_READ) {
1759 *data = port->tx_bytes;
1760 port->tx_bytes = 0;
1761 }
1762 break;
1763
1764 case 0x85808:
1765 case 0x89808:
1766 if (op_type == MTS_READ) {
1767 *data = port->rx_frames;
1768 port->rx_frames = 0;
1769 }
1770 break;
1771
1772 case 0x8580C:
1773 case 0x8980C:
1774 if (op_type == MTS_READ) {
1775 *data = port->tx_frames;
1776 port->tx_frames = 0;
1777 }
1778 break;
1779
1780 #if DEBUG_UNKNOWN
1781 default:
1782 if (op_type == MTS_READ) {
1783 cpu_log(cpu,"GT96100/ETH",
1784 "read access to unknown register 0x%x, pc=0x%llx\n",
1785 offset,cpu_get_pc(cpu));
1786 } else {
1787 cpu_log(cpu,"GT96100/ETH",
1788 "write access to unknown register 0x%x, value=0x%llx, "
1789 "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
1790 }
1791 #endif
1792 }
1793
1794 return(TRUE);
1795 }
1796
1797 /*
1798 * dev_gt96100_access()
1799 */
1800 void *dev_gt96100_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
1801 u_int op_size,u_int op_type,m_uint64_t *data)
1802 {
1803 struct gt_data *gt_data = dev->priv_data;
1804
1805 GT_LOCK(gt_data);
1806
1807 if (op_type == MTS_READ) {
1808 *data = 0;
1809 } else {
1810 if (op_size == 4)
1811 *data = swap32(*data);
1812 }
1813
1814 #if 0 /* DEBUG */
1815 if (offset != 0x101a80) {
1816 if (op_type == MTS_READ) {
1817 cpu_log(cpu,"GT96100","READ OFFSET 0x%6.6x\n",offset);
1818 } else {
1819 cpu_log(cpu,"GT96100","WRITE OFFSET 0x%6.6x, DATA=0x%8.8llx\n",
1820 offset,*data);
1821 }
1822 }
1823 #endif
1824
1825 /* DMA registers */
1826 if (gt_dma_access(cpu,dev,offset,op_size,op_type,data) != 0)
1827 goto done;
1828
1829 /* Serial DMA channel registers */
1830 if (gt_sdma_access(cpu,dev,offset,op_size,op_type,data) != 0)
1831 goto done;
1832
1833 /* MPSC registers */
1834 if (gt_mpsc_access(cpu,dev,offset,op_size,op_type,data) != 0)
1835 goto done;
1836
1837 /* Ethernet registers */
1838 if (gt_eth_access(cpu,dev,offset,op_size,op_type,data) != 0)
1839 goto done;
1840
1841 switch(offset) {
1842 /* Watchdog configuration register */
1843 case 0x101a80:
1844 break;
1845
1846 /* Watchdog value register */
1847 case 0x101a84:
1848 break;
1849
1850 case 0x008: /* ras10_low */
1851 if (op_type == MTS_READ)
1852 *data = 0x000;
1853 break;
1854 case 0x010: /* ras10_high */
1855 if (op_type == MTS_READ)
1856 *data = 0x7F;
1857 break;
1858 case 0x018: /* ras32_low */
1859 if (op_type == MTS_READ)
1860 *data = 0x100;
1861 break;
1862 case 0x020: /* ras32_high */
1863 if (op_type == MTS_READ)
1864 *data = 0x7F;
1865 break;
1866 case 0x400: /* ras0_low */
1867 if (op_type == MTS_READ)
1868 *data = 0x00;
1869 break;
1870 case 0x404: /* ras0_high */
1871 if (op_type == MTS_READ)
1872 *data = 0xFF;
1873 break;
1874 case 0x408: /* ras1_low */
1875 if (op_type == MTS_READ)
1876 *data = 0x7F;
1877 break;
1878 case 0x40c: /* ras1_high */
1879 if (op_type == MTS_READ)
1880 *data = 0x00;
1881 break;
1882 case 0x410: /* ras2_low */
1883 if (op_type == MTS_READ)
1884 *data = 0x00;
1885 break;
1886 case 0x414: /* ras2_high */
1887 if (op_type == MTS_READ)
1888 *data = 0xFF;
1889 break;
1890 case 0x418: /* ras3_low */
1891 if (op_type == MTS_READ)
1892 *data = 0x7F;
1893 break;
1894 case 0x41c: /* ras3_high */
1895 if (op_type == MTS_READ)
1896 *data = 0x00;
1897 break;
1898 case 0xc08: /* pci0_cs10 */
1899 if (op_type == MTS_READ)
1900 *data = 0xFFF;
1901 break;
1902 case 0xc0c: /* pci0_cs32 */
1903 if (op_type == MTS_READ)
1904 *data = 0xFFF;
1905 break;
1906
1907 case 0xc00: /* pci_cmd */
1908 if (op_type == MTS_READ)
1909 *data = 0x00008001;
1910 break;
1911
1912 /* ===== Interrupt Main Cause Register ===== */
1913 case 0xc18:
1914 if (op_type == MTS_READ) {
1915 *data = gt_data->int_cause_reg;
1916 } else {
1917 /* Don't touch bits 0, 30 and 31, which are read-only */
1918 gt_data->int_cause_reg &= (*data | 0xC0000001);
1919 gt96k_update_irq_status(gt_data);
1920 }
1921 break;
1922
1923 /* ===== Interrupt High Cause Register ===== */
1924 case 0xc98:
1925 if (op_type == MTS_READ)
1926 *data = gt_data->int_high_cause_reg;
1927 break;
1928
1929 /* ===== Interrupt0 Main Mask Register ===== */
1930 case 0xc1c:
1931 if (op_type == MTS_READ) {
1932 *data = gt_data->int0_main_mask_reg;
1933 } else {
1934 gt_data->int0_main_mask_reg = *data;
1935 gt96k_update_irq_status(gt_data);
1936 }
1937 break;
1938
1939 /* ===== Interrupt0 High Mask Register ===== */
1940 case 0xc9c:
1941 if (op_type == MTS_READ) {
1942 *data = gt_data->int0_high_mask_reg;
1943 } else {
1944 gt_data->int0_high_mask_reg = *data;
1945 gt96k_update_irq_status(gt_data);
1946 }
1947 break;
1948
1949 /* ===== Interrupt1 Main Mask Register ===== */
1950 case 0xc24:
1951 if (op_type == MTS_READ) {
1952 *data = gt_data->int1_main_mask_reg;
1953 } else {
1954 gt_data->int1_main_mask_reg = *data;
1955 gt96k_update_irq_status(gt_data);
1956 }
1957 break;
1958
1959 /* ===== Interrupt1 High Mask Register ===== */
1960 case 0xca4:
1961 if (op_type == MTS_READ) {
1962 *data = gt_data->int1_high_mask_reg;
1963 } else {
1964 gt_data->int1_high_mask_reg = *data;
1965 gt96k_update_irq_status(gt_data);
1966 }
1967 break;
1968
1969 /* ===== Serial Cause Register (read-only) ===== */
1970 case 0x103a00:
1971 if (op_type == MTS_READ)
1972 *data = gt_data->ser_cause_reg;
1973 break;
1974
1975 /* ===== SerInt0 Mask Register ===== */
1976 case 0x103a80:
1977 if (op_type == MTS_READ) {
1978 *data = gt_data->serint0_mask_reg;
1979 } else {
1980 gt_data->serint0_mask_reg = *data;
1981 gt96k_update_irq_status(gt_data);
1982 }
1983 break;
1984
1985 /* ===== SerInt1 Mask Register ===== */
1986 case 0x103a88:
1987 if (op_type == MTS_READ) {
1988 *data = gt_data->serint1_mask_reg;
1989 } else {
1990 gt_data->serint1_mask_reg = *data;
1991 gt96k_update_irq_status(gt_data);
1992 }
1993 break;
1994
1995 /* ===== SDMA cause register ===== */
1996 case 0x103a10:
1997 if (op_type == MTS_READ) {
1998 *data = gt_data->sdma_cause_reg;
1999 } else {
2000 gt_data->sdma_cause_reg &= *data;
2001 gt_sdma_update_int_status(gt_data);
2002 }
2003 break;
2004
      /* Byte-wide access to the most-significant byte of the SDMA cause
         register: a write clears the whole register and refreshes the
         interrupt status of SDMA channels 6 and 7 */
      case 0x103a13:
         if (op_type == MTS_WRITE) {
            gt_data->sdma_cause_reg = 0;
            gt_sdma_update_channel_int_status(gt_data,6);
            gt_sdma_update_channel_int_status(gt_data,7);
         }
         break;
2016
      /* ===== SDMA Mask Register ===== */
2018 case 0x103a90:
2019 if (op_type == MTS_READ) {
2020 *data = gt_data->sdma_mask_reg;
2021 } else {
2022 gt_data->sdma_mask_reg = *data;
2023 gt_sdma_update_int_status(gt_data);
2024 }
2025 break;
2026
      /* Unidentified registers: accesses are accepted, but reads leave
         the data value untouched */
      case 0x103a38:
      case 0x103a3c:
      case 0x100a48:
         break;
2034
2035 /* CIU Arbiter Configuration Register */
2036 case 0x101ac0:
2037 if (op_type == MTS_READ)
2038 *data = 0x80000000;
2039 break;
2040
2041 /* SGCR - SDMA Global Configuration Register */
2042 case GT_REG_SGC:
2043 if (op_type == MTS_READ)
2044 *data = gt_data->sgcr;
2045 else
2046 gt_data->sgcr = *data;
2047 break;
2048
2049 /* ===== PCI Bus 1 ===== */
2050 case 0xcf0:
2051 pci_dev_addr_handler(cpu,gt_data->bus[1],op_type,FALSE,data);
2052 break;
2053
2054 case 0xcf4:
2055 pci_dev_data_handler(cpu,gt_data->bus[1],op_type,FALSE,data);
2056 break;
2057
2058 /* ===== PCI Bus 0 ===== */
2059 case PCI_BUS_ADDR: /* pci configuration address (0xcf8) */
2060 pci_dev_addr_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
2061 break;
2062
2063 case PCI_BUS_DATA: /* pci data address (0xcfc) */
2064 pci_dev_data_handler(cpu,gt_data->bus[0],op_type,FALSE,data);
2065 break;
2066
2067 #if DEBUG_UNKNOWN
2068 default:
2069 if (op_type == MTS_READ) {
2070 cpu_log(cpu,"GT96100","read from addr 0x%x, pc=0x%llx\n",
2071 offset,cpu_get_pc(cpu));
2072 } else {
2073 cpu_log(cpu,"GT96100","write to addr 0x%x, value=0x%llx, "
2074 "pc=0x%llx\n",offset,*data,cpu_get_pc(cpu));
2075 }
2076 #endif
2077 }
2078
2079 done:
2080 GT_UNLOCK(gt_data);
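   /* The GT register file is little-endian: byte-swap 32-bit read data
      for the big-endian emulated CPU */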
2081 if ((op_type == MTS_READ) && (op_size == 4))
2082 *data = swap32(*data);
2083 return NULL;
2084 }
2085
2086 /* Handle a TX queue (single packet) */
2087 static int gt_eth_handle_txqueue(struct gt_data *d,struct eth_port *port,
2088 int queue)
2089 {
2090 u_char pkt[GT_MAX_PKT_SIZE],*pkt_ptr;
2091 struct sdma_desc txd0,ctxd,*ptxd;
2092 m_uint32_t tx_start,tx_current;
2093 m_uint32_t len,tot_len;
2094 int abort = FALSE;
2095
2096 /* Check if this TX queue is active */
2097 if ((queue == 0) && (port->sdcmr & GT_SDCMR_STDL))
2098 return(FALSE);
2099
2100 if ((queue == 1) && (port->sdcmr & GT_SDCMR_STDH))
2101 return(FALSE);
2102
2103 /* Copy the current txring descriptor */
2104 tx_start = tx_current = port->tx_current[queue];
2105
2106 if (!tx_start)
2107 return(FALSE);
2108
2109 ptxd = &txd0;
2110 gt_sdma_desc_read(d,tx_start,ptxd);
2111
2112 /* If we don't own the first descriptor, we cannot transmit */
2113 if (!(txd0.cmd_stat & GT_TXDESC_OWN))
2114 return(FALSE);
2115
2116 /* Empty packet for now */
2117 pkt_ptr = pkt;
2118 tot_len = 0;
2119
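   /* Walk the descriptor chain, gathering buffer fragments into pkt[]
      until a descriptor with the L (last) bit is reached */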
2120 for(;;) {
2121 #if DEBUG_ETH_TX
2122 GT_LOG(d,"gt_eth_handle_txqueue: loop: "
2123 "cmd_stat=0x%x, buf_size=0x%x, next_ptr=0x%x, buf_ptr=0x%x\n",
2124 ptxd->cmd_stat,ptxd->buf_size,ptxd->next_ptr,ptxd->buf_ptr);
2125 #endif
2126
2127 if (!(ptxd->cmd_stat & GT_TXDESC_OWN)) {
2128 GT_LOG(d,"gt_eth_handle_txqueue: descriptor not owned!\n");
2129 abort = TRUE;
2130 break;
2131 }
2132
2133 /* Copy packet data to the buffer */
2134 len = (ptxd->buf_size & GT_TXDESC_BC_MASK) >> GT_TXDESC_BC_SHIFT;
2135
2136 physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->buf_ptr,len);
2137 pkt_ptr += len;
2138 tot_len += len;
2139
      /* Clear the OWN bit if this is not the first descriptor */
      if (!(ptxd->cmd_stat & GT_TXDESC_F)) {
         ptxd->cmd_stat &= ~GT_TXDESC_OWN;
         /* cmd_stat is the second word of the descriptor, hence the +4
            offset (matching the first-descriptor write-back below) */
         physmem_copy_u32_to_vm(d->vm,tx_current+4,ptxd->cmd_stat);
      }
2145
2146 tx_current = ptxd->next_ptr;
2147
      /* Last descriptor, or no more descriptors available? */
2149 if (ptxd->cmd_stat & GT_TXDESC_L)
2150 break;
2151
2152 if (!tx_current) {
2153 abort = TRUE;
2154 break;
2155 }
2156
2157 /* Fetch the next descriptor */
2158 gt_sdma_desc_read(d,tx_current,&ctxd);
2159 ptxd = &ctxd;
2160 }
2161
2162 if ((tot_len != 0) && !abort) {
2163 #if DEBUG_ETH_TX
2164 GT_LOG(d,"Ethernet: sending packet of %u bytes\n",tot_len);
2165 mem_dump(log_file,pkt,tot_len);
2166 #endif
2167 /* send it on wire */
2168 netio_send(port->nio,pkt,tot_len);
2169
2170 /* Update MIB counters */
2171 port->tx_bytes += tot_len;
2172 port->tx_frames++;
2173 }
2174
2175 /* Clear the OWN flag of the first descriptor */
2176 txd0.cmd_stat &= ~GT_TXDESC_OWN;
2177 physmem_copy_u32_to_vm(d->vm,tx_start+4,txd0.cmd_stat);
2178
2179 port->tx_current[queue] = tx_current;
2180
2181 /* Notify host about transmitted packet */
2182 if (queue == 0)
2183 port->icr |= GT_ICR_TXBUFL;
2184 else
2185 port->icr |= GT_ICR_TXBUFH;
2186
2187 if (abort) {
2188 /* TX underrun */
2189 port->icr |= GT_ICR_TXUDR;
2190
2191 if (queue == 0)
2192 port->icr |= GT_ICR_TXERRL;
2193 else
2194 port->icr |= GT_ICR_TXERRH;
2195 } else {
2196 /* End of queue has been reached */
2197 if (!tx_current) {
2198 if (queue == 0)
2199 port->icr |= GT_ICR_TXENDL;
2200 else
2201 port->icr |= GT_ICR_TXENDH;
2202 }
2203 }
2204
2205 /* Update the interrupt status */
2206 gt_eth_update_int_status(d,port);
2207 return(TRUE);
2208 }
2209
2210 /* Handle TX ring of the specified port */
2211 static void gt_eth_handle_port_txqueues(struct gt_data *d,u_int port)
2212 {
2213 gt_eth_handle_txqueue(d,&d->eth_ports[port],0); /* TX Low */
2214 gt_eth_handle_txqueue(d,&d->eth_ports[port],1); /* TX High */
2215 }
2216
2217 /* Handle all TX rings of all Ethernet ports */
2218 static int gt_eth_handle_txqueues(struct gt_data *d)
2219 {
2220 int i;
2221
2222 GT_LOCK(d);
2223
2224 for(i=0;i<GT_ETH_PORTS;i++)
2225 gt_eth_handle_port_txqueues(d,i);
2226
2227 GT_UNLOCK(d);
2228 return(TRUE);
2229 }
2230
/* Nibble bit-reversal table: inv_nibble[x] is x with its four bits
   reversed (e.g. inv_nibble[0x3] = 0xC, i.e. 0011 -> 1100) */
2232 static const int inv_nibble[16] = {
2233 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2234 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF
2235 };
2236
/* Reverse the bits of a 9-bit value */
2238 static inline u_int gt_hash_inv_9bit(u_int val)
2239 {
2240 u_int res;
2241
2242 res = inv_nibble[val & 0x0F] << 5;
2243 res |= inv_nibble[(val & 0xF0) >> 4] << 1;
2244 res |= (val & 0x100) >> 8;
2245 return(res);
2246 }
2247
2248 /*
2249 * Compute hash value for Ethernet address filtering.
2250 * Two modes are available (p.271 of the GT96100 doc).
2251 */
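/* In both modes, each address byte is first bit-reversed through
   inv_nibble[]; the mode (GT_PCR_HM bit of the port configuration
   register) selects how the 64-bit result is folded into a 15-bit
   hash. */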
2252 static u_int gt_eth_hash_value(n_eth_addr_t *addr,int mode)
2253 {
2254 m_uint64_t tmp;
2255 u_int res;
2256 int i;
2257
   /* Bit-reverse each address byte (reverse each nibble and swap the
      two nibbles of each byte) */
2259 for(i=0,tmp=0;i<N_ETH_ALEN;i++) {
2260 tmp <<= 8;
2261 tmp |= (inv_nibble[addr->eth_addr_byte[i] & 0x0F]) << 4;
2262 tmp |= inv_nibble[(addr->eth_addr_byte[i] & 0xF0) >> 4];
2263 }
2264
2265 if (mode == 0) {
2266 /* Fill bits 0:8 */
2267 res = (tmp & 0x00000003) | ((tmp & 0x00007f00) >> 6);
2268 res ^= (tmp & 0x00ff8000) >> 15;
2269 res ^= (tmp & 0x1ff000000ULL) >> 24;
2270
2271 /* Fill bits 9:14 */
2272 res |= (tmp & 0xfc) << 7;
2273 } else {
2274 /* Fill bits 0:8 */
2275 res = gt_hash_inv_9bit((tmp & 0x00007fc0) >> 6);
2276 res ^= gt_hash_inv_9bit((tmp & 0x00ff8000) >> 15);
2277 res ^= gt_hash_inv_9bit((tmp & 0x1ff000000ULL) >> 24);
2278
2279 /* Fill bits 9:14 */
2280 res |= (tmp & 0x3f) << 9;
2281 }
2282
2283 return(res);
2284 }
2285
2286 /*
2287 * Walk through the Ethernet hash table.
2288 */
2289 static int gt_eth_hash_lookup(struct gt_data *d,struct eth_port *port,
2290 n_eth_addr_t *addr,m_uint64_t *entry)
2291 {
2292 m_uint64_t eth_val;
2293 m_uint32_t hte_addr;
2294 u_int hash_val;
2295 int i;
2296
2297 eth_val = (m_uint64_t)addr->eth_addr_byte[0] << 3;
2298 eth_val |= (m_uint64_t)addr->eth_addr_byte[1] << 11;
2299 eth_val |= (m_uint64_t)addr->eth_addr_byte[2] << 19;
2300 eth_val |= (m_uint64_t)addr->eth_addr_byte[3] << 27;
2301 eth_val |= (m_uint64_t)addr->eth_addr_byte[4] << 35;
2302 eth_val |= (m_uint64_t)addr->eth_addr_byte[5] << 43;
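   /* The address is packed into bits 3..50 so it can be compared directly
      against the address field (GT_HTE_ADDR_MASK) of a hash table entry */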
2303
2304 /* Compute hash value for Ethernet address filtering */
2305 hash_val = gt_eth_hash_value(addr,port->pcr & GT_PCR_HM);
2306
2307 if (port->pcr & GT_PCR_HS) {
2308 /* 1/2K address filtering */
2309 hte_addr = port->ht_addr + ((hash_val & 0x7ff) << 3);
2310 } else {
2311 /* 8K address filtering */
2312 hte_addr = port->ht_addr + (hash_val << 3);
2313 }
2314
2315 #if DEBUG_ETH_HASH
2316 GT_LOG(d,"Hash Lookup for Ethernet address "
2317 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x: addr=0x%x\n",
2318 addr->eth_addr_byte[0], addr->eth_addr_byte[1],
2319 addr->eth_addr_byte[2], addr->eth_addr_byte[3],
2320 addr->eth_addr_byte[4], addr->eth_addr_byte[5],
2321 hte_addr);
2322 #endif
2323
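   /* Linear probing: at most GT_HTE_HOPNUM consecutive 8-byte entries are
      examined before the lookup is abandoned */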
2324 for(i=0;i<GT_HTE_HOPNUM;i++,hte_addr+=8) {
2325 *entry = ((m_uint64_t)physmem_copy_u32_from_vm(d->vm,hte_addr)) << 32;
2326 *entry |= physmem_copy_u32_from_vm(d->vm,hte_addr+4);
2327
      /* Empty entry? */
2329 if (!(*entry & GT_HTE_VALID))
2330 return(GT_HTLOOKUP_MISS);
2331
2332 /* Skip flag or different Ethernet address: jump to next entry */
2333 if ((*entry & GT_HTE_SKIP) || ((*entry & GT_HTE_ADDR_MASK) != eth_val))
2334 continue;
2335
      /* This entry holds the matching MAC address */
2337 return(GT_HTLOOKUP_MATCH);
2338 }
2339
2340 return(GT_HTLOOKUP_HOP_EXCEEDED);
2341 }
2342
/*
 * Check whether a packet (given its destination address) must be handled
 * on the RX path.
 *
 * Return values:
 *   - 0: discard the packet;
 *   - 1: receive the packet;
 *   - 2: receive the packet and set the "M" bit in the RX descriptor.
 *
 * The documentation is not clear about the M bit in the RX descriptor:
 * it is described as "Miss" or "Match" depending on the section.
 */
2355 static inline int gt_eth_handle_rx_daddr(struct gt_data *d,
2356 struct eth_port *port,
2357 u_int hash_res,
2358 m_uint64_t hash_entry)
2359 {
2360 /* Hop Number exceeded */
2361 if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
2362 return(1);
2363
2364 /* Match and hash entry marked as "Receive" */
2365 if ((hash_res == GT_HTLOOKUP_MATCH) && (hash_entry & GT_HTE_RD))
2366 return(2);
2367
   /* Miss, but the hash table's default mode says forward? */
2369 if ((hash_res == GT_HTLOOKUP_MISS) && (port->pcr & GT_PCR_HDM))
2370 return(2);
2371
   /* Promiscuous Mode */
2373 if (port->pcr & GT_PCR_PM)
2374 return(1);
2375
2376 /* Drop packet for other cases */
2377 return(0);
2378 }
2379
2380 /* Put a packet in the specified RX queue */
2381 static int gt_eth_handle_rxqueue(struct gt_data *d,u_int port_id,u_int queue,
2382 u_char *pkt,ssize_t pkt_len)
2383 {
2384 struct eth_port *port = &d->eth_ports[port_id];
2385 m_uint32_t rx_start,rx_current;
2386 struct sdma_desc rxd0,rxdn,*rxdc;
2387 ssize_t tot_len = pkt_len;
2388 u_char *pkt_ptr = pkt;
2389 n_eth_dot1q_hdr_t *hdr;
2390 m_uint64_t hash_entry;
2391 int i,hash_res,addr_action;
2392
2393 /* Truncate the packet if it is too big */
2394 pkt_len = m_min(pkt_len,GT_MAX_PKT_SIZE);
2395
   /* Fetch the address of the first RX descriptor */
2397 if (!(rx_start = rx_current = port->rx_start[queue]))
2398 goto dma_error;
2399
2400 /* Analyze the Ethernet header */
2401 hdr = (n_eth_dot1q_hdr_t *)pkt;
2402
2403 /* Hash table lookup for address filtering */
2404 hash_res = gt_eth_hash_lookup(d,port,&hdr->daddr,&hash_entry);
2405
2406 #if DEBUG_ETH_HASH
2407 GT_LOG(d,"Hash result: %d, hash_entry=0x%llx\n",hash_res,hash_entry);
2408 #endif
2409
2410 if (!(addr_action = gt_eth_handle_rx_daddr(d,port,hash_res,hash_entry)))
2411 return(FALSE);
2412
2413 /* Load the first RX descriptor */
2414 gt_sdma_desc_read(d,rx_start,&rxd0);
2415
2416 #if DEBUG_ETH_RX
2417 GT_LOG(d,"port %u/queue %u: reading desc at 0x%8.8x "
2418 "[buf_size=0x%8.8x,cmd_stat=0x%8.8x,"
2419 "next_ptr=0x%8.8x,buf_ptr=0x%8.8x]\n",
2420 port_id,queue,rx_start,
2421 rxd0.buf_size,rxd0.cmd_stat,rxd0.next_ptr,rxd0.buf_ptr);
2422 #endif
2423
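   /* Scatter the frame across the descriptor chain; the first descriptor
      is only written back once the whole frame has been stored */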
2424 for(i=0,rxdc=&rxd0;tot_len>0;i++)
2425 {
2426 /* We must own the descriptor */
2427 if (!(rxdc->cmd_stat & GT_RXDESC_OWN))
2428 goto dma_error;
2429
2430 /* Put data into the descriptor buffer */
2431 gt_sdma_rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);
2432
2433 /* Clear the OWN bit */
2434 rxdc->cmd_stat &= ~GT_RXDESC_OWN;
2435
2436 /* We have finished if the complete packet has been stored */
2437 if (tot_len == 0) {
2438 rxdc->cmd_stat |= GT_RXDESC_L;
2439 rxdc->buf_size += 4; /* Add 4 bytes for CRC */
2440 }
2441
2442 /* Update the descriptor in host memory (but not the 1st) */
2443 if (i != 0)
2444 gt_sdma_desc_write(d,rx_current,rxdc);
2445
2446 /* Get address of the next descriptor */
2447 rx_current = rxdc->next_ptr;
2448
2449 if (tot_len == 0)
2450 break;
2451
2452 if (!rx_current)
2453 goto dma_error;
2454
2455 /* Read the next descriptor from VM physical RAM */
2456 gt_sdma_desc_read(d,rx_current,&rxdn);
2457 rxdc = &rxdn;
2458 }
2459
2460 /* Update the RX pointers */
2461 port->rx_start[queue] = port->rx_current[queue] = rx_current;
2462
2463 /* Update the first RX descriptor */
2464 rxd0.cmd_stat |= GT_RXDESC_F;
2465
2466 if (hash_res == GT_HTLOOKUP_HOP_EXCEEDED)
2467 rxd0.cmd_stat |= GT_RXDESC_HE;
2468
2469 if (addr_action == 2)
2470 rxd0.cmd_stat |= GT_RXDESC_M;
2471
2472 if (ntohs(hdr->type) <= N_ETH_MTU) /* 802.3 frame */
2473 rxd0.cmd_stat |= GT_RXDESC_FT;
2474
2475 gt_sdma_desc_write(d,rx_start,&rxd0);
2476
2477 /* Update MIB counters */
2478 port->rx_bytes += pkt_len;
2479 port->rx_frames++;
2480
2481 /* Indicate that we have a frame ready */
2482 port->icr |= (GT_ICR_RXBUFQ0 << queue) | GT_ICR_RXBUF;
2483 gt_eth_update_int_status(d,port);
2484 return(TRUE);
2485
2486 dma_error:
2487 port->icr |= (GT_ICR_RXERRQ0 << queue) | GT_ICR_RXERR;
2488 gt_eth_update_int_status(d,port);
2489 return(FALSE);
2490 }
2491
2492 /* Handle RX packet for an Ethernet port */
2493 static int gt_eth_handle_rx_pkt(netio_desc_t *nio,
2494 u_char *pkt,ssize_t pkt_len,
2495 struct gt_data *d,void *arg)
2496 {
   u_int queue,port_id = (u_int)(long)arg;  /* cast via long to avoid
                                               pointer truncation on
                                               64-bit hosts */
2498 struct eth_port *port;
2499
2500 port = &d->eth_ports[port_id];
2501
2502 GT_LOCK(d);
2503
2504 /* Check if RX DMA is active */
2505 if (!(port->sdcmr & GT_SDCMR_ERD)) {
2506 GT_UNLOCK(d);
2507 return(FALSE);
2508 }
2509
2510 queue = 0; /* At this time, only put packet in queue 0 */
2511 gt_eth_handle_rxqueue(d,port_id,queue,pkt,pkt_len);
2512 GT_UNLOCK(d);
2513 return(TRUE);
2514 }
2515
2516 /* Shutdown a GT system controller */
2517 void dev_gt_shutdown(vm_instance_t *vm,struct gt_data *d)
2518 {
2519 if (d != NULL) {
2520 /* Stop the TX ring scanner */
2521 ptask_remove(d->eth_tx_tid);
2522
2523 /* Remove the device */
2524 dev_remove(vm,&d->dev);
2525
2526 /* Remove the PCI device */
2527 pci_dev_remove(d->pci_dev);
2528
2529 /* Free the structure itself */
2530 free(d);
2531 }
2532 }
2533
2534 /* Create a new GT64010 controller */
2535 int dev_gt64010_init(vm_instance_t *vm,char *name,
2536 m_uint64_t paddr,m_uint32_t len,u_int irq)
2537 {
2538 struct gt_data *d;
2539
2540 if (!(d = malloc(sizeof(*d)))) {
2541 fprintf(stderr,"gt64010: unable to create device data.\n");
2542 return(-1);
2543 }
2544
2545 memset(d,0,sizeof(*d));
2546 pthread_mutex_init(&d->lock,NULL);
2547 d->vm = vm;
2548 d->bus[0] = vm->pci_bus[0];
2549 d->gt_update_irq_status = gt64k_update_irq_status;
2550
2551 vm_object_init(&d->vm_obj);
2552 d->vm_obj.name = name;
2553 d->vm_obj.data = d;
2554 d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;
2555
2556 dev_init(&d->dev);
2557 d->dev.name = name;
2558 d->dev.priv_data = d;
2559 d->dev.phys_addr = paddr;
2560 d->dev.phys_len = len;
2561 d->dev.handler = dev_gt64010_access;
2562
2563 /* Add the controller as a PCI device */
2564 if (!pci_dev_lookup(d->bus[0],0,0,0)) {
2565 d->pci_dev = pci_dev_add(d->bus[0],name,
2566 PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64010,
2567 0,0,irq,d,NULL,NULL,NULL);
2568
2569 if (!d->pci_dev) {
2570 fprintf(stderr,"gt64010: unable to create PCI device.\n");
2571 return(-1);
2572 }
2573 }
2574
2575 /* Map this device to the VM */
2576 vm_bind_device(vm,&d->dev);
2577 vm_object_add(vm,&d->vm_obj);
2578 return(0);
2579 }
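/* Typical usage (illustrative values only):
 *
 *    if (dev_gt64010_init(vm,"gt64010",0x14000000ULL,0x1000,irq) == -1)
 *       return(-1);
 *
 * The controller is then visible both as a memory-mapped device and as a
 * PCI device on bus 0.
 */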
2580
2581 /*
2582 * pci_gt64120_read()
2583 *
2584 * Read a PCI register.
2585 */
2586 static m_uint32_t pci_gt64120_read(cpu_gen_t *cpu,struct pci_device *dev,
2587 int reg)
2588 {
2589 switch (reg) {
2590 case 0x08:
2591 return(0x03008005);
2592 default:
2593 return(0);
2594 }
2595 }
2596
2597 /* Create a new GT64120 controller */
2598 int dev_gt64120_init(vm_instance_t *vm,char *name,
2599 m_uint64_t paddr,m_uint32_t len,u_int irq)
2600 {
2601 struct gt_data *d;
2602
2603 if (!(d = malloc(sizeof(*d)))) {
2604 fprintf(stderr,"gt64120: unable to create device data.\n");
2605 return(-1);
2606 }
2607
2608 memset(d,0,sizeof(*d));
2609 pthread_mutex_init(&d->lock,NULL);
2610 d->vm = vm;
2611 d->bus[0] = vm->pci_bus[0];
2612 d->bus[1] = vm->pci_bus[1];
2613 d->gt_update_irq_status = gt64k_update_irq_status;
2614
2615 vm_object_init(&d->vm_obj);
2616 d->vm_obj.name = name;
2617 d->vm_obj.data = d;
2618 d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;
2619
2620 dev_init(&d->dev);
2621 d->dev.name = name;
2622 d->dev.priv_data = d;
2623 d->dev.phys_addr = paddr;
2624 d->dev.phys_len = len;
2625 d->dev.handler = dev_gt64120_access;
2626
2627 /* Add the controller as a PCI device */
2628 if (!pci_dev_lookup(d->bus[0],0,0,0)) {
2629 d->pci_dev = pci_dev_add(d->bus[0],name,
2630 PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT64120,
2631 0,0,irq,d,NULL,pci_gt64120_read,NULL);
2632 if (!d->pci_dev) {
2633 fprintf(stderr,"gt64120: unable to create PCI device.\n");
2634 return(-1);
2635 }
2636 }
2637
2638 /* Map this device to the VM */
2639 vm_bind_device(vm,&d->dev);
2640 vm_object_add(vm,&d->vm_obj);
2641 return(0);
2642 }
2643
2644 /*
2645 * pci_gt96100_read()
2646 *
2647 * Read a PCI register.
2648 */
2649 static m_uint32_t pci_gt96100_read(cpu_gen_t *cpu,struct pci_device *dev,
2650 int reg)
2651 {
2652 switch (reg) {
2653 case 0x08:
2654 return(0x03008005);
2655 default:
2656 return(0);
2657 }
2658 }
2659
2660 /* Create a new GT96100 controller */
2661 int dev_gt96100_init(vm_instance_t *vm,char *name,
2662 m_uint64_t paddr,m_uint32_t len,
2663 u_int int0_irq,u_int int1_irq,
2664 u_int serint0_irq,u_int serint1_irq)
2665 {
2666 struct gt_data *d;
2667 u_int i;
2668
2669 if (!(d = malloc(sizeof(*d)))) {
2670 fprintf(stderr,"gt96100: unable to create device data.\n");
2671 return(-1);
2672 }
2673
2674 memset(d,0,sizeof(*d));
2675 pthread_mutex_init(&d->lock,NULL);
2676 d->name = name;
2677 d->vm = vm;
2678 d->gt_update_irq_status = gt96k_update_irq_status;
2679
2680 for(i=0;i<GT_SDMA_CHANNELS;i++) {
2681 d->sdma[0][i].id = i;
2682 d->sdma[1][i].id = i;
2683 }
2684
2685 /* IRQ setup */
2686 d->int0_irq = int0_irq;
2687 d->int1_irq = int1_irq;
2688 d->serint0_irq = serint0_irq;
2689 d->serint1_irq = serint1_irq;
2690
2691 d->bus[0] = vm->pci_bus[0];
2692 d->bus[1] = vm->pci_bus[1];
2693
2694 vm_object_init(&d->vm_obj);
2695 d->vm_obj.name = name;
2696 d->vm_obj.data = d;
2697 d->vm_obj.shutdown = (vm_shutdown_t)dev_gt_shutdown;
2698
2699 dev_init(&d->dev);
2700 d->dev.name = name;
2701 d->dev.priv_data = d;
2702 d->dev.phys_addr = paddr;
2703 d->dev.phys_len = len;
2704 d->dev.handler = dev_gt96100_access;
2705
2706 /* Add the controller as a PCI device */
2707 if (!pci_dev_lookup(d->bus[0],0,0,0)) {
2708 d->pci_dev = pci_dev_add(d->bus[0],name,
2709 PCI_VENDOR_GALILEO,PCI_PRODUCT_GALILEO_GT96100,
2710 0,0,-1,d,NULL,pci_gt96100_read,NULL);
2711 if (!d->pci_dev) {
2712 fprintf(stderr,"gt96100: unable to create PCI device.\n");
2713 return(-1);
2714 }
2715 }
2716
2717 /* Start the TX ring scanner */
2718 d->eth_tx_tid = ptask_add((ptask_callback)gt_eth_handle_txqueues,d,NULL);
2719
2720 /* Map this device to the VM */
2721 vm_bind_device(vm,&d->dev);
2722 vm_object_add(vm,&d->vm_obj);
2723 return(0);
2724 }
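/* Note: each Ethernet port still needs a NIO bound to it with
   dev_gt96100_eth_set_nio() below before packets can be sent or
   received. */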
2725
2726 /* Bind a NIO to GT96100 Ethernet device */
2727 int dev_gt96100_eth_set_nio(struct gt_data *d,u_int port_id,netio_desc_t *nio)
2728 {
2729 struct eth_port *port;
2730
2731 if (!d || (port_id >= GT_ETH_PORTS))
2732 return(-1);
2733
2734 port = &d->eth_ports[port_id];
2735
2736 /* check that a NIO is not already bound */
2737 if (port->nio != NULL)
2738 return(-1);
2739
2740 port->nio = nio;
   netio_rxl_add(nio,(netio_rx_handler_t)gt_eth_handle_rx_pkt,
                 d,(void *)(long)port_id);
2743 return(0);
2744 }
2745
2746 /* Unbind a NIO from a GT96100 device */
2747 int dev_gt96100_eth_unset_nio(struct gt_data *d,u_int port_id)
2748 {
2749 struct eth_port *port;
2750
2751 if (!d || (port_id >= GT_ETH_PORTS))
2752 return(-1);
2753
2754 port = &d->eth_ports[port_id];
2755
2756 if (port->nio != NULL) {
2757 netio_rxl_remove(port->nio);
2758 port->nio = NULL;
2759 }
2760
2761 return(0);
2762 }
2763
2764 /* Show debugging information */
2765 static void dev_gt96100_show_eth_info(struct gt_data *d,u_int port_id)
2766 {
2767 struct eth_port *port;
2768
2769 port = &d->eth_ports[port_id];
2770
2771 printf("GT96100 Ethernet port %u:\n",port_id);
2772 printf(" PCR = 0x%8.8x\n",port->pcr);
2773 printf(" PCXR = 0x%8.8x\n",port->pcxr);
2774 printf(" PCMR = 0x%8.8x\n",port->pcmr);
2775 printf(" PSR = 0x%8.8x\n",port->psr);
2776 printf(" ICR = 0x%8.8x\n",port->icr);
2777 printf(" IMR = 0x%8.8x\n",port->imr);
2778
2779 printf("\n");
2780 }
2781
2782 /* Show debugging information */
2783 int dev_gt96100_show_info(struct gt_data *d)
2784 {
2785 GT_LOCK(d);
2786 dev_gt96100_show_eth_info(d,0);
2787 dev_gt96100_show_eth_info(d,1);
2788 GT_UNLOCK(d);
2789 return(0);
2790 }
