1 |
dpavlin |
1 |
/* |
2 |
|
|
* Cisco 7200 (Predator) simulation platform. |
3 |
|
|
* Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr) |
4 |
|
|
*/ |
5 |
|
|
|
6 |
|
|
#ifndef __AMD64_TRANS_H__ |
7 |
|
|
#define __AMD64_TRANS_H__ |
8 |
|
|
|
9 |
|
|
#include "utils.h" |
10 |
|
|
#include "amd64-codegen.h" |
11 |
|
|
#include "mips64.h" |
12 |
|
|
#include "dynamips.h" |
13 |
|
|
#include "cp0.h" |
14 |
|
|
#include "mips64_exec.h" |
15 |
|
|
|
16 |
|
|
#define JIT_SUPPORT 1 |
17 |
|
|
|
18 |
|
|
/* Manipulate bitmasks atomically */ |
19 |
|
|
/* Atomically OR mask 'm' into '*v' (lock-prefixed orl).
 * Uses the modern "+m" read-modify-write constraint instead of the
 * legacy "=m" output plus duplicated "m" input idiom; the generated
 * code is identical. */
static forced_inline void atomic_or(m_uint32_t *v,m_uint32_t m)
{
   __asm__ __volatile__("lock; orl %1,%0":"+m"(*v):"ir"(m));
}
23 |
|
|
|
24 |
|
|
/* Atomically AND mask 'm' into '*v' (lock-prefixed andl).
 * Uses the modern "+m" read-modify-write constraint instead of the
 * legacy "=m" output plus duplicated "m" input idiom; the generated
 * code is identical. */
static forced_inline void atomic_and(m_uint32_t *v,m_uint32_t m)
{
   __asm__ __volatile__("lock; andl %1,%0":"+m"(*v):"ir"(m));
}
28 |
|
|
|
29 |
|
|
/* Wrappers to amd64-codegen functions */ |
30 |
|
|
#define insn_block_set_patch amd64_patch |
31 |
|
|
#define insn_block_set_jump amd64_jump_code |
32 |
|
|
|
33 |
|
|
/* MIPS instruction array */ |
34 |
|
|
extern struct insn_tag mips64_insn_tags[]; |
35 |
|
|
|
36 |
|
|
/* Push epilog for an amd64 instruction block */ |
37 |
|
|
/* Push epilog for an amd64 instruction block: emits a single 'ret'
 * at the block's current JIT output position (block->jit_ptr). */
static forced_inline void insn_block_push_epilog(insn_block_t *block)
{
   amd64_ret(block->jit_ptr);
}
41 |
|
|
|
42 |
|
|
/* Execute JIT code */ |
43 |
|
|
/* Execute the JIT code translated for the instruction at cpu->pc.
 * Falls back to the single-step interpreter when no native code has
 * been generated for that instruction yet. */
static forced_inline
void insn_block_exec_jit_code(cpu_mips_t *cpu,insn_block_t *block)
{
   insn_tblock_fptr jit_code;
   m_uint32_t offset;

   /* Instruction index within the translated page: mask pc down to
    * the page offset, then >>2 since MIPS instructions are 4 bytes. */
   offset = (cpu->pc & MIPS_MIN_PAGE_IMASK) >> 2;
   jit_code = (insn_tblock_fptr)block->jit_insn_ptr[offset];

   /* No native code for this instruction: interpret it directly
    * (vmtoh32 converts the stored opcode to host byte order). */
   if (unlikely(!jit_code)) {
      mips64_exec_single_step(cpu,vmtoh32(block->mips_code[offset]));
      return;
   }

   /* Load the CPU context pointer into %r15 for the generated code,
    * declaring the registers the JIT code may trash as clobbered.
    * NOTE(review): %r15 is both written and listed as a clobber; the
    * generated code presumably expects the context to remain in %r15
    * across the call -- confirm against the code generator. */
   asm volatile ("movq %0,%%r15"::"r"(cpu):
                 "r14","r15","rax","rbx","rcx","rdx","rdi","rsi");
   jit_code();
}
61 |
|
|
|
62 |
|
|
/* Patch a previously emitted amd64 instruction at 'code' so that it
 * references 'target'. The instruction is identified by inspecting
 * its opcode byte(s); unrecognized encodings are delegated to
 * x86_patch(). */
static inline void amd64_patch(u_char *code,u_char *target)
{
   /* Skip REX prefix (0x40-0x4f) if present */
   if ((code[0] >= 0x40) && (code[0] <= 0x4f))
      code += 1;

   if ((code [0] & 0xf8) == 0xb8) {
      /* amd64_set_reg_template (mov $imm64,%reg): store the full
         64-bit target as the immediate operand */
      *(m_uint64_t *)(code + 1) = (m_uint64_t)target;
   }
   else if (code [0] == 0x8b) {
      /* mov 0(%rip), %dreg: RIP-relative 32-bit displacement; the -7
         accounts for RIP pointing past the 7-byte instruction.
         NOTE(review): truncating target to 32 bits assumes it lies
         within +/-2GB of the patched code -- confirm the JIT buffer
         allocator guarantees this. */
      *(m_uint32_t *)(code + 2) = (m_uint32_t)(m_uint64_t)target - 7;
   }
   else if ((code [0] == 0xff) && (code [1] == 0x15)) {
      /* call *<OFFSET>(%rip): same RIP-relative adjustment */
      *(m_uint32_t *)(code + 2) = ((m_uint32_t)(m_uint64_t)target) - 7;
   }
   else
      x86_patch(code,target);
}
83 |
|
|
|
84 |
|
|
/* Set the Program Counter (PC) register */
85 |
|
|
void mips64_set_pc(insn_block_t *b,m_uint64_t new_pc); |
86 |
|
|
|
87 |
|
|
/* Set the Return Address (RA) register */ |
88 |
|
|
void mips64_set_ra(insn_block_t *b,m_uint64_t ret_pc); |
89 |
|
|
|
90 |
|
|
/* Virtual Breakpoint */ |
91 |
|
|
void mips64_emit_breakpoint(insn_block_t *b); |
92 |
|
|
|
93 |
|
|
/* Emit code handling an invalid instruction in a delay slot */
94 |
|
|
int mips64_emit_invalid_delay_slot(insn_block_t *b); |
95 |
|
|
|
96 |
|
|
/* |
97 |
|
|
* Increment count register and trigger the timer IRQ if value in compare |
98 |
|
|
* register is the same. |
99 |
|
|
*/ |
100 |
|
|
void mips64_inc_cp0_count_reg(insn_block_t *b); |
101 |
|
|
|
102 |
|
|
/* Increment the number of executed instructions (performance debugging) */ |
103 |
|
|
void mips64_inc_perf_counter(insn_block_t *b); |
104 |
|
|
|
105 |
|
|
#endif |