/*
 * Cisco router simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 */

#ifndef __PPC32_AMD64_TRANS_H__
#define __PPC32_AMD64_TRANS_H__

#include <string.h>

#include "utils.h"
#include "amd64-codegen.h"
#include "cpu.h"
#include "dynamips.h"
#include "ppc32_exec.h"
|
15 |
#define JIT_SUPPORT 1 |
16 |
|
17 |
/* Manipulate bitmasks atomically */ |
18 |
static forced_inline void atomic_or(m_uint32_t *v,m_uint32_t m) |
19 |
{ |
20 |
__asm__ __volatile__("lock; orl %1,%0":"=m"(*v):"ir"(m),"m"(*v)); |
21 |
} |
22 |
|
23 |
static forced_inline void atomic_and(m_uint32_t *v,m_uint32_t m) |
24 |
{ |
25 |
__asm__ __volatile__("lock; andl %1,%0":"=m"(*v):"ir"(m),"m"(*v)); |
26 |
} |
27 |
|
28 |
/* Wrappers to amd64-codegen functions */ |
29 |
#define ppc32_jit_tcb_set_patch amd64_patch |
30 |
#define ppc32_jit_tcb_set_jump amd64_jump_code |
31 |
|
32 |
/* PPC instruction array */ |
33 |
extern struct ppc32_insn_tag ppc32_insn_tags[]; |
34 |
|
35 |
/* Push epilog for an x86 instruction block */ |
36 |
static forced_inline void ppc32_jit_tcb_push_epilog(ppc32_jit_tcb_t *block) |
37 |
{ |
38 |
amd64_ret(block->jit_ptr); |
39 |
} |
40 |
|
41 |
/* Execute JIT code */ |
42 |
static forced_inline |
43 |
void ppc32_jit_tcb_exec(cpu_ppc_t *cpu,ppc32_jit_tcb_t *block) |
44 |
{ |
45 |
insn_tblock_fptr jit_code; |
46 |
m_uint32_t offset; |
47 |
|
48 |
offset = (cpu->ia & PPC32_MIN_PAGE_IMASK) >> 2; |
49 |
jit_code = (insn_tblock_fptr)block->jit_insn_ptr[offset]; |
50 |
|
51 |
#if 0 |
52 |
if (unlikely(!jit_code)) { |
53 |
ppc32_exec_single_step(cpu,vmtoh32(block->ppc_code[offset])); |
54 |
return; |
55 |
} |
56 |
#endif |
57 |
|
58 |
asm volatile ("movq %0,%%r15"::"r"(cpu): |
59 |
"r14","r15","rax","rbx","rcx","rdx","rdi","rsi"); |
60 |
jit_code(); |
61 |
} |
62 |
|
63 |
static inline void amd64_patch(u_char *code,u_char *target) |
64 |
{ |
65 |
/* Skip REX */ |
66 |
if ((code[0] >= 0x40) && (code[0] <= 0x4f)) |
67 |
code += 1; |
68 |
|
69 |
if ((code [0] & 0xf8) == 0xb8) { |
70 |
/* amd64_set_reg_template */ |
71 |
*(m_uint64_t *)(code + 1) = (m_uint64_t)target; |
72 |
} |
73 |
else if (code [0] == 0x8b) { |
74 |
/* mov 0(%rip), %dreg */ |
75 |
*(m_uint32_t *)(code + 2) = (m_uint32_t)(m_uint64_t)target - 7; |
76 |
} |
77 |
else if ((code [0] == 0xff) && (code [1] == 0x15)) { |
78 |
/* call *<OFFSET>(%rip) */ |
79 |
*(m_uint32_t *)(code + 2) = ((m_uint32_t)(m_uint64_t)target) - 7; |
80 |
} |
81 |
else |
82 |
x86_patch(code,target); |
83 |
} |
84 |
|
85 |
/* Set the Instruction Address (IA) register */ |
86 |
void ppc32_set_ia(ppc32_jit_tcb_t *b,m_uint32_t new_ia); |
87 |
|
88 |
/* Set the Link Register (LR) */ |
89 |
void ppc32_set_lr(ppc32_jit_tcb_t *b,m_uint32_t new_lr); |
90 |
|
91 |
/* Increment the number of executed instructions (performance debugging) */ |
92 |
void ppc32_inc_perf_counter(ppc32_jit_tcb_t *b); |
93 |
|
94 |
#endif |