/[gxemul]/upstream/0.3.5/src/cpu_run.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.3.5/src/cpu_run.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 13 - (show annotations)
Mon Oct 8 16:18:43 2007 UTC (16 years, 7 months ago) by dpavlin
File MIME type: text/plain
File size: 7586 byte(s)
0.3.5
1 /*
2 * Copyright (C) 2005 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: cpu_run.c,v 1.9 2005/08/03 20:42:43 debug Exp $
29 *
30 * Included from cpu_mips.c, cpu_ppc.c etc. (The reason for this is that
31 * the call to a specific cpu's routine that runs one instruction will
32 * be inlined from here.)
33 */
34
35 #include "console.h"
36 #include "debugger.h"
37
38
/*
 *  instrs_per_cycle():
 *
 *  Return how many instructions this cpu executes per emulated cycle.
 *  MIPS cores carry the ratio in their cpu_type descriptor; every other
 *  cpu family (PPC etc., or when no family macro is defined) is 1:1.
 */
static int instrs_per_cycle(struct cpu *cpu)
{
#ifdef CPU_RUN_MIPS
	return cpu->cd.mips.cpu_type.instrs_per_cycle;
#else
	/*  PPC or undefined:  */
	return 1;
#endif
}
46
47
/*
 *  CPU_RUN():
 *
 *  Run instructions on all CPUs in this machine, for a "medium duration"
 *  (or until all CPUs have halted).
 *
 *  emul     the emulator instance (passed through to CPU_RINSTR)
 *  machine  the machine whose cpus are to be run
 *
 *  Return value is 1 if anything happened, 0 if all CPUs are stopped.
 */
int CPU_RUN(struct emul *emul, struct machine *machine)
{
	struct cpu **cpus = machine->cpus;
	int ncpus = machine->ncpus;
	/*  Cached locally so the hot loop doesn't re-read machine fields:  */
	int64_t max_instructions_cached = machine->max_instructions;
	int64_t max_random_cycles_per_chunk_cached =
	    machine->max_random_cycles_per_chunk;
	int64_t ncycles_chunk_end;
	int running, rounds;

	/*  The main loop:  */
	running = 1;
	rounds = 0;
	while (running || single_step) {
		/*  Each chunk is at most 2^17 cycles long:  */
		ncycles_chunk_end = machine->ncycles + (1 << 17);

		machine->a_few_instrs = machine->a_few_cycles *
		    instrs_per_cycle(cpus[0]);

		/*  Do a chunk of cycles:  */
		do {
			int i, j, te, cpu0instrs, a_few_instrs2;

			running = 0;
			cpu0instrs = 0;

			/*
			 *  Run instructions from each CPU:
			 */

			/*  Is any cpu alive?  */
			for (i=0; i<ncpus; i++)
				if (cpus[i]->running)
					running = 1;

			if (single_step) {
				/*
				 *  NOTE(review): single_step == 1 appears to
				 *  mean "single-stepping was just requested";
				 *  it is bumped to 2 once trace state has
				 *  been saved. Confirm against debugger.c.
				 */
				if (single_step == 1) {
					/*
					 *  TODO: (Important!)
					 *
					 *  If these are enabled, and focus is
					 *  shifted to another machine in the
					 *  debugger, then the wrong machine
					 *  gets its variables restored!
					 */
					old_instruction_trace =
					    machine->instruction_trace;
					old_quiet_mode = quiet_mode;
					old_show_trace_tree =
					    machine->show_trace_tree;
					machine->instruction_trace = 1;
					machine->show_trace_tree = 1;
					quiet_mode = 0;
					single_step = 2;
				}

				/*  Enter the debugger before every
				    instruction, on every cpu:  */
				for (j=0; j<instrs_per_cycle(cpus[0]); j++) {
					if (single_step)
						debugger();
					for (i=0; i<ncpus; i++)
						if (cpus[i]->running) {
							int instrs_run =
							    CPU_RINSTR(emul,
							    cpus[i]);
							if (i == 0)
								cpu0instrs +=
								    instrs_run;
						}
				}
			} else if (max_random_cycles_per_chunk_cached > 0) {
				/*  Randomized scheduling: each cpu runs a
				    random number of cycles (at least 1, at
				    most max_random_cycles_per_chunk):  */
				for (i=0; i<ncpus; i++)
					if (cpus[i]->running && !single_step) {
						a_few_instrs2 = machine->
						    a_few_cycles;
						if (a_few_instrs2 >=
						    max_random_cycles_per_chunk_cached)
							a_few_instrs2 = max_random_cycles_per_chunk_cached;
						j = (random() % a_few_instrs2) + 1;
						j *= instrs_per_cycle(cpus[i]);
						while (j-- >= 1 && cpus[i]->running) {
							int instrs_run = CPU_RINSTR(emul, cpus[i]);
							if (i == 0)
								cpu0instrs += instrs_run;
							if (single_step)
								break;
						}
					}
			} else {
				/*  CPU 0 is special, cpu0instrs must be
				    updated.  */
				for (j=0; j<machine->a_few_instrs; ) {
					int instrs_run;
					if (!cpus[0]->running || single_step)
						break;
					/*  Retry while CPU_RINSTR reports 0
					    instructions run; a 0 that
					    coincides with single_step aborts
					    the whole inner loop by forcing j
					    to its end value:  */
					do {
						instrs_run =
						    CPU_RINSTR(emul, cpus[0]);
						if (instrs_run == 0 ||
						    single_step) {
							j = machine->a_few_instrs;
							break;
						}
					} while (instrs_run == 0);
					j += instrs_run;
					cpu0instrs += instrs_run;
				}

				/*  CPU 1 and up:  */
				for (i=1; i<ncpus; i++) {
					a_few_instrs2 = machine->a_few_cycles *
					    instrs_per_cycle(cpus[i]);
					for (j=0; j<a_few_instrs2; )
						if (cpus[i]->running) {
							int instrs_run = 0;
							while (!instrs_run) {
								instrs_run = CPU_RINSTR(emul, cpus[i]);
								if (instrs_run == 0 ||
								    single_step) {
									j = a_few_instrs2;
									break;
								}
							}
							j += instrs_run;
						} else
							break;
				}
			}

			/*
			 *  Hardware 'ticks': (clocks, interrupt sources...)
			 *
			 *  Here, cpu0instrs is the number of instructions
			 *  executed on cpu0. (TODO: don't use cpu 0 for this,
			 *  use some kind of "mainbus" instead.) Hardware
			 *  ticks are not per instruction, but per cycle,
			 *  so we divide by the number of
			 *  instructions_per_cycle for cpu0.
			 *
			 *  TODO: This doesn't work in a machine with, say,
			 *  a mixture of R3000, R4000, and R10000 CPUs, if
			 *  there ever was such a thing.
			 *
			 *  TODO 2: A small bug occurs if cpu0instrs isn't
			 *  evenly divisible by instrs_per_cycle. We then
			 *  cause hardware ticks a fraction of a cycle too
			 *  often.
			 */
			i = instrs_per_cycle(cpus[0]);
			/*  Shift for the common power-of-two ratios,
			    divide otherwise:  */
			switch (i) {
			case 1:	break;
			case 2:	cpu0instrs >>= 1; break;
			case 4:	cpu0instrs >>= 2; break;
			default:
				cpu0instrs /= i;
			}

			/*  Count down every tick entry; when one reaches
			    zero, re-arm it (possibly several periods at
			    once) and invoke its tick function once:  */
			for (te=0; te<machine->n_tick_entries; te++) {
				machine->ticks_till_next[te] -= cpu0instrs;

				if (machine->ticks_till_next[te] <= 0) {
					while (machine->ticks_till_next[te] <= 0)
						machine->ticks_till_next[te] +=
						    machine->ticks_reset_value[te];
					machine->tick_func[te](cpus[0], machine->tick_extra[te]);
				}
			}

			/*  Any CPU dead? If so, drop into the debugger
			    (unless the machine is configured to just
			    exit):  */
			for (i=0; i<ncpus; i++) {
				if (cpus[i]->dead &&
				    machine->exit_without_entering_debugger == 0)
					single_step = 1;
			}

			machine->ncycles += cpu0instrs;
		} while (running && (machine->ncycles < ncycles_chunk_end));

		/*  If we've done buffered console output,
		    then flush stdout every now and then:  */
		if (machine->ncycles > machine->ncycles_flush + (1<<17)) {
			console_flush();
			machine->ncycles_flush = machine->ncycles;
		}

		/*  Show cycle statistics roughly every 2^25 cycles:  */
		if (machine->ncycles > machine->ncycles_show + (1<<25)) {
			machine->ncycles_since_gettimeofday +=
			    (machine->ncycles - machine->ncycles_show);
			cpu_show_cycles(machine, 0);
			machine->ncycles_show = machine->ncycles;
		}

		/*  Stop when the configured instruction limit (if any)
		    has been reached:  */
		if (max_instructions_cached != 0 &&
		    machine->ncycles >= max_instructions_cached)
			running = 0;

		/*  Let's allow other machines to run.  */
		rounds ++;
		if (rounds > 2)
			break;
	}

	return running;
}
258

  ViewVC Help
Powered by ViewVC 1.1.26