/[gxemul]/upstream/0.3.6.2/src/cpus/cpu_alpha_instr_loadstore.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /upstream/0.3.6.2/src/cpus/cpu_alpha_instr_loadstore.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 19 - (show annotations)
Mon Oct 8 16:19:16 2007 UTC (16 years, 8 months ago) by dpavlin
File MIME type: text/plain
File size: 8755 byte(s)
0.3.6.2
1 /*
2 * Copyright (C) 2005 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: cpu_alpha_instr_loadstore.c,v 1.1 2005/08/29 14:36:41 debug Exp $
29 *
30 * Alpha load/store instructions. (Included from cpu_alpha_instr_inc.c.)
31 *
32 *
33 * Load/store instructions have the following arguments:
34 *
35 * arg[0] = pointer to the register to load to or store from (uint64_t)
36 * arg[1] = pointer to the base register (uint64_t)
37 * arg[2] = offset (as an int32_t)
38 *
39 * NOTE:
40 * Alpha byte and word loads (8- and 16-bit) are unsigned, while
41 * 32-bit long words are sign-extended up to 64 bits during a load!
42 */
43
44
45 #ifndef LS_IGNORE_OFFSET
46 #ifndef LS_ALIGN_CHECK
47 static void LS_GENERIC_N(struct cpu *cpu, struct alpha_instr_call *ic)
48 {
49 #ifdef LS_B
50 unsigned char data[1];
51 #endif
52 #ifdef LS_W
53 unsigned char data[2];
54 #endif
55 #ifdef LS_L
56 unsigned char data[4];
57 #endif
58 #ifdef LS_Q
59 unsigned char data[8];
60 #endif
61 uint64_t addr = *((uint64_t *)ic->arg[1]);
62 uint64_t data_x;
63
64 addr += (int32_t)ic->arg[2];
65 #ifdef LS_UNALIGNED
66 addr &= ~7;
67 #endif
68
69 #ifdef LS_LOAD
70 /* Load: */
71 if (!cpu->memory_rw(cpu, cpu->mem, addr, data, sizeof(data),
72 MEM_READ, CACHE_DATA)) {
73 fatal("store failed: TODO\n");
74 exit(1);
75 }
76
77 data_x = data[0];
78 #ifndef LS_B
79 data_x += (data[1] << 8);
80 #ifndef LS_W
81 data_x += (data[2] << 16);
82 data_x += (data[3] << 24);
83 #ifdef LS_L
84 data_x = (int64_t)(int32_t)data_x;
85 #endif
86 #ifndef LS_L
87 data_x += ((uint64_t)data[4] << 32);
88 data_x += ((uint64_t)data[5] << 40);
89 data_x += ((uint64_t)data[6] << 48);
90 data_x += ((uint64_t)data[7] << 56);
91 #endif
92 #endif
93 #endif
94 *((uint64_t *)ic->arg[0]) = data_x;
95 #else
96 /* Store: */
97 data_x = *((uint64_t *)ic->arg[0]);
98 data[0] = data_x;
99 #ifndef LS_B
100 data[1] = data_x >> 8;
101 #ifndef LS_W
102 data[2] = data_x >> 16;
103 data[3] = data_x >> 24;
104 #ifndef LS_L
105 data[4] = data_x >> 32;
106 data[5] = data_x >> 40;
107 data[6] = data_x >> 48;
108 data[7] = data_x >> 56;
109 #endif
110 #endif
111 #endif
112
113 if (!cpu->memory_rw(cpu, cpu->mem, addr, data, sizeof(data),
114 MEM_WRITE, CACHE_DATA)) {
115 fatal("store failed: TODO\n");
116 exit(1);
117 }
118
119 #ifdef LS_LLSC
120 #ifndef LS_LOAD
121 *((uint64_t *)ic->arg[0]) = 1;
122 #endif
123 #endif
124
125 #endif
126 }
127 #endif
128 #endif
129
130
131 static void LS_N(struct cpu *cpu, struct alpha_instr_call *ic)
132 {
133 int first, a, b, c;
134 uint64_t addr;
135
136 addr = (*((uint64_t *)ic->arg[1]))
137 #ifndef LS_IGNORE_OFFSET
138 + (int32_t)ic->arg[2]
139 #endif
140 ;
141
142 #ifdef LS_UNALIGNED
143 addr &= ~7;
144 #endif
145
146 #ifdef LS_LLSC
147 #ifdef LS_LOAD
148 /* TODO: cache-line size! */
149 cpu->cd.alpha.load_linked_addr = addr & ~63;
150 cpu->cd.alpha.ll_flag = 1;
151 #else
152 /* TODO: only invalidate per cache line, not everything! */
153 if (cpu->cd.alpha.ll_flag == 1) {
154 int i;
155 for (i=0; i<cpu->machine->ncpus; i++)
156 cpu->machine->cpus[i]->cd.alpha.ll_flag = 0;
157 } else {
158 *((uint64_t *)ic->arg[0]) = 0;
159 return;
160 }
161 #endif
162 #endif
163
164 first = addr >> ALPHA_TOPSHIFT;
165 a = (addr >> ALPHA_LEVEL0_SHIFT) & (ALPHA_LEVEL0 - 1);
166 b = (addr >> ALPHA_LEVEL1_SHIFT) & (ALPHA_LEVEL1 - 1);
167 c = addr & 8191;
168
169 #ifdef LS_ALIGN_CHECK
170 #ifndef LS_B
171 if (c &
172 #ifdef LS_W
173 1
174 #endif
175 #ifdef LS_L
176 3
177 #endif
178 #ifdef LS_Q
179 7
180 #endif
181 ) {
182 LS_GENERIC_N(cpu, ic);
183 return;
184 }
185 else
186 #endif
187 #endif
188
189 if (first == 0) {
190 struct alpha_vph_page *vph_p;
191 unsigned char *page;
192 vph_p = cpu->cd.alpha.vph_table0[a];
193 #ifdef LS_LOAD
194 page = vph_p->host_load[b];
195 #else
196 page = vph_p->host_store[b];
197 #endif
198 if (page != NULL) {
199 #ifdef LS_LOAD
200 #ifdef HOST_BIG_ENDIAN
201 uint64_t data_x;
202 data_x = page[c];
203 #ifndef LS_B
204 data_x += (page[c+1] << 8);
205 #ifndef LS_W
206 data_x += (page[c+2] << 16);
207 data_x += (page[c+3] << 24);
208 #ifndef LS_L
209 data_x += ((uint64_t)page[c+4] << 32);
210 data_x += ((uint64_t)page[c+5] << 40);
211 data_x += ((uint64_t)page[c+6] << 48);
212 data_x += ((uint64_t)page[c+7] << 56);
213 #endif
214 #endif
215 #endif
216 #ifdef LS_L
217 *((uint64_t *)ic->arg[0]) = (int64_t)(int32_t)data_x;
218 #else
219 *((uint64_t *)ic->arg[0]) = data_x;
220 #endif
221 #else
222 #ifdef LS_B
223 *((uint64_t *)ic->arg[0]) = page[c];
224 #endif
225 #ifdef LS_W
226 uint16_t d = *((uint16_t *) (page + c));
227 *((uint64_t *)ic->arg[0]) = d;
228 #endif
229 #ifdef LS_L
230 int32_t d = *((int32_t *) (page + c));
231 *((uint64_t *)ic->arg[0]) = (int64_t)d;
232 #endif
233 #ifdef LS_Q
234 uint64_t d = *((uint64_t *) (page + c));
235 *((uint64_t *)ic->arg[0]) = d;
236 #endif
237 #endif
238 #else
239 /* Store: */
240 #ifdef HOST_BIG_ENDIAN
241 uint64_t data_x = *((uint64_t *)ic->arg[0]);
242 page[c] = data_x;
243 #ifndef LS_B
244 page[c+1] = data_x >> 8;
245 #ifndef LS_W
246 page[c+2] = data_x >> 16;
247 page[c+3] = data_x >> 24;
248 #ifndef LS_L
249 page[c+4] = data_x >> 32;
250 page[c+5] = data_x >> 40;
251 page[c+6] = data_x >> 48;
252 page[c+7] = data_x >> 56;
253 #endif
254 #endif
255 #endif
256 #else
257 /* Native byte order: */
258 #ifdef LS_B
259 page[c] = *((uint64_t *)ic->arg[0]);
260 #endif
261 #ifdef LS_W
262 uint32_t d = *((uint64_t *)ic->arg[0]);
263 *((uint16_t *) (page + c)) = d;
264 #endif
265 #ifdef LS_L
266 uint32_t d = *((uint64_t *)ic->arg[0]);
267 *((uint32_t *) (page + c)) = d;
268 #endif
269 #ifdef LS_Q
270 uint64_t d = *((uint64_t *)ic->arg[0]);
271 *((uint64_t *) (page + c)) = d;
272 #endif
273 #endif
274
275 #ifdef LS_LLSC
276 #ifndef LS_LOAD
277 *((uint64_t *)ic->arg[0]) = 1;
278 #endif
279 #endif
280
281 #endif /* !LS_LOAD */
282 } else
283 LS_GENERIC_N(cpu, ic);
284 } else if (first == ALPHA_TOP_KERNEL) {
285 struct alpha_vph_page *vph_p;
286 unsigned char *page;
287 vph_p = cpu->cd.alpha.vph_table0_kernel[a];
288 #ifdef LS_LOAD
289 page = vph_p->host_load[b];
290 #else
291 page = vph_p->host_store[b];
292 #endif
293 if (page != NULL) {
294 #ifdef LS_LOAD
295 #ifdef HOST_BIG_ENDIAN
296 uint64_t data_x;
297 data_x = page[c];
298 #ifndef LS_B
299 data_x += (page[c+1] << 8);
300 #ifndef LS_W
301 data_x += (page[c+2] << 16);
302 data_x += (page[c+3] << 24);
303 #ifndef LS_L
304 data_x += ((uint64_t)page[c+4] << 32);
305 data_x += ((uint64_t)page[c+5] << 40);
306 data_x += ((uint64_t)page[c+6] << 48);
307 data_x += ((uint64_t)page[c+7] << 56);
308 #endif
309 #endif
310 #endif
311 #ifdef LS_L
312 *((uint64_t *)ic->arg[0]) = (int64_t)(int32_t)data_x;
313 #else
314 *((uint64_t *)ic->arg[0]) = data_x;
315 #endif
316 #else
317 #ifdef LS_B
318 *((uint64_t *)ic->arg[0]) = page[c];
319 #endif
320 #ifdef LS_W
321 uint16_t d = *((uint16_t *) (page + c));
322 *((uint64_t *)ic->arg[0]) = d;
323 #endif
324 #ifdef LS_L
325 int32_t d = *((int32_t *) (page + c));
326 *((uint64_t *)ic->arg[0]) = (int64_t)d;
327 #endif
328 #ifdef LS_Q
329 uint64_t d = *((uint64_t *) (page + c));
330 *((uint64_t *)ic->arg[0]) = d;
331 #endif
332 #endif
333 #else
334 /* Store: */
335 #ifdef HOST_BIG_ENDIAN
336 uint64_t data_x = *((uint64_t *)ic->arg[0]);
337 page[c] = data_x;
338 #ifndef LS_B
339 page[c+1] = data_x >> 8;
340 #ifndef LS_W
341 page[c+2] = data_x >> 16;
342 page[c+3] = data_x >> 24;
343 #ifndef LS_L
344 page[c+4] = data_x >> 32;
345 page[c+5] = data_x >> 40;
346 page[c+6] = data_x >> 48;
347 page[c+7] = data_x >> 56;
348 #endif
349 #endif
350 #endif
351 #else
352 /* Native byte order: */
353 #ifdef LS_B
354 page[c] = *((uint64_t *)ic->arg[0]);
355 #endif
356 #ifdef LS_W
357 uint32_t d = *((uint64_t *)ic->arg[0]);
358 *((uint16_t *) (page + c)) = d;
359 #endif
360 #ifdef LS_L
361 uint32_t d = *((uint64_t *)ic->arg[0]);
362 *((uint32_t *) (page + c)) = d;
363 #endif
364 #ifdef LS_Q
365 uint64_t d = *((uint64_t *)ic->arg[0]);
366 *((uint64_t *) (page + c)) = d;
367 #endif
368 #endif
369
370 #ifdef LS_LLSC
371 #ifndef LS_LOAD
372 *((uint64_t *)ic->arg[0]) = 1;
373 #endif
374 #endif
375
376 #endif /* !LS_LOAD */
377 } else
378 LS_GENERIC_N(cpu, ic);
379 } else
380 LS_GENERIC_N(cpu, ic);
381 }
382

  ViewVC Help
Powered by ViewVC 1.1.26