/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"

void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }
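
  /* For reference, the defaults above correspond to total capacities of
     sets * ways * line_size: 512 * 2 * 32 = 32KB for fr400/fr450,
     128 * 4 * 64 = 32KB for fr550, and 64 * 4 * 64 = 16KB otherwise.  */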

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
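
/* Note that the ICDM case above preserves sets * ways, so the switch to a
   direct-mapped organization keeps the total capacity constant; e.g. the
   fr550 defaults go from 128 sets x 4 ways to 512 sets x 1 way.  */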

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
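
/* Worked example for the fr550 case above: the 0x8000-byte window divides
   into 4 ways of way_size = 0x2000 bytes, and 'end -= way_size * cwe'
   removes one way's worth from the top of the window per unit of CWE, so
   CWE == 0 leaves the whole window within range and CWE == 4 none of it.  */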

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
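  /* For example, with the fr550 defaults (line_size == 64, sets == 128,
     both powers of two as the masking assumes): the loop shifts the
     address right by 6 bits and the mask keeps 7 bits, so
     set == (address >> 6) & 0x7f.  */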

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
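
/* The loop above reconstructs the line's base address: the set number is
   shifted left by log2(line_size) and OR'd into the stored tag, giving
   tag | (set << log2 (line_size)).  This assumes CACHE_ADDRESS_TAG (from
   cache.h) leaves the set and offset bits of the stored tag clear.  */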

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
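
/* The lru fields in a set thus form an age ranking: cache->ways marks the
   most recently used line and 0 the least.  For example, with 4 ways and
   lru values (4,3,2,1), calling set_most_recently_used on the line holding
   2 yields (3,2,4,1).  */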

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were in use and
         locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
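  /* E.g. with a 64-byte line, offset == 60 and length == 8 span two lines:
     1 + (60 + 8 - 1) / 64 == 2.  */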

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
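
/* Requests are thus pooled: they are carved out of FREE_CHAIN_SIZE-element
   blocks and recycled through the free chains by free_cache_request below,
   never returned to the heap individually.  Note that the blocks themselves
   are never freed.  */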

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer of the
   right size.  (An exact length match is required, so store requests are
   recycled per distinct length.)  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
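
/* Note that numerically larger priority values are lower priority.  For
   example, if the largest value found above is 7, this returns 9; a WAR
   spawned later by the new request takes 8 (its priority is one less),
   slotting between the two.  */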

static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the WAR request to the given pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
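
/* Each call therefore models one cycle: requests that reached LAST_STAGE
   on the previous cycle are retired (their handlers having already run in
   arbitrate_requests), everything else moves forward one stage, and the
   highest priority queued request enters FIRST_STAGE.  */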

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* No cache line could be used (non-cache access, cache disabled, or all
     lines in the set locked), so just copy the data to the return buffer
     directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
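
/* A plausible driving sequence, inferred from this API rather than from any
   particular caller: queue a request, run the cache one cycle at a time, and
   poll the pipeline status until the data arrives.  For example:

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     do
       frv_cache_run (cache, 1);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno));

   frv_cache_data_in_buffer and frv_cache_data_flushed (below) are the query
   side of this protocol.  */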

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}