/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"

void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }
  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
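
/* Note that the tag storage is zero-filled: every line starts out invalid,
   unlocked and with a NULL line pointer; fill_line_from_memory points
   tag->line into data_storage the first time each line is filled.  */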

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* Fall through.  */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is a RAM access, assuming that
   HSR0.RME is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.ICWE or IHSR8.DCWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);
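  /* E.g., with the default 64-byte lines and 64 sets this computes
     (address >> 6) & 63, so address bits 11:6 select the set.  */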

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag,
   making the given tag the most recently used.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
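
/* Each tag's lru counter runs from 0 (least recently used) up to cache->ways
   (most recently used).  Promoting a tag decrements every counter greater
   than its old value, preserving the relative order of the other tags in
   the set.  */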

/* Update the LRU information for the tags in the same set as the given tag,
   making the given tag the least recently used.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line, or NULL if all ways in the set
   were in use and locked.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag may be NULL if all ways in the set were in use and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* All ways were in use and locked.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown.  */
}

/* Write data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown.  */
}

/* Preload the cache line containing the given address.  Lock each line
   if requested.
   Returns the number of cycles required to preload the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least one line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
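  /* E.g., with 64-byte lines, preloading 100 bytes starting at offset 20
     within a line touches line-relative bytes 20..119, i.e. 2 lines.  */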

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown.  */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown.  */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to invalidate the line.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown.  */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush each line if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown.  */
}

/*---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
  ---------------------------------------------------------------------------*/
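
/* In this mode, loads, stores, invalidations, preloads and unlocks are
   queued as FRV_CACHE_REQUESTs on one of two pipelines (LS and LD) and take
   effect only as frv_cache_run advances those pipelines cycle by cycle.  */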

/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;
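
/* Requests are allocated in blocks of FREE_CHAIN_SIZE at a time and recycled
   through these chains rather than freed.  Store requests own a data buffer,
   so they live on their own chain and are reused by matching buffer length
   (see new_store_request).  */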

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request whose buffer length
   matches.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request now in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than any request already pending in this cache
   pipeline.  0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}

static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
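
/* Requests advance through the stages unconditionally; they take effect
   only on reaching the last stage, where arbitrate_requests dispatches
   them.  */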

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline and to higher priority requests in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
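
/* Note that an invalidate-all request passes -1 as its address (see
   handle_req_invalidate); line_mask turns that into all_address above, so
   such a request collides with every pending line.  */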

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
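
/* A store that misses is parked in a WAR marked as a preload: when memory
   responds, the fetched line goes into the cache rather than into a return
   buffer, and the requeued store then completes as a hit.  */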

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least one line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
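/* Each cycle advances every queued request one pipeline stage, executes the
   requests that reached the last stage, and then ages the WAR latency
   counters, resubmitting any whose memory latency has expired.  */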
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Not in the cache.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE *cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}