sim/ppc/core.c
/*  This file is part of the program psim.

    Copyright (C) 1994-1995, Andrew Cagney <[email protected]>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

    */


#ifndef _CORE_C_
#define _CORE_C_

#ifndef STATIC_INLINE_CORE
#define STATIC_INLINE_CORE STATIC_INLINE
#endif


#include "basics.h"
#include "device_tree.h"
#include "core.h"

typedef struct _core_mapping core_mapping;
struct _core_mapping {
  /* ram map */
  int free_buffer;
  void *buffer;
  /* device map */
  const device *device;
  device_io_read_buffer_callback *reader;
  device_io_write_buffer_callback *writer;
  /* common */
  int space;
  unsigned_word base;
  unsigned_word bound;
  unsigned nr_bytes;
  core_mapping *next;
};

/* a map - a list of mappings kept sorted by base address, plus an
   optional default mapping that catches everything else */
struct _core_map {
  core_mapping *first;
  core_mapping *default_map;
};

typedef enum {
  core_read_map,
  core_write_map,
  core_execute_map,
  nr_core_map_types,
} core_map_types;

/* the core itself - one map per access type */
struct _core {
  core_map map[nr_core_map_types];
};


INLINE_CORE core *
core_create(void)
{
  core *new_core = ZALLOC(core);
  return new_core;
}


STATIC_INLINE_CORE void
core_init(core *memory)
{
  core_map_types access_type;
  for (access_type = 0;
       access_type < nr_core_map_types;
       access_type++) {
    core_map *map = memory->map + access_type;
    /* blow away old mappings */
    core_mapping *curr = map->first;
    while (curr != NULL) {
      core_mapping *tbd = curr;
      curr = curr->next;
      if (tbd->free_buffer) {
        ASSERT(tbd->buffer != NULL);
        zfree(tbd->buffer);
      }
      zfree(tbd);
    }
    map->first = NULL;
    /* blow away the default */
    if (map->default_map != NULL) {
      ASSERT(map->default_map->buffer == NULL);
      zfree(map->default_map);
    }
    map->default_map = NULL;
  }
}



/* The core has three sub-maps - read, write and execute - that the
   more efficient fixed-quantity read/write functions use directly */

INLINE_CORE core_map *
core_readable(core *memory)
{
  return memory->map + core_read_map;
}

INLINE_CORE core_map *
core_writeable(core *memory)
{
  return memory->map + core_write_map;
}

INLINE_CORE core_map *
core_executable(core *memory)
{
  return memory->map + core_execute_map;
}

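/* Illustrative usage (not part of the original source; start_addr and the
   surrounding setup are made-up names for the example):

     core *memory = core_create();
     ... mappings attached via core_attach() ...
     unsigned_1 data[4];
     unsigned nr_moved = core_map_read_buffer(core_readable(memory),
                                              data, start_addr,
                                              sizeof(data));

   nr_moved is the number of bytes actually copied; it is less than
   sizeof(data) when part of the range is unmapped and no default map has
   been attached. */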


STATIC_INLINE_CORE core_mapping *
new_core_mapping(attach_type attach,
                 int space,
                 unsigned_word addr,
                 unsigned nr_bytes,
                 const device *device,
                 void *buffer,
                 int free_buffer)
{
  core_mapping *new_mapping = ZALLOC(core_mapping);
  switch (attach) {
  case attach_default:
  case attach_callback:
    new_mapping->device = device;
    new_mapping->reader = device->callback->io_read_buffer;
    new_mapping->writer = device->callback->io_write_buffer;
    break;
  case attach_raw_memory:
    new_mapping->buffer = buffer;
    new_mapping->free_buffer = free_buffer;
    break;
  default:
    error("new_core_mapping() - internal error - unknown attach type %d\n",
          attach);
  }
  /* common */
  new_mapping->space = space;
  new_mapping->base = addr;
  new_mapping->nr_bytes = nr_bytes;
  new_mapping->bound = addr + (nr_bytes - 1);
  return new_mapping;
}


STATIC_INLINE_CORE void
core_map_attach(core_map *access_map,
                attach_type attach,
                int space,
                unsigned_word addr,
                unsigned nr_bytes, /* host limited */
                const device *device, /*callback/default*/
                void *buffer, /*raw_memory*/
                int free_buffer) /*raw_memory*/
{
  if (attach == attach_default) {
    if (access_map->default_map != NULL)
      error("core_map_attach() default mapping already in place\n");
    ASSERT(buffer == NULL);
    access_map->default_map = new_core_mapping(attach,
                                               space, addr, nr_bytes,
                                               device, buffer, free_buffer);
  }
  else {
    /* find the insertion point for this additional mapping and insert */
    core_mapping *next_mapping;
    core_mapping **last_mapping;

    /* a zero sized mapping does occasionally turn up - reject it */
    if (nr_bytes == 0)
      error("core_map_attach() size == 0\n");

    /* find the insertion point (between last/next) */
    next_mapping = access_map->first;
    last_mapping = &access_map->first;
    while (next_mapping != NULL && next_mapping->bound < addr) {
      /* assert: next_mapping->base > all bases before next_mapping */
      /* assert: next_mapping->bound >= all bounds before next_mapping */
      last_mapping = &next_mapping->next;
      next_mapping = next_mapping->next;
    }

    /* check that the new mapping does not overlap the next one */
    if (next_mapping != NULL && next_mapping->base <= (addr + (nr_bytes - 1))) {
      error("core_map_attach() map overlap\n");
    }

    /* create/insert the new mapping */
    *last_mapping = new_core_mapping(attach,
                                     space, addr, nr_bytes,
                                     device, buffer, free_buffer);
    (*last_mapping)->next = next_mapping;
  }
}
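/* Illustrative sketch (not part of the original source): after attaching
   mappings covering 0x0000..0x0fff and 0x2000..0x2fff, the per-map list is
   kept sorted by base address with no overlaps:

     first -> [base=0x0000 bound=0x0fff] -> [base=0x2000 bound=0x2fff] -> NULL

   A later attach covering 0x1000..0x1fff is inserted between the two,
   while one starting at 0x0800 would hit the "map overlap" error above. */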


INLINE_CORE void
core_attach(core *memory,
            attach_type attach,
            int space,
            access_type access,
            unsigned_word addr,
            unsigned nr_bytes, /* host limited */
            const device *device) /*callback/default*/
{
  core_map_types access_map;
  int free_buffer = 0;
  void *buffer = NULL;
  ASSERT(attach == attach_default || nr_bytes > 0);
  if (attach == attach_raw_memory)
    buffer = zalloc(nr_bytes);
  for (access_map = 0;
       access_map < nr_core_map_types;
       access_map++) {
    switch (access_map) {
    case core_read_map:
      if (access & access_read) {
        core_map_attach(memory->map + access_map,
                        attach,
                        space, addr, nr_bytes,
                        device, buffer, !free_buffer);
        /* only the first mapping made is responsible for freeing any
           raw memory buffer */
        free_buffer++;
      }
      break;
    case core_write_map:
      if (access & access_write) {
        core_map_attach(memory->map + access_map,
                        attach,
                        space, addr, nr_bytes,
                        device, buffer, !free_buffer);
        free_buffer++;
      }
      break;
    case core_execute_map:
      if (access & access_exec) {
        core_map_attach(memory->map + access_map,
                        attach,
                        space, addr, nr_bytes,
                        device, buffer, !free_buffer);
        free_buffer++;
      }
      break;
    default:
      error("core_attach() internal error\n");
      break;
    }
  }
  ASSERT(free_buffer > 0); /* must attach to at least one thing */
}
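/* Illustrative usage (not part of the original source; the addresses,
   sizes and the "dev" handle are made up):

     attach 8 MiB of RAM at address zero for read, write and instruction
     fetch - the buffer is allocated above and freed by core_init():

       core_attach(memory, attach_raw_memory, 0,
                   access_read | access_write | access_exec,
                   0x00000000, 0x00800000, NULL);

     attach a device's registers at 0xf0001000 for read/write access,
     routed through the device's io_read_buffer/io_write_buffer callbacks:

       core_attach(memory, attach_callback, 0,
                   access_read | access_write,
                   0xf0001000, 0x1000, dev);  */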


STATIC_INLINE_CORE core_mapping *
core_map_find_mapping(core_map *map,
                      unsigned_word addr,
                      unsigned nr_bytes,
                      cpu *processor,
                      unsigned_word cia,
                      int abort) /*either 0 or 1 - helps inline */
{
  core_mapping *mapping = map->first;
  ASSERT((addr & (nr_bytes - 1)) == 0); /* must be aligned */
  ASSERT((addr + (nr_bytes - 1)) >= addr); /* must not wrap */
  while (mapping != NULL) {
    if (addr >= mapping->base
        && (addr + (nr_bytes - 1)) <= mapping->bound)
      return mapping;
    mapping = mapping->next;
  }
  if (map->default_map != NULL)
    return map->default_map;
  if (abort)
    error("core_map_find_mapping() - access to unmapped address, attach a default map to handle this - addr=0x%x nr_bytes=0x%x processor=0x%x cia=0x%x\n",
          addr, nr_bytes, processor, cia);
  return NULL;
}


STATIC_INLINE_CORE void *
core_translate(core_mapping *mapping,
               unsigned_word addr)
{
  return mapping->buffer + addr - mapping->base;
}
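/* For example (illustrative, not part of the original source): if a raw
   memory mapping has base 0x3000 and its host buffer starts at pointer p,
   core_translate(mapping, 0x3004) returns p + 4 - the host address holding
   target byte 0x3004. */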


INLINE_CORE unsigned
core_map_read_buffer(core_map *map,
                     void *buffer,
                     unsigned_word addr,
                     unsigned len)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < len; count++) {
    unsigned_word raddr = addr + count;
    core_mapping *mapping =
      core_map_find_mapping(map,
                            raddr, 1,
                            NULL, /*processor*/
                            0, /*cia*/
                            0); /*dont-abort*/
    if (mapping == NULL)
      break;
    if (mapping->reader != NULL) {
      if (mapping->reader(mapping->device,
                          &byte,
                          mapping->space,
                          raddr - mapping->base,
                          1, /* nr_bytes */
                          0, /*processor*/
                          0 /*cpu*/) != 1)
        break;
    }
    else
      byte = *(unsigned_1*)core_translate(mapping, raddr);
    ((unsigned_1*)buffer)[count] = T2H_1(byte);
  }
  return count;
}


INLINE_CORE unsigned
core_map_write_buffer(core_map *map,
                      const void *buffer,
                      unsigned_word addr,
                      unsigned len)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < len; count++) {
    unsigned_word raddr = addr + count;
    core_mapping *mapping = core_map_find_mapping(map,
                                                  raddr, 1,
                                                  NULL, /*processor*/
                                                  0, /*cia*/
                                                  0); /*dont-abort*/
    if (mapping == NULL)
      break;
    byte = H2T_1(((unsigned_1*)buffer)[count]);
    if (mapping->writer != NULL) {
      if (mapping->writer(mapping->device,
                          &byte,
                          mapping->space,
                          raddr - mapping->base,
                          1, /*nr_bytes*/
                          0, /*processor*/
                          0 /*cpu*/) != 1)
        break;
    }
    else
      *(unsigned_1*)core_translate(mapping, raddr) = byte;
  }
  return count;
}
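/* Illustrative usage (not part of the original source; patch_addr and the
   byte values are made up).  Both buffer routines move one byte at a time
   so a single transfer can span several mappings; the return value is the
   number of bytes actually transferred:

     unsigned_1 patch[2] = { 0x4e, 0x80 };
     if (core_map_write_buffer(core_writeable(memory), patch,
                               patch_addr, sizeof(patch)) != sizeof(patch))
       error("patch crosses unmapped memory\n");
*/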



/* Top level core (root) device: core@garbage

   The core device captures incoming dma requests and changes them to
   outgoing io requests. */

STATIC_INLINE_CORE void
core_init_callback(const device *me,
                   psim *system)
{
  core *memory = (core*)me->data;
  DTRACE_INIT(core);
  core_init(memory);
}


STATIC_INLINE_CORE void
core_attach_address_callback(const device *me,
                             const char *name,
                             attach_type attach,
                             int space,
                             unsigned_word addr,
                             unsigned nr_bytes,
                             access_type access,
                             const device *who) /*callback/default*/
{
  core *memory = (core*)me->data;
  DTRACE_ATTACH_ADDRESS(core);
  if (space != 0)
    error("core_attach_address_callback() invalid address space\n");
  core_attach(memory,
              attach,
              space,
              access,
              addr,
              nr_bytes,
              who);
}


STATIC_INLINE_CORE unsigned
core_dma_read_buffer_callback(const device *me,
                              void *dest,
                              int space,
                              unsigned_word addr,
                              unsigned nr_bytes)
{
  core *memory = (core*)me->data;
  DTRACE_DMA_READ_BUFFER(core);
  return core_map_read_buffer(core_readable(memory),
                              dest,
                              addr,
                              nr_bytes);
}


STATIC_INLINE_CORE unsigned
core_dma_write_buffer_callback(const device *me,
                               const void *source,
                               int space,
                               unsigned_word addr,
                               unsigned nr_bytes,
                               int violate_read_only_section)
{
  core *memory = (core*)me->data;
  core_map *map = (violate_read_only_section
                   ? core_readable(memory)
                   : core_writeable(memory));
  DTRACE_DMA_WRITE_BUFFER(core);
  return core_map_write_buffer(map,
                               source,
                               addr,
                               nr_bytes);
}


static device_callbacks const core_callbacks = {
  core_init_callback,
  core_attach_address_callback,
  unimp_device_detach_address,
  unimp_device_io_read_buffer,
  unimp_device_io_write_buffer,
  core_dma_read_buffer_callback,
  core_dma_write_buffer_callback,
  unimp_device_attach_interrupt,
  unimp_device_detach_interrupt,
  unimp_device_interrupt,
  unimp_device_interrupt_ack,
  unimp_device_ioctl,
};


INLINE_CORE const device *
core_device_create(core *memory)
{
  return device_create_from("core", "/", memory, &core_callbacks, NULL);
}



/* define the read/write 1/2/4/8/word functions */

#undef N
#define N 1
#include "core_n.h"

#undef N
#define N 2
#include "core_n.h"

#undef N
#define N 4
#include "core_n.h"

#undef N
#define N 8
#include "core_n.h"

#undef N
#define N word
#include "core_n.h"

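/* The repeated #define N / #include "core_n.h" sequence above is the usual
   C "template by re-inclusion" trick: each inclusion of core_n.h expands N
   into the names and types of one width-specific access function.  A
   generic sketch of the technique (illustrative only - not the actual
   contents of core_n.h):

     #define XCONCAT(a,b) a##b
     #define FN(n) XCONCAT(read_, n)
     unsigned FN(N)(void);    with N defined as 4 this declares read_4

   Re-including the header with a new N therefore stamps out another copy
   of the same code specialised for that width. */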
#endif /* _CORE_C_ */