00001
00002
00011
00012
00013
00014
00015
00016
00017 #include <l4/sys/types.h>
00018 #include <l4/env/errno.h>
00019 #include <l4/sys/syscalls.h>
00020 #include <l4/sys/ipc.h>
00021 #include <l4/rmgr/librmgr.h>
00022 #include <l4/l4rm/l4rm.h>
00023 #include <l4/generic_io/generic_io-server.h>
00024 #include <l4/sigma0/sigma0.h>
00025
00026
00027 #include <stdio.h>
00028 #include <stdlib.h>
00029
00030
00031 #include "io.h"
00032 #include "res.h"
00033 #include "__config.h"
00034 #include "__macros.h"
00035
00036 #if IO_REQUEST_PAGE
00037 # define IO_REQ_PAGESIZE L4_PAGESIZE
00038 # define IO_REQ_LOG2_PAGESIZE L4_LOG2_PAGESIZE
00039 # define IO_REQ_PAGEMASK L4_PAGEMASK
00040 # define io_req_trunc_page(a) l4_trunc_page(a)
00041 #else
00042 # define IO_REQ_PAGESIZE L4_SUPERPAGESIZE
00043 # define IO_REQ_LOG2_PAGESIZE L4_LOG2_SUPERPAGESIZE
00044 # define IO_REQ_PAGEMASK L4_SUPERPAGEMASK
00045 # define io_req_trunc_page(a) l4_trunc_superpage(a)
00046 #endif
00047
00048 #if IO_SEND_PAGE
00049 # define IO_PAGESIZE L4_PAGESIZE
00050 # define IO_LOG2_PAGESIZE L4_LOG2_PAGESIZE
00051 # define IO_PAGEMASK L4_PAGEMASK
00052 # define io_trunc_page(a) l4_trunc_page(a)
00053 #else
00054 # define IO_PAGESIZE L4_SUPERPAGESIZE
00055 # define IO_LOG2_PAGESIZE L4_LOG2_SUPERPAGESIZE
00056 # define IO_PAGEMASK L4_SUPERPAGEMASK
00057 # define io_trunc_page(a) l4_trunc_superpage(a)
00058 #endif
00059
00060
00061
00062
00063
/* Allocated I/O resource region (ports or memory).  Entries are kept in
 * a singly-linked list sorted by address, with no overlaps (enforced by
 * __request_region()). */
typedef struct io_res {
  struct io_res *next;
  unsigned long start;   /* first port/address of the region */
  unsigned long end;     /* last port/address (inclusive) */
  io_client_t *client;   /* client that owns the region */
} io_res_t;

/* list of allocated I/O port regions */
static io_res_t *io_port_res = NULL;

/* list of allocated I/O memory regions */
static io_res_t *io_mem_res = NULL;

/* Announced I/O memory region: a physical range together with the local
 * virtual address it was mapped to (see callback_announce_mem_region()). */
typedef struct io_ares {
  struct io_ares *next;
  l4_addr_t start;   /* first physical address */
  l4_addr_t end;     /* last physical address (inclusive) */
  l4_addr_t vaddr;   /* local virtual address of the mapping */
  int flags;
} io_ares_t;

/* list of announced I/O memory regions */
static io_ares_t *io_mem_ares = NULL;

/* allocation state of one ISA DMA channel */
struct io_dma_res {
  int used;              /* non-zero while the channel is allocated */
  io_client_t *client;   /* owner of the channel */
};

/* allocation table for the 8 ISA DMA channels */
static struct io_dma_res isa_dma[8] = {
  {0, NULL},
  {0, NULL},
  {0, NULL},
  {0, NULL},
  {0, NULL},
  {0, NULL},
  {0, NULL},
  {0, NULL},
};

/* the I/O server's own client descriptor; head of the client list
 * walked by find_client() */
static io_client_t *io_self;

/* BIOS area: fixed physical location, mapped once by bios_map_area() */
static const l4_addr_t bios_paddr = 0xe0000;   /* physical base */
static l4_addr_t bios_vaddr = 0;               /* virtual base after mapping */
static const l4_size_t bios_size = 0x20000;    /* area size in bytes */
00131
00137 static io_client_t *find_client(l4_threadid_t tid)
00138 {
00139 io_client_t *c;
00140 io_client_t tmp;
00141
00142 tmp.c_l4id = tid;
00143 for (c = io_self; c; c = c->next)
00144 if (client_equal(c, &tmp))
00145 return c;
00146
00147 return NULL;
00148 }
00149
00153 static int __request_region(unsigned long start, unsigned long len,
00154 unsigned long max, io_res_t ** root,
00155 io_client_t * c)
00156 {
00157 unsigned long end = start + len - 1;
00158 io_res_t *tmp = NULL, *s = *root,
00159 *p = NULL;
00160
00161
00162 if (end < start)
00163 return -L4_EINVAL;
00164 if (end > max)
00165 return -L4_EINVAL;
00166
00167
00168 for (;;)
00169 {
00170 if (!s || (end < s->start))
00171 {
00172 LOGd(DEBUG_RES, "allocating (0x%08lx-0x%08lx) for "l4util_idfmt,
00173 start, end, l4util_idstr(c->c_l4id));
00174
00175 tmp = malloc(sizeof(io_res_t));
00176 Assert(tmp);
00177 tmp->start = start;
00178 tmp->end = end;
00179 tmp->next = s;
00180 tmp->client = c;
00181 if (!p)
00182
00183 *root = tmp;
00184 else
00185 p->next = tmp;
00186 return 0;
00187 }
00188 p = s;
00189 if (start > p->end)
00190 {
00191 s = p->next;
00192 continue;
00193 }
00194 LOGd(DEBUG_RES, "(0x%08lx-0x%08lx) not available for "l4util_idfmt"",
00195 start, end, l4util_idstr(c->c_l4id));
00196 return -L4_EBUSY;
00197 }
00198 };
00199
00210 static int __search_region(unsigned long addr, io_ares_t * p,
00211 unsigned long *start, unsigned long *len)
00212 {
00213 while (p)
00214 {
00215 if (p->start <= addr && addr <= p->end)
00216 {
00217 *start = p->start;
00218 *len = p->end - p->start + 1;
00219 return 0;
00220 }
00221 p = p->next;
00222 }
00223 return -L4_EINVAL;
00224 }
00225
00229 static int __release_region(unsigned long start, unsigned long len,
00230 io_res_t ** root, io_client_t * c)
00231 {
00232 unsigned long end = start + len - 1;
00233 io_res_t *tmp = *root, *p = NULL;
00234
00235
00236 for (;;)
00237 {
00238 if (!tmp)
00239 break;
00240 if (tmp->end < start)
00241 {
00242 p = tmp;
00243 tmp = tmp->next;
00244 continue;
00245 }
00246 if ((tmp->start != start) || (tmp->end != end))
00247 break;
00248 #if !IORES_TOO_MUCH_POLICY
00249 if (!client_equal(tmp->client, c))
00250 {
00251 LOGd(DEBUG_RES, l4util_idfmt" not allowed to free "
00252 l4util_idfmt"'s region",
00253 l4util_idstr(c->c_l4id), l4util_idstr(tmp->client->c_l4id));
00254 return -L4_EPERM;
00255 }
00256 #endif
00257 if (!p)
00258 *root = tmp->next;
00259 else
00260 p->next = tmp->next;
00261 LOGd(DEBUG_RES, "freeing (0x%08lx-0x%08lx)", start, end);
00262 free(tmp);
00263 return 0;
00264 }
00265 printf("Non-existent region (0x%08lx-0x%08lx) not freed\n", start, end);
00266 return -L4_EINVAL;
00267 }
00268
00272 static int __release_region_client(io_res_t ** root, io_client_t * c)
00273 {
00274 io_res_t *tmp, *n, *p;
00275
00276 for (tmp=*root, p=NULL; tmp;)
00277 {
00278 n = tmp->next;
00279 if (l4_tasknum_equal(tmp->client->c_l4id, c->c_l4id))
00280 {
00281 if (!p)
00282 *root = tmp->next;
00283 else
00284 p->next = tmp->next;
00285 LOGd(DEBUG_RES, "freeing (0x%08lx-0x%08lx)", tmp->start, tmp->end);
00286 free(tmp);
00287 }
00288 else
00289 p = tmp;
00290 tmp = n;
00291 }
00292 return 0;
00293 }
00294
/* Split the port range [start, start+length) into naturally aligned
 * power-of-two chunks and append one I/O flexpage per chunk to
 * *regions, bumping *num for each.
 *
 * Strategy: find the largest 2^log2-sized, 2^log2-aligned chunk that
 * fits entirely inside the range, emit it, and recurse on the leftover
 * head (before the chunk) and tail (after it). */
static void process_port_region(unsigned start, unsigned length,
                                l4_snd_fpage_t **regions, l4_size_t *num)
{
  unsigned log2_start, log2, log2_length;

  if (!length) return;   /* recursion terminates on empty ranges */

  /* shrink log2 until the aligned-up start plus chunk size still fits
   * inside [start, start+length) */
  for (log2 = l4util_log2(length); ; log2--)
    {
      log2_length = 1 << log2;
      unsigned size_mask = log2_length - 1;
      log2_start = (start + size_mask) & ~size_mask;   /* align up */
      if (log2_start + log2_length <= start + length) break;
    }

  /* emit the unaligned head before the chunk */
  process_port_region(start, log2_start - start, regions, num);

  LOGd(DEBUG_RES, "I/O port chunk: 0x%04x-0x%04x [0x%04x]\n",
       log2_start, log2_start + log2_length - 1, log2_length);

  l4_fpage_t fpage = l4_iofpage(log2_start, log2, L4_FPAGE_MAP);

  /* append the flexpage for this chunk */
  (*regions)->snd_base = 0;
  (*regions)->fpage = fpage;
  (*regions)++;
  (*num)++;

  /* emit the tail after the chunk */
  unsigned last_start = log2_start + log2_length;
  process_port_region(last_start, start + length - last_start, regions, num);
}
00339
/* IPC component: request the I/O port range [addr, addr+len-1] for the
 * calling client.
 *
 * The range is recorded in io_port_res, split into naturally aligned
 * power-of-two chunks (regions/num), and the corresponding I/O
 * flexpages are faulted in from the pager so they can be forwarded to
 * the client in the reply.
 *
 * \return 0 on success, -L4_EINVAL for bad ranges (and always on ARM),
 *         -L4_ENOTFOUND for unknown clients, or a __request_region()
 *         error code. */
long
l4_io_request_region_component (CORBA_Object _dice_corba_obj,
                                l4_uint16_t addr,
                                l4_uint16_t len,
                                l4_size_t *num,
                                l4_snd_fpage_t regions[],
                                CORBA_Server_Environment *_dice_corba_env)
{
#ifndef ARCH_arm
  int err;

  /* no fpages built yet */
  *num = 0;

  /* empty range is invalid */
  if (!len) return -L4_EINVAL;

  /* range must stay below the port-space limit */
  if ((addr + len - 1) > MAX_IO_PORT) return -L4_EINVAL;

  io_client_t *c = find_client(*_dice_corba_obj);

  /* client not registered */
  if (!c) return -L4_ENOTFOUND;

  /* record the allocation (fails on overlap) */
  if ((err = __request_region(addr, len, MAX_IO_PORT, &io_port_res, c)))
    return err;

  LOGd(DEBUG_RES, "requested I/O ports: 0x%04x-0x%04x [0x%04x]\n",
       addr, addr + len - 1, len);

  /* build the flexpage list to send back */
  l4_snd_fpage_t *r = regions;
  process_port_region(addr, len, &r, num);
  ASSERT(*num <= l4_io_max_fpages);

  /* fault each I/O flexpage in from the pager so we own it and can
   * map it on to the client */
  unsigned i;
  l4_threadid_t pager = rmgr_pager_id();
  for (i = 0; i < *num; i++)
    {
      l4_umword_t dw0, dw1;
      l4_msgdope_t result;
      l4_msgtag_t tag = l4_msgtag(L4_MSGTAG_IO_PAGE_FAULT, 0, 0, 0);
      /* NOTE(review): this 'err' shadows the outer one - harmless here
       * since the outer value is no longer read */
      int err = l4_ipc_call_tag(pager,
          L4_IPC_SHORT_MSG, regions[i].fpage.fpage, 0, tag,
          L4_IPC_IOMAPMSG(0, L4_WHOLE_IOADDRESS_SPACE), &dw0, &dw1,
          L4_IPC_NEVER, &result, &tag);

      /* dw1 == 0 means no flexpage was actually mapped back */
      if (err || !dw1)
        Panic("sigma0 request for I/O ports [%04x,%04x) failed (err=%d dw1=%ld)\n",
              regions[i].fpage.iofp.iopage,
              regions[i].fpage.iofp.iopage + (1 << regions[i].fpage.iofp.iosize),
              err, dw1);
    }

  return 0;

#else
  return -L4_EINVAL;
#endif
}
00423
00435 long
00436 l4_io_release_region_component (CORBA_Object _dice_corba_obj,
00437 l4_uint16_t addr,
00438 l4_uint16_t len,
00439 CORBA_Server_Environment *_dice_corba_env)
00440 {
00441 l4_size_t num = 0;
00442 l4_snd_fpage_t regions[l4_io_max_fpages];
00443 l4_snd_fpage_t *r = regions;
00444
00445 io_client_t *c = find_client(*_dice_corba_obj);
00446
00447
00448 if (!c) return -L4_ENOTFOUND;
00449
00450 process_port_region(addr, len, &r, &num);
00451 ASSERT(*num <= l4_io_max_fpages);
00452
00453
00454 unsigned i;
00455 for (i = 0; i < num; i++)
00456 l4_fpage_unmap(regions[i].fpage, L4_FP_FLUSH_PAGE | L4_FP_OTHER_SPACES);
00457
00458 return (__release_region(addr, len, &io_port_res, c));
00459 }
00460
00479 long
00480 l4_io_request_mem_region_component (CORBA_Object _dice_corba_obj,
00481 unsigned long addr,
00482 unsigned long len,
00483 unsigned long flags,
00484 l4_snd_fpage_t *region,
00485 CORBA_Server_Environment *_dice_corba_env)
00486 {
00487 int error, size;
00488 unsigned int start = addr;
00489 unsigned int end = addr + len - 1;
00490 l4_addr_t vaddr;
00491 io_client_t *c = find_client(*_dice_corba_obj);
00492 io_ares_t *p = io_mem_ares;
00493
00494
00495 region->snd_base = 0;
00496 region->fpage.raw = 0;
00497
00498 if (!c)
00499
00500 return -L4_ENOTFOUND;
00501
00502
00503 for (;;)
00504 {
00505 if (!p)
00506 {
00507 LOGdL(DEBUG_ERRORS, "requested (0x%08lx-0x%08lx) not announced",
00508 addr, addr + len - 1);
00509 return -L4_EINVAL;
00510 }
00511 if ((start >= p->start) && (end <= p->end))
00512 break;
00513 p = p->next;
00514 }
00515
00516
00517 if ((error = __request_region(addr, len, MAX_IO_MEMORY, &io_mem_res, c)))
00518 return error;
00519
00520 size = len >> IO_LOG2_PAGESIZE;
00521 if (len > (size << IO_LOG2_PAGESIZE))
00522 size++;
00523
00524
00525
00526
00527 vaddr = io_trunc_page(p->vaddr);
00528
00529
00530 size = (len & IO_PAGEMASK) ? nLOG2(len) : IO_LOG2_PAGESIZE;
00531
00532 region->snd_base = addr;
00533 region->fpage = l4_fpage(vaddr, size, L4_FPAGE_RW, L4_FPAGE_MAP);
00534
00535 switch (flags)
00536 {
00537 case L4IO_MEM_WRITE_COMBINED:
00538 region->fpage.fp.cache = L4_FPAGE_BUFFERABLE;
00539 break;
00540 case L4IO_MEM_CACHED:
00541 region->fpage.fp.cache = L4_FPAGE_CACHEABLE;
00542 break;
00543 default:
00544 region->fpage.fp.cache = L4_FPAGE_UNCACHEABLE;
00545 break;
00546 }
00547
00548 LOGd(DEBUG_RES, "sending fpage {snd_base=%lx, 0x%08lx, 0x%08lx}",
00549 region->snd_base,
00550 (unsigned long)region->fpage.fp.page << 12,
00551 1UL << region->fpage.fp.size);
00552
00553
00554 return 0;
00555 }
00556
/* IPC component: find the announced memory region containing 'addr' and
 * report its start address and length.
 *
 * \return 0 on success, -L4_EINVAL if no announced region contains the
 *         address. */
long
l4_io_search_mem_region_component (CORBA_Object _dice_corba_obj,
                                   unsigned long addr,
                                   unsigned long *start,
                                   l4_size_t *len,
                                   CORBA_Server_Environment *_dice_corba_env)
{
  /* NOTE(review): casting l4_size_t* to unsigned long* assumes both
   * types have identical size and representation on this platform -
   * verify, otherwise the write through the cast pointer is unsafe */
  return __search_region(addr, io_mem_ares,
                         (unsigned long *)start, (unsigned long *)len);
}
00579
00591 long
00592 l4_io_release_mem_region_component (CORBA_Object _dice_corba_obj,
00593 unsigned long addr,
00594 unsigned long len,
00595 CORBA_Server_Environment *_dice_corba_env)
00596 {
00597 int error, size;
00598 io_client_t *c = find_client(*_dice_corba_obj);
00599 unsigned int start = addr;
00600 unsigned int end = addr + len - 1;
00601 io_ares_t *p = io_mem_ares;
00602 l4_fpage_t region;
00603
00604 if (!c)
00605
00606 return -L4_ENOTFOUND;
00607
00608 if ((error = __release_region(addr, len, &io_mem_res, c)))
00609 return error;
00610
00611
00612 for (;;)
00613 {
00614 if ((start >= p->start) && (end <= p->end))
00615 break;
00616 p = p->next;
00617 }
00618
00619
00620 size = (len & IO_PAGEMASK) ? nLOG2(len) : IO_LOG2_PAGESIZE;
00621 region = l4_fpage(io_trunc_page(p->vaddr), size, 0, 0);
00622
00623
00624 l4_fpage_unmap(region, L4_FP_FLUSH_PAGE | L4_FP_OTHER_SPACES);
00625
00626
00627 return 0;
00628 }
00629
00642 long
00643 l4_io_request_dma_component (CORBA_Object _dice_corba_obj,
00644 unsigned long channel,
00645 CORBA_Server_Environment *_dice_corba_env)
00646 {
00647 io_client_t *c = find_client(*_dice_corba_obj);
00648
00649 if (!c)
00650
00651 return -L4_ENOTFOUND;
00652
00653
00654 if (!(channel < MAX_ISA_DMA))
00655 return -L4_EINVAL;
00656 if (isa_dma[channel].used)
00657 return -L4_EINVAL;
00658
00659
00660 isa_dma[channel].used = 1;
00661 isa_dma[channel].client = c;
00662
00663 return 0;
00664 }
00665
00678 long
00679 l4_io_release_dma_component (CORBA_Object _dice_corba_obj,
00680 unsigned long channel,
00681 CORBA_Server_Environment *_dice_corba_env)
00682 {
00683 io_client_t *c = find_client(*_dice_corba_obj);
00684
00685 if (!c)
00686
00687 return -L4_ENOTFOUND;
00688
00689
00690 if (!(channel < MAX_ISA_DMA))
00691 return -L4_EINVAL;
00692 if (!isa_dma[channel].used)
00693 return -L4_EINVAL;
00694 #if !IORES_TOO_MUCH_POLICY
00695 if (!client_equal(isa_dma[channel].client, c))
00696 {
00697 LOGd(DEBUG_RES, l4util_idfmt" not allowed to release "
00698 l4util_idfmt"'s DMA channel",
00699 l4util_idstr(c->c_l4id),
00700 l4util_idstr(isa_dma[channel].client->c_l4id));
00701 return -L4_EPERM;
00702 }
00703 #endif
00704
00705 isa_dma[channel].used = 0;
00706 isa_dma[channel].client = NULL;
00707
00708 return 0;
00709 }
00710
00721 long
00722 l4_io_release_client_component (CORBA_Object _dice_corba_obj,
00723 const l4_threadid_t *client,
00724 CORBA_Server_Environment *_dice_corba_env)
00725 {
00726 io_client_t *c = find_client(*client);
00727 l4_uint32_t channel;
00728
00729 if (!c)
00730 return -L4_ENOTFOUND;
00731
00732 __release_region_client(&io_mem_res, c);
00733 __release_region_client(&io_port_res, c);
00734 for (channel=0; channel<MAX_ISA_DMA; channel++)
00735 {
00736 if (isa_dma[channel].used &&
00737 client_equal(isa_dma[channel].client, c))
00738 {
00739
00740 isa_dma[channel].used = 0;
00741 isa_dma[channel].client = NULL;
00742 }
00743 }
00744
00745 return 0;
00746 }
00747
00761 int callback_request_region(unsigned long addr, unsigned long len)
00762 {
00763 io_client_t *c = io_self;
00764
00765 return (__request_region(addr, len, MAX_IO_PORT, &io_port_res, c));
00766 }
00767
00771 int callback_request_mem_region(unsigned long addr, unsigned long len)
00772 {
00773 io_client_t *c = io_self;
00774
00775 return (__request_region(addr, len, MAX_IO_MEMORY, &io_mem_res, c));
00776 }
00777
/* Announce the physical memory region [addr, addr+len-1]: map it from
 * sigma0 into a reserved local virtual area (or reuse an existing
 * mapping that already covers it) and append an entry to the announced
 * region list io_mem_ares.  Panics on reservation or mapping failure. */
void callback_announce_mem_region(unsigned long addr, unsigned long len)
{
  int error;
  l4_addr_t vaddr = 0;
  l4_uint32_t vaddr_area;
  l4_size_t size;

  io_ares_t **s = &io_mem_ares;
  io_ares_t *p = io_mem_ares;
  l4_threadid_t pager = l4sigma0_id();

  /* number of request-granularity pages needed (round up) */
  size = len >> IO_REQ_LOG2_PAGESIZE;
  if (len > (size << IO_REQ_LOG2_PAGESIZE))
    size++;

  /* look for an announced region whose page-granular mapping already
   * covers the whole new range */
  while (p)
    {
      if (io_req_trunc_page(p->start) <= addr
          && io_req_trunc_page(p->end + IO_REQ_PAGESIZE -1) - 1 >= addr + len - 1)
        break;
      p = p->next;
    }

  if (p)
    {
      /* reuse the existing mapping */
      LOGd(DEBUG_RES, "reuse mapping from (0x%08lx-0x%08lx); mapped to 0x%08lx",
           p->start, p->end, p->vaddr);
      vaddr = io_req_trunc_page(p->vaddr);
    }

  /* no covering mapping yet - create one */
  else
    {
      /* reserve an aligned local virtual area */
      error = l4rm_area_reserve(size * IO_REQ_PAGESIZE, L4RM_LOG2_ALIGNED,
                                &vaddr, &vaddr_area);
      if (error)
        {
          Panic("no area for memory region announcement (%d)\n", error);
        }

      /* map the physical range from sigma0 into the reserved area */
      if ((error = l4sigma0_map_iomem(pager, io_req_trunc_page(addr),
                                      vaddr, size * IO_REQ_PAGESIZE, 0)))
        {
          switch (error)
            {
            case -2: Panic("sigma0 request IPC error");
            case -3: Panic("sigma0 request for phys addr %08lx failed", addr);
            }
        }
    }

  /* add the sub-page offset of addr back onto the mapping base */
  vaddr += addr & (IO_REQ_PAGESIZE - 1);

  /* append the new entry at the tail of the announced list */
  while (*s) s = &((*s)->next);

  p = malloc(sizeof(io_ares_t));
  Assert(p);
  p->next = NULL;
  p->start = addr;
  p->end = addr + len - 1;
  p->vaddr = vaddr;
  p->flags = 0;

  *s = p;

  LOGd(DEBUG_RES, "(0x%08lx-0x%08lx) was announced; mapped to 0x%08lx",
       p->start, p->end, p->vaddr);
}
00863
/* PCI vendor:device filter lists (built from command-line options by
 * add_device_inclusion()/add_device_exclusion()).  If the inclusion
 * list is non-empty, only listed devices are handled; otherwise all
 * devices except those on the exclusion list are handled - see
 * callback_handle_pci_device(). */
static struct device_inclusion_list
{
  unsigned short vendor;
  unsigned short device;
  struct device_inclusion_list *next;
} *device_handle_inclusion_list,   /* devices explicitly handled */

  *device_handle_exclusion_list;   /* devices explicitly ignored */
00872
00873
00874
00883 int callback_handle_pci_device(unsigned short vendor, unsigned short device)
00884 {
00885 struct device_inclusion_list *list;
00886
00887 if (device_handle_inclusion_list)
00888 {
00889 for (list=device_handle_inclusion_list; list; list=list->next)
00890 if (list->vendor==vendor && list->device==device)
00891 return 1;
00892 return 0;
00893 }
00894 for (list=device_handle_exclusion_list; list; list=list->next)
00895 if (list->vendor==vendor && list->device==device)
00896 return 0;
00897 return 1;
00898 }
00899
00904 static int parse_device_pair(const char *s, unsigned short *vendor,
00905 unsigned short *device)
00906 {
00907 char *t;
00908 *vendor = strtoul(s, &t, 16);
00909 if (*t != ':')
00910 return -L4_EINVAL;
00911 s = t + 1;
00912 *device = strtoul(s, &t, 16);
00913 if (*t != 0)
00914 return -L4_EINVAL;
00915 return 0;
00916 }
00917
00922 int add_device_inclusion(const char *s)
00923 {
00924 static unsigned short vendor, device;
00925 static struct device_inclusion_list *elem;
00926
00927 if (!parse_device_pair(s, &vendor, &device))
00928 {
00929 if ((elem=malloc(sizeof(struct device_inclusion_list)))==0)
00930 return -L4_ENOMEM;
00931
00932 elem->vendor = vendor;
00933 elem->device = device;
00934 elem->next = device_handle_inclusion_list;
00935 device_handle_inclusion_list = elem;
00936 LOG("taking care of device %04x:%04x\n", elem->vendor, elem->device);
00937 return 0;
00938 }
00939 return -L4_EINVAL;
00940 }
00941
00948 int add_device_exclusion(const char *s)
00949 {
00950 static unsigned short vendor, device;
00951 static struct device_inclusion_list *elem;
00952
00953 if (!parse_device_pair(s, &vendor, &device))
00954 {
00955 if ((elem=malloc(sizeof(struct device_inclusion_list)))==0)
00956 return -L4_ENOMEM;
00957
00958 elem->vendor = vendor;
00959 elem->device = device;
00960 elem->next = device_handle_exclusion_list;
00961 device_handle_exclusion_list = elem;
00962 LOG("ignoring device %04x:%04x\n", elem->vendor, elem->device);
00963
00964 return 0;
00965 }
00966 return -L4_EINVAL;
00967 }
00968
00969 #ifndef ARCH_arm
00970
/* Map the BIOS area (bios_paddr, bios_size) into this address space.
 *
 * Reserves an aligned virtual region from the region mapper and then
 * requests each page from the pager via page-fault IPC.  Panics if the
 * reservation or any page mapping fails.
 *
 * \retval ret_vaddr  virtual base address of the mapped BIOS area
 * \return 0 on success (does not return on failure) */
int bios_map_area(unsigned long *ret_vaddr)
{
  int error;
  l4_umword_t size = bios_size;
  l4_umword_t vaddr;
  l4_umword_t paddr = bios_paddr;
  l4_umword_t dw0, dw1;
  l4_msgdope_t result;
  l4_threadid_t pager = rmgr_pager_id();
  l4_uint32_t vaddr_area;
  l4_msgtag_t tag;

  /* reserve the virtual address range */
  error = l4rm_area_reserve(size, L4RM_LOG2_ALIGNED,
                            &vaddr, &vaddr_area);
  if (error)
    Panic("no area for BIOS32 (%d)\n", error);

  *ret_vaddr = bios_vaddr = vaddr;

  /* map page by page from the pager */
  while (size)
    {
      tag = l4_msgtag(L4_MSGTAG_PAGE_FAULT, 0, 0, 0);
      error = l4_ipc_call_tag(pager,
          L4_IPC_SHORT_MSG, paddr, 0, tag,
          L4_IPC_MAPMSG(vaddr, L4_LOG2_PAGESIZE), &dw0, &dw1,
          L4_IPC_NEVER, &result, &tag);

      /* dw1 == 0 means no flexpage was mapped back */
      if (error || !dw1)
        Panic("sigma0 request for phys addr %p failed (err=%d dw1=%ld)\n",
              (void*)paddr, error, dw1);

      paddr += L4_PAGESIZE;
      vaddr += L4_PAGESIZE;
      size -= L4_PAGESIZE;
    }
  return 0;
}
01018 #endif
01019
01026 void * bios_phys_to_virt(unsigned long paddr)
01027 {
01028 if ((paddr >= bios_paddr) && (paddr < bios_paddr+bios_size))
01029 return (void*)(bios_vaddr + (paddr - bios_paddr));
01030 else
01031 {
01032
01033
01034 return NULL;
01035 }
01036 }
01037
/* Initialize resource bookkeeping and claim reserved resources for the
 * I/O server itself.
 *
 * 'c' becomes the server's own client descriptor (head of the client
 * list).  On non-ARM targets the classic PC motherboard port ranges
 * are pre-allocated so no client can grab them, and ISA DMA channel 4
 * (presumably the cascade channel - confirm) is marked used.
 *
 * \return 0 on success; panics if a reserved resource cannot be
 *         claimed. */
int io_res_init(io_client_t *c)
{
  /* register ourselves as the first client */
  io_self = c;

#ifndef ARCH_arm
  int err;

  /* ports 0x00-0x1f and 0xc0-0xdf (legacy PC: DMA controllers) */
  if ((err = __request_region(0, 0x20, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  if ((err = __request_region(0xc0, 0x20, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  /* ports 0x80-0x8f (legacy PC: DMA page registers) */
  if ((err = __request_region(0x80, 0x10, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  /* ports 0x20-0x3f and 0xa0-0xbf (legacy PC: interrupt controllers) */
  if ((err = __request_region(0x20, 0x20, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  if ((err = __request_region(0xa0, 0x20, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  /* ports 0x40-0x5f (legacy PC: timer) */
  if ((err = __request_region(0x40, 0x20, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  /* ports 0xf0-0xff (legacy PC: FPU) */
  if ((err = __request_region(0xf0, 0x10, MAX_IO_PORT, &io_port_res, c)))
    goto err;

  /* DMA channel 4 is never handed out */
  isa_dma[4].used = 1;
  isa_dma[4].client = c;

  return 0;

 err:
  Panic("claiming reserved resource failed (%d)\n", err);
  return err;

#else
  return 0;
#endif
}
01099
01100
01101
01102
01103 static void list_regions(void)
01104 {
01105 io_res_t *p = io_port_res;
01106
01107 while (p)
01108 {
01109 printf(" port (0x%04lx - 0x%04lx) "l4util_idfmt" %s\n",
01110 p->start, p->end,
01111 l4util_idstr(p->client->c_l4id), p->client->name);
01112 p = p->next;
01113 }
01114 }
01115
01116 static void list_mem_regions(void)
01117 {
01118 io_res_t *p = io_mem_res;
01119
01120 while (p)
01121 {
01122 printf("memory (0x%08lx - 0x%08lx) "l4util_idfmt" %s\n",
01123 p->start, p->end,
01124 l4util_idstr(p->client->c_l4id), p->client->name);
01125 p = p->next;
01126 }
01127 }
01128
01129 static void list_amem_regions(void)
01130 {
01131 io_ares_t *p = io_mem_ares;
01132
01133 while (p)
01134 {
01135 printf("memory (0x%08lx - 0x%08lx) ANNOUNCED (0x%08lx)\n",
01136 p->start, p->end, p->vaddr);
01137 p = p->next;
01138 }
01139 }
01140
01141 static void list_dma(void)
01142 {
01143 int i;
01144
01145 for (i = 0; i < MAX_ISA_DMA; i++)
01146 if (isa_dma[i].used)
01147 printf(" DMA %d "l4util_idfmt" %s\n",
01148 i, l4util_idstr(isa_dma[i].client->c_l4id), isa_dma[i].client->name);
01149 else
01150 printf(" DMA %d "l4util_idfmt" UNUSED\n",
01151 i, l4util_idstr(L4_NIL_ID));
01152 }
01153
/* Dump all resource allocations (ports, announced and allocated memory
 * regions, DMA channels) to stdout. */
void list_res(void)
{
  list_regions();
  list_amem_regions();
  list_mem_regions();
  list_dma();
}