// memory_manager.hpp
00001 #if !defined(__MEMORY_MANAGER_HPP__)
00002 #define __MEMORY_MANAGER_HPP__
00003
00004
00005
00006
00007 #include "core/machine/generic/resource_manager.hpp"
00008 #include "core/machine/machine_config.hpp"
00009 #include "memory_space.hpp"
00010
00015 struct memory_manager : public resource_manager, private noncopyable
00016 {
00017 typedef memory_space<l4_gpa_t, mmio_handler> gpa_space;
00018 typedef gpa_space::region_type mmio_region;
00019 typedef memory_space<l4_hpa_t, mmio_handler> hpa_space;
00020
00021 static const bool VERBOSE_ALLOCATION = false;
00022
00023 protected:
00027 const machine_config &config;
00028
00032 lock gpa_lock, hpa_lock;
00033
00037 gpa_space gpa_map;
00038
00042 hpa_space hpa_map;
00043
00047 semaphore frame2page_lock;
00048
00053 map<l4_gpa_t, l4_hva_t> frame2page_cache;
00054
00055 public:
00056
00057
00058
00059 memory_manager(const machine_config &config);
00060 virtual ~memory_manager(void);
00061
00062 virtual int check_mmio_request(const mmio_handler *handler, l4_gpa_t base,
00063 l4_gpa_t size=1, bool verbose=VERBOSE_ALLOCATION) const;
00064
00065
00066
00067
00068 virtual inline const mmio_region *search_mmio_region(const l4_gpa_t base, const l4_gpa_t size=1)
00069 {
00070 return gpa_map.search_region(base, size);
00071 }
00072
00073 virtual int register_mmio_handler(mmio_handler *handler, l4_gpa_t base,
00074 l4_gpa_t size=1, bool verbose=VERBOSE_ALLOCATION);
00075 virtual int unregister_mmio_handler(const mmio_handler *handler, l4_gpa_t base,
00076 l4_gpa_t size=1, bool verbose=VERBOSE_ALLOCATION);
00077
00078
00079
00080
00081 virtual l4_hva_t request_mmio_region(mmio_handler *handler, int l4io_flags,
00082 l4_hpa_t base, l4_hpa_t size=1, bool verbose=VERBOSE_ALLOCATION);
00083 virtual int release_mmio_region(const mmio_handler *handler, l4_hpa_t base,
00084 l4_hpa_t size=1, bool verbose=VERBOSE_ALLOCATION);
00085 virtual int release_mmio_regions(const mmio_handler *handler=nullptr, bool verbose=VERBOSE_ALLOCATION);
00086
00087
00088
00089
00090
00091 inline l4_hva_t phys2virt(const l4_gpa_t phys)
00092 {
00093 return config.is(L4VMM_CACHE_PHYS_TO_VIRT) ?
00094 phys2virt_cached(phys) : (*config.phys_to_virt_func)(phys);
00095 }
00096
00097
00098
00099
00100
00101 inline l4_hva_t phys2virt_cached(const l4_gpa_t phys)
00102 {
00103 auto_lock<semaphore> lock_guard(frame2page_lock);
00104 const l4_gpa_t frame=phys & memory::PAGE_MASK;
00105 l4_hva_t &page=frame2page_cache[frame];
00106 if (!page) page=(*config.phys_to_virt_func)(frame);
00107 return page | (phys & ~memory::PAGE_MASK);
00108 }
00109
00110
00111
00112
00113 virtual void print_gpa_state(const char *header=nullptr);
00114 virtual void print_hpa_state(const char *header=nullptr);
00115
00116 protected:
00117 virtual int init_memory(void);
00118
00119 private:
00120 template <typename SpaceT, typename LockT>
00121 void print_memory_state(const SpaceT &map, LockT &lock, const char *header=nullptr);
00122 };
00123
00124 #endif
00125
00126
00127