Added memory backend
Updated by Viktor Reusch for the new l4re-base-25.08.0.

Co-authored-by: vreusch <viktor.reusch@barkhauseninstitut.org>
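Most of the hunks that follow apply one and the same change to each example package: the new memory backend (libc_be_mem) and libstdc++ are added to the package dependencies, both in the Control file (requires: line) and in the Makefile (REQUIRES_LIBS). As a rough sketch of what an affected package Makefile ends up looking like (the target and source names below are placeholders, not taken from this commit):

PKGDIR        ?= ..
L4DIR         ?= $(PKGDIR)/../../..

TARGET         = ex_placeholder   # hypothetical example program
SRC_C          = main.c
REQUIRES_LIBS  = libc_be_mem libstdc++
DEPENDS_PKGS   = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk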
@@ -1,3 +1,3 @@
requires: xyz
requires: libstdc++ libc_be_mem xyz
provides: abc
maintainer: your@email.example.com

@@ -8,6 +8,6 @@ SRC_C = main.c
SRC_CC =

# list requirements of your program here
REQUIRES_LIBS =
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -10,4 +10,6 @@ SRC_CC-$(CONFIG_CONS_USE_ASYNC_FE) += async_vcon_fe.cc
REQUIRES_LIBS = libstdc++ cxx_libc_io cxx_io
REQUIRES_LIBS-$(CONFIG_CONS_USE_ASYNC_FE) = libpthread

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

include $(L4DIR)/mk/prog.mk

@@ -1,2 +1,2 @@
requires: stdlibs
requires: stdlibs libstdc++ libc_be_mem
Maintainer: adam@os.inf.tu-dresden.de

@@ -4,6 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_clntsrv-server ex_clntsrv-client
SRC_CC_ex_clntsrv-server = server.cc
SRC_CC_ex_clntsrv-client = client.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -1,3 +1,5 @@
requires: libstdc++ libc_be_mem

# color, fractal, spectrum
optional: l4re_c-util
@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_fb_spectrum_c ex_fb_spectrum_cc
SRC_CC_ex_fb_spectrum_cc = spectrum.cc
SRC_C_ex_fb_spectrum_c = spectrum_c.c
REQUIRES_LIBS = libevent l4re_c-util
REQUIRES_LIBS = libevent l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -1,4 +1,4 @@
requires: stdlibs
requires: stdlibs libstdc++ libc_be_mem

# input, led, uart
optional: libstdc++ libio-vbus

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_gpio_input
SRC_CC = gpio_input.cc
REQUIRES_LIBS = libstdc++ libio-vbus
REQUIRES_LIBS = libio-vbus libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_gpio_led
SRC_CC = gpio_led.cc
REQUIRES_LIBS = libstdc++ libio-vbus
REQUIRES_LIBS = libio-vbus libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -1,7 +1,7 @@
PKGDIR ?= ..
L4DIR ?= $(PKGDIR)/../../..

REQUIRES_LIBS = libstdc++ libio-vbus drivers_uart libio
REQUIRES_LIBS = libio-vbus drivers_uart libio libc_be_mem libstdc++

TARGET = rpi_uart
SRC_CC = main.cc
@@ -1,4 +1,4 @@
requires: stdlibs
requires: stdlibs libstdc++ libc_be_mem

# boost, cppunit, stdthread
optional: libstdc++

@@ -4,6 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = inputtst
SRC_C = main.c
DEPENDS_PKGS = input
REQUIRES_LIBS = input
REQUIRES_LIBS = input libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_l4re_ma+rm_cc

SRC_CC = ma+rm.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_periodic_task

SRC_CC = main.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -3,5 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_l4re_physmem_cc
SRC_CC = physmem.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_l4re_ds_clnt ex_l4re_ds_srv
SRC_CC_ex_l4re_ds_clnt = ds_clnt.cc
SRC_CC_ex_l4re_ds_srv = ds_srv.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk
@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_l4re_ma+rm_c
SRC_C = ma+rm.c
REQUIRES_LIBS = l4re_c-util
REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -4,6 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_smap-server ex_smap-client
SRC_CC_ex_smap-server = server.cc
SRC_CC_ex_smap-client = client.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = uclibc_thread_safe
SRC_CC = main.cc
REQUIRES_LIBS = libpthread
REQUIRES_LIBS = libpthread libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_libio
SRC_C = main.c
REQUIRES_LIBS = libio libirq
REQUIRES_LIBS = libio libirq libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
SRC_C_ex_libirq_async = async_isr.c
SRC_C_ex_libirq_loop = loop.c
TARGET = ex_libirq_async ex_libirq_loop
REQUIRES_LIBS = libirq libio
REQUIRES_LIBS = libirq libio libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk
@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

SRC_C = main.c
TARGET = rtc_test
DEPENDS_PKGS = rtc
REQUIRES_LIBS = rtc
DEPENDS_PKGS = rtc libc_be_mem libstdc++
REQUIRES_LIBS = rtc libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_shmc
SRC_C = prodcons.c
REQUIRES_LIBS = shmc libpthread
REQUIRES_LIBS = shmc libpthread libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -1,3 +1,5 @@
requires: libc_be_mem

# cyclichpet, eb_leds, eb_leds_gfx, hpet
optional: libio

@@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = cat
SRC_C = cat.c
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_eb_leds
SRC_C = eb_leds.c
REQUIRES_LIBS = libio
REQUIRES_LIBS = libio libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = reboot
SRC_C = main.c
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk
@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_hello_shared
MODE = shared
SRC_C = main.c
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -1,4 +1,4 @@
requires: stdlibs
requires: stdlibs libc_be_mem

# aliens, isr, singlestep, start-with-exc, utcb-ipc, vm-tz
optional: l4re_c-util

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_aliens
SRC_C = main.c
REQUIRES_LIBS = l4re_c-util
REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = dump_obj
SRC_CC = dump_obj.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_ipc1
SRC_C = ipc_example.c
REQUIRES_LIBS = libpthread
REQUIRES_LIBS = libpthread libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_isr
SRC_C = main.c
REQUIRES_LIBS = l4re_c-util
REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk
@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_map_irq_client ex_map_irq_server
SRC_CC_ex_map_irq_client = client.cc
SRC_CC_ex_map_irq_server = server.cc
REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_thread_migrate ex_thread_migrate_irq
SRC_CC_ex_thread_migrate = thread_migrate.cc
SRC_CC_ex_thread_migrate_irq = thread_migrate_irq.cc
REQUIRES_LIBS = libpthread
REQUIRES_LIBS = libpthread libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_singlestep
SYSTEMS = x86-l4f amd64-l4f
SRC_C = main.c
REQUIRES_LIBS = l4re_c-util
REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = ex_start-with-exc
SYSTEMS = x86-l4f arm-l4f arm64-l4f
SRC_C = main.c
REQUIRES_LIBS = l4re_c-util
REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -1,6 +1,9 @@
PKGDIR ?= ..
L4DIR ?= $(PKGDIR)/../../..

REQUIRES_LIBS = libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

TARGET = ex_timeouts
SRC_C = main.c
@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_uirq
SRC_CC = ex_uirq.cc
REQUIRES_LIBS = libstdc++ libpthread
REQUIRES_LIBS = libc_be_mem libstdc++ libpthread
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -2,7 +2,7 @@ PKGDIR ?= ..
L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_utcb_ipc
REQUIRES_LIBS = l4re_c-util
REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)
SRC_C = main.c

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..

TARGET = ex_vcpu
SRC_CC = vcpu.cc
REQUIRES_LIBS = libvcpu cxx_io cxx_libc_io
REQUIRES_LIBS = libvcpu cxx_io cxx_libc_io libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -5,7 +5,7 @@ TARGET = ex_vmtest
SYSTEMS = x86-l4f amd64-l4f
SRC_S = guest.S
SRC_CC = vm.cc vmx.cc svm.cc main.cc
REQUIRES_LIBS = libvcpu l4util
REQUIRES_LIBS = libvcpu l4util libc_be_mem libstdc++
DEPENDS_PKGS = $(REQUIRES_LIBS)

include $(L4DIR)/mk/prog.mk

@@ -1,2 +1,2 @@
requires: libdrivers-lcd x86emu_int10 stdlibs libio-vbus
requires: libdrivers-lcd x86emu_int10 stdlibs libio-vbus libstdc++ libc_be_mem
maintainer: adam@os.inf.tu-dresden.de
@@ -11,7 +11,7 @@ REQUIRES_LIBS_x86-l4f = x86emu_int10
REQUIRES_LIBS_amd64-l4f = x86emu_int10
REQUIRES_LIBS_arm-l4f = libdrivers-lcd
REQUIRES_LIBS_arm64-l4f = libdrivers-lcd
REQUIRES_LIBS = libc_support_misc libio-vbus
REQUIRES_LIBS = libc_support_misc libio-vbus libc_be_mem libstdc++

DEFINES = -DSPLASHNAME=gimp_image \
          -DSPLASHNAME_RUN_LENGTH_DECODE=GIMP_IMAGE_RUN_LENGTH_DECODE

@@ -1,2 +1,2 @@
requires: stdlibs
requires: stdlibs libstdc++ libc_be_mem
Maintainer: adam@os.inf.tu-dresden.de

@@ -4,4 +4,6 @@ L4DIR ?= $(PKGDIR)/../..
TARGET = hello
SRC_C = main.c

REQUIRES_LIBS = libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -2,6 +2,8 @@
#MODE := shared
TARGET = io

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

DEFINES-$(CONFIG_L4IO_PCIID_DB) += -DCONFIG_L4IO_PCIID_DB

SUBDIRS = drivers

@@ -1,2 +1,2 @@
requires: stdlibs
requires: stdlibs libstdc++ libc_be_mem
Maintainer: adam@l4re.org

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../..
TARGET = ipcbench ipcbench_parallel \
         ipcbench_client ipcbench_server \
         syscallbench syscallbench_parallel
REQUIRES_LIBS = libpthread
REQUIRES_LIBS = libc_be_mem libstdc++ libpthread
SRC_C_ipcbench = ipcbench.c ipc_common.c
SRC_C_ipcbench_parallel = ipcbench_parallel.c ipc_common.c
SRC_C_ipcbench_client = ipcclient.c
@@ -29,7 +29,7 @@ DEFINES += -DL4_CXX_NO_EXCEPTION_BACKTRACE -DL4_LOADER_RELOC_BASE=$(DEFAULT_RELO

REQUIRES_LIBS := cxx_io cxx_libc_io libc_minimal libsupc++_minimal libloader \
                 libc_minimal_l4re libumalloc

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc --wrap=aligned_alloc --wrap=calloc
CXXFLAGS += $(CXXFLAGS_LOW_LEVEL)

include $(L4DIR)/mk/prog.mk
@@ -24,23 +24,26 @@ struct Vfs_init
  cxx::Static_container<L4Re::Vfs::File_factory_t<L4Re::Namespace, L4Re::Core::Ns_dir> > ns_dir;
  cxx::Static_container<L4Re::Vfs::File_factory_t<L4::Vcon, L4Re::Core::Vcon_stream> > vcon_stream;

  // This is part of an ugly hack to avoid calling malloc here.
  char fac_items[3*sizeof(Vfs::File_factory_item)];

  Vfs_init()
  {
    vfs.construct();
    __rtld_l4re_env_posix_vfs_ops = vfs;
    ns_dir.construct();
    auto ns_ptr = cxx::ref_ptr(ns_dir.get());
    vfs->register_file_factory(ns_ptr);
    vfs->register_file_factory(ns_ptr, &fac_items[0]);
    ns_ptr.release(); // prevent deletion of static object

    ro_file.construct();
    auto ro_ptr = cxx::ref_ptr(ro_file.get());
    vfs->register_file_factory(ro_ptr);
    vfs->register_file_factory(ro_ptr, &fac_items[sizeof(Vfs::File_factory_item)]);
    ro_ptr.release(); // prevent deletion of static object

    vcon_stream.construct();
    auto vcon_ptr = cxx::ref_ptr(vcon_stream.get());
    vfs->register_file_factory(vcon_ptr);
    vfs->register_file_factory(vcon_ptr, &fac_items[2*sizeof(Vfs::File_factory_item)]);
    vcon_ptr.release(); // prevent deletion of static object
  }
};
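The fac_items buffer above is sized for exactly three File_factory_item objects, and the extra pointer passed to register_file_factory() tells the VFS where inside that buffer to construct each item, so registration works without calling malloc during early initialization. A minimal stand-alone sketch of the same placement-new technique (the Item type and make_item() below are illustrative only, not L4Re code):

#include <new>
#include <cstddef>

struct Item
{
  int proto;
  explicit Item(int p) : proto(p) {}
};

// Static storage for three Items; alignas keeps every slot suitably aligned.
alignas(Item) static unsigned char storage[3 * sizeof(Item)];

Item *make_item(std::size_t slot, int proto)
{
  // Construct the object in place inside the preallocated slot -- no heap use.
  return new (&storage[slot * sizeof(Item)]) Item(proto);
}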
@@ -134,6 +134,7 @@ public:
  L4Re::Vfs::File_system_list file_system_list() noexcept override;

  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f, void *x) noexcept;
  int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
  Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;

@@ -144,14 +145,6 @@ public:
  void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
  void free(void *m) noexcept override { Vfs_config::free(m); }

private:
  Root_mount_tree _root_mount;
  L4Re::Core::Env_dir _root;
  Ref_ptr<L4Re::Vfs::File> _cwd;
  Fd_store fds;

  L4Re::Vfs::File_system *_fs_registry;

  struct File_factory_item : cxx::H_list_item_t<File_factory_item>
  {
    cxx::Ref_ptr<L4Re::Vfs::File_factory> f;

@@ -163,6 +156,14 @@ private:
    File_factory_item &operator = (File_factory_item const &) = delete;
  };

private:
  Root_mount_tree _root_mount;
  L4Re::Core::Env_dir _root;
  Ref_ptr<L4Re::Vfs::File> _cwd;
  Fd_store fds;

  L4Re::Vfs::File_system *_fs_registry;

  cxx::H_list_t<File_factory_item> _file_factories;

  l4_addr_t _anon_offset;
@@ -272,6 +273,20 @@ Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
  return 0;
}

int
Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f, void *x) noexcept
{
  if (!f)
    return -EINVAL;

  if (!x)
    return -ENOMEM;

  auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
  _file_factories.push_front(ff);
  return 0;
}

int
Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
{

@@ -740,7 +755,7 @@ Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_of

  rm_flags |= Rm::F::In_area;

  // Make sure to remove old mappings residing at the respective address
  // Make sure to remove old mappings residing at the respective address
  // range. If none exists, we are fine as well, allowing us to ignore
  // ENOENT here.
  err = munmap_regions(start, len);
@@ -14,3 +14,5 @@ CXXFLAGS += -fvisibility=hidden
# No exception information as unwinder code might use malloc and friends
DEFINES += -DNOT_IN_libc -DL4_NO_RTTI
CXXFLAGS += -include libc-symbols.h -fno-exceptions -fno-rtti
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
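The LDFLAGS addition above (and the matching --wrap additions earlier in this diff) relies on GNU ld symbol wrapping: with --wrap=malloc, every undefined reference to malloc in the linked objects is resolved to __wrap_malloc, while the original definition remains reachable as __real_malloc. The uclibc changes below supply such __wrap_* definitions for the backend. A minimal illustration of the mechanism itself, as a plain pass-through wrapper rather than the allocator this commit installs:

// Hypothetical demo: link with -Wl,--wrap=malloc to route malloc() calls here.
#include <cstddef>

extern "C" void *__real_malloc(std::size_t size);  // resolved by the linker

extern "C" void *__wrap_malloc(std::size_t size)
{
  // Every malloc() call in wrapped objects lands here; forward it unchanged.
  return __real_malloc(size);
}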
@@ -21,22 +21,22 @@
|
||||
extern int weak_function __libc_free_aligned(void *ptr) attribute_hidden;
|
||||
|
||||
#ifdef L_malloc
|
||||
void *malloc(size_t size)
|
||||
void *__wrap_malloc(size_t size)
|
||||
{
|
||||
void *result;
|
||||
void *result;
|
||||
|
||||
if (unlikely(size == 0)) {
|
||||
size++;
|
||||
}
|
||||
if (unlikely(size == 0)) {
|
||||
size++;
|
||||
}
|
||||
|
||||
/* prevent Undefined Behaviour for pointer arithmetic (substract) of too big pointers
|
||||
* see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303
|
||||
* No need to check for size + sizeof(size_t) integer overflow since we already check for PTRDIFF_MAX
|
||||
*/
|
||||
if (unlikely(size > PTRDIFF_MAX)) {
|
||||
__set_errno(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
/* prevent Undefined Behaviour for pointer arithmetic (substract) of too big pointers
|
||||
* see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303
|
||||
* No need to check for size + sizeof(size_t) integer overflow since we already check for PTRDIFF_MAX
|
||||
*/
|
||||
if (unlikely(size > PTRDIFF_MAX)) {
|
||||
__set_errno(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef __ARCH_USE_MMU__
|
||||
# define MMAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
|
||||
@@ -44,74 +44,74 @@ void *malloc(size_t size)
|
||||
# define MMAP_FLAGS MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED
|
||||
#endif
|
||||
|
||||
result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE,
|
||||
MMAP_FLAGS, 0, 0);
|
||||
if (result == MAP_FAILED) {
|
||||
__set_errno(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
* (size_t *) result = size;
|
||||
return(result + sizeof(size_t));
|
||||
result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE,
|
||||
MMAP_FLAGS, 0, 0);
|
||||
if (result == MAP_FAILED) {
|
||||
__set_errno(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
* (size_t *) result = size;
|
||||
return(result + sizeof(size_t));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef L_calloc
|
||||
void * calloc(size_t nmemb, size_t lsize)
|
||||
{
|
||||
void *result;
|
||||
size_t size=lsize * nmemb;
|
||||
void *result;
|
||||
size_t size=lsize * nmemb;
|
||||
|
||||
/* guard vs integer overflow, but allow nmemb
|
||||
* to fall through and call malloc(0) */
|
||||
if (nmemb && lsize != (size / nmemb)) {
|
||||
__set_errno(ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
result = malloc(size);
|
||||
/* guard vs integer overflow, but allow nmemb
|
||||
* to fall through and call malloc(0) */
|
||||
if (nmemb && lsize != (size / nmemb)) {
|
||||
__set_errno(ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
result = malloc(size);
|
||||
|
||||
#ifndef __ARCH_USE_MMU__
|
||||
/* mmap'd with MAP_UNINITIALIZED, we have to blank memory ourselves */
|
||||
if (result != NULL) {
|
||||
memset(result, 0, size);
|
||||
}
|
||||
/* mmap'd with MAP_UNINITIALIZED, we have to blank memory ourselves */
|
||||
if (result != NULL) {
|
||||
memset(result, 0, size);
|
||||
}
|
||||
#endif
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef L_realloc
|
||||
void *realloc(void *ptr, size_t size)
|
||||
void *__wrap_realloc(void *ptr, size_t size)
|
||||
{
|
||||
void *newptr = NULL;
|
||||
void *newptr = NULL;
|
||||
|
||||
if (!ptr)
|
||||
return malloc(size);
|
||||
if (!size) {
|
||||
free(ptr);
|
||||
return malloc(0);
|
||||
}
|
||||
if (!ptr)
|
||||
return malloc(size);
|
||||
if (!size) {
|
||||
free(ptr);
|
||||
return malloc(0);
|
||||
}
|
||||
|
||||
newptr = malloc(size);
|
||||
if (newptr) {
|
||||
size_t old_size = *((size_t *) (ptr - sizeof(size_t)));
|
||||
memcpy(newptr, ptr, (old_size < size ? old_size : size));
|
||||
free(ptr);
|
||||
}
|
||||
return newptr;
|
||||
newptr = malloc(size);
|
||||
if (newptr) {
|
||||
size_t old_size = *((size_t *) (ptr - sizeof(size_t)));
|
||||
memcpy(newptr, ptr, (old_size < size ? old_size : size));
|
||||
free(ptr);
|
||||
}
|
||||
return newptr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef L_free
|
||||
void free(void *ptr)
|
||||
void __wrap_free(void *ptr)
|
||||
{
|
||||
if (unlikely(ptr == NULL))
|
||||
return;
|
||||
if (unlikely(__libc_free_aligned != NULL)) {
|
||||
if (__libc_free_aligned(ptr))
|
||||
return;
|
||||
}
|
||||
ptr -= sizeof(size_t);
|
||||
munmap(ptr, * (size_t *) ptr + sizeof(size_t));
|
||||
if (unlikely(ptr == NULL))
|
||||
return;
|
||||
if (unlikely(__libc_free_aligned != NULL)) {
|
||||
if (__libc_free_aligned(ptr))
|
||||
return;
|
||||
}
|
||||
ptr -= sizeof(size_t);
|
||||
munmap(ptr, * (size_t *) ptr + sizeof(size_t));
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -119,84 +119,84 @@ void free(void *ptr)
|
||||
|
||||
#include <bits/uClibc_mutex.h>
|
||||
__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
|
||||
#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
|
||||
#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
|
||||
#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
|
||||
#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
|
||||
|
||||
/* List of blocks allocated with memalign or valloc */
|
||||
struct alignlist
|
||||
{
|
||||
struct alignlist *next;
|
||||
__ptr_t aligned; /* The address that memaligned returned. */
|
||||
__ptr_t exact; /* The address that malloc returned. */
|
||||
struct alignlist *next;
|
||||
__ptr_t aligned; /* The address that memaligned returned. */
|
||||
__ptr_t exact; /* The address that malloc returned. */
|
||||
};
|
||||
static struct alignlist *_aligned_blocks;
|
||||
|
||||
/* Return memory to the heap. */
|
||||
int __libc_free_aligned(void *ptr)
|
||||
{
|
||||
struct alignlist *l;
|
||||
struct alignlist *l;
|
||||
|
||||
if (ptr == NULL)
|
||||
return 0;
|
||||
if (ptr == NULL)
|
||||
return 0;
|
||||
|
||||
__MALLOC_LOCK;
|
||||
for (l = _aligned_blocks; l != NULL; l = l->next) {
|
||||
if (l->aligned == ptr) {
|
||||
/* Mark the block as free */
|
||||
l->aligned = NULL;
|
||||
ptr = l->exact;
|
||||
ptr -= sizeof(size_t);
|
||||
munmap(ptr, * (size_t *) ptr + sizeof(size_t));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
__MALLOC_UNLOCK;
|
||||
return 0;
|
||||
__MALLOC_LOCK;
|
||||
for (l = _aligned_blocks; l != NULL; l = l->next) {
|
||||
if (l->aligned == ptr) {
|
||||
/* Mark the block as free */
|
||||
l->aligned = NULL;
|
||||
ptr = l->exact;
|
||||
ptr -= sizeof(size_t);
|
||||
munmap(ptr, * (size_t *) ptr + sizeof(size_t));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
__MALLOC_UNLOCK;
|
||||
return 0;
|
||||
}
|
||||
void * memalign (size_t alignment, size_t size)
|
||||
{
|
||||
void * result;
|
||||
unsigned long int adj;
|
||||
void * result;
|
||||
unsigned long int adj;
|
||||
|
||||
if (unlikely(size > PTRDIFF_MAX)) {
|
||||
__set_errno(ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
if (unlikely(size > PTRDIFF_MAX)) {
|
||||
__set_errno(ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (unlikely((size + alignment - 1 < size) && (alignment != 0))) {
|
||||
__set_errno(ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
if (unlikely((size + alignment - 1 < size) && (alignment != 0))) {
|
||||
__set_errno(ENOMEM);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
result = malloc (size + alignment - 1);
|
||||
if (result == NULL)
|
||||
return NULL;
|
||||
result = malloc (size + alignment - 1);
|
||||
if (result == NULL)
|
||||
return NULL;
|
||||
|
||||
adj = (unsigned long int) ((unsigned long int) ((char *) result - (char *) NULL)) % alignment;
|
||||
if (adj != 0) {
|
||||
struct alignlist *l;
|
||||
__MALLOC_LOCK;
|
||||
for (l = _aligned_blocks; l != NULL; l = l->next)
|
||||
if (l->aligned == NULL)
|
||||
/* This slot is free. Use it. */
|
||||
break;
|
||||
if (l == NULL) {
|
||||
l = (struct alignlist *) malloc (sizeof (struct alignlist));
|
||||
if (l == NULL) {
|
||||
free(result);
|
||||
result = NULL;
|
||||
goto DONE;
|
||||
}
|
||||
l->next = _aligned_blocks;
|
||||
_aligned_blocks = l;
|
||||
}
|
||||
l->exact = result;
|
||||
result = l->aligned = (char *) result + alignment - adj;
|
||||
adj = (unsigned long int) ((unsigned long int) ((char *) result - (char *) NULL)) % alignment;
|
||||
if (adj != 0) {
|
||||
struct alignlist *l;
|
||||
__MALLOC_LOCK;
|
||||
for (l = _aligned_blocks; l != NULL; l = l->next)
|
||||
if (l->aligned == NULL)
|
||||
/* This slot is free. Use it. */
|
||||
break;
|
||||
if (l == NULL) {
|
||||
l = (struct alignlist *) malloc (sizeof (struct alignlist));
|
||||
if (l == NULL) {
|
||||
free(result);
|
||||
result = NULL;
|
||||
goto DONE;
|
||||
}
|
||||
l->next = _aligned_blocks;
|
||||
_aligned_blocks = l;
|
||||
}
|
||||
l->exact = result;
|
||||
result = l->aligned = (char *) result + alignment - adj;
|
||||
DONE:
|
||||
__MALLOC_UNLOCK;
|
||||
}
|
||||
__MALLOC_UNLOCK;
|
||||
}
|
||||
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
libc_hidden_def(memalign)
|
||||
#endif
|
||||
|
||||
@@ -42,38 +42,38 @@ static int __malloc_trim(size_t pad, mstate av)
|
||||
|
||||
if (extra > 0) {
|
||||
|
||||
/*
|
||||
Only proceed if end of memory is where we last set it.
|
||||
This avoids problems if there were foreign sbrk calls.
|
||||
*/
|
||||
current_brk = (char*)(MORECORE(0));
|
||||
if (current_brk == (char*)(av->top) + top_size) {
|
||||
/*
|
||||
Only proceed if end of memory is where we last set it.
|
||||
This avoids problems if there were foreign sbrk calls.
|
||||
*/
|
||||
current_brk = (char*)(MORECORE(0));
|
||||
if (current_brk == (char*)(av->top) + top_size) {
|
||||
|
||||
/*
|
||||
Attempt to release memory. We ignore MORECORE return value,
|
||||
and instead call again to find out where new end of memory is.
|
||||
This avoids problems if first call releases less than we asked,
|
||||
of if failure somehow altered brk value. (We could still
|
||||
encounter problems if it altered brk in some very bad way,
|
||||
but the only thing we can do is adjust anyway, which will cause
|
||||
some downstream failure.)
|
||||
*/
|
||||
/*
|
||||
Attempt to release memory. We ignore MORECORE return value,
|
||||
and instead call again to find out where new end of memory is.
|
||||
This avoids problems if first call releases less than we asked,
|
||||
of if failure somehow altered brk value. (We could still
|
||||
encounter problems if it altered brk in some very bad way,
|
||||
but the only thing we can do is adjust anyway, which will cause
|
||||
some downstream failure.)
|
||||
*/
|
||||
|
||||
MORECORE(-extra);
|
||||
new_brk = (char*)(MORECORE(0));
|
||||
MORECORE(-extra);
|
||||
new_brk = (char*)(MORECORE(0));
|
||||
|
||||
if (new_brk != (char*)MORECORE_FAILURE) {
|
||||
released = (long)(current_brk - new_brk);
|
||||
if (new_brk != (char*)MORECORE_FAILURE) {
|
||||
released = (long)(current_brk - new_brk);
|
||||
|
||||
if (released != 0) {
|
||||
/* Success. Adjust top. */
|
||||
av->sbrked_mem -= released;
|
||||
set_head(av->top, (top_size - released) | PREV_INUSE);
|
||||
check_malloc_state();
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (released != 0) {
|
||||
/* Success. Adjust top. */
|
||||
av->sbrked_mem -= released;
|
||||
set_head(av->top, (top_size - released) | PREV_INUSE);
|
||||
check_malloc_state();
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -129,8 +129,8 @@ static void malloc_init_state(mstate av)
|
||||
|
||||
/* Establish circular links for normal bins */
|
||||
for (i = 1; i < NBINS; ++i) {
|
||||
bin = bin_at(av,i);
|
||||
bin->fd = bin->bk = bin;
|
||||
bin = bin_at(av,i);
|
||||
bin->fd = bin->bk = bin;
|
||||
}
|
||||
|
||||
av->top_pad = DEFAULT_TOP_PAD;
|
||||
@@ -195,80 +195,80 @@ void attribute_hidden __malloc_consolidate(mstate av)
|
||||
*/
|
||||
|
||||
if (av->max_fast != 0) {
|
||||
clear_fastchunks(av);
|
||||
clear_fastchunks(av);
|
||||
|
||||
unsorted_bin = unsorted_chunks(av);
|
||||
unsorted_bin = unsorted_chunks(av);
|
||||
|
||||
/*
|
||||
Remove each chunk from fast bin and consolidate it, placing it
|
||||
then in unsorted bin. Among other reasons for doing this,
|
||||
placing in unsorted bin avoids needing to calculate actual bins
|
||||
until malloc is sure that chunks aren't immediately going to be
|
||||
reused anyway.
|
||||
*/
|
||||
/*
|
||||
Remove each chunk from fast bin and consolidate it, placing it
|
||||
then in unsorted bin. Among other reasons for doing this,
|
||||
placing in unsorted bin avoids needing to calculate actual bins
|
||||
until malloc is sure that chunks aren't immediately going to be
|
||||
reused anyway.
|
||||
*/
|
||||
|
||||
maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
|
||||
fb = &(av->fastbins[0]);
|
||||
do {
|
||||
if ( (p = *fb) != 0) {
|
||||
*fb = 0;
|
||||
maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
|
||||
fb = &(av->fastbins[0]);
|
||||
do {
|
||||
if ( (p = *fb) != 0) {
|
||||
*fb = 0;
|
||||
|
||||
do {
|
||||
do {
|
||||
CHECK_PTR(p);
|
||||
check_inuse_chunk(p);
|
||||
nextp = REVEAL_PTR(&p->fd, p->fd);
|
||||
check_inuse_chunk(p);
|
||||
nextp = REVEAL_PTR(&p->fd, p->fd);
|
||||
|
||||
/* Slightly streamlined version of consolidation code in free() */
|
||||
size = p->size & ~PREV_INUSE;
|
||||
nextchunk = chunk_at_offset(p, size);
|
||||
nextsize = chunksize(nextchunk);
|
||||
/* Slightly streamlined version of consolidation code in free() */
|
||||
size = p->size & ~PREV_INUSE;
|
||||
nextchunk = chunk_at_offset(p, size);
|
||||
nextsize = chunksize(nextchunk);
|
||||
|
||||
if (!prev_inuse(p)) {
|
||||
prevsize = p->prev_size;
|
||||
size += prevsize;
|
||||
p = chunk_at_offset(p, -((long) prevsize));
|
||||
unlink(p, bck, fwd);
|
||||
}
|
||||
if (!prev_inuse(p)) {
|
||||
prevsize = p->prev_size;
|
||||
size += prevsize;
|
||||
p = chunk_at_offset(p, -((long) prevsize));
|
||||
unlink(p, bck, fwd);
|
||||
}
|
||||
|
||||
if (nextchunk != av->top) {
|
||||
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
|
||||
set_head(nextchunk, nextsize);
|
||||
if (nextchunk != av->top) {
|
||||
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
|
||||
set_head(nextchunk, nextsize);
|
||||
|
||||
if (!nextinuse) {
|
||||
size += nextsize;
|
||||
unlink(nextchunk, bck, fwd);
|
||||
}
|
||||
if (!nextinuse) {
|
||||
size += nextsize;
|
||||
unlink(nextchunk, bck, fwd);
|
||||
}
|
||||
|
||||
first_unsorted = unsorted_bin->fd;
|
||||
unsorted_bin->fd = p;
|
||||
first_unsorted->bk = p;
|
||||
first_unsorted = unsorted_bin->fd;
|
||||
unsorted_bin->fd = p;
|
||||
first_unsorted->bk = p;
|
||||
|
||||
set_head(p, size | PREV_INUSE);
|
||||
p->bk = unsorted_bin;
|
||||
p->fd = first_unsorted;
|
||||
set_foot(p, size);
|
||||
}
|
||||
set_head(p, size | PREV_INUSE);
|
||||
p->bk = unsorted_bin;
|
||||
p->fd = first_unsorted;
|
||||
set_foot(p, size);
|
||||
}
|
||||
|
||||
else {
|
||||
size += nextsize;
|
||||
set_head(p, size | PREV_INUSE);
|
||||
av->top = p;
|
||||
}
|
||||
else {
|
||||
size += nextsize;
|
||||
set_head(p, size | PREV_INUSE);
|
||||
av->top = p;
|
||||
}
|
||||
|
||||
} while ( (p = nextp) != 0);
|
||||
} while ( (p = nextp) != 0);
|
||||
|
||||
}
|
||||
} while (fb++ != maxfb);
|
||||
}
|
||||
} while (fb++ != maxfb);
|
||||
}
|
||||
else {
|
||||
malloc_init_state(av);
|
||||
check_malloc_state();
|
||||
malloc_init_state(av);
|
||||
check_malloc_state();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* ------------------------------ free ------------------------------ */
|
||||
void free(void* mem)
|
||||
void __wrap_free(void* mem)
|
||||
{
|
||||
mstate av;
|
||||
|
||||
@@ -284,7 +284,7 @@ void free(void* mem)
|
||||
|
||||
/* free(0) has no effect */
|
||||
if (mem == NULL)
|
||||
return;
|
||||
return;
|
||||
|
||||
__MALLOC_LOCK;
|
||||
av = get_malloc_state();
|
||||
@@ -301,16 +301,16 @@ void free(void* mem)
|
||||
if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
|
||||
|
||||
#if TRIM_FASTBINS
|
||||
/* If TRIM_FASTBINS set, don't place chunks
|
||||
bordering top into fastbins */
|
||||
&& (chunk_at_offset(p, size) != av->top)
|
||||
/* If TRIM_FASTBINS set, don't place chunks
|
||||
bordering top into fastbins */
|
||||
&& (chunk_at_offset(p, size) != av->top)
|
||||
#endif
|
||||
) {
|
||||
|
||||
set_fastchunks(av);
|
||||
fb = &(av->fastbins[fastbin_index(size)]);
|
||||
p->fd = PROTECT_PTR(&p->fd, *fb);
|
||||
*fb = p;
|
||||
set_fastchunks(av);
|
||||
fb = &(av->fastbins[fastbin_index(size)]);
|
||||
p->fd = PROTECT_PTR(&p->fd, *fb);
|
||||
*fb = p;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -318,82 +318,82 @@ void free(void* mem)
|
||||
*/
|
||||
|
||||
else if (!chunk_is_mmapped(p)) {
|
||||
set_anychunks(av);
|
||||
set_anychunks(av);
|
||||
|
||||
nextchunk = chunk_at_offset(p, size);
|
||||
nextsize = chunksize(nextchunk);
|
||||
nextchunk = chunk_at_offset(p, size);
|
||||
nextsize = chunksize(nextchunk);
|
||||
|
||||
/* consolidate backward */
|
||||
if (!prev_inuse(p)) {
|
||||
prevsize = p->prev_size;
|
||||
size += prevsize;
|
||||
p = chunk_at_offset(p, -((long) prevsize));
|
||||
unlink(p, bck, fwd);
|
||||
}
|
||||
/* consolidate backward */
|
||||
if (!prev_inuse(p)) {
|
||||
prevsize = p->prev_size;
|
||||
size += prevsize;
|
||||
p = chunk_at_offset(p, -((long) prevsize));
|
||||
unlink(p, bck, fwd);
|
||||
}
|
||||
|
||||
if (nextchunk != av->top) {
|
||||
/* get and clear inuse bit */
|
||||
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
|
||||
set_head(nextchunk, nextsize);
|
||||
if (nextchunk != av->top) {
|
||||
/* get and clear inuse bit */
|
||||
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
|
||||
set_head(nextchunk, nextsize);
|
||||
|
||||
/* consolidate forward */
|
||||
if (!nextinuse) {
|
||||
unlink(nextchunk, bck, fwd);
|
||||
size += nextsize;
|
||||
}
|
||||
/* consolidate forward */
|
||||
if (!nextinuse) {
|
||||
unlink(nextchunk, bck, fwd);
|
||||
size += nextsize;
|
||||
}
|
||||
|
||||
/*
|
||||
Place the chunk in unsorted chunk list. Chunks are
|
||||
not placed into regular bins until after they have
|
||||
been given one chance to be used in malloc.
|
||||
*/
|
||||
/*
|
||||
Place the chunk in unsorted chunk list. Chunks are
|
||||
not placed into regular bins until after they have
|
||||
been given one chance to be used in malloc.
|
||||
*/
|
||||
|
||||
bck = unsorted_chunks(av);
|
||||
fwd = bck->fd;
|
||||
p->bk = bck;
|
||||
p->fd = fwd;
|
||||
bck->fd = p;
|
||||
fwd->bk = p;
|
||||
bck = unsorted_chunks(av);
|
||||
fwd = bck->fd;
|
||||
p->bk = bck;
|
||||
p->fd = fwd;
|
||||
bck->fd = p;
|
||||
fwd->bk = p;
|
||||
|
||||
set_head(p, size | PREV_INUSE);
|
||||
set_foot(p, size);
|
||||
set_head(p, size | PREV_INUSE);
|
||||
set_foot(p, size);
|
||||
|
||||
check_free_chunk(p);
|
||||
}
|
||||
check_free_chunk(p);
|
||||
}
|
||||
|
||||
/*
|
||||
If the chunk borders the current high end of memory,
|
||||
consolidate into top
|
||||
*/
|
||||
/*
|
||||
If the chunk borders the current high end of memory,
|
||||
consolidate into top
|
||||
*/
|
||||
|
||||
else {
|
||||
size += nextsize;
|
||||
set_head(p, size | PREV_INUSE);
|
||||
av->top = p;
|
||||
check_chunk(p);
|
||||
}
|
||||
else {
|
||||
size += nextsize;
|
||||
set_head(p, size | PREV_INUSE);
|
||||
av->top = p;
|
||||
check_chunk(p);
|
||||
}
|
||||
|
||||
/*
|
||||
If freeing a large space, consolidate possibly-surrounding
|
||||
chunks. Then, if the total unused topmost memory exceeds trim
|
||||
threshold, ask malloc_trim to reduce top.
|
||||
/*
|
||||
If freeing a large space, consolidate possibly-surrounding
|
||||
chunks. Then, if the total unused topmost memory exceeds trim
|
||||
threshold, ask malloc_trim to reduce top.
|
||||
|
||||
Unless max_fast is 0, we don't know if there are fastbins
|
||||
bordering top, so we cannot tell for sure whether threshold
|
||||
has been reached unless fastbins are consolidated. But we
|
||||
don't want to consolidate on each free. As a compromise,
|
||||
consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
|
||||
is reached.
|
||||
*/
|
||||
Unless max_fast is 0, we don't know if there are fastbins
|
||||
bordering top, so we cannot tell for sure whether threshold
|
||||
has been reached unless fastbins are consolidated. But we
|
||||
don't want to consolidate on each free. As a compromise,
|
||||
consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
|
||||
is reached.
|
||||
*/
|
||||
|
||||
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
|
||||
if (have_fastchunks(av))
|
||||
__malloc_consolidate(av);
|
||||
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
|
||||
if (have_fastchunks(av))
|
||||
__malloc_consolidate(av);
|
||||
|
||||
if ((unsigned long)(chunksize(av->top)) >=
|
||||
(unsigned long)(av->trim_threshold))
|
||||
__malloc_trim(av->top_pad, av);
|
||||
}
|
||||
if ((unsigned long)(chunksize(av->top)) >=
|
||||
(unsigned long)(av->trim_threshold))
|
||||
__malloc_trim(av->top_pad, av);
|
||||
}
|
||||
|
||||
}
|
||||
/*
|
||||
@@ -405,13 +405,13 @@ void free(void* mem)
|
||||
*/
|
||||
|
||||
else {
|
||||
size_t offset = p->prev_size;
|
||||
av->n_mmaps--;
|
||||
av->mmapped_mem -= (size + offset);
|
||||
munmap((char*)p - offset, size + offset);
|
||||
size_t offset = p->prev_size;
|
||||
av->n_mmaps--;
|
||||
av->mmapped_mem -= (size + offset);
|
||||
munmap((char*)p - offset, size + offset);
|
||||
}
|
||||
__MALLOC_UNLOCK;
|
||||
}
|
||||
|
||||
/* glibc compatibilty */
|
||||
weak_alias(free, __libc_free)
|
||||
weak_alias(__wrap_free, __libc_free)
|
||||
|
||||
File diff suppressed because it is too large
@@ -18,7 +18,7 @@
|
||||
|
||||
|
||||
/* ------------------------------ realloc ------------------------------ */
|
||||
void* realloc(void* oldmem, size_t bytes)
|
||||
void* __wrap_realloc(void* oldmem, size_t bytes)
|
||||
{
|
||||
mstate av;
|
||||
|
||||
@@ -48,10 +48,10 @@ void* realloc(void* oldmem, size_t bytes)
|
||||
|
||||
/* Check for special cases. */
|
||||
if (! oldmem)
|
||||
return malloc(bytes);
|
||||
return malloc(bytes);
|
||||
if (! bytes) {
|
||||
free (oldmem);
|
||||
return NULL;
|
||||
free (oldmem);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
checked_request2size(bytes, nb);
|
||||
@@ -65,117 +65,117 @@ void* realloc(void* oldmem, size_t bytes)
|
||||
|
||||
if (!chunk_is_mmapped(oldp)) {
|
||||
|
||||
if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
|
||||
/* already big enough; split below */
|
||||
newp = oldp;
|
||||
newsize = oldsize;
|
||||
}
|
||||
if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
|
||||
/* already big enough; split below */
|
||||
newp = oldp;
|
||||
newsize = oldsize;
|
||||
}
|
||||
|
||||
else {
|
||||
next = chunk_at_offset(oldp, oldsize);
|
||||
else {
|
||||
next = chunk_at_offset(oldp, oldsize);
|
||||
|
||||
/* Try to expand forward into top */
|
||||
if (next == av->top &&
|
||||
(unsigned long)(newsize = oldsize + chunksize(next)) >=
|
||||
(unsigned long)(nb + MINSIZE)) {
|
||||
set_head_size(oldp, nb);
|
||||
av->top = chunk_at_offset(oldp, nb);
|
||||
set_head(av->top, (newsize - nb) | PREV_INUSE);
|
||||
retval = chunk2mem(oldp);
|
||||
goto DONE;
|
||||
}
|
||||
/* Try to expand forward into top */
|
||||
if (next == av->top &&
|
||||
(unsigned long)(newsize = oldsize + chunksize(next)) >=
|
||||
(unsigned long)(nb + MINSIZE)) {
|
||||
set_head_size(oldp, nb);
|
||||
av->top = chunk_at_offset(oldp, nb);
|
||||
set_head(av->top, (newsize - nb) | PREV_INUSE);
|
||||
retval = chunk2mem(oldp);
|
||||
goto DONE;
|
||||
}
|
||||
|
||||
/* Try to expand forward into next chunk; split off remainder below */
|
||||
else if (next != av->top &&
|
||||
!inuse(next) &&
|
||||
(unsigned long)(newsize = oldsize + chunksize(next)) >=
|
||||
(unsigned long)(nb)) {
|
||||
newp = oldp;
|
||||
unlink(next, bck, fwd);
|
||||
}
|
||||
/* Try to expand forward into next chunk; split off remainder below */
|
||||
else if (next != av->top &&
|
||||
!inuse(next) &&
|
||||
(unsigned long)(newsize = oldsize + chunksize(next)) >=
|
||||
(unsigned long)(nb)) {
|
||||
newp = oldp;
|
||||
unlink(next, bck, fwd);
|
||||
}
|
||||
|
||||
/* allocate, copy, free */
|
||||
else {
|
||||
newmem = malloc(nb - MALLOC_ALIGN_MASK);
|
||||
if (newmem == 0) {
|
||||
retval = 0; /* propagate failure */
|
||||
goto DONE;
|
||||
}
|
||||
/* allocate, copy, free */
|
||||
else {
|
||||
newmem = malloc(nb - MALLOC_ALIGN_MASK);
|
||||
if (newmem == 0) {
|
||||
retval = 0; /* propagate failure */
|
||||
goto DONE;
|
||||
}
|
||||
|
||||
newp = mem2chunk(newmem);
|
||||
newsize = chunksize(newp);
|
||||
newp = mem2chunk(newmem);
|
||||
newsize = chunksize(newp);
|
||||
|
||||
/*
|
||||
Avoid copy if newp is next chunk after oldp.
|
||||
*/
|
||||
if (newp == next) {
|
||||
newsize += oldsize;
|
||||
newp = oldp;
|
||||
}
|
||||
else {
|
||||
/*
|
||||
Unroll copy of <= 36 bytes (72 if 8byte sizes)
|
||||
We know that contents have an odd number of
|
||||
size_t-sized words; minimally 3.
|
||||
*/
|
||||
/*
|
||||
Avoid copy if newp is next chunk after oldp.
|
||||
*/
|
||||
if (newp == next) {
|
||||
newsize += oldsize;
|
||||
newp = oldp;
|
||||
}
|
||||
else {
|
||||
/*
|
||||
Unroll copy of <= 36 bytes (72 if 8byte sizes)
|
||||
We know that contents have an odd number of
|
||||
size_t-sized words; minimally 3.
|
||||
*/
|
||||
|
||||
copysize = oldsize - (sizeof(size_t));
|
||||
s = (size_t*)(oldmem);
|
||||
d = (size_t*)(newmem);
|
||||
ncopies = copysize / sizeof(size_t);
|
||||
assert(ncopies >= 3);
|
||||
copysize = oldsize - (sizeof(size_t));
|
||||
s = (size_t*)(oldmem);
|
||||
d = (size_t*)(newmem);
|
||||
ncopies = copysize / sizeof(size_t);
|
||||
assert(ncopies >= 3);
|
||||
|
||||
if (ncopies > 9)
|
||||
memcpy(d, s, copysize);
|
||||
if (ncopies > 9)
|
||||
memcpy(d, s, copysize);
|
||||
|
||||
else {
|
||||
*(d+0) = *(s+0);
|
||||
*(d+1) = *(s+1);
|
||||
*(d+2) = *(s+2);
|
||||
if (ncopies > 4) {
|
||||
*(d+3) = *(s+3);
|
||||
*(d+4) = *(s+4);
|
||||
if (ncopies > 6) {
|
||||
*(d+5) = *(s+5);
|
||||
*(d+6) = *(s+6);
|
||||
if (ncopies > 8) {
|
||||
*(d+7) = *(s+7);
|
||||
*(d+8) = *(s+8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
*(d+0) = *(s+0);
|
||||
*(d+1) = *(s+1);
|
||||
*(d+2) = *(s+2);
|
||||
if (ncopies > 4) {
|
||||
*(d+3) = *(s+3);
|
||||
*(d+4) = *(s+4);
|
||||
if (ncopies > 6) {
|
||||
*(d+5) = *(s+5);
|
||||
*(d+6) = *(s+6);
|
||||
if (ncopies > 8) {
|
||||
*(d+7) = *(s+7);
|
||||
*(d+8) = *(s+8);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
free(oldmem);
|
||||
check_inuse_chunk(newp);
|
||||
retval = chunk2mem(newp);
|
||||
goto DONE;
|
||||
}
|
||||
}
|
||||
}
|
||||
free(oldmem);
|
||||
check_inuse_chunk(newp);
|
||||
retval = chunk2mem(newp);
|
||||
goto DONE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If possible, free extra space in old or extended chunk */
|
||||
/* If possible, free extra space in old or extended chunk */
|
||||
|
||||
assert((unsigned long)(newsize) >= (unsigned long)(nb));
|
||||
assert((unsigned long)(newsize) >= (unsigned long)(nb));
|
||||
|
||||
remainder_size = newsize - nb;
|
||||
remainder_size = newsize - nb;
|
||||
|
||||
if (remainder_size < MINSIZE) { /* not enough extra to split off */
|
||||
set_head_size(newp, newsize);
|
||||
set_inuse_bit_at_offset(newp, newsize);
|
||||
}
|
||||
else { /* split remainder */
|
||||
remainder = chunk_at_offset(newp, nb);
|
||||
set_head_size(newp, nb);
|
||||
set_head(remainder, remainder_size | PREV_INUSE);
|
||||
/* Mark remainder as inuse so free() won't complain */
|
||||
set_inuse_bit_at_offset(remainder, remainder_size);
|
||||
free(chunk2mem(remainder));
|
||||
}
|
||||
if (remainder_size < MINSIZE) { /* not enough extra to split off */
|
||||
set_head_size(newp, newsize);
|
||||
set_inuse_bit_at_offset(newp, newsize);
|
||||
}
|
||||
else { /* split remainder */
|
||||
remainder = chunk_at_offset(newp, nb);
|
||||
set_head_size(newp, nb);
|
||||
set_head(remainder, remainder_size | PREV_INUSE);
|
||||
/* Mark remainder as inuse so free() won't complain */
|
||||
set_inuse_bit_at_offset(remainder, remainder_size);
|
||||
free(chunk2mem(remainder));
|
||||
}
|
||||
|
||||
check_inuse_chunk(newp);
|
||||
retval = chunk2mem(newp);
|
||||
goto DONE;
|
||||
check_inuse_chunk(newp);
|
||||
retval = chunk2mem(newp);
|
||||
goto DONE;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -183,54 +183,54 @@ void* realloc(void* oldmem, size_t bytes)
|
||||
*/
|
||||
|
||||
else {
|
||||
size_t offset = oldp->prev_size;
|
||||
size_t pagemask = av->pagesize - 1;
|
||||
char *cp;
|
||||
unsigned long sum;
|
||||
size_t offset = oldp->prev_size;
|
||||
size_t pagemask = av->pagesize - 1;
|
||||
char *cp;
|
||||
unsigned long sum;
|
||||
|
||||
/* Note the extra (sizeof(size_t)) overhead */
|
||||
newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
|
||||
/* Note the extra (sizeof(size_t)) overhead */
|
||||
newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
|
||||
|
||||
/* don't need to remap if still within same page */
|
||||
if (oldsize == newsize - offset) {
|
||||
retval = oldmem;
|
||||
goto DONE;
|
||||
}
|
||||
/* don't need to remap if still within same page */
|
||||
if (oldsize == newsize - offset) {
|
||||
retval = oldmem;
|
||||
goto DONE;
|
||||
}
|
||||
|
||||
cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
|
||||
cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
|
||||
|
||||
if (cp != (char*)MORECORE_FAILURE) {
|
||||
if (cp != (char*)MORECORE_FAILURE) {
|
||||
|
||||
newp = (mchunkptr)(cp + offset);
|
||||
set_head(newp, (newsize - offset)|IS_MMAPPED);
|
||||
newp = (mchunkptr)(cp + offset);
|
||||
set_head(newp, (newsize - offset)|IS_MMAPPED);
|
||||
|
||||
assert(aligned_OK(chunk2mem(newp)));
|
||||
assert((newp->prev_size == offset));
|
||||
assert(aligned_OK(chunk2mem(newp)));
|
||||
assert((newp->prev_size == offset));
|
||||
|
||||
/* update statistics */
|
||||
sum = av->mmapped_mem += newsize - oldsize;
|
||||
if (sum > (unsigned long)(av->max_mmapped_mem))
|
||||
av->max_mmapped_mem = sum;
|
||||
sum += av->sbrked_mem;
|
||||
if (sum > (unsigned long)(av->max_total_mem))
|
||||
av->max_total_mem = sum;
|
||||
/* update statistics */
|
||||
sum = av->mmapped_mem += newsize - oldsize;
|
||||
if (sum > (unsigned long)(av->max_mmapped_mem))
|
||||
av->max_mmapped_mem = sum;
|
||||
sum += av->sbrked_mem;
|
||||
if (sum > (unsigned long)(av->max_total_mem))
|
||||
av->max_total_mem = sum;
|
||||
|
||||
retval = chunk2mem(newp);
|
||||
goto DONE;
|
||||
}
|
||||
retval = chunk2mem(newp);
|
||||
goto DONE;
|
||||
}
|
||||
|
||||
/* Note the extra (sizeof(size_t)) overhead. */
|
||||
if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
|
||||
newmem = oldmem; /* do nothing */
|
||||
else {
|
||||
/* Must alloc, copy, free. */
|
||||
newmem = malloc(nb - MALLOC_ALIGN_MASK);
|
||||
if (newmem != 0) {
|
||||
memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
|
||||
free(oldmem);
|
||||
}
|
||||
}
|
||||
retval = newmem;
|
||||
/* Note the extra (sizeof(size_t)) overhead. */
|
||||
if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
|
||||
newmem = oldmem; /* do nothing */
|
||||
else {
|
||||
/* Must alloc, copy, free. */
|
||||
newmem = malloc(nb - MALLOC_ALIGN_MASK);
|
||||
if (newmem != 0) {
|
||||
memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
|
||||
free(oldmem);
|
||||
}
|
||||
}
|
||||
retval = newmem;
|
||||
}
|
||||
|
||||
DONE:
|
||||
@@ -239,4 +239,4 @@ void* realloc(void* oldmem, size_t bytes)
|
||||
}
|
||||
|
||||
/* glibc compatibilty */
|
||||
weak_alias(realloc, __libc_realloc)
|
||||
weak_alias(__wrap_realloc, __libc_realloc)
|
||||
|
||||
@@ -28,9 +28,9 @@
|
||||
static void
|
||||
__free_to_heap (void *mem, struct heap_free_area **heap
|
||||
#ifdef HEAP_USE_LOCKING
|
||||
, __UCLIBC_MUTEX_TYPE *heap_lock
|
||||
, __UCLIBC_MUTEX_TYPE *heap_lock
|
||||
#endif
|
||||
)
|
||||
)
|
||||
{
|
||||
size_t size;
|
||||
struct heap_free_area *fa;
|
||||
@@ -42,7 +42,7 @@ __free_to_heap (void *mem, struct heap_free_area **heap
|
||||
/* Normal free. */
|
||||
|
||||
MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)",
|
||||
(long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem));
|
||||
(long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem));
|
||||
|
||||
size = MALLOC_SIZE (mem);
|
||||
mem = MALLOC_BASE (mem);
|
||||
@@ -73,45 +73,45 @@ __free_to_heap (void *mem, struct heap_free_area **heap
|
||||
|
||||
#ifdef MALLOC_USE_SBRK
|
||||
/* Get the sbrk lock so that the two possible calls to sbrk below
|
||||
are guaranteed to be contiguous. */
|
||||
are guaranteed to be contiguous. */
|
||||
__malloc_lock_sbrk ();
|
||||
/* When using sbrk, we only shrink the heap from the end. It would
|
||||
be possible to allow _both_ -- shrinking via sbrk when possible,
|
||||
and otherwise shrinking via munmap, but this results in holes in
|
||||
memory that prevent the brk from every growing back down; since
|
||||
we only ever grow the heap via sbrk, this tends to produce a
|
||||
continuously growing brk (though the actual memory is unmapped),
|
||||
which could eventually run out of address space. Note that
|
||||
`sbrk(0)' shouldn't normally do a system call, so this test is
|
||||
reasonably cheap. */
|
||||
be possible to allow _both_ -- shrinking via sbrk when possible,
|
||||
and otherwise shrinking via munmap, but this results in holes in
|
||||
memory that prevent the brk from every growing back down; since
|
||||
we only ever grow the heap via sbrk, this tends to produce a
|
||||
continuously growing brk (though the actual memory is unmapped),
|
||||
which could eventually run out of address space. Note that
|
||||
`sbrk(0)' shouldn't normally do a system call, so this test is
|
||||
reasonably cheap. */
|
||||
if ((void *)end != sbrk (0))
|
||||
{
|
||||
MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
|
||||
start, end, end - start);
|
||||
__malloc_unlock_sbrk ();
|
||||
__heap_unlock (heap_lock);
|
||||
return;
|
||||
}
|
||||
{
|
||||
MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
|
||||
start, end, end - start);
|
||||
__malloc_unlock_sbrk ();
|
||||
__heap_unlock (heap_lock);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)",
|
||||
start, end, end - start);
|
||||
start, end, end - start);
|
||||
|
||||
/* Remove FA from the heap. */
|
||||
__heap_delete (heap, fa);
|
||||
|
||||
if (__heap_is_empty (heap))
|
||||
/* We want to avoid the heap from losing all memory, so reserve
|
||||
a bit. This test is only a heuristic -- the existance of
|
||||
another free area, even if it's smaller than
|
||||
MALLOC_MIN_SIZE, will cause us not to reserve anything. */
|
||||
{
|
||||
/* Put the reserved memory back in the heap; we assume that
|
||||
MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
|
||||
we use the latter unconditionally here. */
|
||||
__heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
|
||||
start += MALLOC_MIN_SIZE;
|
||||
}
|
||||
/* We want to avoid the heap from losing all memory, so reserve
|
||||
a bit. This test is only a heuristic -- the existance of
|
||||
another free area, even if it's smaller than
|
||||
MALLOC_MIN_SIZE, will cause us not to reserve anything. */
|
||||
{
|
||||
/* Put the reserved memory back in the heap; we assume that
|
||||
MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
|
||||
we use the latter unconditionally here. */
|
||||
__heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
|
||||
start += MALLOC_MIN_SIZE;
|
||||
}
|
||||
|
||||
#ifdef MALLOC_USE_SBRK
|
||||
|
||||
@@ -126,99 +126,99 @@ __free_to_heap (void *mem, struct heap_free_area **heap

# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
  /* Using the uClinux broken munmap, we have to only munmap blocks
     exactly as we got them from mmap, so scan through our list of
     mmapped blocks, and return them in order. */

  MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...",
                    start, end - start);

  prev_mmb = 0;
  mmb = __malloc_mmapped_blocks;
  while (mmb
         && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size)
             <= end))
    {
      MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]",
                        (unsigned)mmb, mmb_start, mmb_end - mmb_start);

      if (mmb_start >= start
          /* If the space between START and MMB_START is non-zero, but
             too small to return to the heap, we can't unmap MMB. */
          && (start == mmb_start
              || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE))
        {
          struct malloc_mmb *next_mmb = mmb->next;

          if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end)
            /* There's too little space left at the end to deallocate
               this block, so give up. */
            break;

          MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]",
                            (unsigned)mmb, mmb_start, mmb_end - mmb_start);

          if (mmb_start != start)
            /* We're going to unmap a part of the heap that begins after
               start, so put the intervening region back into the heap. */
            {
              MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]",
                                start, mmb_start - start);
              __heap_free (heap, (void *)start, mmb_start - start);
            }

          MALLOC_MMB_DEBUG_INDENT (-1);

          /* Unlink MMB from the list. */
          if (prev_mmb)
            prev_mmb->next = next_mmb;
          else
            __malloc_mmapped_blocks = next_mmb;

          /* Start searching again from the end of this block. */
          start = mmb_end;

          /* Release the descriptor block we used. */
          free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);

          /* We have to unlock the heap before we recurse to free the mmb
             descriptor, because we might be unmapping from the mmb
             heap. */
          __heap_unlock (heap_lock);

          /* Do the actual munmap. */
          munmap ((void *)mmb_start, mmb_end - mmb_start);

          __heap_lock (heap_lock);

# ifdef __UCLIBC_HAS_THREADS__
          /* In a multi-threaded program, it's possible that PREV_MMB has
             been invalidated by another thread when we released the
             heap lock to do the munmap system call, so just start over
             from the beginning of the list.  It sucks, but oh well;
             it's probably not worth the bother to do better. */
          prev_mmb = 0;
          mmb = __malloc_mmapped_blocks;
# else
          mmb = next_mmb;
# endif
        }
      else
        {
          prev_mmb = mmb;
          mmb = mmb->next;
        }

      MALLOC_MMB_DEBUG_INDENT (-1);
    }

  if (start != end)
    /* Hmm, well there's something we couldn't unmap, so put it back
       into the heap. */
    {
      MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]",
                        start, end - start);
      __heap_free (heap, (void *)start, end - start);
    }

  /* Finally release the lock for good. */
  __heap_unlock (heap_lock);
@@ -228,34 +228,34 @@ __free_to_heap (void *mem, struct heap_free_area **heap

# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

  /* MEM/LEN may not be page-aligned, so we have to page-align them,
     and return any left-over bits on the end to the heap. */
  unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
  unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);

  /* We have to be careful that any left-over bits are large enough to
     return.  Note that we _don't check_ to make sure there's room to
     grow/shrink the start/end by another page, we just assume that
     the unmap threshold is high enough so that this is always safe
     (i.e., it should probably be at least 3 pages). */
  if (unmap_start > start)
    {
      if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
        unmap_start += MALLOC_PAGE_SIZE;
      __heap_free (heap, (void *)start, unmap_start - start);
    }
  if (end > unmap_end)
    {
      if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
        unmap_end -= MALLOC_PAGE_SIZE;
      __heap_free (heap, (void *)unmap_end, end - unmap_end);
    }

  /* Release the heap lock before we do the system call. */
  __heap_unlock (heap_lock);

  if (unmap_end > unmap_start)
    /* Finally, actually unmap the memory. */
    munmap ((void *)unmap_start, unmap_end - unmap_start);

# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
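The page rounding above is the usual power-of-two mask trick. As a rough, self-contained illustration (independent of uClibc's MALLOC_ROUND_UP_TO_PAGE_SIZE / MALLOC_ROUND_DOWN_TO_PAGE_SIZE macros, whose exact definitions are not shown here):

// Illustrative only: round an address up/down to a page boundary,
// assuming the page size is a power of two (4 KiB here).
#include <cstdint>

constexpr std::uintptr_t PAGE_SIZE = 4096;

constexpr std::uintptr_t round_down_to_page(std::uintptr_t a)
{ return a & ~(PAGE_SIZE - 1); }

constexpr std::uintptr_t round_up_to_page(std::uintptr_t a)
{ return (a + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }

static_assert(round_up_to_page(1) == 4096, "rounds up to next page");
static_assert(round_down_to_page(8191) == 4096, "rounds down to page start");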
@@ -266,7 +266,7 @@ __free_to_heap (void *mem, struct heap_free_area **heap
}

void
free (void *mem)
__wrap_free (void *mem)
{
  free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock);
}
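Renaming free to __wrap_free (and, further down, malloc and realloc to their __wrap_* counterparts) follows GNU ld's --wrap convention: when a binary is linked with --wrap=SYM, undefined references to SYM are resolved against __wrap_SYM, while __real_SYM still refers to the original definition. A generic sketch of that mechanism, with a hypothetical logging wrapper that is not part of this commit:

// Generic illustration of ld's --wrap (link with: -Wl,--wrap=malloc).
// Hypothetical logging wrapper, not code from this commit.
#include <cstddef>
#include <cstdio>

extern "C" void *__real_malloc(std::size_t);   // the original allocator

extern "C" void *__wrap_malloc(std::size_t n)
{
  void *p = __real_malloc(n);                  // forward to the real allocator
  std::fprintf(stderr, "malloc(%zu) = %p\n", n, p);
  return p;
}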
@@ -59,9 +59,9 @@ __UCLIBC_MUTEX_INIT(__malloc_mmb_heap_lock,PTHREAD_RECURSIVE_MUTEX_INITIALIZER_N
static void *
__malloc_from_heap (size_t size, struct heap_free_area **heap
#ifdef HEAP_USE_LOCKING
                    , __UCLIBC_MUTEX_TYPE *heap_lock
#endif
                    )
{
  void *mem;
@@ -82,12 +82,12 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
     from the system, add it to the heap, and try again. */
    {
      /* If we're trying to allocate a block bigger than the default
         MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
      void *block;
      size_t block_size
        = (size < MALLOC_HEAP_EXTEND_SIZE
           ? MALLOC_HEAP_EXTEND_SIZE
           : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));

      /* Allocate the new heap block. */
#ifdef MALLOC_USE_SBRK
@@ -95,24 +95,24 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
      __malloc_lock_sbrk ();

      /* Use sbrk we can, as it's faster than mmap, and guarantees
         contiguous allocation. */
      block = sbrk (block_size);
      if (likely (block != (void *)-1))
        {
          /* Because sbrk can return results of arbitrary
             alignment, align the result to a MALLOC_ALIGNMENT boundary. */
          long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
          if (block != (void *)aligned_block)
            /* Have to adjust.  We should only have to actually do this
               the first time (after which we will have aligned the brk
               correctly). */
            {
              /* Move the brk to reflect the alignment; our next allocation
                 should start on exactly the right alignment. */
              sbrk (aligned_block - (long)block);
              block = (void *)aligned_block;
            }
        }

      __malloc_unlock_sbrk ();
@@ -121,62 +121,62 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
      /* Otherwise, use mmap. */
#ifdef __ARCH_USE_MMU__
      block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#else
      block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED, 0, 0);
#endif

#endif /* MALLOC_USE_SBRK */

      if (likely (block != (void *)-1))
        {
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
          struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
#endif

          MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)",
                        (long)block, (long)block + block_size, block_size);

          /* Get back the heap lock. */
          __heap_lock (heap_lock);

          /* Put BLOCK into the heap. */
          __heap_free (heap, block, block_size);

          MALLOC_DEBUG_INDENT (-1);

          /* Try again to allocate. */
          mem = __heap_alloc (heap, &size);

#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
          /* Insert a record of BLOCK in sorted order into the
             __malloc_mmapped_blocks list. */

          new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);

          for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
               mmb;
               prev_mmb = mmb, mmb = mmb->next)
            if (block < mmb->mem)
              break;

          new_mmb->next = mmb;
          new_mmb->mem = block;
          new_mmb->size = block_size;

          if (prev_mmb)
            prev_mmb->next = new_mmb;
          else
            __malloc_mmapped_blocks = new_mmb;

          MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]",
                            (unsigned)new_mmb,
                            (unsigned)new_mmb->mem, block_size);
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
          __heap_unlock (heap_lock);
        }
    }

  if (likely (mem))
@@ -185,7 +185,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
      mem = MALLOC_SETUP (mem, size);

      MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)",
                    (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
    }
  else
    MALLOC_DEBUG (-1, "malloc: returning 0");
@@ -194,7 +194,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
}

void *
malloc (size_t size)
__wrap_malloc (size_t size)
{
  void *mem;
#ifdef MALLOC_DEBUGGING
@@ -21,7 +21,7 @@

void *
realloc (void *mem, size_t new_size)
__wrap_realloc (void *mem, size_t new_size)
{
  size_t size;
  char *base_mem;
@@ -56,7 +56,7 @@ realloc (void *mem, size_t new_size)
    new_size = HEAP_ADJUST_SIZE (sizeof (struct heap_free_area));

  MALLOC_DEBUG (1, "realloc: 0x%lx, %d (base = 0x%lx, total_size = %d)",
                (long)mem, new_size, (long)base_mem, size);

  if (new_size > size)
    /* Grow the block. */
@@ -68,20 +68,20 @@ realloc (void *mem, size_t new_size)
      __heap_unlock (&__malloc_heap_lock);

      if (extra)
        /* Record the changed size. */
        MALLOC_SET_SIZE (base_mem, size + extra);
      else
        /* Our attempts to extend MEM in place failed, just
           allocate-and-copy. */
        {
          void *new_mem = malloc (new_size - MALLOC_HEADER_SIZE);
          if (new_mem)
            {
              memcpy (new_mem, mem, size - MALLOC_HEADER_SIZE);
              free (mem);
            }
          mem = new_mem;
        }
    }
  else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size)
    /* Shrink the block. */
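The fallback branch above is the classic allocate-copy-free sequence used when a block cannot be extended in place. A simplified sketch of the same idea, leaving out the MALLOC_HEADER_SIZE bookkeeping of the real code (old_size here stands for whatever the allocator records per block):

// Simplified sketch of the allocate-and-copy fallback; 'old_size' would come
// from the allocator's per-block metadata in the real implementation.
#include <cstdlib>
#include <cstring>

void *grow_by_copy(void *old_mem, std::size_t old_size, std::size_t new_size)
{
  void *new_mem = std::malloc(new_size);
  if (new_mem)
    {
      std::memcpy(new_mem, old_mem, old_size < new_size ? old_size : new_size);
      std::free(old_mem);
    }
  return new_mem;  // NULL on failure; old_mem is then left untouched
}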
@@ -94,7 +94,7 @@ realloc (void *mem, size_t new_size)

  if (mem)
    MALLOC_DEBUG (-1, "realloc: returning 0x%lx (base:0x%lx, total_size:%d)",
                  (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
  else
    MALLOC_DEBUG (-1, "realloc: returning 0");
@@ -1,6 +1,6 @@
Provides: libc_be_socket_noop libc_be_l4re libc_support_misc
libc_be_fs_noop libc_be_math libc_be_l4refile libinitcwd
libc_be_minimal_log_io libmount libc_be_sig
libc_be_sem_noop libc_be_static_heap
libc_be_sem_noop libc_be_static_heap libc_be_mem
Requires: l4re libl4re-vfs libc-headers
Maintainer: adam@os.inf.tu-dresden.de
12  src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/Makefile  (Normal file)
@@ -0,0 +1,12 @@
PKGDIR ?= ../..
L4DIR  ?= $(PKGDIR)/../../..

TARGET        = libc_be_mem.a libc_be_mem.so
LINK_INCR     = libc_be_mem.a
PC_FILENAME   = libc_be_mem
REQUIRES_LIBS = l4re
SRC_CC        = mem.cc

include $(L4DIR)/mk/lib.mk

LDFLAGS := $(filter-out -gc-sections,$(LDFLAGS))
32  src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/mem.cc  (Normal file)
@@ -0,0 +1,32 @@
/**
 * \file   libc_backends/l4re_mem/mem.cc
 */
/*
 * (c) 2004-2009 Technische Universität Dresden
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU Lesser General Public License 2.1.
 * Please see the COPYING-LGPL-2.1 file for details.
 */
#include <stdlib.h>
#include <l4/sys/kdebug.h>

void *malloc(size_t size) throw()
{
  void *data = 0;
  enter_kdebug("malloc");
  return (void*)data;
}


void free(void *p) throw()
{
  if (p)
    enter_kdebug("free");
}

void *realloc(void *p, size_t size) throw()
{
  void *data = 0;
  enter_kdebug("realloc");
  return (void*)data;
}
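As committed, the backend is only a stub: every entry point immediately drops into the kernel debugger via enter_kdebug(), so a program that actually reaches this malloc stops in JDB rather than receiving memory. Purely as a hypothetical illustration of what a minimal functional body could look like (not the commit's code; no locking, free() left as a no-op, and a fixed static buffer instead of an L4Re dataspace):

// Hypothetical replacement for the stub above: a trivial bump allocator
// over a static buffer. Illustration only -- a real backend would manage
// memory from L4Re dataspaces and handle free()/realloc() properly.
#include <cstddef>

namespace {
  char pool[1 << 20];     // 1 MiB of backing store
  std::size_t used = 0;
}

extern "C" void *bump_malloc(std::size_t size)
{
  size = (size + 15) & ~std::size_t(15);   // keep 16-byte alignment
  if (size > sizeof(pool) - used)
    return 0;                              // out of backing store
  void *p = pool + used;
  used += size;
  return p;
}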
@@ -28,6 +28,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

#include <stdbool.h>

void *__wrap_malloc();

// Common logic for version locks.
struct version_lock
{
@@ -402,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner)

  // No free node available, allocate a new one.
  struct btree_node *new_node
    = (struct btree_node *) (malloc (sizeof (struct btree_node)));
    = (struct btree_node *) (__wrap_malloc (sizeof (struct btree_node)));
  version_lock_initialize_locked_exclusive (
    &(new_node->version_lock)); // initialize the node in locked state.
  new_node->entry_count = 0;
@@ -28,6 +28,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

#include <stdbool.h>

void *__wrap_malloc(size_t);
void __wrap_free(void *);

// Common logic for version locks.
struct version_lock
{
@@ -362,7 +365,7 @@ btree_destroy (struct btree *t)
  while (t->free_list)
    {
      struct btree_node *next = t->free_list->content.children[0].child;
      free (t->free_list);
      __wrap_free (t->free_list);
      t->free_list = next;
    }
}
@@ -401,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner)

  // No free node available, allocate a new one.
  struct btree_node *new_node
    = (struct btree_node *) malloc (sizeof (struct btree_node));
    = (struct btree_node *) __wrap_malloc (sizeof (struct btree_node));
  // Initialize the node in locked state.
  version_lock_initialize_locked_exclusive (&new_node->version_lock);
  new_node->entry_count = 0;
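These extra declarations are needed because the files call the wrapper by name: without a visible prototype, a C99/C11 compiler would reject the call as an implicit function declaration. The fully prototyped form in the last hunk is the more precise one. A minimal sketch of the pattern (the helper function is hypothetical, not part of the surrounding unwinder code):

// Minimal sketch: without a prior declaration, the call below would be an
// "implicit declaration of function '__wrap_malloc'" error, which is why
// the hunks above add extern declarations near the top of the file.
#include <stddef.h>

void *__wrap_malloc(size_t);           /* added declaration */

static void *get_node(size_t n)
{
  return __wrap_malloc(n);             /* now well-formed */
}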
@@ -37,14 +37,16 @@
#include <new>

#if _GLIBCXX_HOSTED
using std::free;
using std::malloc;
//using std::free;
//using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset;
#else
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free(void *);
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t);
#endif

@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead.

#if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE   128
# define EMERGENCY_OBJ_COUNT  16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE   512
# define EMERGENCY_OBJ_COUNT  32
#else
# define EMERGENCY_OBJ_SIZE   1024
# define EMERGENCY_OBJ_COUNT  64
#endif

#ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT  4
#endif

namespace __gnu_cxx

@@ -85,20 +87,25 @@ namespace
  {
  public:
    pool();
    pool(char*, int);

    _GLIBCXX_NODISCARD void *allocate (std::size_t);
    void free (void *);

    bool in_pool (void *);

    bool mem_static;

  private:
    void init();

    struct free_entry {
      std::size_t size;
      free_entry *next;
    };
    struct allocated_entry {
      std::size_t size;
      char data[] __attribute__((aligned));
    };

    // A single mutex controlling emergency allocations.

@@ -119,15 +126,31 @@ namespace
    // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
    // to make this tunable.
    arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
                  + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
    arena = (char *)malloc (arena_size);
    mem_static = false;

    init();
  }

  pool::pool(char * storage, int size)
  {
    arena_size = size;
    arena = storage;
    mem_static = true;

    init();
  }

  void pool::init()
  {
    if (!arena)
      {
        // If the allocation failed go without an emergency pool.
        arena_size = 0;
        first_free_entry = NULL;
        return;
      }

    // Populate the free-list with a single entry covering the whole arena
    first_free_entry = reinterpret_cast <free_entry *> (arena);

@@ -145,46 +168,46 @@ namespace
    // And we need to at least hand out objects of the size of
    // a freelist entry.
    if (size < sizeof (free_entry))
      size = sizeof (free_entry);
    // And we need to align objects we hand out to the maximum
    // alignment required on the target (this really aligns the
    // tail which will become a new freelist entry).
    size = ((size + __alignof__ (allocated_entry::data) - 1)
            & ~(__alignof__ (allocated_entry::data) - 1));
    // Search for an entry of proper size on the freelist.
    free_entry **e;
    for (e = &first_free_entry;
         *e && (*e)->size < size;
         e = &(*e)->next)
      ;
    if (!*e)
      return NULL;
    allocated_entry *x;
    if ((*e)->size - size >= sizeof (free_entry))
      {
        // Split block if it is too large.
        free_entry *f = reinterpret_cast <free_entry *>
          (reinterpret_cast <char *> (*e) + size);
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        new (f) free_entry;
        f->next = next;
        f->size = sz - size;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = size;
        *e = f;
      }
    else
      {
        // Exact size match or too small overhead for a free entry.
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = sz;
        *e = next;
      }
    return &x->data;
  }

@@ -192,74 +215,77 @@ namespace
  {
    __gnu_cxx::__scoped_lock sentry(emergency_mutex);
    allocated_entry *e = reinterpret_cast <allocated_entry *>
      (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
    std::size_t sz = e->size;
    if (!first_free_entry
        || (reinterpret_cast <char *> (e) + sz
            < reinterpret_cast <char *> (first_free_entry)))
      {
        // If the free list is empty or the entry is before the
        // first element and cannot be merged with it add it as
        // the first free entry.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz;
        f->next = first_free_entry;
        first_free_entry = f;
      }
    else if (reinterpret_cast <char *> (e) + sz
             == reinterpret_cast <char *> (first_free_entry))
      {
        // Check if we can merge with the first free entry being right
        // after us.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz + first_free_entry->size;
        f->next = first_free_entry->next;
        first_free_entry = f;
      }
    else
      {
        // Else search for a free item we can merge with at its end.
        free_entry **fe;
        for (fe = &first_free_entry;
             (*fe)->next
             && (reinterpret_cast <char *> ((*fe)->next)
                 > reinterpret_cast <char *> (e) + sz);
             fe = &(*fe)->next)
          ;
        // If we can merge the next block into us do so and continue
        // with the cases below.
        if (reinterpret_cast <char *> (e) + sz
            == reinterpret_cast <char *> ((*fe)->next))
          {
            sz += (*fe)->next->size;
            (*fe)->next = (*fe)->next->next;
          }
        if (reinterpret_cast <char *> (*fe) + (*fe)->size
            == reinterpret_cast <char *> (e))
          // Merge with the freelist entry.
          (*fe)->size += sz;
        else
          {
            // Else put it after it which keeps the freelist sorted.
            free_entry *f = reinterpret_cast <free_entry *> (e);
            new (f) free_entry;
            f->size = sz;
            f->next = (*fe)->next;
            (*fe)->next = f;
          }
      }
  }

  bool pool::in_pool (void *ptr)
  {
    char *p = reinterpret_cast <char *> (ptr);
    return (p > arena
            && p < arena + arena_size);
  }

  pool emergency_pool;
  int const emergency_pool_size = EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
    + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception);
  char emergency_pool_storage[emergency_pool_size];
  pool emergency_pool{emergency_pool_storage, emergency_pool_size};
}

namespace __gnu_cxx

@@ -267,10 +293,11 @@ namespace __gnu_cxx
  void
  __freeres()
  {
    if (emergency_pool.arena)
    // why is this not a destructor?
    if (emergency_pool.arena and not emergency_pool.mem_static)
      {
        ::free(emergency_pool.arena);
        emergency_pool.arena = 0;
      }
  }
}

@@ -281,7 +308,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
  void *ret;

  thrown_size += sizeof (__cxa_refcounted_exception);
  ret = malloc (thrown_size);
  ret = __wrap_malloc (thrown_size);

  if (!ret)
    ret = emergency_pool.allocate (thrown_size);

@@ -312,7 +339,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
  __cxa_dependent_exception *ret;

  ret = static_cast<__cxa_dependent_exception*>
    (malloc (sizeof (__cxa_dependent_exception)));
    (__wrap_malloc(sizeof (__cxa_dependent_exception)));

  if (!ret)
    ret = static_cast <__cxa_dependent_exception*>
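Beyond rerouting the arena allocation through __wrap_malloc, this copy of the emergency allocator gains a second pool constructor that takes caller-provided storage, records it in a mem_static flag, and replaces the single default-constructed emergency_pool with one built over a static buffer, so the emergency exception pool no longer depends on a working heap at startup; __freeres correspondingly skips ::free() for static storage. A compact sketch of that pattern with hypothetical names (not the libstdc++ classes themselves):

// Sketch of the "static emergency pool" pattern, simplified.
#include <cstddef>

class Pool
{
public:
  Pool(char *storage, std::size_t size)
  : arena(storage), arena_size(size), mem_static(true) {}
  // allocate()/free() would manage a free list inside [arena, arena + size).
  char *arena;
  std::size_t arena_size;
  bool mem_static;   // true: storage is not owned, never passed to ::free()
};

constexpr std::size_t kPoolSize = 64 * 1024;
alignas(void*) static char pool_storage[kPoolSize];
static Pool emergency_pool{pool_storage, kPoolSize};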
@@ -37,14 +37,16 @@
#include <new>

#if _GLIBCXX_HOSTED
using std::free;
using std::malloc;
//using std::free;
//using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset;
#else
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free(void *);
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t);
#endif

@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead.

#if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE   128
# define EMERGENCY_OBJ_COUNT  16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE   512
# define EMERGENCY_OBJ_COUNT  32
#else
# define EMERGENCY_OBJ_SIZE   1024
# define EMERGENCY_OBJ_COUNT  64
#endif

#ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT  4
#endif

namespace __gnu_cxx

@@ -93,12 +95,12 @@ namespace

  private:
    struct free_entry {
      std::size_t size;
      free_entry *next;
    };
    struct allocated_entry {
      std::size_t size;
      char data[] __attribute__((aligned));
    };

    // A single mutex controlling emergency allocations.

@@ -119,15 +121,15 @@ namespace
    // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
    // to make this tunable.
    arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
                  + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
    arena = (char *)malloc (arena_size);
    arena = (char *)__wrap_malloc (arena_size);
    if (!arena)
      {
        // If the allocation failed go without an emergency pool.
        arena_size = 0;
        first_free_entry = NULL;
        return;
      }

    // Populate the free-list with a single entry covering the whole arena
    first_free_entry = reinterpret_cast <free_entry *> (arena);

@@ -145,46 +147,46 @@ namespace
    // And we need to at least hand out objects of the size of
    // a freelist entry.
    if (size < sizeof (free_entry))
      size = sizeof (free_entry);
    // And we need to align objects we hand out to the maximum
    // alignment required on the target (this really aligns the
    // tail which will become a new freelist entry).
    size = ((size + __alignof__ (allocated_entry::data) - 1)
            & ~(__alignof__ (allocated_entry::data) - 1));
    // Search for an entry of proper size on the freelist.
    free_entry **e;
    for (e = &first_free_entry;
         *e && (*e)->size < size;
         e = &(*e)->next)
      ;
    if (!*e)
      return NULL;
    allocated_entry *x;
    if ((*e)->size - size >= sizeof (free_entry))
      {
        // Split block if it is too large.
        free_entry *f = reinterpret_cast <free_entry *>
          (reinterpret_cast <char *> (*e) + size);
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        new (f) free_entry;
        f->next = next;
        f->size = sz - size;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = size;
        *e = f;
      }
    else
      {
        // Exact size match or too small overhead for a free entry.
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = sz;
        *e = next;
      }
    return &x->data;
  }

@@ -192,71 +194,71 @@ namespace
  {
    __gnu_cxx::__scoped_lock sentry(emergency_mutex);
    allocated_entry *e = reinterpret_cast <allocated_entry *>
      (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
    std::size_t sz = e->size;
    if (!first_free_entry
        || (reinterpret_cast <char *> (e) + sz
            < reinterpret_cast <char *> (first_free_entry)))
      {
        // If the free list is empty or the entry is before the
        // first element and cannot be merged with it add it as
        // the first free entry.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz;
        f->next = first_free_entry;
        first_free_entry = f;
      }
    else if (reinterpret_cast <char *> (e) + sz
             == reinterpret_cast <char *> (first_free_entry))
      {
        // Check if we can merge with the first free entry being right
        // after us.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz + first_free_entry->size;
        f->next = first_free_entry->next;
        first_free_entry = f;
      }
    else
      {
        // Else search for a free item we can merge with at its end.
        free_entry **fe;
        for (fe = &first_free_entry;
             (*fe)->next
             && (reinterpret_cast <char *> ((*fe)->next)
                 > reinterpret_cast <char *> (e) + sz);
             fe = &(*fe)->next)
          ;
        // If we can merge the next block into us do so and continue
        // with the cases below.
        if (reinterpret_cast <char *> (e) + sz
            == reinterpret_cast <char *> ((*fe)->next))
          {
            sz += (*fe)->next->size;
            (*fe)->next = (*fe)->next->next;
          }
        if (reinterpret_cast <char *> (*fe) + (*fe)->size
            == reinterpret_cast <char *> (e))
          // Merge with the freelist entry.
          (*fe)->size += sz;
        else
          {
            // Else put it after it which keeps the freelist sorted.
            free_entry *f = reinterpret_cast <free_entry *> (e);
            new (f) free_entry;
            f->size = sz;
            f->next = (*fe)->next;
            (*fe)->next = f;
          }
      }
  }

  bool pool::in_pool (void *ptr)
  {
    char *p = reinterpret_cast <char *> (ptr);
    return (p > arena
            && p < arena + arena_size);
  }

  pool emergency_pool;

@@ -269,8 +271,8 @@ namespace __gnu_cxx
  {
    if (emergency_pool.arena)
      {
        ::free(emergency_pool.arena);
        emergency_pool.arena = 0;
      }
  }
}

@@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
  void *ret;

  thrown_size += sizeof (__cxa_refcounted_exception);
  ret = malloc (thrown_size);
  ret = __wrap_malloc (thrown_size);

  if (!ret)
    ret = emergency_pool.allocate (thrown_size);

@@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
  __cxa_dependent_exception *ret;

  ret = static_cast<__cxa_dependent_exception*>
    (malloc (sizeof (__cxa_dependent_exception)));
    (__wrap_malloc (sizeof (__cxa_dependent_exception)));

  if (!ret)
    ret = static_cast <__cxa_dependent_exception*>
@@ -73,14 +73,16 @@
// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.

#if _GLIBCXX_HOSTED
using std::free;
using std::malloc;
//using std::free;
//using std::malloc;
using std::memset;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
#else
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free(void *);
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t);
#endif

@@ -91,16 +93,16 @@ using namespace __cxxabiv1;
// N.B. sizeof(std::bad_alloc) == sizeof(void*)
// and sizeof(std::runtime_error) == 2 * sizeof(void*)
// and sizeof(std::system_error) == 4 * sizeof(void*).
#define EMERGENCY_OBJ_SIZE 6

#ifdef __GTHREADS
// Assume that the number of concurrent exception objects scales with the
// processor word size, i.e., 16-bit systems are not likely to have hundreds
// of threads all simultaneously throwing on OOM conditions.
# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
# define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
#else
# define EMERGENCY_OBJ_COUNT 4
# define MAX_OBJ_COUNT 64
#endif

@@ -153,12 +155,12 @@ namespace

  private:
    struct free_entry {
      std::size_t size;
      free_entry *next;
    };
    struct allocated_entry {
      std::size_t size;
      char data[] __attribute__((aligned));
    };

#if _GLIBCXX_HOSTED

@@ -176,7 +178,7 @@ namespace
    // to implement in_pool.
#ifdef _GLIBCXX_EH_POOL_STATIC
    static constexpr std::size_t arena_size
      = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
    alignas(void*) char arena[arena_size];
#else
    char *arena = nullptr;

@@ -201,48 +203,48 @@ namespace
#endif
    const std::string_view ns_name = "glibcxx.eh_pool";
    std::pair<std::string_view, int> tunables[]{
      {"obj_size", 0}, {"obj_count", obj_count}
    };
    while (str)
      {
        if (*str == ':')
          ++str;

        if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
            && str[ns_name.size()] == '.')
          {
            str += ns_name.size() + 1;
            for (auto& t : tunables)
              if (!t.first.compare(0, t.first.size(), str, t.first.size())
                  && str[t.first.size()] == '=')
                {
                  str += t.first.size() + 1;
                  char* end;
                  unsigned long val = strtoul(str, &end, 0);
                  if ((*end == ':' || *end == '\0') && val <= INT_MAX)
                    t.second = val;
                  str = end;
                  break;
                }
          }
        str = strchr(str, ':');
      }
    obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
    if (tunables[0].second != 0)
      obj_size = tunables[0].second;
#endif // HOSTED
#endif // NOT_FOR_L4

    arena_size = buffer_size_in_bytes(obj_count, obj_size);
    if (arena_size == 0)
      return;
    arena = (char *)malloc (arena_size);
    arena = (char *)__wrap_malloc (arena_size);
    if (!arena)
      {
        // If the allocation failed go without an emergency pool.
        arena_size = 0;
        return;
      }
#endif // STATIC

    // Populate the free-list with a single entry covering the whole arena
@@ -261,46 +263,46 @@ namespace
    // And we need to at least hand out objects of the size of
    // a freelist entry.
    if (size < sizeof (free_entry))
      size = sizeof (free_entry);
    // And we need to align objects we hand out to the maximum
    // alignment required on the target (this really aligns the
    // tail which will become a new freelist entry).
    size = ((size + __alignof__ (allocated_entry::data) - 1)
            & ~(__alignof__ (allocated_entry::data) - 1));
    // Search for an entry of proper size on the freelist.
    free_entry **e;
    for (e = &first_free_entry;
         *e && (*e)->size < size;
         e = &(*e)->next)
      ;
    if (!*e)
      return NULL;
    allocated_entry *x;
    if ((*e)->size - size >= sizeof (free_entry))
      {
        // Split block if it is too large.
        free_entry *f = reinterpret_cast <free_entry *>
          (reinterpret_cast <char *> (*e) + size);
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        new (f) free_entry;
        f->next = next;
        f->size = sz - size;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = size;
        *e = f;
      }
    else
      {
        // Exact size match or too small overhead for a free entry.
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = sz;
        *e = next;
      }
    return &x->data;
  }

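The allocate path above is a classic first-fit scan with block splitting. The following sketch, not part of the patch, reproduces just that scan and split on a toy arena; it deliberately omits the allocated_entry header, the alignment rounding, and the mutex used by the real pool, and the request sizes are chosen as multiples of the list-entry alignment:

    // First-fit allocation with splitting, reduced to its core.
    #include <cstddef>
    #include <cstdio>
    #include <new>

    struct free_entry { std::size_t size; free_entry *next; };

    static free_entry *first_free_entry;

    static void *pool_alloc(std::size_t size)
    {
      if (size < sizeof (free_entry))
        size = sizeof (free_entry);
      // First fit: walk the address-sorted free list for a block that is big enough.
      free_entry **e;
      for (e = &first_free_entry; *e && (*e)->size < size; e = &(*e)->next)
        ;
      if (!*e)
        return nullptr;
      free_entry *blk = *e;
      if (blk->size - size >= sizeof (free_entry))
        {
          // Split: the tail of the block becomes a new, smaller free entry.
          auto *tail = new (reinterpret_cast<char *>(blk) + size) free_entry;
          tail->size = blk->size - size;
          tail->next = blk->next;
          *e = tail;
        }
      else
        *e = blk->next;  // Exact fit, or the remainder is too small to track.
      return blk;
    }

    int main()
    {
      alignas(free_entry) static char arena[256];
      first_free_entry = new (arena) free_entry;
      first_free_entry->size = sizeof (arena);
      first_free_entry->next = nullptr;
      void *a = pool_alloc(96);
      void *b = pool_alloc(96);
      std::printf("a=%p b=%p remaining=%zu\n", a, b, first_free_entry->size);
    }

Running it hands out two 96-byte blocks from the 256-byte arena and leaves a single 64-byte free entry, mirroring the split case in the hunk above.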
@@ -308,64 +310,64 @@ namespace
  {
    __scoped_lock sentry(emergency_mutex);
    allocated_entry *e = reinterpret_cast <allocated_entry *>
      (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
    std::size_t sz = e->size;
    if (!first_free_entry
        || (reinterpret_cast <char *> (e) + sz
            < reinterpret_cast <char *> (first_free_entry)))
      {
        // If the free list is empty or the entry is before the
        // first element and cannot be merged with it add it as
        // the first free entry.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz;
        f->next = first_free_entry;
        first_free_entry = f;
      }
    else if (reinterpret_cast <char *> (e) + sz
             == reinterpret_cast <char *> (first_free_entry))
      {
        // Check if we can merge with the first free entry being right
        // after us.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz + first_free_entry->size;
        f->next = first_free_entry->next;
        first_free_entry = f;
      }
    else
      {
        // Else search for a free item we can merge with at its end.
        free_entry **fe;
        for (fe = &first_free_entry;
             (*fe)->next
               && (reinterpret_cast <char *> (e) + sz
                   > reinterpret_cast <char *> ((*fe)->next));
             fe = &(*fe)->next)
          ;
        // If we can merge the next block into us do so and continue
        // with the cases below.
        if (reinterpret_cast <char *> (e) + sz
            == reinterpret_cast <char *> ((*fe)->next))
          {
            sz += (*fe)->next->size;
            (*fe)->next = (*fe)->next->next;
          }
        if (reinterpret_cast <char *> (*fe) + (*fe)->size
            == reinterpret_cast <char *> (e))
          // Merge with the freelist entry.
          (*fe)->size += sz;
        else
          {
            // Else put it after it which keeps the freelist sorted.
            free_entry *f = reinterpret_cast <free_entry *> (e);
            new (f) free_entry;
            f->size = sz;
            f->next = (*fe)->next;
            (*fe)->next = f;
          }
      }
  }

  inline bool pool::in_pool (void *ptr) const noexcept
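To make the branches of the free path above easier to follow: (1) if the free list is empty, or the returned block ends before the first free entry without touching it, the block simply becomes the new list head; (2) if it ends exactly where the first free entry starts, the two are merged into one larger head entry; (3) otherwise the address-sorted list is walked to the insertion point, the following entry is absorbed if it is adjacent, and the block then either extends the preceding free entry or is linked in after it. Keeping the list sorted by address is what makes these adjacency checks cheap. This summary restates the code above; it adds no behaviour.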
@@ -386,8 +388,8 @@ namespace __gnu_cxx
#ifndef _GLIBCXX_EH_POOL_STATIC
    if (emergency_pool.arena)
      {
        ::free(emergency_pool.arena);
        emergency_pool.arena = 0;
      }
#endif
  }
@@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
{
  thrown_size += sizeof (__cxa_refcounted_exception);

  void *ret = malloc (thrown_size);
  void *ret = __wrap_malloc (thrown_size);

#if USE_POOL
  if (!ret)
@@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept
extern "C" __cxa_dependent_exception*
__cxxabiv1::__cxa_allocate_dependent_exception() noexcept
{
  void *ret = malloc (sizeof (__cxa_dependent_exception));
  void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception));

#if USE_POOL
  if (!ret)

@@ -37,14 +37,16 @@
#include <new>

#if _GLIBCXX_HOSTED
using std::free;
using std::malloc;
//using std::free;
//using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset;
#else
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free(void *);
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t);
#endif
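The old/new line pairs above carry the substance of the memory-backend change: the stock eh_alloc.cc used std::malloc/std::free directly, while the patched version calls __wrap_malloc/__wrap_free so that GNU ld's --wrap option (added to LDFLAGS elsewhere in this commit) can route the calls to the new backend. A minimal, self-contained sketch of the --wrap mechanism, independent of L4Re (the file name and the byte counter are made up for illustration; only the --wrap=malloc flag itself appears in this commit):

    // Build (assumption, plain host toolchain shown for brevity):
    //   g++ wrap_demo.cc -Wl,--wrap=malloc -o wrap_demo
    #include <cstdio>
    #include <cstdlib>

    extern "C" void *__real_malloc(std::size_t);   // bound by the linker to the original malloc

    static std::size_t wrapped_bytes;

    extern "C" void *__wrap_malloc(std::size_t n)  // --wrap=malloc redirects malloc references here
    {
      wrapped_bytes += n;
      return __real_malloc(n);
    }

    int main()
    {
      void *p = std::malloc(32);                   // resolved against __wrap_malloc at link time
      std::free(p);
      std::printf("intercepted at least %zu bytes\n", wrapped_bytes);
    }

With the flag present, every undefined reference to malloc in the objects being linked lands in __wrap_malloc, and __real_malloc gives the wrapper access to the original allocator.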
@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead.

#if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE 128
# define EMERGENCY_OBJ_COUNT 16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE 512
# define EMERGENCY_OBJ_COUNT 32
#else
# define EMERGENCY_OBJ_SIZE 1024
# define EMERGENCY_OBJ_COUNT 64
#endif

#ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT 4
#endif

namespace __gnu_cxx
@@ -93,12 +95,12 @@ namespace

  private:
    struct free_entry {
      std::size_t size;
      free_entry *next;
    };
    struct allocated_entry {
      std::size_t size;
      char data[] __attribute__((aligned));
    };

    // A single mutex controlling emergency allocations.
@@ -119,15 +121,15 @@ namespace
    // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
    // to make this tunable.
    arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
                  + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
    arena = (char *)malloc (arena_size);
    arena = (char *)__wrap_malloc (arena_size);
    if (!arena)
      {
        // If the allocation failed go without an emergency pool.
        arena_size = 0;
        first_free_entry = NULL;
        return;
      }

    // Populate the free-list with a single entry covering the whole arena
    first_free_entry = reinterpret_cast <free_entry *> (arena);
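For scale: with the LP64 defaults above, the arena requested through __wrap_malloc is 1024 * 64 = 65536 bytes of emergency object space plus 64 * sizeof (__cxa_dependent_exception) bytes of header room, i.e. a little over 64 KiB reserved up front. The exact header size depends on the ABI structures, which this diff does not show.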
@@ -145,46 +147,46 @@ namespace
    // And we need to at least hand out objects of the size of
    // a freelist entry.
    if (size < sizeof (free_entry))
      size = sizeof (free_entry);
    // And we need to align objects we hand out to the maximum
    // alignment required on the target (this really aligns the
    // tail which will become a new freelist entry).
    size = ((size + __alignof__ (allocated_entry::data) - 1)
            & ~(__alignof__ (allocated_entry::data) - 1));
    // Search for an entry of proper size on the freelist.
    free_entry **e;
    for (e = &first_free_entry;
         *e && (*e)->size < size;
         e = &(*e)->next)
      ;
    if (!*e)
      return NULL;
    allocated_entry *x;
    if ((*e)->size - size >= sizeof (free_entry))
      {
        // Split block if it is too large.
        free_entry *f = reinterpret_cast <free_entry *>
          (reinterpret_cast <char *> (*e) + size);
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        new (f) free_entry;
        f->next = next;
        f->size = sz - size;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = size;
        *e = f;
      }
    else
      {
        // Exact size match or too small overhead for a free entry.
        std::size_t sz = (*e)->size;
        free_entry *next = (*e)->next;
        x = reinterpret_cast <allocated_entry *> (*e);
        new (x) allocated_entry;
        x->size = sz;
        *e = next;
      }
    return &x->data;
  }

@@ -192,71 +194,71 @@ namespace
  {
    __gnu_cxx::__scoped_lock sentry(emergency_mutex);
    allocated_entry *e = reinterpret_cast <allocated_entry *>
      (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
    std::size_t sz = e->size;
    if (!first_free_entry
        || (reinterpret_cast <char *> (e) + sz
            < reinterpret_cast <char *> (first_free_entry)))
      {
        // If the free list is empty or the entry is before the
        // first element and cannot be merged with it add it as
        // the first free entry.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz;
        f->next = first_free_entry;
        first_free_entry = f;
      }
    else if (reinterpret_cast <char *> (e) + sz
             == reinterpret_cast <char *> (first_free_entry))
      {
        // Check if we can merge with the first free entry being right
        // after us.
        free_entry *f = reinterpret_cast <free_entry *> (e);
        new (f) free_entry;
        f->size = sz + first_free_entry->size;
        f->next = first_free_entry->next;
        first_free_entry = f;
      }
    else
      {
        // Else search for a free item we can merge with at its end.
        free_entry **fe;
        for (fe = &first_free_entry;
             (*fe)->next
               && (reinterpret_cast <char *> ((*fe)->next)
                   > reinterpret_cast <char *> (e) + sz);
             fe = &(*fe)->next)
          ;
        // If we can merge the next block into us do so and continue
        // with the cases below.
        if (reinterpret_cast <char *> (e) + sz
            == reinterpret_cast <char *> ((*fe)->next))
          {
            sz += (*fe)->next->size;
            (*fe)->next = (*fe)->next->next;
          }
        if (reinterpret_cast <char *> (*fe) + (*fe)->size
            == reinterpret_cast <char *> (e))
          // Merge with the freelist entry.
          (*fe)->size += sz;
        else
          {
            // Else put it after it which keeps the freelist sorted.
            free_entry *f = reinterpret_cast <free_entry *> (e);
            new (f) free_entry;
            f->size = sz;
            f->next = (*fe)->next;
            (*fe)->next = f;
          }
      }
  }

  bool pool::in_pool (void *ptr)
  {
    char *p = reinterpret_cast <char *> (ptr);
    return (p > arena
            && p < arena + arena_size);
  }

  pool emergency_pool;
@@ -269,8 +271,8 @@ namespace __gnu_cxx
  {
    if (emergency_pool.arena)
      {
        ::free(emergency_pool.arena);
        emergency_pool.arena = 0;
      }
  }
}
@@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
  void *ret;

  thrown_size += sizeof (__cxa_refcounted_exception);
  ret = malloc (thrown_size);
  ret = __wrap_malloc (thrown_size);

  if (!ret)
    ret = emergency_pool.allocate (thrown_size);
@@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
  __cxa_dependent_exception *ret;

  ret = static_cast<__cxa_dependent_exception*>
    (malloc (sizeof (__cxa_dependent_exception)));
    (__wrap_malloc (sizeof (__cxa_dependent_exception)));

  if (!ret)
    ret = static_cast <__cxa_dependent_exception*>
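Both allocation entry points above follow the same pattern after the change: try the (now wrapped) heap first and fall back to the emergency arena only when __wrap_malloc returns NULL, so an exception object can still be produced when the heap is exhausted. The matching free path, which upstream GCC implements by checking in_pool() to decide between returning a block to the arena or to free(), lies outside the hunks shown here.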
@@ -994,6 +994,8 @@ static void *realloc(void *ptr, size_t size)

} // namespace umalloc

L4_BEGIN_DECLS

/**
 * Standard-compliant malloc implementation.
 *
@@ -1001,7 +1003,7 @@ static void *realloc(void *ptr, size_t size)
 *
 * \return Valid allocated memory or nullptr if the allocation failed.
 */
void *malloc(size_t size) noexcept
void *__wrap_malloc(size_t size) noexcept
{
  auto ptr = umalloc::alloc(size);
  if (!ptr)
@@ -1018,7 +1020,7 @@ void *malloc(size_t size) noexcept
 *
 * \return Valid allocated memory or nullptr if the allocation failed.
 */
void *aligned_alloc(size_t alignment, size_t size) noexcept
void *__wrap_aligned_alloc(size_t alignment, size_t size) noexcept
{
  auto ptr = umalloc::alloc(size, alignment);
  if (!ptr)
@@ -1032,7 +1034,7 @@ void *aligned_alloc(size_t alignment, size_t size) noexcept
 *
 * \param ptr Previously allocated valid memory.
 */
void free(void *ptr) noexcept
void __wrap_free(void *ptr) noexcept
{
  if (ptr)
    umalloc::dealloc(ptr);
@@ -1046,7 +1048,7 @@ void free(void *ptr) noexcept
 *
 * \return Valid allocated memory or nullptr if the allocation failed.
 */
void *calloc(size_t nmemb, size_t size) noexcept
void *__wrap_calloc(size_t nmemb, size_t size) noexcept
{
  // Avoid multiplication overflow.
  if ((size > 0) && (nmemb > std::numeric_limits<typeof(nmemb)>::max() / size))
@@ -1073,7 +1075,7 @@ void *calloc(size_t nmemb, size_t size) noexcept
 * \return Valid reallocated memory or nullptr if the reallocation failed.
 * (in which case the previously allocated memory is not touched).
 */
void *realloc(void *ptr, size_t size) noexcept
void *__wrap_realloc(void *ptr, size_t size) noexcept
{
  if (!ptr)
    return malloc(size);
@@ -1084,3 +1086,5 @@ void *realloc(void *ptr, size_t size) noexcept

  return ptr;
}

L4_END_DECLS
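The guard in __wrap_calloc above rejects requests whose element count times element size would wrap around before any allocation is attempted. A small standalone illustration of the same comparison (the helper name is made up; only the check itself is taken from the code above):

    #include <cstddef>
    #include <cstdio>
    #include <limits>

    static bool calloc_size_ok(std::size_t nmemb, std::size_t size)
    {
      // Same idea as __wrap_calloc: reject when nmemb * size would overflow size_t.
      return size == 0 || nmemb <= std::numeric_limits<std::size_t>::max() / size;
    }

    int main()
    {
      std::printf("%d\n", calloc_size_ok(1u << 16, 1u << 16));                          // 1: fits
      std::printf("%d\n", calloc_size_ok(std::numeric_limits<std::size_t>::max(), 2));  // 0: would wrap
    }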
@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = lua
SRC_C = lua.c
REQUIRES_LIBS = lua libc_support_misc libc_be_fs_noop libc_be_sig

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
vpath %.c $(PKGDIR)/lib/contrib/src

include $(L4DIR)/mk/prog.mk

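The LDFLAGS additions in this and the following Makefiles are what make the __wrap_* definitions take effect: with --wrap=malloc --wrap=free --wrap=realloc the linker resolves a program's malloc/free/realloc references to __wrap_malloc/__wrap_free/__wrap_realloc (see the sketch next to the eh_alloc.cc changes above). Programs that also need aligned_alloc and calloc intercepted add those two flags as well, as the Makefile further down that lists libumalloc does; presumably the wrapped implementations come from the libumalloc/libc_be_mem backend introduced by this commit.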
@@ -10,6 +10,8 @@ $(GENERAL_D_LOC): $(PKGDIR)/lib/build/Makefile

PKGNAME_DIRNAME := lua-c++

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

# Difference to other version

# WARNINGS EXCEPTION: CFLAGS is used here for C++ files (instead of the usual

@@ -25,6 +25,7 @@ CAN_PIE_arm := y
CAN_PIE_arm64 := y
BID_CAN_PIE = $(CAN_PIE_$(ARCH))

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc --wrap=aligned_alloc --wrap=calloc
REQUIRES_LIBS := libkproxy libloader libsigma0 \
                 cxx_io cxx_libc_io libsupc++_minimal \
                 libc_minimal libc_minimal_l4re libumalloc

@@ -6,4 +6,6 @@ SRC_CC = ned-prompt.cc
REQUIRES_LIBS := readline
DEPENDS_PKGS := readline

LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

include $(L4DIR)/mk/prog.mk

@@ -14,5 +14,6 @@ SRC_CC += lua_sleep.cc
REQUIRES_LIBS := libloader lua++ libc_support_misc cxx_libc_io cxx_io

DEFAULT_HEAP_SIZE := 0x20000
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

include $(L4DIR)/mk/prog.mk

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../..
TARGET = backtracer
SRC_CC = backtracer.cc
REQUIRES_LIBS = stdlibs libunwind libstdc++
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

include $(L4DIR)/mk/prog.mk

@@ -10,5 +10,6 @@ LDFLAGS +=
#CPPFLAGS += -fPIC

REQUIRES_LIBS := libloader libkproxy cxx_libc_io cxx_io
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

include $(L4DIR)/mk/prog.mk

@@ -1,4 +1,4 @@
provides: libmag mag-input-libinput mag-input-event mag-client_fb mag-mag_client mag-session_manager
requires: l4re libc stdlibs-sh input l4util mag-gfx libstdc++
          lua++
          lua++ libc_be_mem
Maintainer: warg@os.inf.tu-dresden.de

@@ -15,7 +15,7 @@ STATIC_PLUGINS += mag-input-event
STATIC_PLUGINS += mag-client_fb
STATIC_PLUGINS += mag-mag_client

REQUIRES_LIBS:= libsupc++ libdl mag-gfx lua++ cxx_libc_io cxx_io
REQUIRES_LIBS:= libsupc++ libdl mag-gfx lua++ cxx_libc_io cxx_io libc_be_mem libstdc++
REQUIRES_LIBS += $(STATIC_PLUGINS)
#LDFLAGS += --export-dynamic

@@ -1,4 +1,4 @@
provides: rtc rtc_libc_be
requires: stdlibs libio cxx_libc_io cxx_io libstdc++
requires: stdlibs libio cxx_libc_io cxx_io libstdc++ libc_be_mem
optional: drivers-frst i2c-server
Maintainer: adam.lackorzynski@kernkonzept.com

@@ -9,6 +9,6 @@ SRC_CC_arm64-l4f = pl031.cc
SRC_CC = main.cc
SRC_CC-$(CONFIG_RTC_DS3231) += ds3231.cc
SRC_CC-$(CONFIG_RTC_PCF85063A) += pcf85063a.cc
REQUIRES_LIBS = libio cxx_libc_io cxx_io libstdc++
REQUIRES_LIBS = libio cxx_libc_io cxx_io libc_be_mem libstdc++

include $(L4DIR)/mk/prog.mk

@@ -5,6 +5,7 @@ TARGET = l4vio_switch

REQUIRES_LIBS = libstdc++ l4virtio
REQUIRES_LIBS-$(CONFIG_VNS_IXL) += ixl
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

SRC_CC-$(CONFIG_VNS_PORT_FILTER) += filter.cc