Added memory backend

Updated by Viktor Reusch for the new l4re-base-25.08.0.

Co-authored-by: vreusch <viktor.reusch@barkhauseninstitut.org>
Martin Kuettler authored on 2023-10-23 10:35:55 +02:00, committed by vreusch
parent d8ad183432
commit 20d8c2c149
87 changed files with 2161 additions and 2012 deletions
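
Aside: the pattern used throughout this commit is to add the new libc_be_mem backend (plus libstdc++) to each program's REQUIRES_LIBS, and to reroute the C allocator entry points with GNU ld's --wrap option where a binary must not use the default allocator. The sketch below only illustrates the generic --wrap mechanism; the wrapper body is made up for illustration and is not the allocator this commit introduces.

// With "LDFLAGS += --wrap=malloc", the linker redirects every undefined
// reference to malloc to __wrap_malloc and makes the original definition
// reachable as __real_malloc.
#include <cstddef>
#include <cstdio>

extern "C" void *__real_malloc(size_t size);   // resolved by the linker

extern "C" void *__wrap_malloc(size_t size)
{
  std::printf("malloc(%zu) went through the wrapper\n", size);
  return __real_malloc(size);                  // forward to the real allocator
}

(Link with "-Wl,--wrap=malloc" for the redirection to take effect.)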

View File

@@ -1,3 +1,3 @@
-requires: xyz
+requires: libstdc++ libc_be_mem xyz
 provides: abc
 maintainer: your@email.example.com

View File

@@ -8,6 +8,6 @@ SRC_C = main.c
 SRC_CC =
 # list requirements of your program here
-REQUIRES_LIBS =
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -10,4 +10,6 @@ SRC_CC-$(CONFIG_CONS_USE_ASYNC_FE) += async_vcon_fe.cc
 REQUIRES_LIBS = libstdc++ cxx_libc_io cxx_io
 REQUIRES_LIBS-$(CONFIG_CONS_USE_ASYNC_FE) = libpthread
+LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,2 +1,2 @@
-requires: stdlibs
+requires: stdlibs libstdc++ libc_be_mem
 Maintainer: adam@os.inf.tu-dresden.de

View File

@@ -4,6 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_clntsrv-server ex_clntsrv-client
 SRC_CC_ex_clntsrv-server = server.cc
 SRC_CC_ex_clntsrv-client = client.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,3 +1,5 @@
+requires: libstdc++ libc_be_mem
 # color, fractal, spectrum
 optional: l4re_c-util

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_fb_spectrum_c ex_fb_spectrum_cc
 SRC_CC_ex_fb_spectrum_cc = spectrum.cc
 SRC_C_ex_fb_spectrum_c = spectrum_c.c
-REQUIRES_LIBS = libevent l4re_c-util
+REQUIRES_LIBS = libevent l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,4 +1,4 @@
-requires: stdlibs
+requires: stdlibs libstdc++ libc_be_mem
 # input, led, uart
 optional: libstdc++ libio-vbus

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_gpio_input
 SRC_CC = gpio_input.cc
-REQUIRES_LIBS = libstdc++ libio-vbus
+REQUIRES_LIBS = libio-vbus libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_gpio_led
 SRC_CC = gpio_led.cc
-REQUIRES_LIBS = libstdc++ libio-vbus
+REQUIRES_LIBS = libio-vbus libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,7 +1,7 @@
 PKGDIR ?= ..
 L4DIR ?= $(PKGDIR)/../../..
-REQUIRES_LIBS = libstdc++ libio-vbus drivers_uart libio
+REQUIRES_LIBS = libio-vbus drivers_uart libio libc_be_mem libstdc++
 TARGET = rpi_uart
 SRC_CC = main.cc

View File

@@ -1,4 +1,4 @@
-requires: stdlibs
+requires: stdlibs libstdc++ libc_be_mem
 # boost, cppunit, stdthread
 optional: libstdc++

View File

@@ -4,6 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = inputtst
 SRC_C = main.c
 DEPENDS_PKGS = input
-REQUIRES_LIBS = input
+REQUIRES_LIBS = input libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_l4re_ma+rm_cc
 SRC_CC = ma+rm.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_periodic_task
 SRC_CC = main.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,5 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_l4re_physmem_cc
 SRC_CC = physmem.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_l4re_ds_clnt ex_l4re_ds_srv
 SRC_CC_ex_l4re_ds_clnt = ds_clnt.cc
 SRC_CC_ex_l4re_ds_srv = ds_srv.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_l4re_ma+rm_c
 SRC_C = ma+rm.c
-REQUIRES_LIBS = l4re_c-util
+REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,6 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_smap-server ex_smap-client
 SRC_CC_ex_smap-server = server.cc
 SRC_CC_ex_smap-client = client.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = uclibc_thread_safe
 SRC_CC = main.cc
-REQUIRES_LIBS = libpthread
+REQUIRES_LIBS = libpthread libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_libio
 SRC_C = main.c
-REQUIRES_LIBS = libio libirq
+REQUIRES_LIBS = libio libirq libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 SRC_C_ex_libirq_async = async_isr.c
 SRC_C_ex_libirq_loop = loop.c
 TARGET = ex_libirq_async ex_libirq_loop
-REQUIRES_LIBS = libirq libio
+REQUIRES_LIBS = libirq libio libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 SRC_C = main.c
 TARGET = rtc_test
-DEPENDS_PKGS = rtc
-REQUIRES_LIBS = rtc
+DEPENDS_PKGS = rtc libc_be_mem libstdc++
+REQUIRES_LIBS = rtc libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_shmc
 SRC_C = prodcons.c
-REQUIRES_LIBS = shmc libpthread
+REQUIRES_LIBS = shmc libpthread libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,3 +1,5 @@
+requires: libc_be_mem
 # cyclichpet, eb_leds, eb_leds_gfx, hpet
 optional: libio

View File

@@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = cat
 SRC_C = cat.c
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_eb_leds
 SRC_C = eb_leds.c
-REQUIRES_LIBS = libio
+REQUIRES_LIBS = libio libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = reboot
 SRC_C = main.c
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_hello_shared
 MODE = shared
 SRC_C = main.c
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,4 +1,4 @@
-requires: stdlibs
+requires: stdlibs libc_be_mem
 # aliens, isr, singlestep, start-with-exc, utcb-ipc, vm-tz
 optional: l4re_c-util

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_aliens
 SRC_C = main.c
-REQUIRES_LIBS = l4re_c-util
+REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = dump_obj
 SRC_CC = dump_obj.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_ipc1
 SRC_C = ipc_example.c
-REQUIRES_LIBS = libpthread
+REQUIRES_LIBS = libpthread libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_isr
 SRC_C = main.c
-REQUIRES_LIBS = l4re_c-util
+REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_map_irq_client ex_map_irq_server
 SRC_CC_ex_map_irq_client = client.cc
 SRC_CC_ex_map_irq_server = server.cc
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_thread_migrate ex_thread_migrate_irq
 SRC_CC_ex_thread_migrate = thread_migrate.cc
 SRC_CC_ex_thread_migrate_irq = thread_migrate_irq.cc
-REQUIRES_LIBS = libpthread
+REQUIRES_LIBS = libpthread libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_singlestep
 SYSTEMS = x86-l4f amd64-l4f
 SRC_C = main.c
-REQUIRES_LIBS = l4re_c-util
+REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_start-with-exc
 SYSTEMS = x86-l4f arm-l4f arm64-l4f
 SRC_C = main.c
-REQUIRES_LIBS = l4re_c-util
+REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,6 +1,9 @@
 PKGDIR ?= ..
 L4DIR ?= $(PKGDIR)/../../..
+REQUIRES_LIBS = libc_be_mem libstdc++
+DEPENDS_PKGS = $(REQUIRES_LIBS)
 TARGET = ex_timeouts
 SRC_C = main.c

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_uirq
 SRC_CC = ex_uirq.cc
-REQUIRES_LIBS = libstdc++ libpthread
+REQUIRES_LIBS = libc_be_mem libstdc++ libpthread
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -2,7 +2,7 @@ PKGDIR ?= ..
 L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_utcb_ipc
-REQUIRES_LIBS = l4re_c-util
+REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 SRC_C = main.c

View File

@@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../..
 TARGET = ex_vcpu
 SRC_CC = vcpu.cc
-REQUIRES_LIBS = libvcpu cxx_io cxx_libc_io
+REQUIRES_LIBS = libvcpu cxx_io cxx_libc_io libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -5,7 +5,7 @@ TARGET = ex_vmtest
 SYSTEMS = x86-l4f amd64-l4f
 SRC_S = guest.S
 SRC_CC = vm.cc vmx.cc svm.cc main.cc
-REQUIRES_LIBS = libvcpu l4util
+REQUIRES_LIBS = libvcpu l4util libc_be_mem libstdc++
 DEPENDS_PKGS = $(REQUIRES_LIBS)
 include $(L4DIR)/mk/prog.mk

View File

@@ -1,2 +1,2 @@
-requires: libdrivers-lcd x86emu_int10 stdlibs libio-vbus
+requires: libdrivers-lcd x86emu_int10 stdlibs libio-vbus libstdc++ libc_be_mem
 maintainer: adam@os.inf.tu-dresden.de

View File

@@ -11,7 +11,7 @@ REQUIRES_LIBS_x86-l4f = x86emu_int10
 REQUIRES_LIBS_amd64-l4f = x86emu_int10
 REQUIRES_LIBS_arm-l4f = libdrivers-lcd
 REQUIRES_LIBS_arm64-l4f = libdrivers-lcd
-REQUIRES_LIBS = libc_support_misc libio-vbus
+REQUIRES_LIBS = libc_support_misc libio-vbus libc_be_mem libstdc++
 DEFINES = -DSPLASHNAME=gimp_image \
           -DSPLASHNAME_RUN_LENGTH_DECODE=GIMP_IMAGE_RUN_LENGTH_DECODE

View File

@@ -1,2 +1,2 @@
-requires: stdlibs
+requires: stdlibs libstdc++ libc_be_mem
 Maintainer: adam@os.inf.tu-dresden.de

View File

@@ -4,4 +4,6 @@ L4DIR ?= $(PKGDIR)/../..
 TARGET = hello
 SRC_C = main.c
+REQUIRES_LIBS = libc_be_mem libstdc++
 include $(L4DIR)/mk/prog.mk

View File

@@ -2,6 +2,8 @@
 #MODE := shared
 TARGET = io
+LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
 DEFINES-$(CONFIG_L4IO_PCIID_DB) += -DCONFIG_L4IO_PCIID_DB
 SUBDIRS = drivers

View File

@@ -1,2 +1,2 @@
-requires: stdlibs
+requires: stdlibs libstdc++ libc_be_mem
 Maintainer: adam@l4re.org

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../..
 TARGET = ipcbench ipcbench_parallel \
          ipcbench_client ipcbench_server \
          syscallbench syscallbench_parallel
-REQUIRES_LIBS = libpthread
+REQUIRES_LIBS = libc_be_mem libstdc++ libpthread
 SRC_C_ipcbench = ipcbench.c ipc_common.c
 SRC_C_ipcbench_parallel = ipcbench_parallel.c ipc_common.c
 SRC_C_ipcbench_client = ipcclient.c

View File

@@ -29,7 +29,7 @@ DEFINES += -DL4_CXX_NO_EXCEPTION_BACKTRACE -DL4_LOADER_RELOC_BASE=$(DEFAULT_RELO
 REQUIRES_LIBS := cxx_io cxx_libc_io libc_minimal libsupc++_minimal libloader \
                  libc_minimal_l4re libumalloc
+LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc --wrap=aligned_alloc --wrap=calloc
 CXXFLAGS += $(CXXFLAGS_LOW_LEVEL)
 include $(L4DIR)/mk/prog.mk
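
Aside: for this binary the wrap list also covers aligned_alloc and calloc, presumably so that these entry points are routed to the backend as well and nothing slips through to the minimal libc. A wrapped calloc is typically a thin layer over the wrapped malloc with an overflow-checked size computation, roughly as sketched below (illustrative only, not code from this commit; __wrap_malloc is assumed to be provided elsewhere, e.g. by the memory backend).

#include <cerrno>
#include <cstddef>
#include <cstring>

extern "C" void *__wrap_malloc(size_t size);   // assumed to exist elsewhere

extern "C" void *__wrap_calloc(size_t nmemb, size_t lsize)
{
  size_t size = nmemb * lsize;
  // Guard against integer overflow; nmemb == 0 falls through to malloc(0).
  if (nmemb && lsize != size / nmemb)
    {
      errno = ENOMEM;
      return nullptr;
    }
  void *p = __wrap_malloc(size);
  if (p)
    std::memset(p, 0, size);                   // calloc returns zeroed memory
  return p;
}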

View File

@@ -24,23 +24,26 @@ struct Vfs_init
   cxx::Static_container<L4Re::Vfs::File_factory_t<L4Re::Namespace, L4Re::Core::Ns_dir> > ns_dir;
   cxx::Static_container<L4Re::Vfs::File_factory_t<L4::Vcon, L4Re::Core::Vcon_stream> > vcon_stream;
+  // This is part of an ugly hack to avoid calling malloc here.
+  char fac_items[3*sizeof(Vfs::File_factory_item)];
   Vfs_init()
   {
     vfs.construct();
     __rtld_l4re_env_posix_vfs_ops = vfs;
     ns_dir.construct();
     auto ns_ptr = cxx::ref_ptr(ns_dir.get());
-    vfs->register_file_factory(ns_ptr);
+    vfs->register_file_factory(ns_ptr, &fac_items[0]);
     ns_ptr.release(); // prevent deletion of static object
     ro_file.construct();
     auto ro_ptr = cxx::ref_ptr(ro_file.get());
-    vfs->register_file_factory(ro_ptr);
+    vfs->register_file_factory(ro_ptr, &fac_items[sizeof(Vfs::File_factory_item)]);
     ro_ptr.release(); // prevent deletion of static object
     vcon_stream.construct();
     auto vcon_ptr = cxx::ref_ptr(vcon_stream.get());
-    vfs->register_file_factory(vcon_ptr);
+    vfs->register_file_factory(vcon_ptr, &fac_items[2*sizeof(Vfs::File_factory_item)]);
     vcon_ptr.release(); // prevent deletion of static object
   }
 };
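
Aside: the fac_items array above is raw storage reserved inside the Vfs_init object; each register_file_factory call is handed one slot of it so that registering the built-in factories does not touch malloc during early start-up. The following self-contained sketch shows the same idea (static backing store plus placement new); the types are illustrative, not the L4Re ones, and the sketch adds alignas, which a plain char array does not guarantee by itself.

#include <cstddef>
#include <new>

struct Node
{
  int proto;
  Node *next = nullptr;
  explicit Node(int p) : proto(p) {}
};

// Static, suitably aligned backing store for up to three registry nodes.
alignas(Node) static unsigned char storage[3 * sizeof(Node)];
static Node *head = nullptr;

// Construct a Node in the caller-provided slot and push it onto an
// intrusive singly linked list -- no heap allocation involved.
static int register_node(int proto, void *slot)
{
  if (!slot)
    return -1;
  Node *n = new (slot) Node(proto);
  n->next = head;
  head = n;
  return 0;
}

int main()
{
  register_node(1, &storage[0 * sizeof(Node)]);
  register_node(2, &storage[1 * sizeof(Node)]);
  register_node(3, &storage[2 * sizeof(Node)]);
}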

View File

@@ -134,6 +134,7 @@ public:
   L4Re::Vfs::File_system_list file_system_list() noexcept override;
   int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
+  int register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f, void *x) noexcept;
   int unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept override;
   Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(int proto) noexcept override;
   Ref_ptr<L4Re::Vfs::File_factory> get_file_factory(char const *proto_name) noexcept override;
@@ -144,14 +145,6 @@ public:
   void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); }
   void free(void *m) noexcept override { Vfs_config::free(m); }
-private:
-  Root_mount_tree _root_mount;
-  L4Re::Core::Env_dir _root;
-  Ref_ptr<L4Re::Vfs::File> _cwd;
-  Fd_store fds;
-  L4Re::Vfs::File_system *_fs_registry;
   struct File_factory_item : cxx::H_list_item_t<File_factory_item>
   {
     cxx::Ref_ptr<L4Re::Vfs::File_factory> f;
@@ -163,6 +156,14 @@ private:
     File_factory_item &operator = (File_factory_item const &) = delete;
   };
+private:
+  Root_mount_tree _root_mount;
+  L4Re::Core::Env_dir _root;
+  Ref_ptr<L4Re::Vfs::File> _cwd;
+  Fd_store fds;
+  L4Re::Vfs::File_system *_fs_registry;
   cxx::H_list_t<File_factory_item> _file_factories;
   l4_addr_t _anon_offset;
@@ -272,6 +273,20 @@ Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
   return 0;
 }
+int
+Vfs::register_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f, void *x) noexcept
+{
+  if (!f)
+    return -EINVAL;
+  if (!x)
+    return -ENOMEM;
+  auto ff = new (x, cxx::Nothrow()) File_factory_item(f);
+  _file_factories.push_front(ff);
+  return 0;
+}
 int
 Vfs::unregister_file_factory(cxx::Ref_ptr<L4Re::Vfs::File_factory> f) noexcept
 {
@@ -740,7 +755,7 @@ Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_of
   rm_flags |= Rm::F::In_area;
   // Make sure to remove old mappings residing at the respective address
   // range. If none exists, we are fine as well, allowing us to ignore
   // ENOENT here.
   err = munmap_regions(start, len);

View File

@@ -14,3 +14,5 @@ CXXFLAGS += -fvisibility=hidden
 # No exception information as unwinder code might use malloc and friends
 DEFINES += -DNOT_IN_libc -DL4_NO_RTTI
 CXXFLAGS += -include libc-symbols.h -fno-exceptions -fno-rtti
+LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc

View File

@@ -21,22 +21,22 @@
 extern int weak_function __libc_free_aligned(void *ptr) attribute_hidden;
 #ifdef L_malloc
-void *malloc(size_t size)
+void *__wrap_malloc(size_t size)
 {
     void *result;
     if (unlikely(size == 0)) {
         size++;
     }
     /* prevent Undefined Behaviour for pointer arithmetic (substract) of too big pointers
      * see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303
      * No need to check for size + sizeof(size_t) integer overflow since we already check for PTRDIFF_MAX
      */
     if (unlikely(size > PTRDIFF_MAX)) {
         __set_errno(ENOMEM);
         return 0;
     }
 #ifdef __ARCH_USE_MMU__
 # define MMAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS
@@ -44,74 +44,74 @@ void *malloc(size_t size)
 # define MMAP_FLAGS MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED
 #endif
     result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE,
                   MMAP_FLAGS, 0, 0);
     if (result == MAP_FAILED) {
         __set_errno(ENOMEM);
         return 0;
     }
     * (size_t *) result = size;
     return(result + sizeof(size_t));
 }
 #endif
 #ifdef L_calloc
 void * calloc(size_t nmemb, size_t lsize)
 {
     void *result;
     size_t size=lsize * nmemb;
     /* guard vs integer overflow, but allow nmemb
      * to fall through and call malloc(0) */
     if (nmemb && lsize != (size / nmemb)) {
         __set_errno(ENOMEM);
         return NULL;
     }
     result = malloc(size);
 #ifndef __ARCH_USE_MMU__
     /* mmap'd with MAP_UNINITIALIZED, we have to blank memory ourselves */
     if (result != NULL) {
         memset(result, 0, size);
     }
 #endif
     return result;
 }
 #endif
 #ifdef L_realloc
-void *realloc(void *ptr, size_t size)
+void *__wrap_realloc(void *ptr, size_t size)
 {
     void *newptr = NULL;
     if (!ptr)
         return malloc(size);
     if (!size) {
         free(ptr);
         return malloc(0);
     }
     newptr = malloc(size);
     if (newptr) {
         size_t old_size = *((size_t *) (ptr - sizeof(size_t)));
         memcpy(newptr, ptr, (old_size < size ? old_size : size));
         free(ptr);
     }
     return newptr;
 }
 #endif
 #ifdef L_free
-void free(void *ptr)
+void __wrap_free(void *ptr)
 {
     if (unlikely(ptr == NULL))
         return;
     if (unlikely(__libc_free_aligned != NULL)) {
         if (__libc_free_aligned(ptr))
             return;
     }
     ptr -= sizeof(size_t);
     munmap(ptr, * (size_t *) ptr + sizeof(size_t));
 }
 #endif
@@ -119,84 +119,84 @@ void free(void *ptr)
 #include <bits/uClibc_mutex.h>
 __UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
 #define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
 #define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
 /* List of blocks allocated with memalign or valloc */
 struct alignlist
 {
     struct alignlist *next;
     __ptr_t aligned; /* The address that memaligned returned. */
     __ptr_t exact; /* The address that malloc returned. */
 };
 static struct alignlist *_aligned_blocks;
 /* Return memory to the heap. */
 int __libc_free_aligned(void *ptr)
 {
     struct alignlist *l;
     if (ptr == NULL)
         return 0;
     __MALLOC_LOCK;
     for (l = _aligned_blocks; l != NULL; l = l->next) {
         if (l->aligned == ptr) {
             /* Mark the block as free */
             l->aligned = NULL;
             ptr = l->exact;
             ptr -= sizeof(size_t);
             munmap(ptr, * (size_t *) ptr + sizeof(size_t));
             return 1;
         }
     }
     __MALLOC_UNLOCK;
     return 0;
 }
 void * memalign (size_t alignment, size_t size)
 {
     void * result;
     unsigned long int adj;
     if (unlikely(size > PTRDIFF_MAX)) {
         __set_errno(ENOMEM);
         return NULL;
     }
     if (unlikely((size + alignment - 1 < size) && (alignment != 0))) {
         __set_errno(ENOMEM);
         return NULL;
     }
     result = malloc (size + alignment - 1);
     if (result == NULL)
         return NULL;
     adj = (unsigned long int) ((unsigned long int) ((char *) result - (char *) NULL)) % alignment;
     if (adj != 0) {
         struct alignlist *l;
         __MALLOC_LOCK;
         for (l = _aligned_blocks; l != NULL; l = l->next)
             if (l->aligned == NULL)
                 /* This slot is free. Use it. */
                 break;
         if (l == NULL) {
             l = (struct alignlist *) malloc (sizeof (struct alignlist));
             if (l == NULL) {
                 free(result);
                 result = NULL;
                 goto DONE;
             }
             l->next = _aligned_blocks;
             _aligned_blocks = l;
         }
         l->exact = result;
         result = l->aligned = (char *) result + alignment - adj;
     DONE:
         __MALLOC_UNLOCK;
     }
     return result;
 }
 libc_hidden_def(memalign)
 #endif
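
Aside: this mmap-based variant keeps each allocation's length in a size_t header stored directly in front of the pointer returned to the caller, which is how __wrap_free and __wrap_realloc above recover the mapping size. A stripped-down sketch of the same size-prefix scheme (illustrative names, minimal error handling):

#include <cstddef>
#include <sys/mman.h>

extern "C" void *tiny_alloc(size_t size)
{
  void *block = mmap(nullptr, size + sizeof(size_t), PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (block == MAP_FAILED)
    return nullptr;
  *static_cast<size_t *>(block) = size;                // remember the size
  return static_cast<char *>(block) + sizeof(size_t);  // usable memory
}

extern "C" void tiny_free(void *ptr)
{
  if (!ptr)
    return;
  char *block = static_cast<char *>(ptr) - sizeof(size_t);
  size_t size = *reinterpret_cast<size_t *>(block);
  munmap(block, size + sizeof(size_t));                // unmap header + data
}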

View File

@@ -42,38 +42,38 @@ static int __malloc_trim(size_t pad, mstate av)
     if (extra > 0) {
         /*
            Only proceed if end of memory is where we last set it.
            This avoids problems if there were foreign sbrk calls.
         */
         current_brk = (char*)(MORECORE(0));
         if (current_brk == (char*)(av->top) + top_size) {
             /*
                Attempt to release memory. We ignore MORECORE return value,
                and instead call again to find out where new end of memory is.
                This avoids problems if first call releases less than we asked,
                of if failure somehow altered brk value. (We could still
                encounter problems if it altered brk in some very bad way,
                but the only thing we can do is adjust anyway, which will cause
                some downstream failure.)
             */
             MORECORE(-extra);
             new_brk = (char*)(MORECORE(0));
             if (new_brk != (char*)MORECORE_FAILURE) {
                 released = (long)(current_brk - new_brk);
                 if (released != 0) {
                     /* Success. Adjust top. */
                     av->sbrked_mem -= released;
                     set_head(av->top, (top_size - released) | PREV_INUSE);
                     check_malloc_state();
                     return 1;
                 }
             }
         }
     }
     return 0;
 }
@@ -129,8 +129,8 @@ static void malloc_init_state(mstate av)
     /* Establish circular links for normal bins */
     for (i = 1; i < NBINS; ++i) {
         bin = bin_at(av,i);
         bin->fd = bin->bk = bin;
     }
     av->top_pad = DEFAULT_TOP_PAD;
@@ -195,80 +195,80 @@ void attribute_hidden __malloc_consolidate(mstate av)
     */
     if (av->max_fast != 0) {
         clear_fastchunks(av);
         unsorted_bin = unsorted_chunks(av);
         /*
            Remove each chunk from fast bin and consolidate it, placing it
            then in unsorted bin. Among other reasons for doing this,
            placing in unsorted bin avoids needing to calculate actual bins
            until malloc is sure that chunks aren't immediately going to be
            reused anyway.
         */
         maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
         fb = &(av->fastbins[0]);
         do {
             if ( (p = *fb) != 0) {
                 *fb = 0;
                 do {
                     CHECK_PTR(p);
                     check_inuse_chunk(p);
                     nextp = REVEAL_PTR(&p->fd, p->fd);
                     /* Slightly streamlined version of consolidation code in free() */
                     size = p->size & ~PREV_INUSE;
                     nextchunk = chunk_at_offset(p, size);
                     nextsize = chunksize(nextchunk);
                     if (!prev_inuse(p)) {
                         prevsize = p->prev_size;
                         size += prevsize;
                         p = chunk_at_offset(p, -((long) prevsize));
                         unlink(p, bck, fwd);
                     }
                     if (nextchunk != av->top) {
                         nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                         set_head(nextchunk, nextsize);
                         if (!nextinuse) {
                             size += nextsize;
                             unlink(nextchunk, bck, fwd);
                         }
                         first_unsorted = unsorted_bin->fd;
                         unsorted_bin->fd = p;
                         first_unsorted->bk = p;
                         set_head(p, size | PREV_INUSE);
                         p->bk = unsorted_bin;
                         p->fd = first_unsorted;
                         set_foot(p, size);
                     }
                     else {
                         size += nextsize;
                         set_head(p, size | PREV_INUSE);
                         av->top = p;
                     }
                 } while ( (p = nextp) != 0);
             }
         } while (fb++ != maxfb);
     }
     else {
         malloc_init_state(av);
         check_malloc_state();
     }
 }
 /* ------------------------------ free ------------------------------ */
-void free(void* mem)
+void __wrap_free(void* mem)
 {
     mstate av;
@@ -284,7 +284,7 @@ void free(void* mem)
     /* free(0) has no effect */
     if (mem == NULL)
         return;
     __MALLOC_LOCK;
     av = get_malloc_state();
@@ -301,16 +301,16 @@ void free(void* mem)
     if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
 #if TRIM_FASTBINS
             /* If TRIM_FASTBINS set, don't place chunks
               bordering top into fastbins */
             && (chunk_at_offset(p, size) != av->top)
 #endif
        ) {
         set_fastchunks(av);
         fb = &(av->fastbins[fastbin_index(size)]);
         p->fd = PROTECT_PTR(&p->fd, *fb);
         *fb = p;
     }
     /*
@@ -318,82 +318,82 @@ void free(void* mem)
     */
     else if (!chunk_is_mmapped(p)) {
         set_anychunks(av);
         nextchunk = chunk_at_offset(p, size);
         nextsize = chunksize(nextchunk);
         /* consolidate backward */
         if (!prev_inuse(p)) {
             prevsize = p->prev_size;
             size += prevsize;
             p = chunk_at_offset(p, -((long) prevsize));
             unlink(p, bck, fwd);
         }
         if (nextchunk != av->top) {
             /* get and clear inuse bit */
             nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
             set_head(nextchunk, nextsize);
             /* consolidate forward */
             if (!nextinuse) {
                 unlink(nextchunk, bck, fwd);
                 size += nextsize;
             }
             /*
                Place the chunk in unsorted chunk list. Chunks are
                not placed into regular bins until after they have
                been given one chance to be used in malloc.
             */
             bck = unsorted_chunks(av);
             fwd = bck->fd;
             p->bk = bck;
             p->fd = fwd;
             bck->fd = p;
             fwd->bk = p;
             set_head(p, size | PREV_INUSE);
             set_foot(p, size);
             check_free_chunk(p);
         }
         /*
            If the chunk borders the current high end of memory,
            consolidate into top
         */
         else {
             size += nextsize;
             set_head(p, size | PREV_INUSE);
             av->top = p;
             check_chunk(p);
         }
         /*
            If freeing a large space, consolidate possibly-surrounding
            chunks. Then, if the total unused topmost memory exceeds trim
            threshold, ask malloc_trim to reduce top.
            Unless max_fast is 0, we don't know if there are fastbins
            bordering top, so we cannot tell for sure whether threshold
            has been reached unless fastbins are consolidated. But we
            don't want to consolidate on each free. As a compromise,
            consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
            is reached.
         */
         if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
             if (have_fastchunks(av))
                 __malloc_consolidate(av);
             if ((unsigned long)(chunksize(av->top)) >=
                     (unsigned long)(av->trim_threshold))
                 __malloc_trim(av->top_pad, av);
         }
     }
     /*
@@ -405,13 +405,13 @@ void free(void* mem)
     */
     else {
         size_t offset = p->prev_size;
         av->n_mmaps--;
         av->mmapped_mem -= (size + offset);
         munmap((char*)p - offset, size + offset);
     }
     __MALLOC_UNLOCK;
 }
 /* glibc compatibilty */
-weak_alias(free, __libc_free)
+weak_alias(__wrap_free, __libc_free)
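
Aside: uClibc's internal callers reach the allocator through the __libc_free alias, so after the rename the weak alias has to point at __wrap_free instead of free. Generically, such an alias can be expressed with the GCC/Clang alias attribute, as in the sketch below (names are illustrative; uClibc's weak_alias macro expands to essentially this).

#include <cstdio>

extern "C" void __wrap_free_demo(void *ptr)
{
  std::printf("releasing %p\n", ptr);
}

// Comparable in spirit to weak_alias(__wrap_free, __libc_free):
// __libc_free_demo becomes a weak symbol resolving to __wrap_free_demo.
extern "C" void __libc_free_demo(void *)
    __attribute__((weak, alias("__wrap_free_demo")));

int main()
{
  int x = 0;
  __libc_free_demo(&x);   // ends up in __wrap_free_demo via the alias
}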

View File

@@ -18,7 +18,7 @@
 /* ------------------------------ realloc ------------------------------ */
-void* realloc(void* oldmem, size_t bytes)
+void* __wrap_realloc(void* oldmem, size_t bytes)
 {
     mstate av;
@@ -48,10 +48,10 @@ void* realloc(void* oldmem, size_t bytes)
     /* Check for special cases. */
     if (! oldmem)
         return malloc(bytes);
     if (! bytes) {
         free (oldmem);
         return NULL;
     }
     checked_request2size(bytes, nb);
@@ -65,117 +65,117 @@ void* realloc(void* oldmem, size_t bytes)
     if (!chunk_is_mmapped(oldp)) {
         if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
             /* already big enough; split below */
             newp = oldp;
             newsize = oldsize;
         }
         else {
             next = chunk_at_offset(oldp, oldsize);
             /* Try to expand forward into top */
             if (next == av->top &&
                     (unsigned long)(newsize = oldsize + chunksize(next)) >=
                     (unsigned long)(nb + MINSIZE)) {
                 set_head_size(oldp, nb);
                 av->top = chunk_at_offset(oldp, nb);
                 set_head(av->top, (newsize - nb) | PREV_INUSE);
                 retval = chunk2mem(oldp);
                 goto DONE;
             }
             /* Try to expand forward into next chunk; split off remainder below */
             else if (next != av->top &&
                     !inuse(next) &&
                     (unsigned long)(newsize = oldsize + chunksize(next)) >=
                     (unsigned long)(nb)) {
                 newp = oldp;
                 unlink(next, bck, fwd);
             }
             /* allocate, copy, free */
             else {
                 newmem = malloc(nb - MALLOC_ALIGN_MASK);
                 if (newmem == 0) {
                     retval = 0; /* propagate failure */
                     goto DONE;
                 }
                 newp = mem2chunk(newmem);
                 newsize = chunksize(newp);
                 /*
                    Avoid copy if newp is next chunk after oldp.
                 */
                 if (newp == next) {
                     newsize += oldsize;
                     newp = oldp;
                 }
                 else {
                     /*
                        Unroll copy of <= 36 bytes (72 if 8byte sizes)
                        We know that contents have an odd number of
                        size_t-sized words; minimally 3.
                     */
                     copysize = oldsize - (sizeof(size_t));
                     s = (size_t*)(oldmem);
                     d = (size_t*)(newmem);
                     ncopies = copysize / sizeof(size_t);
                     assert(ncopies >= 3);
                     if (ncopies > 9)
                         memcpy(d, s, copysize);
                     else {
                         *(d+0) = *(s+0);
                         *(d+1) = *(s+1);
                         *(d+2) = *(s+2);
                         if (ncopies > 4) {
                             *(d+3) = *(s+3);
                             *(d+4) = *(s+4);
                             if (ncopies > 6) {
                                 *(d+5) = *(s+5);
                                 *(d+6) = *(s+6);
                                 if (ncopies > 8) {
                                     *(d+7) = *(s+7);
                                     *(d+8) = *(s+8);
                                 }
                             }
                         }
                     }
                     free(oldmem);
                     check_inuse_chunk(newp);
                     retval = chunk2mem(newp);
                     goto DONE;
                 }
             }
         }
         /* If possible, free extra space in old or extended chunk */
         assert((unsigned long)(newsize) >= (unsigned long)(nb));
         remainder_size = newsize - nb;
         if (remainder_size < MINSIZE) { /* not enough extra to split off */
             set_head_size(newp, newsize);
             set_inuse_bit_at_offset(newp, newsize);
         }
         else { /* split remainder */
             remainder = chunk_at_offset(newp, nb);
             set_head_size(newp, nb);
             set_head(remainder, remainder_size | PREV_INUSE);
             /* Mark remainder as inuse so free() won't complain */
             set_inuse_bit_at_offset(remainder, remainder_size);
             free(chunk2mem(remainder));
         }
         check_inuse_chunk(newp);
         retval = chunk2mem(newp);
         goto DONE;
     }
     /*
@@ -183,54 +183,54 @@ void* realloc(void* oldmem, size_t bytes)
     */
     else {
         size_t offset = oldp->prev_size;
         size_t pagemask = av->pagesize - 1;
         char *cp;
         unsigned long sum;
         /* Note the extra (sizeof(size_t)) overhead */
         newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
         /* don't need to remap if still within same page */
         if (oldsize == newsize - offset) {
             retval = oldmem;
             goto DONE;
         }
         cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
         if (cp != (char*)MORECORE_FAILURE) {
             newp = (mchunkptr)(cp + offset);
             set_head(newp, (newsize - offset)|IS_MMAPPED);
             assert(aligned_OK(chunk2mem(newp)));
             assert((newp->prev_size == offset));
             /* update statistics */
             sum = av->mmapped_mem += newsize - oldsize;
             if (sum > (unsigned long)(av->max_mmapped_mem))
                 av->max_mmapped_mem = sum;
             sum += av->sbrked_mem;
             if (sum > (unsigned long)(av->max_total_mem))
                 av->max_total_mem = sum;
             retval = chunk2mem(newp);
             goto DONE;
         }
         /* Note the extra (sizeof(size_t)) overhead. */
         if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
             newmem = oldmem; /* do nothing */
         else {
             /* Must alloc, copy, free. */
             newmem = malloc(nb - MALLOC_ALIGN_MASK);
             if (newmem != 0) {
                 memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
                 free(oldmem);
             }
         }
         retval = newmem;
     }
 DONE:
@@ -239,4 +239,4 @@ void* realloc(void* oldmem, size_t bytes)
 }
 /* glibc compatibilty */
-weak_alias(realloc, __libc_realloc)
+weak_alias(__wrap_realloc, __libc_realloc)

View File

@@ -28,9 +28,9 @@
static void static void
__free_to_heap (void *mem, struct heap_free_area **heap __free_to_heap (void *mem, struct heap_free_area **heap
#ifdef HEAP_USE_LOCKING #ifdef HEAP_USE_LOCKING
, __UCLIBC_MUTEX_TYPE *heap_lock , __UCLIBC_MUTEX_TYPE *heap_lock
#endif #endif
) )
{ {
size_t size; size_t size;
struct heap_free_area *fa; struct heap_free_area *fa;
@@ -42,7 +42,7 @@ __free_to_heap (void *mem, struct heap_free_area **heap
/* Normal free. */ /* Normal free. */
MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)", MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)",
(long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem)); (long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem));
size = MALLOC_SIZE (mem); size = MALLOC_SIZE (mem);
mem = MALLOC_BASE (mem); mem = MALLOC_BASE (mem);
@@ -73,45 +73,45 @@ __free_to_heap (void *mem, struct heap_free_area **heap
#ifdef MALLOC_USE_SBRK #ifdef MALLOC_USE_SBRK
/* Get the sbrk lock so that the two possible calls to sbrk below /* Get the sbrk lock so that the two possible calls to sbrk below
are guaranteed to be contiguous. */ are guaranteed to be contiguous. */
__malloc_lock_sbrk (); __malloc_lock_sbrk ();
/* When using sbrk, we only shrink the heap from the end. It would /* When using sbrk, we only shrink the heap from the end. It would
be possible to allow _both_ -- shrinking via sbrk when possible, be possible to allow _both_ -- shrinking via sbrk when possible,
and otherwise shrinking via munmap, but this results in holes in and otherwise shrinking via munmap, but this results in holes in
memory that prevent the brk from every growing back down; since memory that prevent the brk from every growing back down; since
we only ever grow the heap via sbrk, this tends to produce a we only ever grow the heap via sbrk, this tends to produce a
continuously growing brk (though the actual memory is unmapped), continuously growing brk (though the actual memory is unmapped),
which could eventually run out of address space. Note that which could eventually run out of address space. Note that
`sbrk(0)' shouldn't normally do a system call, so this test is `sbrk(0)' shouldn't normally do a system call, so this test is
reasonably cheap. */ reasonably cheap. */
if ((void *)end != sbrk (0)) if ((void *)end != sbrk (0))
{ {
MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)", MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
start, end, end - start); start, end, end - start);
__malloc_unlock_sbrk (); __malloc_unlock_sbrk ();
__heap_unlock (heap_lock); __heap_unlock (heap_lock);
return; return;
} }
#endif #endif
MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)", MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)",
start, end, end - start); start, end, end - start);
/* Remove FA from the heap. */ /* Remove FA from the heap. */
__heap_delete (heap, fa); __heap_delete (heap, fa);
if (__heap_is_empty (heap)) if (__heap_is_empty (heap))
/* We want to avoid the heap from losing all memory, so reserve /* We want to avoid the heap from losing all memory, so reserve
a bit. This test is only a heuristic -- the existance of a bit. This test is only a heuristic -- the existance of
another free area, even if it's smaller than another free area, even if it's smaller than
MALLOC_MIN_SIZE, will cause us not to reserve anything. */ MALLOC_MIN_SIZE, will cause us not to reserve anything. */
{ {
/* Put the reserved memory back in the heap; we assume that /* Put the reserved memory back in the heap; we assume that
MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
we use the latter unconditionally here. */ we use the latter unconditionally here. */
__heap_free (heap, (void *)start, MALLOC_MIN_SIZE); __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
start += MALLOC_MIN_SIZE; start += MALLOC_MIN_SIZE;
} }
#ifdef MALLOC_USE_SBRK #ifdef MALLOC_USE_SBRK
@@ -126,99 +126,99 @@ __free_to_heap (void *mem, struct heap_free_area **heap
# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__ # ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
/* Using the uClinux broken munmap, we have to only munmap blocks /* Using the uClinux broken munmap, we have to only munmap blocks
exactly as we got them from mmap, so scan through our list of exactly as we got them from mmap, so scan through our list of
mmapped blocks, and return them in order. */ mmapped blocks, and return them in order. */
MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...", MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...",
start, end - start); start, end - start);
prev_mmb = 0; prev_mmb = 0;
mmb = __malloc_mmapped_blocks; mmb = __malloc_mmapped_blocks;
while (mmb while (mmb
&& ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size) && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size)
<= end)) <= end))
{ {
MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]", MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]",
(unsigned)mmb, mmb_start, mmb_end - mmb_start); (unsigned)mmb, mmb_start, mmb_end - mmb_start);
if (mmb_start >= start if (mmb_start >= start
/* If the space between START and MMB_START is non-zero, but /* If the space between START and MMB_START is non-zero, but
too small to return to the heap, we can't unmap MMB. */ too small to return to the heap, we can't unmap MMB. */
&& (start == mmb_start && (start == mmb_start
|| mmb_start - start > HEAP_MIN_FREE_AREA_SIZE)) || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE))
{ {
struct malloc_mmb *next_mmb = mmb->next; struct malloc_mmb *next_mmb = mmb->next;
if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end) if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end)
/* There's too little space left at the end to deallocate /* There's too little space left at the end to deallocate
this block, so give up. */ this block, so give up. */
break; break;
MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]", MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]",
(unsigned)mmb, mmb_start, mmb_end - mmb_start); (unsigned)mmb, mmb_start, mmb_end - mmb_start);
if (mmb_start != start) if (mmb_start != start)
/* We're going to unmap a part of the heap that begins after /* We're going to unmap a part of the heap that begins after
start, so put the intervening region back into the heap. */ start, so put the intervening region back into the heap. */
{ {
MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]", MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]",
start, mmb_start - start); start, mmb_start - start);
__heap_free (heap, (void *)start, mmb_start - start); __heap_free (heap, (void *)start, mmb_start - start);
} }
MALLOC_MMB_DEBUG_INDENT (-1); MALLOC_MMB_DEBUG_INDENT (-1);
/* Unlink MMB from the list. */ /* Unlink MMB from the list. */
if (prev_mmb) if (prev_mmb)
prev_mmb->next = next_mmb; prev_mmb->next = next_mmb;
else else
__malloc_mmapped_blocks = next_mmb; __malloc_mmapped_blocks = next_mmb;
/* Start searching again from the end of this block. */ /* Start searching again from the end of this block. */
start = mmb_end; start = mmb_end;
/* Release the descriptor block we used. */ /* Release the descriptor block we used. */
free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock); free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
/* We have to unlock the heap before we recurse to free the mmb /* We have to unlock the heap before we recurse to free the mmb
descriptor, because we might be unmapping from the mmb descriptor, because we might be unmapping from the mmb
heap. */ heap. */
__heap_unlock (heap_lock); __heap_unlock (heap_lock);
/* Do the actual munmap. */ /* Do the actual munmap. */
munmap ((void *)mmb_start, mmb_end - mmb_start); munmap ((void *)mmb_start, mmb_end - mmb_start);
__heap_lock (heap_lock); __heap_lock (heap_lock);
# ifdef __UCLIBC_HAS_THREADS__ # ifdef __UCLIBC_HAS_THREADS__
/* In a multi-threaded program, it's possible that PREV_MMB has /* In a multi-threaded program, it's possible that PREV_MMB has
been invalidated by another thread when we released the been invalidated by another thread when we released the
heap lock to do the munmap system call, so just start over heap lock to do the munmap system call, so just start over
from the beginning of the list. It sucks, but oh well; from the beginning of the list. It sucks, but oh well;
it's probably not worth the bother to do better. */ it's probably not worth the bother to do better. */
prev_mmb = 0; prev_mmb = 0;
mmb = __malloc_mmapped_blocks; mmb = __malloc_mmapped_blocks;
# else # else
mmb = next_mmb; mmb = next_mmb;
# endif # endif
} }
else else
{ {
prev_mmb = mmb; prev_mmb = mmb;
mmb = mmb->next; mmb = mmb->next;
} }
MALLOC_MMB_DEBUG_INDENT (-1); MALLOC_MMB_DEBUG_INDENT (-1);
} }
if (start != end) if (start != end)
/* Hmm, well there's something we couldn't unmap, so put it back /* Hmm, well there's something we couldn't unmap, so put it back
into the heap. */ into the heap. */
{ {
MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]", MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]",
start, end - start); start, end - start);
__heap_free (heap, (void *)start, end - start); __heap_free (heap, (void *)start, end - start);
} }
/* Finally release the lock for good. */ /* Finally release the lock for good. */
__heap_unlock (heap_lock); __heap_unlock (heap_lock);
@@ -228,34 +228,34 @@ __free_to_heap (void *mem, struct heap_free_area **heap
# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */ # else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
/* MEM/LEN may not be page-aligned, so we have to page-align them, /* MEM/LEN may not be page-aligned, so we have to page-align them,
and return any left-over bits on the end to the heap. */ and return any left-over bits on the end to the heap. */
unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start); unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end); unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
/* We have to be careful that any left-over bits are large enough to /* We have to be careful that any left-over bits are large enough to
return. Note that we _don't check_ to make sure there's room to return. Note that we _don't check_ to make sure there's room to
grow/shrink the start/end by another page, we just assume that grow/shrink the start/end by another page, we just assume that
the unmap threshold is high enough so that this is always safe the unmap threshold is high enough so that this is always safe
(i.e., it should probably be at least 3 pages). */ (i.e., it should probably be at least 3 pages). */
if (unmap_start > start) if (unmap_start > start)
{ {
if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE) if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
unmap_start += MALLOC_PAGE_SIZE; unmap_start += MALLOC_PAGE_SIZE;
__heap_free (heap, (void *)start, unmap_start - start); __heap_free (heap, (void *)start, unmap_start - start);
} }
if (end > unmap_end) if (end > unmap_end)
{ {
if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE) if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
unmap_end -= MALLOC_PAGE_SIZE; unmap_end -= MALLOC_PAGE_SIZE;
__heap_free (heap, (void *)unmap_end, end - unmap_end); __heap_free (heap, (void *)unmap_end, end - unmap_end);
} }
/* Release the heap lock before we do the system call. */ /* Release the heap lock before we do the system call. */
__heap_unlock (heap_lock); __heap_unlock (heap_lock);
if (unmap_end > unmap_start) if (unmap_end > unmap_start)
/* Finally, actually unmap the memory. */ /* Finally, actually unmap the memory. */
munmap ((void *)unmap_start, unmap_end - unmap_start); munmap ((void *)unmap_start, unmap_end - unmap_start);
# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */ # endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
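The page-rounding above is easier to follow with concrete numbers. A small worked example, assuming 4 KiB pages and the usual bit-mask definitions of the rounding macros (an assumption; the real MALLOC_ROUND_UP_TO_PAGE_SIZE / MALLOC_ROUND_DOWN_TO_PAGE_SIZE definitions live in the malloc headers and are not shown in this commit):

/* Worked example only; the macros below are assumed stand-ins for the
 * MALLOC_ROUND_*_TO_PAGE_SIZE macros used above. */
#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define ROUND_UP_TO_PAGE(a)   (((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ROUND_DOWN_TO_PAGE(a) ((a) & ~(PAGE_SIZE - 1))

int main(void)
{
  unsigned long start = 0x12345, end = 0x16789;           /* freed region */
  unsigned long unmap_start = ROUND_UP_TO_PAGE (start);   /* 0x13000 */
  unsigned long unmap_end   = ROUND_DOWN_TO_PAGE (end);   /* 0x16000 */

  /* [start, unmap_start) and [unmap_end, end) are handed back to the heap
   * with __heap_free(); only [unmap_start, unmap_end) reaches munmap().
   * If one of the left-over edges were smaller than HEAP_MIN_FREE_AREA_SIZE,
   * the code above would widen it by a whole page instead. */
  printf ("heap keeps [0x%lx,0x%lx) and [0x%lx,0x%lx), munmap gets [0x%lx,0x%lx)\n",
          start, unmap_start, unmap_end, end, unmap_start, unmap_end);
  return 0;
}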
@@ -266,7 +266,7 @@ __free_to_heap (void *mem, struct heap_free_area **heap
} }
void void
free (void *mem) __wrap_free (void *mem)
{ {
free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock); free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock);
} }
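The rename of free to __wrap_free above (and of malloc and realloc in the hunks that follow) matches GNU ld's --wrap convention: when a program is linked with --wrap=free, every undefined reference to free is redirected to __wrap_free, while __real_free resolves to the original definition. A minimal, self-contained illustration of that mechanism, not taken from this commit (file name and messages are invented):

/* wrapdemo.c -- hypothetical example, not part of this commit.
 * Build:  cc -o wrapdemo wrapdemo.c -Wl,--wrap=malloc
 */
#include <stdio.h>
#include <stdlib.h>

/* With --wrap=malloc the linker resolves __real_malloc to the original
 * malloc and redirects every plain reference to malloc to __wrap_malloc. */
void *__real_malloc(size_t size);

void *__wrap_malloc(size_t size)
{
  fprintf(stderr, "malloc(%zu) went through the wrapper\n", size);
  return __real_malloc(size);
}

int main(void)
{
  char *p = malloc(32);   /* resolved to __wrap_malloc by the linker */
  free(p);                /* free is not wrapped in this example */
  return 0;
}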
@@ -59,9 +59,9 @@ __UCLIBC_MUTEX_INIT(__malloc_mmb_heap_lock,PTHREAD_RECURSIVE_MUTEX_INITIALIZER_N
static void * static void *
__malloc_from_heap (size_t size, struct heap_free_area **heap __malloc_from_heap (size_t size, struct heap_free_area **heap
#ifdef HEAP_USE_LOCKING #ifdef HEAP_USE_LOCKING
, __UCLIBC_MUTEX_TYPE *heap_lock , __UCLIBC_MUTEX_TYPE *heap_lock
#endif #endif
) )
{ {
void *mem; void *mem;
@@ -82,12 +82,12 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
from the system, add it to the heap, and try again. */ from the system, add it to the heap, and try again. */
{ {
/* If we're trying to allocate a block bigger than the default /* If we're trying to allocate a block bigger than the default
MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */ MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
void *block; void *block;
size_t block_size size_t block_size
= (size < MALLOC_HEAP_EXTEND_SIZE = (size < MALLOC_HEAP_EXTEND_SIZE
? MALLOC_HEAP_EXTEND_SIZE ? MALLOC_HEAP_EXTEND_SIZE
: MALLOC_ROUND_UP_TO_PAGE_SIZE (size)); : MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
/* Allocate the new heap block. */ /* Allocate the new heap block. */
#ifdef MALLOC_USE_SBRK #ifdef MALLOC_USE_SBRK
@@ -95,24 +95,24 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
__malloc_lock_sbrk (); __malloc_lock_sbrk ();
/* Use sbrk if we can, as it's faster than mmap, and guarantees /* Use sbrk if we can, as it's faster than mmap, and guarantees
contiguous allocation. */ contiguous allocation. */
block = sbrk (block_size); block = sbrk (block_size);
if (likely (block != (void *)-1)) if (likely (block != (void *)-1))
{ {
/* Because sbrk can return results of arbitrary /* Because sbrk can return results of arbitrary
alignment, align the result to a MALLOC_ALIGNMENT boundary. */ alignment, align the result to a MALLOC_ALIGNMENT boundary. */
long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT); long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
if (block != (void *)aligned_block) if (block != (void *)aligned_block)
/* Have to adjust. We should only have to actually do this /* Have to adjust. We should only have to actually do this
the first time (after which we will have aligned the brk the first time (after which we will have aligned the brk
correctly). */ correctly). */
{ {
/* Move the brk to reflect the alignment; our next allocation /* Move the brk to reflect the alignment; our next allocation
should start on exactly the right alignment. */ should start on exactly the right alignment. */
sbrk (aligned_block - (long)block); sbrk (aligned_block - (long)block);
block = (void *)aligned_block; block = (void *)aligned_block;
} }
} }
__malloc_unlock_sbrk (); __malloc_unlock_sbrk ();
@@ -121,62 +121,62 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
/* Otherwise, use mmap. */ /* Otherwise, use mmap. */
#ifdef __ARCH_USE_MMU__ #ifdef __ARCH_USE_MMU__
block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE, block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#else #else
block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE, block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED, 0, 0); MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED, 0, 0);
#endif #endif
#endif /* MALLOC_USE_SBRK */ #endif /* MALLOC_USE_SBRK */
if (likely (block != (void *)-1)) if (likely (block != (void *)-1))
{ {
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__) #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
struct malloc_mmb *mmb, *prev_mmb, *new_mmb; struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
#endif #endif
MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)", MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)",
(long)block, (long)block + block_size, block_size); (long)block, (long)block + block_size, block_size);
/* Get back the heap lock. */ /* Get back the heap lock. */
__heap_lock (heap_lock); __heap_lock (heap_lock);
/* Put BLOCK into the heap. */ /* Put BLOCK into the heap. */
__heap_free (heap, block, block_size); __heap_free (heap, block, block_size);
MALLOC_DEBUG_INDENT (-1); MALLOC_DEBUG_INDENT (-1);
/* Try again to allocate. */ /* Try again to allocate. */
mem = __heap_alloc (heap, &size); mem = __heap_alloc (heap, &size);
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__) #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
/* Insert a record of BLOCK in sorted order into the /* Insert a record of BLOCK in sorted order into the
__malloc_mmapped_blocks list. */ __malloc_mmapped_blocks list. */
new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock); new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
for (prev_mmb = 0, mmb = __malloc_mmapped_blocks; for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
mmb; mmb;
prev_mmb = mmb, mmb = mmb->next) prev_mmb = mmb, mmb = mmb->next)
if (block < mmb->mem) if (block < mmb->mem)
break; break;
new_mmb->next = mmb; new_mmb->next = mmb;
new_mmb->mem = block; new_mmb->mem = block;
new_mmb->size = block_size; new_mmb->size = block_size;
if (prev_mmb) if (prev_mmb)
prev_mmb->next = new_mmb; prev_mmb->next = new_mmb;
else else
__malloc_mmapped_blocks = new_mmb; __malloc_mmapped_blocks = new_mmb;
MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]", MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]",
(unsigned)new_mmb, (unsigned)new_mmb,
(unsigned)new_mmb->mem, block_size); (unsigned)new_mmb->mem, block_size);
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */ #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
__heap_unlock (heap_lock); __heap_unlock (heap_lock);
} }
} }
if (likely (mem)) if (likely (mem))
@@ -185,7 +185,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
mem = MALLOC_SETUP (mem, size); mem = MALLOC_SETUP (mem, size);
MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)", MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)",
(long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem)); (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
} }
else else
MALLOC_DEBUG (-1, "malloc: returning 0"); MALLOC_DEBUG (-1, "malloc: returning 0");
@@ -194,7 +194,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
} }
void * void *
malloc (size_t size) __wrap_malloc (size_t size)
{ {
void *mem; void *mem;
#ifdef MALLOC_DEBUGGING #ifdef MALLOC_DEBUGGING
@@ -21,7 +21,7 @@
void * void *
realloc (void *mem, size_t new_size) __wrap_realloc (void *mem, size_t new_size)
{ {
size_t size; size_t size;
char *base_mem; char *base_mem;
@@ -56,7 +56,7 @@ realloc (void *mem, size_t new_size)
new_size = HEAP_ADJUST_SIZE (sizeof (struct heap_free_area)); new_size = HEAP_ADJUST_SIZE (sizeof (struct heap_free_area));
MALLOC_DEBUG (1, "realloc: 0x%lx, %d (base = 0x%lx, total_size = %d)", MALLOC_DEBUG (1, "realloc: 0x%lx, %d (base = 0x%lx, total_size = %d)",
(long)mem, new_size, (long)base_mem, size); (long)mem, new_size, (long)base_mem, size);
if (new_size > size) if (new_size > size)
/* Grow the block. */ /* Grow the block. */
@@ -68,20 +68,20 @@ realloc (void *mem, size_t new_size)
__heap_unlock (&__malloc_heap_lock); __heap_unlock (&__malloc_heap_lock);
if (extra) if (extra)
/* Record the changed size. */ /* Record the changed size. */
MALLOC_SET_SIZE (base_mem, size + extra); MALLOC_SET_SIZE (base_mem, size + extra);
else else
/* Our attempts to extend MEM in place failed, just /* Our attempts to extend MEM in place failed, just
allocate-and-copy. */ allocate-and-copy. */
{ {
void *new_mem = malloc (new_size - MALLOC_HEADER_SIZE); void *new_mem = malloc (new_size - MALLOC_HEADER_SIZE);
if (new_mem) if (new_mem)
{ {
memcpy (new_mem, mem, size - MALLOC_HEADER_SIZE); memcpy (new_mem, mem, size - MALLOC_HEADER_SIZE);
free (mem); free (mem);
} }
mem = new_mem; mem = new_mem;
} }
} }
else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size) else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size)
/* Shrink the block. */ /* Shrink the block. */
@@ -94,7 +94,7 @@ realloc (void *mem, size_t new_size)
if (mem) if (mem)
MALLOC_DEBUG (-1, "realloc: returning 0x%lx (base:0x%lx, total_size:%d)", MALLOC_DEBUG (-1, "realloc: returning 0x%lx (base:0x%lx, total_size:%d)",
(long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem)); (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
else else
MALLOC_DEBUG (-1, "realloc: returning 0"); MALLOC_DEBUG (-1, "realloc: returning 0");
@@ -1,6 +1,6 @@
Provides: libc_be_socket_noop libc_be_l4re libc_support_misc Provides: libc_be_socket_noop libc_be_l4re libc_support_misc
libc_be_fs_noop libc_be_math libc_be_l4refile libinitcwd libc_be_fs_noop libc_be_math libc_be_l4refile libinitcwd
libc_be_minimal_log_io libmount libc_be_sig libc_be_minimal_log_io libmount libc_be_sig
libc_be_sem_noop libc_be_static_heap libc_be_sem_noop libc_be_static_heap libc_be_mem
Requires: l4re libl4re-vfs libc-headers Requires: l4re libl4re-vfs libc-headers
Maintainer: adam@os.inf.tu-dresden.de Maintainer: adam@os.inf.tu-dresden.de
@@ -0,0 +1,12 @@
PKGDIR ?= ../..
L4DIR ?= $(PKGDIR)/../../..
TARGET = libc_be_mem.a libc_be_mem.so
LINK_INCR = libc_be_mem.a
PC_FILENAME = libc_be_mem
REQUIRES_LIBS = l4re
SRC_CC = mem.cc
include $(L4DIR)/mk/lib.mk
LDFLAGS := $(filter-out -gc-sections,$(LDFLAGS))
@@ -0,0 +1,32 @@
/**
* \file libc_backends/l4re_mem/mem.cc
*/
/*
* (c) 2004-2009 Technische Universität Dresden
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
#include <stdlib.h>
#include <l4/sys/kdebug.h>
void *malloc(size_t size) throw()
{
void *data = 0;
enter_kdebug("malloc");
return (void*)data;
}
void free(void *p) throw()
{
if (p)
enter_kdebug("free");
}
void *realloc(void *p, size_t size) throw()
{
void *data = 0;
enter_kdebug("realloc");
return (void*)data;
}
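All three functions above are effectively stubs: every call traps into the kernel debugger via enter_kdebug() and, for malloc() and realloc(), a null pointer is returned, so a program that actually reaches this backend stops right there. For comparison, a rough sketch of what a working backend might look like, assuming a trivial bump allocator over a static arena is acceptable; the arena size, the Header bookkeeping and the lack of block reuse are invented for illustration and are not what this commit ships:

// Hypothetical sketch only, not part of this commit: a bump allocator over a
// static arena, with a small size header per block so realloc() can copy safely.
#include <stdlib.h>
#include <string.h>

namespace {
  enum { Arena_size = 1 << 20 };                        // 1 MiB, arbitrary
  char arena[Arena_size] __attribute__((aligned(16)));
  size_t used;                                          // bytes handed out so far
  struct alignas(16) Header { size_t size; };           // prepended to each block
  // Note: no locking here; a real backend would have to be thread-safe.
}

void *malloc(size_t size) throw()
{
  size_t total = (sizeof(Header) + size + 15) & ~(size_t)15;
  if (total > Arena_size - used)
    return 0;                                           // arena exhausted
  Header *h = reinterpret_cast<Header *>(arena + used);
  used += total;
  h->size = size;
  return h + 1;                                         // user memory follows the header
}

void free(void *) throw()
{
  // A bump allocator never reuses memory; a real backend would recycle blocks.
}

void *realloc(void *p, size_t size) throw()
{
  if (!p)
    return malloc(size);
  Header *h = reinterpret_cast<Header *>(p) - 1;
  if (size <= h->size)
    return p;                                           // shrinking fits in place
  void *n = malloc(size);
  if (n)
    memcpy(n, p, h->size);                              // copy only the old payload
  return n;
}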
@@ -28,6 +28,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include <stdbool.h> #include <stdbool.h>
void *__wrap_malloc();
// Common logic for version locks. // Common logic for version locks.
struct version_lock struct version_lock
{ {
@@ -402,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner)
// No free node available, allocate a new one. // No free node available, allocate a new one.
struct btree_node *new_node struct btree_node *new_node
= (struct btree_node *) (malloc (sizeof (struct btree_node))); = (struct btree_node *) (__wrap_malloc (sizeof (struct btree_node)));
version_lock_initialize_locked_exclusive ( version_lock_initialize_locked_exclusive (
&(new_node->version_lock)); // initialize the node in locked state. &(new_node->version_lock)); // initialize the node in locked state.
new_node->entry_count = 0; new_node->entry_count = 0;
@@ -28,6 +28,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include <stdbool.h> #include <stdbool.h>
void *__wrap_malloc();
// Common logic for version locks. // Common logic for version locks.
struct version_lock struct version_lock
{ {
@@ -402,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner)
// No free node available, allocate a new one. // No free node available, allocate a new one.
struct btree_node *new_node struct btree_node *new_node
= (struct btree_node *) (malloc (sizeof (struct btree_node))); = (struct btree_node *) (__wrap_malloc (sizeof (struct btree_node)));
version_lock_initialize_locked_exclusive ( version_lock_initialize_locked_exclusive (
&(new_node->version_lock)); // initialize the node in locked state. &(new_node->version_lock)); // initialize the node in locked state.
new_node->entry_count = 0; new_node->entry_count = 0;
@@ -28,6 +28,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include <stdbool.h> #include <stdbool.h>
void *__wrap_malloc(size_t);
void __wrap_free(void *);
// Common logic for version locks. // Common logic for version locks.
struct version_lock struct version_lock
{ {
@@ -362,7 +365,7 @@ btree_destroy (struct btree *t)
while (t->free_list) while (t->free_list)
{ {
struct btree_node *next = t->free_list->content.children[0].child; struct btree_node *next = t->free_list->content.children[0].child;
free (t->free_list); __wrap_free (t->free_list);
t->free_list = next; t->free_list = next;
} }
} }
@@ -401,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner)
// No free node available, allocate a new one. // No free node available, allocate a new one.
struct btree_node *new_node struct btree_node *new_node
= (struct btree_node *) malloc (sizeof (struct btree_node)); = (struct btree_node *) __wrap_malloc (sizeof (struct btree_node));
// Initialize the node in locked state. // Initialize the node in locked state.
version_lock_initialize_locked_exclusive (&new_node->version_lock); version_lock_initialize_locked_exclusive (&new_node->version_lock);
new_node->entry_count = 0; new_node->entry_count = 0;
@@ -37,14 +37,16 @@
#include <new> #include <new>
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset; using std::memset;
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead. // just for overhead.
#if INT_MAX == 32767 #if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE 128 # define EMERGENCY_OBJ_SIZE 128
# define EMERGENCY_OBJ_COUNT 16 # define EMERGENCY_OBJ_COUNT 16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE 512 # define EMERGENCY_OBJ_SIZE 512
# define EMERGENCY_OBJ_COUNT 32 # define EMERGENCY_OBJ_COUNT 32
#else #else
# define EMERGENCY_OBJ_SIZE 1024 # define EMERGENCY_OBJ_SIZE 1024
# define EMERGENCY_OBJ_COUNT 64 # define EMERGENCY_OBJ_COUNT 64
#endif #endif
#ifndef __GTHREADS #ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT # undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
#endif #endif
namespace __gnu_cxx namespace __gnu_cxx
@@ -85,20 +87,25 @@ namespace
{ {
public: public:
pool(); pool();
pool(char*, int);
_GLIBCXX_NODISCARD void *allocate (std::size_t); _GLIBCXX_NODISCARD void *allocate (std::size_t);
void free (void *); void free (void *);
bool in_pool (void *); bool in_pool (void *);
bool mem_static;
private: private:
void init();
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
// A single mutex controlling emergency allocations. // A single mutex controlling emergency allocations.
@@ -119,15 +126,31 @@ namespace
// Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
// to make this tunable. // to make this tunable.
arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
+ EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
arena = (char *)malloc (arena_size); arena = (char *)malloc (arena_size);
mem_static = false;
init();
}
pool::pool(char * storage, int size)
{
arena_size = size;
arena = storage;
mem_static = true;
init();
}
void pool::init()
{
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
first_free_entry = NULL; first_free_entry = NULL;
return; return;
} }
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
first_free_entry = reinterpret_cast <free_entry *> (arena); first_free_entry = reinterpret_cast <free_entry *> (arena);
@@ -145,46 +168,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -192,74 +215,77 @@ namespace
{ {
__gnu_cxx::__scoped_lock sentry(emergency_mutex); __gnu_cxx::__scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> ((*fe)->next) && (reinterpret_cast <char *> ((*fe)->next)
> reinterpret_cast <char *> (e) + sz); > reinterpret_cast <char *> (e) + sz);
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
bool pool::in_pool (void *ptr) bool pool::in_pool (void *ptr)
{ {
char *p = reinterpret_cast <char *> (ptr); char *p = reinterpret_cast <char *> (ptr);
return (p > arena return (p > arena
&& p < arena + arena_size); && p < arena + arena_size);
} }
pool emergency_pool; int const emergency_pool_size = EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
+ EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception);
char emergency_pool_storage[emergency_pool_size];
pool emergency_pool{emergency_pool_storage, emergency_pool_size};
} }
namespace __gnu_cxx namespace __gnu_cxx
@@ -267,10 +293,11 @@ namespace __gnu_cxx
void void
__freeres() __freeres()
{ {
if (emergency_pool.arena) // why is this not a destructor?
if (emergency_pool.arena and not emergency_pool.mem_static)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
} }
} }
@@ -281,7 +308,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
void *ret; void *ret;
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
ret = malloc (thrown_size); ret = __wrap_malloc (thrown_size);
if (!ret) if (!ret)
ret = emergency_pool.allocate (thrown_size); ret = emergency_pool.allocate (thrown_size);
@@ -312,7 +339,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
__cxa_dependent_exception *ret; __cxa_dependent_exception *ret;
ret = static_cast<__cxa_dependent_exception*> ret = static_cast<__cxa_dependent_exception*>
(malloc (sizeof (__cxa_dependent_exception))); (__wrap_malloc(sizeof (__cxa_dependent_exception)));
if (!ret) if (!ret)
ret = static_cast <__cxa_dependent_exception*> ret = static_cast <__cxa_dependent_exception*>
@@ -37,14 +37,16 @@
#include <new> #include <new>
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset; using std::memset;
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead. // just for overhead.
#if INT_MAX == 32767 #if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE 128 # define EMERGENCY_OBJ_SIZE 128
# define EMERGENCY_OBJ_COUNT 16 # define EMERGENCY_OBJ_COUNT 16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE 512 # define EMERGENCY_OBJ_SIZE 512
# define EMERGENCY_OBJ_COUNT 32 # define EMERGENCY_OBJ_COUNT 32
#else #else
# define EMERGENCY_OBJ_SIZE 1024 # define EMERGENCY_OBJ_SIZE 1024
# define EMERGENCY_OBJ_COUNT 64 # define EMERGENCY_OBJ_COUNT 64
#endif #endif
#ifndef __GTHREADS #ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT # undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
#endif #endif
namespace __gnu_cxx namespace __gnu_cxx
@@ -93,12 +95,12 @@ namespace
private: private:
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
// A single mutex controlling emergency allocations. // A single mutex controlling emergency allocations.
@@ -119,15 +121,15 @@ namespace
// Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
// to make this tunable. // to make this tunable.
arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
+ EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
arena = (char *)malloc (arena_size); arena = (char *)__wrap_malloc (arena_size);
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
first_free_entry = NULL; first_free_entry = NULL;
return; return;
} }
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
first_free_entry = reinterpret_cast <free_entry *> (arena); first_free_entry = reinterpret_cast <free_entry *> (arena);
@@ -145,46 +147,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -192,71 +194,71 @@ namespace
{ {
__gnu_cxx::__scoped_lock sentry(emergency_mutex); __gnu_cxx::__scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> ((*fe)->next) && (reinterpret_cast <char *> ((*fe)->next)
> reinterpret_cast <char *> (e) + sz); > reinterpret_cast <char *> (e) + sz);
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
bool pool::in_pool (void *ptr) bool pool::in_pool (void *ptr)
{ {
char *p = reinterpret_cast <char *> (ptr); char *p = reinterpret_cast <char *> (ptr);
return (p > arena return (p > arena
&& p < arena + arena_size); && p < arena + arena_size);
} }
pool emergency_pool; pool emergency_pool;
@@ -269,8 +271,8 @@ namespace __gnu_cxx
{ {
if (emergency_pool.arena) if (emergency_pool.arena)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
} }
} }
@@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
void *ret; void *ret;
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
ret = malloc (thrown_size); ret = __wrap_malloc (thrown_size);
if (!ret) if (!ret)
ret = emergency_pool.allocate (thrown_size); ret = emergency_pool.allocate (thrown_size);
@@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
__cxa_dependent_exception *ret; __cxa_dependent_exception *ret;
ret = static_cast<__cxa_dependent_exception*> ret = static_cast<__cxa_dependent_exception*>
(malloc (sizeof (__cxa_dependent_exception))); (__wrap_malloc (sizeof (__cxa_dependent_exception)));
if (!ret) if (!ret)
ret = static_cast <__cxa_dependent_exception*> ret = static_cast <__cxa_dependent_exception*>
@@ -37,14 +37,16 @@
#include <new> #include <new>
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset; using std::memset;
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead. // just for overhead.
#if INT_MAX == 32767 #if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE 128 # define EMERGENCY_OBJ_SIZE 128
# define EMERGENCY_OBJ_COUNT 16 # define EMERGENCY_OBJ_COUNT 16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE 512 # define EMERGENCY_OBJ_SIZE 512
# define EMERGENCY_OBJ_COUNT 32 # define EMERGENCY_OBJ_COUNT 32
#else #else
# define EMERGENCY_OBJ_SIZE 1024 # define EMERGENCY_OBJ_SIZE 1024
# define EMERGENCY_OBJ_COUNT 64 # define EMERGENCY_OBJ_COUNT 64
#endif #endif
#ifndef __GTHREADS #ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT # undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
#endif #endif
namespace __gnu_cxx namespace __gnu_cxx
@@ -93,12 +95,12 @@ namespace
private: private:
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
// A single mutex controlling emergency allocations. // A single mutex controlling emergency allocations.
@@ -119,15 +121,15 @@ namespace
// Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
// to make this tunable. // to make this tunable.
arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
+ EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
arena = (char *)malloc (arena_size); arena = (char *)__wrap_malloc (arena_size);
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
first_free_entry = NULL; first_free_entry = NULL;
return; return;
} }
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
first_free_entry = reinterpret_cast <free_entry *> (arena); first_free_entry = reinterpret_cast <free_entry *> (arena);
@@ -145,46 +147,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -192,71 +194,71 @@ namespace
{ {
__gnu_cxx::__scoped_lock sentry(emergency_mutex); __gnu_cxx::__scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> ((*fe)->next) && (reinterpret_cast <char *> ((*fe)->next)
> reinterpret_cast <char *> (e) + sz); > reinterpret_cast <char *> (e) + sz);
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
bool pool::in_pool (void *ptr) bool pool::in_pool (void *ptr)
{ {
char *p = reinterpret_cast <char *> (ptr); char *p = reinterpret_cast <char *> (ptr);
return (p > arena return (p > arena
&& p < arena + arena_size); && p < arena + arena_size);
} }
pool emergency_pool; pool emergency_pool;
@@ -269,8 +271,8 @@ namespace __gnu_cxx
{ {
if (emergency_pool.arena) if (emergency_pool.arena)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
} }
} }
@@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
void *ret; void *ret;
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
ret = malloc (thrown_size); ret = __wrap_malloc (thrown_size);
if (!ret) if (!ret)
ret = emergency_pool.allocate (thrown_size); ret = emergency_pool.allocate (thrown_size);
@@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
__cxa_dependent_exception *ret; __cxa_dependent_exception *ret;
ret = static_cast<__cxa_dependent_exception*> ret = static_cast<__cxa_dependent_exception*>
(malloc (sizeof (__cxa_dependent_exception))); (__wrap_malloc (sizeof (__cxa_dependent_exception)));
if (!ret) if (!ret)
ret = static_cast <__cxa_dependent_exception*> ret = static_cast <__cxa_dependent_exception*>
@@ -73,14 +73,16 @@
// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE. // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
using std::memset; using std::memset;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -91,16 +93,16 @@ using namespace __cxxabiv1;
// N.B. sizeof(std::bad_alloc) == sizeof(void*) // N.B. sizeof(std::bad_alloc) == sizeof(void*)
// and sizeof(std::runtime_error) == 2 * sizeof(void*) // and sizeof(std::runtime_error) == 2 * sizeof(void*)
// and sizeof(std::system_error) == 4 * sizeof(void*). // and sizeof(std::system_error) == 4 * sizeof(void*).
#define EMERGENCY_OBJ_SIZE 6 #define EMERGENCY_OBJ_SIZE 6
#ifdef __GTHREADS #ifdef __GTHREADS
// Assume that the number of concurrent exception objects scales with the // Assume that the number of concurrent exception objects scales with the
// processor word size, i.e., 16-bit systems are not likely to have hundreds // processor word size, i.e., 16-bit systems are not likely to have hundreds
// of threads all simultaneously throwing on OOM conditions. // of threads all simultaneously throwing on OOM conditions.
# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) # define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
# define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__) # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
#else #else
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
# define MAX_OBJ_COUNT 64 # define MAX_OBJ_COUNT 64
#endif #endif
@@ -153,12 +155,12 @@ namespace
private: private:
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
@@ -176,7 +178,7 @@ namespace
// to implement in_pool. // to implement in_pool.
#ifdef _GLIBCXX_EH_POOL_STATIC #ifdef _GLIBCXX_EH_POOL_STATIC
static constexpr std::size_t arena_size static constexpr std::size_t arena_size
= buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
alignas(void*) char arena[arena_size]; alignas(void*) char arena[arena_size];
#else #else
char *arena = nullptr; char *arena = nullptr;
@@ -201,48 +203,48 @@ namespace
#endif #endif
const std::string_view ns_name = "glibcxx.eh_pool"; const std::string_view ns_name = "glibcxx.eh_pool";
std::pair<std::string_view, int> tunables[]{ std::pair<std::string_view, int> tunables[]{
{"obj_size", 0}, {"obj_count", obj_count} {"obj_size", 0}, {"obj_count", obj_count}
}; };
while (str) while (str)
{ {
if (*str == ':') if (*str == ':')
++str; ++str;
if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
&& str[ns_name.size()] == '.') && str[ns_name.size()] == '.')
{ {
str += ns_name.size() + 1; str += ns_name.size() + 1;
for (auto& t : tunables) for (auto& t : tunables)
if (!t.first.compare(0, t.first.size(), str, t.first.size()) if (!t.first.compare(0, t.first.size(), str, t.first.size())
&& str[t.first.size()] == '=') && str[t.first.size()] == '=')
{ {
str += t.first.size() + 1; str += t.first.size() + 1;
char* end; char* end;
unsigned long val = strtoul(str, &end, 0); unsigned long val = strtoul(str, &end, 0);
if ((*end == ':' || *end == '\0') && val <= INT_MAX) if ((*end == ':' || *end == '\0') && val <= INT_MAX)
t.second = val; t.second = val;
str = end; str = end;
break; break;
} }
} }
str = strchr(str, ':'); str = strchr(str, ':');
} }
obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero. obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
if (tunables[0].second != 0) if (tunables[0].second != 0)
obj_size = tunables[0].second; obj_size = tunables[0].second;
#endif // HOSTED #endif // HOSTED
#endif // NOT_FOR_L4 #endif // NOT_FOR_L4
arena_size = buffer_size_in_bytes(obj_count, obj_size); arena_size = buffer_size_in_bytes(obj_count, obj_size);
if (arena_size == 0) if (arena_size == 0)
return; return;
arena = (char *)malloc (arena_size); arena = (char *)__wrap_malloc (arena_size);
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
return; return;
} }
#endif // STATIC #endif // STATIC
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
@@ -261,46 +263,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -308,64 +310,64 @@ namespace
{ {
__scoped_lock sentry(emergency_mutex); __scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> (e) + sz && (reinterpret_cast <char *> (e) + sz
> reinterpret_cast <char *> ((*fe)->next)); > reinterpret_cast <char *> ((*fe)->next));
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
inline bool pool::in_pool (void *ptr) const noexcept inline bool pool::in_pool (void *ptr) const noexcept
@@ -386,8 +388,8 @@ namespace __gnu_cxx
#ifndef _GLIBCXX_EH_POOL_STATIC #ifndef _GLIBCXX_EH_POOL_STATIC
if (emergency_pool.arena) if (emergency_pool.arena)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
#endif #endif
} }
@@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
{ {
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
void *ret = malloc (thrown_size); void *ret = __wrap_malloc (thrown_size);
#if USE_POOL #if USE_POOL
if (!ret) if (!ret)
@@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept
extern "C" __cxa_dependent_exception* extern "C" __cxa_dependent_exception*
__cxxabiv1::__cxa_allocate_dependent_exception() noexcept __cxxabiv1::__cxa_allocate_dependent_exception() noexcept
{ {
void *ret = malloc (sizeof (__cxa_dependent_exception)); void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception));
#if USE_POOL #if USE_POOL
if (!ret) if (!ret)
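The hunks above redirect the C++ exception-handling emergency pool from malloc/free to __wrap_malloc/__wrap_free so that, in binaries linked with the --wrap flags added elsewhere in this commit, even exception-object allocation goes through the new memory backend. A minimal sketch of the GNU ld --wrap convention this relies on (illustration only; the printf diagnostic and the forwarding to __real_malloc are assumptions for the example, not part of this commit):

#include <cstdio>
#include <cstdlib>

// With "-Wl,--wrap=malloc" the linker resolves every undefined reference to
// malloc against __wrap_malloc, while __real_malloc still reaches the
// original C library definition.
extern "C" void *__real_malloc(std::size_t);

extern "C" void *__wrap_malloc(std::size_t size)
{
  std::printf("malloc(%zu) intercepted\n", size);
  return __real_malloc(size);
}

int main()
{
  void *p = std::malloc(32); // resolves to __wrap_malloc when linked with --wrap=malloc
  std::free(p);
  return 0;
}

In the L4Re case there is no forwarding to __real_malloc: the umalloc implementation further down defines the __wrap_* family itself.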

View File

@@ -73,14 +73,16 @@
// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE. // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset; using std::memset;
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -91,16 +93,16 @@ using namespace __cxxabiv1;
// N.B. sizeof(std::bad_alloc) == sizeof(void*) // N.B. sizeof(std::bad_alloc) == sizeof(void*)
// and sizeof(std::runtime_error) == 2 * sizeof(void*) // and sizeof(std::runtime_error) == 2 * sizeof(void*)
// and sizeof(std::system_error) == 4 * sizeof(void*). // and sizeof(std::system_error) == 4 * sizeof(void*).
#define EMERGENCY_OBJ_SIZE 6 #define EMERGENCY_OBJ_SIZE 6
#ifdef __GTHREADS #ifdef __GTHREADS
// Assume that the number of concurrent exception objects scales with the // Assume that the number of concurrent exception objects scales with the
// processor word size, i.e., 16-bit systems are not likely to have hundreds // processor word size, i.e., 16-bit systems are not likely to have hundreds
// of threads all simultaneously throwing on OOM conditions. // of threads all simultaneously throwing on OOM conditions.
# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) # define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
# define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__) # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
#else #else
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
# define MAX_OBJ_COUNT 64 # define MAX_OBJ_COUNT 64
#endif #endif
@@ -153,12 +155,12 @@ namespace
private: private:
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
@@ -176,7 +178,7 @@ namespace
// to implement in_pool. // to implement in_pool.
#ifdef _GLIBCXX_EH_POOL_STATIC #ifdef _GLIBCXX_EH_POOL_STATIC
static constexpr std::size_t arena_size static constexpr std::size_t arena_size
= buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
alignas(void*) char arena[arena_size]; alignas(void*) char arena[arena_size];
#else #else
char *arena = nullptr; char *arena = nullptr;
@@ -201,48 +203,48 @@ namespace
#endif #endif
const std::string_view ns_name = "glibcxx.eh_pool"; const std::string_view ns_name = "glibcxx.eh_pool";
std::pair<std::string_view, int> tunables[]{ std::pair<std::string_view, int> tunables[]{
{"obj_size", 0}, {"obj_count", obj_count} {"obj_size", 0}, {"obj_count", obj_count}
}; };
while (str) while (str)
{ {
if (*str == ':') if (*str == ':')
++str; ++str;
if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
&& str[ns_name.size()] == '.') && str[ns_name.size()] == '.')
{ {
str += ns_name.size() + 1; str += ns_name.size() + 1;
for (auto& t : tunables) for (auto& t : tunables)
if (!t.first.compare(0, t.first.size(), str, t.first.size()) if (!t.first.compare(0, t.first.size(), str, t.first.size())
&& str[t.first.size()] == '=') && str[t.first.size()] == '=')
{ {
str += t.first.size() + 1; str += t.first.size() + 1;
char* end; char* end;
unsigned long val = strtoul(str, &end, 0); unsigned long val = strtoul(str, &end, 0);
if ((*end == ':' || *end == '\0') && val <= INT_MAX) if ((*end == ':' || *end == '\0') && val <= INT_MAX)
t.second = val; t.second = val;
str = end; str = end;
break; break;
} }
} }
str = strchr(str, ':'); str = strchr(str, ':');
} }
obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero. obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
if (tunables[0].second != 0) if (tunables[0].second != 0)
obj_size = tunables[0].second; obj_size = tunables[0].second;
#endif // HOSTED #endif // HOSTED
#endif // NOT_FOR_L4 #endif // NOT_FOR_L4
arena_size = buffer_size_in_bytes(obj_count, obj_size); arena_size = buffer_size_in_bytes(obj_count, obj_size);
if (arena_size == 0) if (arena_size == 0)
return; return;
arena = (char *)malloc (arena_size); arena = (char *)__wrap_malloc (arena_size);
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
return; return;
} }
#endif // STATIC #endif // STATIC
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
@@ -261,46 +263,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -308,64 +310,64 @@ namespace
{ {
__scoped_lock sentry(emergency_mutex); __scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> (e) + sz && (reinterpret_cast <char *> (e) + sz
> reinterpret_cast <char *> ((*fe)->next)); > reinterpret_cast <char *> ((*fe)->next));
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
inline bool pool::in_pool (void *ptr) const noexcept inline bool pool::in_pool (void *ptr) const noexcept
@@ -386,8 +388,8 @@ namespace __gnu_cxx
#ifndef _GLIBCXX_EH_POOL_STATIC #ifndef _GLIBCXX_EH_POOL_STATIC
if (emergency_pool.arena) if (emergency_pool.arena)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
#endif #endif
} }
@@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
{ {
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
void *ret = malloc (thrown_size); void *ret = __wrap_malloc (thrown_size);
#if USE_POOL #if USE_POOL
if (!ret) if (!ret)
@@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept
extern "C" __cxa_dependent_exception* extern "C" __cxa_dependent_exception*
__cxxabiv1::__cxa_allocate_dependent_exception() noexcept __cxxabiv1::__cxa_allocate_dependent_exception() noexcept
{ {
void *ret = malloc (sizeof (__cxa_dependent_exception)); void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception));
#if USE_POOL #if USE_POOL
if (!ret) if (!ret)

View File

@@ -1,5 +1,5 @@
// -*- C++ -*- Allocate exception objects. // -*- C++ -*- Allocate exception objects.
// Copyright (C) 2001-2025 Free Software Foundation, Inc. // Copyright (C) 2001-2024 Free Software Foundation, Inc.
// //
// This file is part of GCC. // This file is part of GCC.
// //
@@ -73,14 +73,16 @@
// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE. // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset; using std::memset;
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -91,16 +93,16 @@ using namespace __cxxabiv1;
// N.B. sizeof(std::bad_alloc) == sizeof(void*) // N.B. sizeof(std::bad_alloc) == sizeof(void*)
// and sizeof(std::runtime_error) == 2 * sizeof(void*) // and sizeof(std::runtime_error) == 2 * sizeof(void*)
// and sizeof(std::system_error) == 4 * sizeof(void*). // and sizeof(std::system_error) == 4 * sizeof(void*).
#define EMERGENCY_OBJ_SIZE 6 #define EMERGENCY_OBJ_SIZE 6
#ifdef __GTHREADS #ifdef __GTHREADS
// Assume that the number of concurrent exception objects scales with the // Assume that the number of concurrent exception objects scales with the
// processor word size, i.e., 16-bit systems are not likely to have hundreds // processor word size, i.e., 16-bit systems are not likely to have hundreds
// of threads all simultaneously throwing on OOM conditions. // of threads all simultaneously throwing on OOM conditions.
# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) # define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
# define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__) # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
#else #else
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
# define MAX_OBJ_COUNT 64 # define MAX_OBJ_COUNT 64
#endif #endif
@@ -153,12 +155,12 @@ namespace
private: private:
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
@@ -176,7 +178,7 @@ namespace
// to implement in_pool. // to implement in_pool.
#ifdef _GLIBCXX_EH_POOL_STATIC #ifdef _GLIBCXX_EH_POOL_STATIC
static constexpr std::size_t arena_size static constexpr std::size_t arena_size
= buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
alignas(void*) char arena[arena_size]; alignas(void*) char arena[arena_size];
#else #else
char *arena = nullptr; char *arena = nullptr;
@@ -201,48 +203,48 @@ namespace
#endif #endif
const std::string_view ns_name = "glibcxx.eh_pool"; const std::string_view ns_name = "glibcxx.eh_pool";
std::pair<std::string_view, int> tunables[]{ std::pair<std::string_view, int> tunables[]{
{"obj_size", 0}, {"obj_count", obj_count} {"obj_size", 0}, {"obj_count", obj_count}
}; };
while (str) while (str)
{ {
if (*str == ':') if (*str == ':')
++str; ++str;
if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
&& str[ns_name.size()] == '.') && str[ns_name.size()] == '.')
{ {
str += ns_name.size() + 1; str += ns_name.size() + 1;
for (auto& t : tunables) for (auto& t : tunables)
if (!t.first.compare(0, t.first.size(), str, t.first.size()) if (!t.first.compare(0, t.first.size(), str, t.first.size())
&& str[t.first.size()] == '=') && str[t.first.size()] == '=')
{ {
str += t.first.size() + 1; str += t.first.size() + 1;
char* end; char* end;
unsigned long val = strtoul(str, &end, 0); unsigned long val = strtoul(str, &end, 0);
if ((*end == ':' || *end == '\0') && val <= INT_MAX) if ((*end == ':' || *end == '\0') && val <= INT_MAX)
t.second = val; t.second = val;
str = end; str = end;
break; break;
} }
} }
str = strchr(str, ':'); str = strchr(str, ':');
} }
obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero. obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
if (tunables[0].second != 0) if (tunables[0].second != 0)
obj_size = tunables[0].second; obj_size = tunables[0].second;
#endif // HOSTED #endif // HOSTED
#endif // NOT_FOR_L4 #endif // NOT_FOR_L4
arena_size = buffer_size_in_bytes(obj_count, obj_size); arena_size = buffer_size_in_bytes(obj_count, obj_size);
if (arena_size == 0) if (arena_size == 0)
return; return;
arena = (char *)malloc (arena_size); arena = (char *)__wrap_malloc (arena_size);
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
return; return;
} }
#endif // STATIC #endif // STATIC
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
@@ -261,46 +263,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -308,64 +310,64 @@ namespace
{ {
__scoped_lock sentry(emergency_mutex); __scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> (e) + sz && (reinterpret_cast <char *> (e) + sz
> reinterpret_cast <char *> ((*fe)->next)); > reinterpret_cast <char *> ((*fe)->next));
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
inline bool pool::in_pool (void *ptr) const noexcept inline bool pool::in_pool (void *ptr) const noexcept
@@ -386,8 +388,8 @@ namespace __gnu_cxx
#ifndef _GLIBCXX_EH_POOL_STATIC #ifndef _GLIBCXX_EH_POOL_STATIC
if (emergency_pool.arena) if (emergency_pool.arena)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
#endif #endif
} }
@@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
{ {
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
void *ret = malloc (thrown_size); void *ret = __wrap_malloc (thrown_size);
#if USE_POOL #if USE_POOL
if (!ret) if (!ret)
@@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept
extern "C" __cxa_dependent_exception* extern "C" __cxa_dependent_exception*
__cxxabiv1::__cxa_allocate_dependent_exception() noexcept __cxxabiv1::__cxa_allocate_dependent_exception() noexcept
{ {
void *ret = malloc (sizeof (__cxa_dependent_exception)); void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception));
#if USE_POOL #if USE_POOL
if (!ret) if (!ret)

View File

@@ -37,14 +37,16 @@
#include <new> #include <new>
#if _GLIBCXX_HOSTED #if _GLIBCXX_HOSTED
using std::free; //using std::free;
using std::malloc; //using std::malloc;
extern "C" void *__wrap_malloc (std::size_t);
extern "C" void __wrap_free(void *);
using std::memset; using std::memset;
#else #else
// In a freestanding environment, these functions may not be available // In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are. // -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t); extern "C" void *__wrap_malloc (std::size_t);
extern "C" void free(void *); extern "C" void __wrap_free(void *);
extern "C" void *memset (void *, int, std::size_t); extern "C" void *memset (void *, int, std::size_t);
#endif #endif
@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
// just for overhead. // just for overhead.
#if INT_MAX == 32767 #if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE 128 # define EMERGENCY_OBJ_SIZE 128
# define EMERGENCY_OBJ_COUNT 16 # define EMERGENCY_OBJ_COUNT 16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE 512 # define EMERGENCY_OBJ_SIZE 512
# define EMERGENCY_OBJ_COUNT 32 # define EMERGENCY_OBJ_COUNT 32
#else #else
# define EMERGENCY_OBJ_SIZE 1024 # define EMERGENCY_OBJ_SIZE 1024
# define EMERGENCY_OBJ_COUNT 64 # define EMERGENCY_OBJ_COUNT 64
#endif #endif
#ifndef __GTHREADS #ifndef __GTHREADS
# undef EMERGENCY_OBJ_COUNT # undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT 4 # define EMERGENCY_OBJ_COUNT 4
#endif #endif
namespace __gnu_cxx namespace __gnu_cxx
@@ -93,12 +95,12 @@ namespace
private: private:
struct free_entry { struct free_entry {
std::size_t size; std::size_t size;
free_entry *next; free_entry *next;
}; };
struct allocated_entry { struct allocated_entry {
std::size_t size; std::size_t size;
char data[] __attribute__((aligned)); char data[] __attribute__((aligned));
}; };
// A single mutex controlling emergency allocations. // A single mutex controlling emergency allocations.
@@ -119,15 +121,15 @@ namespace
// Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment
// to make this tunable. // to make this tunable.
arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
+ EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
arena = (char *)malloc (arena_size); arena = (char *)__wrap_malloc (arena_size);
if (!arena) if (!arena)
{ {
// If the allocation failed go without an emergency pool. // If the allocation failed go without an emergency pool.
arena_size = 0; arena_size = 0;
first_free_entry = NULL; first_free_entry = NULL;
return; return;
} }
// Populate the free-list with a single entry covering the whole arena // Populate the free-list with a single entry covering the whole arena
first_free_entry = reinterpret_cast <free_entry *> (arena); first_free_entry = reinterpret_cast <free_entry *> (arena);
@@ -145,46 +147,46 @@ namespace
// And we need to at least hand out objects of the size of // And we need to at least hand out objects of the size of
// a freelist entry. // a freelist entry.
if (size < sizeof (free_entry)) if (size < sizeof (free_entry))
size = sizeof (free_entry); size = sizeof (free_entry);
// And we need to align objects we hand out to the maximum // And we need to align objects we hand out to the maximum
// alignment required on the target (this really aligns the // alignment required on the target (this really aligns the
// tail which will become a new freelist entry). // tail which will become a new freelist entry).
size = ((size + __alignof__ (allocated_entry::data) - 1) size = ((size + __alignof__ (allocated_entry::data) - 1)
& ~(__alignof__ (allocated_entry::data) - 1)); & ~(__alignof__ (allocated_entry::data) - 1));
// Search for an entry of proper size on the freelist. // Search for an entry of proper size on the freelist.
free_entry **e; free_entry **e;
for (e = &first_free_entry; for (e = &first_free_entry;
*e && (*e)->size < size; *e && (*e)->size < size;
e = &(*e)->next) e = &(*e)->next)
; ;
if (!*e) if (!*e)
return NULL; return NULL;
allocated_entry *x; allocated_entry *x;
if ((*e)->size - size >= sizeof (free_entry)) if ((*e)->size - size >= sizeof (free_entry))
{ {
// Split block if it is too large. // Split block if it is too large.
free_entry *f = reinterpret_cast <free_entry *> free_entry *f = reinterpret_cast <free_entry *>
(reinterpret_cast <char *> (*e) + size); (reinterpret_cast <char *> (*e) + size);
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
new (f) free_entry; new (f) free_entry;
f->next = next; f->next = next;
f->size = sz - size; f->size = sz - size;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = size; x->size = size;
*e = f; *e = f;
} }
else else
{ {
// Exact size match or too small overhead for a free entry. // Exact size match or too small overhead for a free entry.
std::size_t sz = (*e)->size; std::size_t sz = (*e)->size;
free_entry *next = (*e)->next; free_entry *next = (*e)->next;
x = reinterpret_cast <allocated_entry *> (*e); x = reinterpret_cast <allocated_entry *> (*e);
new (x) allocated_entry; new (x) allocated_entry;
x->size = sz; x->size = sz;
*e = next; *e = next;
} }
return &x->data; return &x->data;
} }
@@ -192,71 +194,71 @@ namespace
{ {
__gnu_cxx::__scoped_lock sentry(emergency_mutex); __gnu_cxx::__scoped_lock sentry(emergency_mutex);
allocated_entry *e = reinterpret_cast <allocated_entry *> allocated_entry *e = reinterpret_cast <allocated_entry *>
(reinterpret_cast <char *> (data) - offsetof (allocated_entry, data)); (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
std::size_t sz = e->size; std::size_t sz = e->size;
if (!first_free_entry if (!first_free_entry
|| (reinterpret_cast <char *> (e) + sz || (reinterpret_cast <char *> (e) + sz
< reinterpret_cast <char *> (first_free_entry))) < reinterpret_cast <char *> (first_free_entry)))
{ {
// If the free list is empty or the entry is before the // If the free list is empty or the entry is before the
// first element and cannot be merged with it add it as // first element and cannot be merged with it add it as
// the first free entry. // the first free entry.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = first_free_entry; f->next = first_free_entry;
first_free_entry = f; first_free_entry = f;
} }
else if (reinterpret_cast <char *> (e) + sz else if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> (first_free_entry)) == reinterpret_cast <char *> (first_free_entry))
{ {
// Check if we can merge with the first free entry being right // Check if we can merge with the first free entry being right
// after us. // after us.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz + first_free_entry->size; f->size = sz + first_free_entry->size;
f->next = first_free_entry->next; f->next = first_free_entry->next;
first_free_entry = f; first_free_entry = f;
} }
else else
{ {
// Else search for a free item we can merge with at its end. // Else search for a free item we can merge with at its end.
free_entry **fe; free_entry **fe;
for (fe = &first_free_entry; for (fe = &first_free_entry;
(*fe)->next (*fe)->next
&& (reinterpret_cast <char *> ((*fe)->next) && (reinterpret_cast <char *> ((*fe)->next)
> reinterpret_cast <char *> (e) + sz); > reinterpret_cast <char *> (e) + sz);
fe = &(*fe)->next) fe = &(*fe)->next)
; ;
// If we can merge the next block into us do so and continue // If we can merge the next block into us do so and continue
// with the cases below. // with the cases below.
if (reinterpret_cast <char *> (e) + sz if (reinterpret_cast <char *> (e) + sz
== reinterpret_cast <char *> ((*fe)->next)) == reinterpret_cast <char *> ((*fe)->next))
{ {
sz += (*fe)->next->size; sz += (*fe)->next->size;
(*fe)->next = (*fe)->next->next; (*fe)->next = (*fe)->next->next;
} }
if (reinterpret_cast <char *> (*fe) + (*fe)->size if (reinterpret_cast <char *> (*fe) + (*fe)->size
== reinterpret_cast <char *> (e)) == reinterpret_cast <char *> (e))
// Merge with the freelist entry. // Merge with the freelist entry.
(*fe)->size += sz; (*fe)->size += sz;
else else
{ {
// Else put it after it which keeps the freelist sorted. // Else put it after it which keeps the freelist sorted.
free_entry *f = reinterpret_cast <free_entry *> (e); free_entry *f = reinterpret_cast <free_entry *> (e);
new (f) free_entry; new (f) free_entry;
f->size = sz; f->size = sz;
f->next = (*fe)->next; f->next = (*fe)->next;
(*fe)->next = f; (*fe)->next = f;
} }
} }
} }
bool pool::in_pool (void *ptr) bool pool::in_pool (void *ptr)
{ {
char *p = reinterpret_cast <char *> (ptr); char *p = reinterpret_cast <char *> (ptr);
return (p > arena return (p > arena
&& p < arena + arena_size); && p < arena + arena_size);
} }
pool emergency_pool; pool emergency_pool;
@@ -269,8 +271,8 @@ namespace __gnu_cxx
{ {
if (emergency_pool.arena) if (emergency_pool.arena)
{ {
::free(emergency_pool.arena); ::free(emergency_pool.arena);
emergency_pool.arena = 0; emergency_pool.arena = 0;
} }
} }
} }
@@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW
void *ret; void *ret;
thrown_size += sizeof (__cxa_refcounted_exception); thrown_size += sizeof (__cxa_refcounted_exception);
ret = malloc (thrown_size); ret = __wrap_malloc (thrown_size);
if (!ret) if (!ret)
ret = emergency_pool.allocate (thrown_size); ret = emergency_pool.allocate (thrown_size);
@@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW
__cxa_dependent_exception *ret; __cxa_dependent_exception *ret;
ret = static_cast<__cxa_dependent_exception*> ret = static_cast<__cxa_dependent_exception*>
(malloc (sizeof (__cxa_dependent_exception))); (__wrap_malloc (sizeof (__cxa_dependent_exception)));
if (!ret) if (!ret)
ret = static_cast <__cxa_dependent_exception*> ret = static_cast <__cxa_dependent_exception*>

View File

@@ -994,6 +994,8 @@ static void *realloc(void *ptr, size_t size)
} // namespace umalloc } // namespace umalloc
L4_BEGIN_DECLS
/** /**
* Standard-compliant malloc implementation. * Standard-compliant malloc implementation.
* *
@@ -1001,7 +1003,7 @@ static void *realloc(void *ptr, size_t size)
* *
* \return Valid allocated memory or nullptr if the allocation failed. * \return Valid allocated memory or nullptr if the allocation failed.
*/ */
void *malloc(size_t size) noexcept void *__wrap_malloc(size_t size) noexcept
{ {
auto ptr = umalloc::alloc(size); auto ptr = umalloc::alloc(size);
if (!ptr) if (!ptr)
@@ -1018,7 +1020,7 @@ void *malloc(size_t size) noexcept
* *
* \return Valid allocated memory or nullptr if the allocation failed. * \return Valid allocated memory or nullptr if the allocation failed.
*/ */
void *aligned_alloc(size_t alignment, size_t size) noexcept void *__wrap_aligned_alloc(size_t alignment, size_t size) noexcept
{ {
auto ptr = umalloc::alloc(size, alignment); auto ptr = umalloc::alloc(size, alignment);
if (!ptr) if (!ptr)
@@ -1032,7 +1034,7 @@ void *aligned_alloc(size_t alignment, size_t size) noexcept
* *
* \param ptr Previously allocated valid memory. * \param ptr Previously allocated valid memory.
*/ */
void free(void *ptr) noexcept void __wrap_free(void *ptr) noexcept
{ {
if (ptr) if (ptr)
umalloc::dealloc(ptr); umalloc::dealloc(ptr);
@@ -1046,7 +1048,7 @@ void free(void *ptr) noexcept
* *
* \return Valid allocated memory or nullptr if the allocation failed. * \return Valid allocated memory or nullptr if the allocation failed.
*/ */
void *calloc(size_t nmemb, size_t size) noexcept void *__wrap_calloc(size_t nmemb, size_t size) noexcept
{ {
// Avoid multiplication overflow. // Avoid multiplication overflow.
if ((size > 0) && (nmemb > std::numeric_limits<typeof(nmemb)>::max() / size)) if ((size > 0) && (nmemb > std::numeric_limits<typeof(nmemb)>::max() / size))
@@ -1073,7 +1075,7 @@ void *calloc(size_t nmemb, size_t size) noexcept
* \return Valid reallocated memory or nullptr if the reallocation failed. * \return Valid reallocated memory or nullptr if the reallocation failed.
* (in which case the previously allocated memory is not touched). * (in which case the previously allocated memory is not touched).
*/ */
void *realloc(void *ptr, size_t size) noexcept void *__wrap_realloc(void *ptr, size_t size) noexcept
{ {
if (!ptr) if (!ptr)
return malloc(size); return malloc(size);
@@ -1084,3 +1086,5 @@ void *realloc(void *ptr, size_t size) noexcept
return ptr; return ptr;
} }
L4_END_DECLS
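Usage sketch for the wrappers above (assumptions: the program links against the library providing them and its Makefile carries the LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc additions shown in this commit): application code keeps calling the standard C allocation API and the linker reroutes the calls.

#include <cstdlib>
#include <cstring>

int main()
{
  // malloc/realloc/free resolve to __wrap_malloc/__wrap_realloc/__wrap_free
  // at the final link; the source itself stays portable.
  char *buf = static_cast<char *>(std::malloc(64));
  if (!buf)
    return 1;
  std::strcpy(buf, "memory backend");
  if (char *tmp = static_cast<char *>(std::realloc(buf, 128)))
    buf = tmp;
  std::free(buf);
  return 0;
}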

View File

@@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../..
TARGET = lua TARGET = lua
SRC_C = lua.c SRC_C = lua.c
REQUIRES_LIBS = lua libc_support_misc libc_be_fs_noop libc_be_sig REQUIRES_LIBS = lua libc_support_misc libc_be_fs_noop libc_be_sig
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
vpath %.c $(PKGDIR)/lib/contrib/src vpath %.c $(PKGDIR)/lib/contrib/src
include $(L4DIR)/mk/prog.mk include $(L4DIR)/mk/prog.mk

View File

@@ -10,6 +10,8 @@ $(GENERAL_D_LOC): $(PKGDIR)/lib/build/Makefile
PKGNAME_DIRNAME := lua-c++ PKGNAME_DIRNAME := lua-c++
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
# Difference to other version # Difference to other version
# WARNINGS EXCEPTION: CFLAGS is used here for C++ files (instead of the usual # WARNINGS EXCEPTION: CFLAGS is used here for C++ files (instead of the usual

View File

@@ -25,6 +25,7 @@ CAN_PIE_arm := y
CAN_PIE_arm64 := y CAN_PIE_arm64 := y
BID_CAN_PIE = $(CAN_PIE_$(ARCH)) BID_CAN_PIE = $(CAN_PIE_$(ARCH))
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc --wrap=aligned_alloc --wrap=calloc
REQUIRES_LIBS := libkproxy libloader libsigma0 \ REQUIRES_LIBS := libkproxy libloader libsigma0 \
cxx_io cxx_libc_io libsupc++_minimal \ cxx_io cxx_libc_io libsupc++_minimal \
libc_minimal libc_minimal_l4re libumalloc libc_minimal libc_minimal_l4re libumalloc

View File

@@ -6,4 +6,6 @@ SRC_CC = ned-prompt.cc
REQUIRES_LIBS := readline REQUIRES_LIBS := readline
DEPENDS_PKGS := readline DEPENDS_PKGS := readline
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
include $(L4DIR)/mk/prog.mk include $(L4DIR)/mk/prog.mk

View File

@@ -14,5 +14,6 @@ SRC_CC += lua_sleep.cc
REQUIRES_LIBS := libloader lua++ libc_support_misc cxx_libc_io cxx_io REQUIRES_LIBS := libloader lua++ libc_support_misc cxx_libc_io cxx_io
DEFAULT_HEAP_SIZE := 0x20000 DEFAULT_HEAP_SIZE := 0x20000
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
include $(L4DIR)/mk/prog.mk include $(L4DIR)/mk/prog.mk

View File

@@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../..
TARGET = backtracer TARGET = backtracer
SRC_CC = backtracer.cc SRC_CC = backtracer.cc
REQUIRES_LIBS = stdlibs libunwind libstdc++ REQUIRES_LIBS = stdlibs libunwind libstdc++
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
include $(L4DIR)/mk/prog.mk include $(L4DIR)/mk/prog.mk

View File

@@ -10,5 +10,6 @@ LDFLAGS +=
#CPPFLAGS += -fPIC #CPPFLAGS += -fPIC
REQUIRES_LIBS := libloader libkproxy cxx_libc_io cxx_io REQUIRES_LIBS := libloader libkproxy cxx_libc_io cxx_io
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
include $(L4DIR)/mk/prog.mk include $(L4DIR)/mk/prog.mk

View File

@@ -1,4 +1,4 @@
provides: libmag mag-input-libinput mag-input-event mag-client_fb mag-mag_client mag-session_manager provides: libmag mag-input-libinput mag-input-event mag-client_fb mag-mag_client mag-session_manager
requires: l4re libc stdlibs-sh input l4util mag-gfx libstdc++ requires: l4re libc stdlibs-sh input l4util mag-gfx libstdc++
lua++ lua++ libc_be_mem
Maintainer: warg@os.inf.tu-dresden.de Maintainer: warg@os.inf.tu-dresden.de

View File

@@ -15,7 +15,7 @@ STATIC_PLUGINS += mag-input-event
STATIC_PLUGINS += mag-client_fb STATIC_PLUGINS += mag-client_fb
STATIC_PLUGINS += mag-mag_client STATIC_PLUGINS += mag-mag_client
REQUIRES_LIBS:= libsupc++ libdl mag-gfx lua++ cxx_libc_io cxx_io REQUIRES_LIBS:= libsupc++ libdl mag-gfx lua++ cxx_libc_io cxx_io libc_be_mem libstdc++
REQUIRES_LIBS += $(STATIC_PLUGINS) REQUIRES_LIBS += $(STATIC_PLUGINS)
#LDFLAGS += --export-dynamic #LDFLAGS += --export-dynamic

View File

@@ -1,4 +1,4 @@
provides: rtc rtc_libc_be provides: rtc rtc_libc_be
requires: stdlibs libio cxx_libc_io cxx_io libstdc++ requires: stdlibs libio cxx_libc_io cxx_io libstdc++ libc_be_mem
optional: drivers-frst i2c-server optional: drivers-frst i2c-server
Maintainer: adam.lackorzynski@kernkonzept.com Maintainer: adam.lackorzynski@kernkonzept.com

View File

@@ -9,6 +9,6 @@ SRC_CC_arm64-l4f = pl031.cc
SRC_CC = main.cc SRC_CC = main.cc
SRC_CC-$(CONFIG_RTC_DS3231) += ds3231.cc SRC_CC-$(CONFIG_RTC_DS3231) += ds3231.cc
SRC_CC-$(CONFIG_RTC_PCF85063A) += pcf85063a.cc SRC_CC-$(CONFIG_RTC_PCF85063A) += pcf85063a.cc
REQUIRES_LIBS = libio cxx_libc_io cxx_io libstdc++ REQUIRES_LIBS = libio cxx_libc_io cxx_io libc_be_mem libstdc++
include $(L4DIR)/mk/prog.mk include $(L4DIR)/mk/prog.mk

View File

@@ -5,6 +5,7 @@ TARGET = l4vio_switch
REQUIRES_LIBS = libstdc++ l4virtio REQUIRES_LIBS = libstdc++ l4virtio
REQUIRES_LIBS-$(CONFIG_VNS_IXL) += ixl REQUIRES_LIBS-$(CONFIG_VNS_IXL) += ixl
LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc
SRC_CC-$(CONFIG_VNS_PORT_FILTER) += filter.cc SRC_CC-$(CONFIG_VNS_PORT_FILTER) += filter.cc