From 20d8c2c149ee2e1b48a861b2120f18af23967e37 Mon Sep 17 00:00:00 2001
From: Martin Kuettler
Date: Mon, 23 Oct 2023 10:35:55 +0200
Subject: [PATCH] Added memory backend

Updated by Viktor Reusch for the new l4re-base-25.08.0.

Co-authored-by: vreusch
---
 src/l4/mk/tmpl/Control | 2 +-
 src/l4/mk/tmpl/server/src/Makefile | 2 +-
 src/l4/pkg/cons/server/src/Makefile | 2 +
 src/l4/pkg/examples/clntsrv/Control | 2 +-
 src/l4/pkg/examples/clntsrv/src/Makefile | 1 +
 src/l4/pkg/examples/fb/Control | 2 +
 src/l4/pkg/examples/fb/spectrum/Makefile | 2 +-
 src/l4/pkg/examples/gpio/Control | 2 +-
 src/l4/pkg/examples/gpio/input/Makefile | 2 +-
 src/l4/pkg/examples/gpio/led/Makefile | 2 +-
 src/l4/pkg/examples/gpio/uart/Makefile | 2 +-
 src/l4/pkg/examples/libs/Control | 2 +-
 src/l4/pkg/examples/libs/inputtst/Makefile | 2 +-
 .../examples/libs/l4re/c++/mem_alloc/Makefile | 1 +
 .../libs/l4re/c++/periodic_task/Makefile | 1 +
 .../examples/libs/l4re/c++/physmem/Makefile | 2 +
 .../examples/libs/l4re/c++/shared_ds/Makefile | 1 +
 src/l4/pkg/examples/libs/l4re/c/Makefile | 2 +-
 .../pkg/examples/libs/l4re/streammap/Makefile | 1 +
 .../examples/libs/libc_thread_safe/Makefile | 2 +-
 src/l4/pkg/examples/libs/libio/Makefile | 2 +-
 src/l4/pkg/examples/libs/libirq/Makefile | 2 +-
 src/l4/pkg/examples/libs/rtc/Makefile | 4 +-
 src/l4/pkg/examples/libs/shmc/Makefile | 2 +-
 src/l4/pkg/examples/misc/Control | 2 +
 src/l4/pkg/examples/misc/cat/Makefile | 1 +
 src/l4/pkg/examples/misc/eb_leds/Makefile | 2 +-
 src/l4/pkg/examples/misc/reboot/Makefile | 1 +
 .../pkg/examples/misc/shared-hello/Makefile | 1 +
 src/l4/pkg/examples/sys/Control | 2 +-
 src/l4/pkg/examples/sys/aliens/Makefile | 2 +-
 src/l4/pkg/examples/sys/debug/Makefile | 1 +
 src/l4/pkg/examples/sys/ipc/Makefile | 2 +-
 src/l4/pkg/examples/sys/isr/Makefile | 2 +-
 src/l4/pkg/examples/sys/map_irq/Makefile | 1 +
 src/l4/pkg/examples/sys/migrate/Makefile | 2 +-
 src/l4/pkg/examples/sys/singlestep/Makefile | 2 +-
 .../pkg/examples/sys/start-with-exc/Makefile | 2 +-
 src/l4/pkg/examples/sys/timeout/Makefile | 3 +
 src/l4/pkg/examples/sys/uirq/Makefile | 2 +-
 src/l4/pkg/examples/sys/utcb-ipc/Makefile | 2 +-
 src/l4/pkg/examples/sys/vcpu/Makefile | 2 +-
 src/l4/pkg/examples/sys/vmtest/Makefile | 2 +-
 src/l4/pkg/fb-drv/Control | 2 +-
 src/l4/pkg/fb-drv/server/src/Makefile | 2 +-
 src/l4/pkg/hello/Control | 2 +-
 src/l4/pkg/hello/server/src/Makefile | 2 +
 src/l4/pkg/io/io/server/src/Make.rules | 2 +
 src/l4/pkg/ipcbench/Control | 2 +-
 src/l4/pkg/ipcbench/src/Makefile | 2 +-
 .../l4re-core/l4re_itas/server/src/Makefile | 2 +-
 .../l4re_vfs/include/impl/default_ops_impl.h | 9 +-
 .../l4re_vfs/include/impl/vfs_impl.h | 33 +-
 .../pkg/l4re-core/l4re_vfs/lib/src/Makefile | 2 +
 .../uclibc/libc/stdlib/malloc-simple/alloc.c | 232 ++--
 .../uclibc/libc/stdlib/malloc-standard/free.c | 316 ++---
 .../libc/stdlib/malloc-standard/malloc.c | 1021 +++++++++--------
 .../libc/stdlib/malloc-standard/realloc.c | 280 ++---
 .../contrib/uclibc/libc/stdlib/malloc/free.c | 234 ++--
 .../uclibc/libc/stdlib/malloc/malloc.c | 114 +-
 .../uclibc/libc/stdlib/malloc/realloc.c | 32 +-
 src/l4/pkg/l4re-core/libc_backends/Control | 2 +-
 .../libc_backends/lib/l4re_mem/Makefile | 12 +
 .../libc_backends/lib/l4re_mem/mem.cc | 32 +
 .../contrib/gcc-13/libgcc/unwind-dw2-btree.h | 4 +-
 .../contrib/gcc-14/libgcc/unwind-dw2-btree.h | 4 +-
 .../contrib/gcc-15/libgcc/unwind-dw2-btree.h | 7 +-
 .../libstdc++-v3-10/libsupc++/eh_alloc.cc | 251 ++--
 .../libstdc++-v3-11/libsupc++/eh_alloc.cc | 224 ++--
 .../libstdc++-v3-12/libsupc++/eh_alloc.cc | 224 ++--
.../libstdc++-v3-13/libsupc++/eh_alloc.cc | 262 ++--- .../libstdc++-v3-14/libsupc++/eh_alloc.cc | 262 ++--- .../libstdc++-v3-15/libsupc++/eh_alloc.cc | 264 ++--- .../libstdc++-v3-9/libsupc++/eh_alloc.cc | 224 ++-- .../l4re-core/libumalloc/lib/src/malloc.cc | 14 +- .../l4re-core/lua/examples/interpr/Makefile | 2 +- src/l4/pkg/l4re-core/lua/lib/build++/Makefile | 2 + src/l4/pkg/l4re-core/moe/server/src/Makefile | 1 + .../pkg/l4re-core/ned/ned-prompt/src/Makefile | 2 + src/l4/pkg/l4re-core/ned/server/src/Makefile | 1 + src/l4/pkg/libunwind/server/src/Makefile | 1 + src/l4/pkg/loader/server/src/Makefile | 1 + src/l4/pkg/mag/Control | 2 +- src/l4/pkg/mag/server/src/Makefile | 2 +- src/l4/pkg/rtc/Control | 2 +- src/l4/pkg/rtc/server/src/Makefile | 2 +- .../virtio-net-switch/server/switch/Makefile | 1 + 87 files changed, 2161 insertions(+), 2012 deletions(-) create mode 100644 src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/Makefile create mode 100644 src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/mem.cc diff --git a/src/l4/mk/tmpl/Control b/src/l4/mk/tmpl/Control index db063eea..5d436bbd 100644 --- a/src/l4/mk/tmpl/Control +++ b/src/l4/mk/tmpl/Control @@ -1,3 +1,3 @@ -requires: xyz +requires: libstdc++ libc_be_mem xyz provides: abc maintainer: your@email.example.com diff --git a/src/l4/mk/tmpl/server/src/Makefile b/src/l4/mk/tmpl/server/src/Makefile index e61e0ff3..77c43f89 100644 --- a/src/l4/mk/tmpl/server/src/Makefile +++ b/src/l4/mk/tmpl/server/src/Makefile @@ -8,6 +8,6 @@ SRC_C = main.c SRC_CC = # list requirements of your program here -REQUIRES_LIBS = +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/cons/server/src/Makefile b/src/l4/pkg/cons/server/src/Makefile index 20f045fd..ba548a2a 100644 --- a/src/l4/pkg/cons/server/src/Makefile +++ b/src/l4/pkg/cons/server/src/Makefile @@ -10,4 +10,6 @@ SRC_CC-$(CONFIG_CONS_USE_ASYNC_FE) += async_vcon_fe.cc REQUIRES_LIBS = libstdc++ cxx_libc_io cxx_io REQUIRES_LIBS-$(CONFIG_CONS_USE_ASYNC_FE) = libpthread +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc + include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/clntsrv/Control b/src/l4/pkg/examples/clntsrv/Control index 632074ff..f3a22701 100644 --- a/src/l4/pkg/examples/clntsrv/Control +++ b/src/l4/pkg/examples/clntsrv/Control @@ -1,2 +1,2 @@ -requires: stdlibs +requires: stdlibs libstdc++ libc_be_mem Maintainer: adam@os.inf.tu-dresden.de diff --git a/src/l4/pkg/examples/clntsrv/src/Makefile b/src/l4/pkg/examples/clntsrv/src/Makefile index be5423ff..3f2348f5 100644 --- a/src/l4/pkg/examples/clntsrv/src/Makefile +++ b/src/l4/pkg/examples/clntsrv/src/Makefile @@ -4,6 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_clntsrv-server ex_clntsrv-client SRC_CC_ex_clntsrv-server = server.cc SRC_CC_ex_clntsrv-client = client.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/fb/Control b/src/l4/pkg/examples/fb/Control index 2ba496b3..384b6b6f 100644 --- a/src/l4/pkg/examples/fb/Control +++ b/src/l4/pkg/examples/fb/Control @@ -1,3 +1,5 @@ +requires: libstdc++ libc_be_mem + # color, fractal, spectrum optional: l4re_c-util diff --git a/src/l4/pkg/examples/fb/spectrum/Makefile b/src/l4/pkg/examples/fb/spectrum/Makefile index 57ddb5c5..859ddd47 100644 --- a/src/l4/pkg/examples/fb/spectrum/Makefile +++ b/src/l4/pkg/examples/fb/spectrum/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. 
TARGET = ex_fb_spectrum_c ex_fb_spectrum_cc SRC_CC_ex_fb_spectrum_cc = spectrum.cc SRC_C_ex_fb_spectrum_c = spectrum_c.c -REQUIRES_LIBS = libevent l4re_c-util +REQUIRES_LIBS = libevent l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/gpio/Control b/src/l4/pkg/examples/gpio/Control index 03ce8f4a..818dabd7 100644 --- a/src/l4/pkg/examples/gpio/Control +++ b/src/l4/pkg/examples/gpio/Control @@ -1,4 +1,4 @@ -requires: stdlibs +requires: stdlibs libstdc++ libc_be_mem # input, led, uart optional: libstdc++ libio-vbus diff --git a/src/l4/pkg/examples/gpio/input/Makefile b/src/l4/pkg/examples/gpio/input/Makefile index 2d84675f..b22186e0 100644 --- a/src/l4/pkg/examples/gpio/input/Makefile +++ b/src/l4/pkg/examples/gpio/input/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_gpio_input SRC_CC = gpio_input.cc -REQUIRES_LIBS = libstdc++ libio-vbus +REQUIRES_LIBS = libio-vbus libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/gpio/led/Makefile b/src/l4/pkg/examples/gpio/led/Makefile index 6b8ccb58..50549f6f 100644 --- a/src/l4/pkg/examples/gpio/led/Makefile +++ b/src/l4/pkg/examples/gpio/led/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_gpio_led SRC_CC = gpio_led.cc -REQUIRES_LIBS = libstdc++ libio-vbus +REQUIRES_LIBS = libio-vbus libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/gpio/uart/Makefile b/src/l4/pkg/examples/gpio/uart/Makefile index b07bfd8e..2450ad2e 100644 --- a/src/l4/pkg/examples/gpio/uart/Makefile +++ b/src/l4/pkg/examples/gpio/uart/Makefile @@ -1,7 +1,7 @@ PKGDIR ?= .. L4DIR ?= $(PKGDIR)/../../.. -REQUIRES_LIBS = libstdc++ libio-vbus drivers_uart libio +REQUIRES_LIBS = libio-vbus drivers_uart libio libc_be_mem libstdc++ TARGET = rpi_uart SRC_CC = main.cc diff --git a/src/l4/pkg/examples/libs/Control b/src/l4/pkg/examples/libs/Control index 64de6734..be86733d 100644 --- a/src/l4/pkg/examples/libs/Control +++ b/src/l4/pkg/examples/libs/Control @@ -1,4 +1,4 @@ -requires: stdlibs +requires: stdlibs libstdc++ libc_be_mem # boost, cppunit, stdthread optional: libstdc++ diff --git a/src/l4/pkg/examples/libs/inputtst/Makefile b/src/l4/pkg/examples/libs/inputtst/Makefile index 947f218c..48c835fd 100644 --- a/src/l4/pkg/examples/libs/inputtst/Makefile +++ b/src/l4/pkg/examples/libs/inputtst/Makefile @@ -4,6 +4,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = inputtst SRC_C = main.c DEPENDS_PKGS = input -REQUIRES_LIBS = input +REQUIRES_LIBS = input libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/l4re/c++/mem_alloc/Makefile b/src/l4/pkg/examples/libs/l4re/c++/mem_alloc/Makefile index a23e0162..36006987 100644 --- a/src/l4/pkg/examples/libs/l4re/c++/mem_alloc/Makefile +++ b/src/l4/pkg/examples/libs/l4re/c++/mem_alloc/Makefile @@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_l4re_ma+rm_cc SRC_CC = ma+rm.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/l4re/c++/periodic_task/Makefile b/src/l4/pkg/examples/libs/l4re/c++/periodic_task/Makefile index 916af629..f569e1a5 100644 --- a/src/l4/pkg/examples/libs/l4re/c++/periodic_task/Makefile +++ b/src/l4/pkg/examples/libs/l4re/c++/periodic_task/Makefile @@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../.. 
TARGET = ex_periodic_task SRC_CC = main.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/l4re/c++/physmem/Makefile b/src/l4/pkg/examples/libs/l4re/c++/physmem/Makefile index b5e22a06..0d412c62 100644 --- a/src/l4/pkg/examples/libs/l4re/c++/physmem/Makefile +++ b/src/l4/pkg/examples/libs/l4re/c++/physmem/Makefile @@ -3,5 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_l4re_physmem_cc SRC_CC = physmem.cc +REQUIRES_LIBS = libc_be_mem libstdc++ + include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/l4re/c++/shared_ds/Makefile b/src/l4/pkg/examples/libs/l4re/c++/shared_ds/Makefile index 2145f556..9ada6b3f 100644 --- a/src/l4/pkg/examples/libs/l4re/c++/shared_ds/Makefile +++ b/src/l4/pkg/examples/libs/l4re/c++/shared_ds/Makefile @@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_l4re_ds_clnt ex_l4re_ds_srv SRC_CC_ex_l4re_ds_clnt = ds_clnt.cc SRC_CC_ex_l4re_ds_srv = ds_srv.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/l4re/c/Makefile b/src/l4/pkg/examples/libs/l4re/c/Makefile index efccf426..1053675c 100644 --- a/src/l4/pkg/examples/libs/l4re/c/Makefile +++ b/src/l4/pkg/examples/libs/l4re/c/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_l4re_ma+rm_c SRC_C = ma+rm.c -REQUIRES_LIBS = l4re_c-util +REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/l4re/streammap/Makefile b/src/l4/pkg/examples/libs/l4re/streammap/Makefile index 2741ecad..8f3ea7ff 100644 --- a/src/l4/pkg/examples/libs/l4re/streammap/Makefile +++ b/src/l4/pkg/examples/libs/l4re/streammap/Makefile @@ -4,6 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_smap-server ex_smap-client SRC_CC_ex_smap-server = server.cc SRC_CC_ex_smap-client = client.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/libc_thread_safe/Makefile b/src/l4/pkg/examples/libs/libc_thread_safe/Makefile index 5f7c159a..ff70a6ba 100644 --- a/src/l4/pkg/examples/libs/libc_thread_safe/Makefile +++ b/src/l4/pkg/examples/libs/libc_thread_safe/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = uclibc_thread_safe SRC_CC = main.cc -REQUIRES_LIBS = libpthread +REQUIRES_LIBS = libpthread libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/libio/Makefile b/src/l4/pkg/examples/libs/libio/Makefile index cf000316..6f821503 100644 --- a/src/l4/pkg/examples/libs/libio/Makefile +++ b/src/l4/pkg/examples/libs/libio/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_libio SRC_C = main.c -REQUIRES_LIBS = libio libirq +REQUIRES_LIBS = libio libirq libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/libirq/Makefile b/src/l4/pkg/examples/libs/libirq/Makefile index 6cc8fc3f..5c5a75f9 100644 --- a/src/l4/pkg/examples/libs/libirq/Makefile +++ b/src/l4/pkg/examples/libs/libirq/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. 
SRC_C_ex_libirq_async = async_isr.c SRC_C_ex_libirq_loop = loop.c TARGET = ex_libirq_async ex_libirq_loop -REQUIRES_LIBS = libirq libio +REQUIRES_LIBS = libirq libio libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/rtc/Makefile b/src/l4/pkg/examples/libs/rtc/Makefile index c6702e67..a9f66d9b 100644 --- a/src/l4/pkg/examples/libs/rtc/Makefile +++ b/src/l4/pkg/examples/libs/rtc/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. SRC_C = main.c TARGET = rtc_test -DEPENDS_PKGS = rtc -REQUIRES_LIBS = rtc +DEPENDS_PKGS = rtc libc_be_mem libstdc++ +REQUIRES_LIBS = rtc libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/libs/shmc/Makefile b/src/l4/pkg/examples/libs/shmc/Makefile index f3e04930..c9586358 100644 --- a/src/l4/pkg/examples/libs/shmc/Makefile +++ b/src/l4/pkg/examples/libs/shmc/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_shmc SRC_C = prodcons.c -REQUIRES_LIBS = shmc libpthread +REQUIRES_LIBS = shmc libpthread libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/misc/Control b/src/l4/pkg/examples/misc/Control index be12f274..be214bb8 100644 --- a/src/l4/pkg/examples/misc/Control +++ b/src/l4/pkg/examples/misc/Control @@ -1,3 +1,5 @@ +requires: libc_be_mem + # cyclichpet, eb_leds, eb_leds_gfx, hpet optional: libio diff --git a/src/l4/pkg/examples/misc/cat/Makefile b/src/l4/pkg/examples/misc/cat/Makefile index 609348b5..45be1112 100644 --- a/src/l4/pkg/examples/misc/cat/Makefile +++ b/src/l4/pkg/examples/misc/cat/Makefile @@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = cat SRC_C = cat.c +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/misc/eb_leds/Makefile b/src/l4/pkg/examples/misc/eb_leds/Makefile index 392d6afa..6cf5dc0d 100644 --- a/src/l4/pkg/examples/misc/eb_leds/Makefile +++ b/src/l4/pkg/examples/misc/eb_leds/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_eb_leds SRC_C = eb_leds.c -REQUIRES_LIBS = libio +REQUIRES_LIBS = libio libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/misc/reboot/Makefile b/src/l4/pkg/examples/misc/reboot/Makefile index 6cc066ea..88c77db3 100644 --- a/src/l4/pkg/examples/misc/reboot/Makefile +++ b/src/l4/pkg/examples/misc/reboot/Makefile @@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = reboot SRC_C = main.c +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/misc/shared-hello/Makefile b/src/l4/pkg/examples/misc/shared-hello/Makefile index c6fabf52..4fbec6d2 100644 --- a/src/l4/pkg/examples/misc/shared-hello/Makefile +++ b/src/l4/pkg/examples/misc/shared-hello/Makefile @@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../.. 
TARGET = ex_hello_shared MODE = shared SRC_C = main.c +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/Control b/src/l4/pkg/examples/sys/Control index 3bc33e9e..1778a6bc 100644 --- a/src/l4/pkg/examples/sys/Control +++ b/src/l4/pkg/examples/sys/Control @@ -1,4 +1,4 @@ -requires: stdlibs +requires: stdlibs libc_be_mem # aliens, isr, singlestep, start-with-exc, utcb-ipc, vm-tz optional: l4re_c-util diff --git a/src/l4/pkg/examples/sys/aliens/Makefile b/src/l4/pkg/examples/sys/aliens/Makefile index b313a6f6..bdbd9951 100644 --- a/src/l4/pkg/examples/sys/aliens/Makefile +++ b/src/l4/pkg/examples/sys/aliens/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_aliens SRC_C = main.c -REQUIRES_LIBS = l4re_c-util +REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/debug/Makefile b/src/l4/pkg/examples/sys/debug/Makefile index 65eef6b2..887a637a 100644 --- a/src/l4/pkg/examples/sys/debug/Makefile +++ b/src/l4/pkg/examples/sys/debug/Makefile @@ -3,5 +3,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = dump_obj SRC_CC = dump_obj.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/ipc/Makefile b/src/l4/pkg/examples/sys/ipc/Makefile index c4f99634..71fbfaec 100644 --- a/src/l4/pkg/examples/sys/ipc/Makefile +++ b/src/l4/pkg/examples/sys/ipc/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_ipc1 SRC_C = ipc_example.c -REQUIRES_LIBS = libpthread +REQUIRES_LIBS = libpthread libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/isr/Makefile b/src/l4/pkg/examples/sys/isr/Makefile index 76897323..db8fe6d0 100644 --- a/src/l4/pkg/examples/sys/isr/Makefile +++ b/src/l4/pkg/examples/sys/isr/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_isr SRC_C = main.c -REQUIRES_LIBS = l4re_c-util +REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/map_irq/Makefile b/src/l4/pkg/examples/sys/map_irq/Makefile index 6d9be3cd..c6f02c2a 100644 --- a/src/l4/pkg/examples/sys/map_irq/Makefile +++ b/src/l4/pkg/examples/sys/map_irq/Makefile @@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_map_irq_client ex_map_irq_server SRC_CC_ex_map_irq_client = client.cc SRC_CC_ex_map_irq_server = server.cc +REQUIRES_LIBS = libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/migrate/Makefile b/src/l4/pkg/examples/sys/migrate/Makefile index c395a698..4d8f3acb 100644 --- a/src/l4/pkg/examples/sys/migrate/Makefile +++ b/src/l4/pkg/examples/sys/migrate/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_thread_migrate ex_thread_migrate_irq SRC_CC_ex_thread_migrate = thread_migrate.cc SRC_CC_ex_thread_migrate_irq = thread_migrate_irq.cc -REQUIRES_LIBS = libpthread +REQUIRES_LIBS = libpthread libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/singlestep/Makefile b/src/l4/pkg/examples/sys/singlestep/Makefile index f5e01b73..f6fbdac2 100644 --- a/src/l4/pkg/examples/sys/singlestep/Makefile +++ b/src/l4/pkg/examples/sys/singlestep/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. 
TARGET = ex_singlestep SYSTEMS = x86-l4f amd64-l4f SRC_C = main.c -REQUIRES_LIBS = l4re_c-util +REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/start-with-exc/Makefile b/src/l4/pkg/examples/sys/start-with-exc/Makefile index e02f90f4..29046728 100644 --- a/src/l4/pkg/examples/sys/start-with-exc/Makefile +++ b/src/l4/pkg/examples/sys/start-with-exc/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_start-with-exc SYSTEMS = x86-l4f arm-l4f arm64-l4f SRC_C = main.c -REQUIRES_LIBS = l4re_c-util +REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/timeout/Makefile b/src/l4/pkg/examples/sys/timeout/Makefile index bf98ef71..0cc4c302 100644 --- a/src/l4/pkg/examples/sys/timeout/Makefile +++ b/src/l4/pkg/examples/sys/timeout/Makefile @@ -1,6 +1,9 @@ PKGDIR ?= .. L4DIR ?= $(PKGDIR)/../../.. +REQUIRES_LIBS = libc_be_mem libstdc++ +DEPENDS_PKGS = $(REQUIRES_LIBS) + TARGET = ex_timeouts SRC_C = main.c diff --git a/src/l4/pkg/examples/sys/uirq/Makefile b/src/l4/pkg/examples/sys/uirq/Makefile index 84c13030..cd153e38 100644 --- a/src/l4/pkg/examples/sys/uirq/Makefile +++ b/src/l4/pkg/examples/sys/uirq/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_uirq SRC_CC = ex_uirq.cc -REQUIRES_LIBS = libstdc++ libpthread +REQUIRES_LIBS = libc_be_mem libstdc++ libpthread DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/utcb-ipc/Makefile b/src/l4/pkg/examples/sys/utcb-ipc/Makefile index 4f0599fb..0a15314d 100644 --- a/src/l4/pkg/examples/sys/utcb-ipc/Makefile +++ b/src/l4/pkg/examples/sys/utcb-ipc/Makefile @@ -2,7 +2,7 @@ PKGDIR ?= .. L4DIR ?= $(PKGDIR)/../../.. TARGET = ex_utcb_ipc -REQUIRES_LIBS = l4re_c-util +REQUIRES_LIBS = l4re_c-util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) SRC_C = main.c diff --git a/src/l4/pkg/examples/sys/vcpu/Makefile b/src/l4/pkg/examples/sys/vcpu/Makefile index 562a5778..784a3d09 100644 --- a/src/l4/pkg/examples/sys/vcpu/Makefile +++ b/src/l4/pkg/examples/sys/vcpu/Makefile @@ -3,7 +3,7 @@ L4DIR ?= $(PKGDIR)/../../.. 
TARGET = ex_vcpu SRC_CC = vcpu.cc -REQUIRES_LIBS = libvcpu cxx_io cxx_libc_io +REQUIRES_LIBS = libvcpu cxx_io cxx_libc_io libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/examples/sys/vmtest/Makefile b/src/l4/pkg/examples/sys/vmtest/Makefile index 08eef079..80e3ef5a 100644 --- a/src/l4/pkg/examples/sys/vmtest/Makefile +++ b/src/l4/pkg/examples/sys/vmtest/Makefile @@ -5,7 +5,7 @@ TARGET = ex_vmtest SYSTEMS = x86-l4f amd64-l4f SRC_S = guest.S SRC_CC = vm.cc vmx.cc svm.cc main.cc -REQUIRES_LIBS = libvcpu l4util +REQUIRES_LIBS = libvcpu l4util libc_be_mem libstdc++ DEPENDS_PKGS = $(REQUIRES_LIBS) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/fb-drv/Control b/src/l4/pkg/fb-drv/Control index b9a23a75..3ba44bcc 100644 --- a/src/l4/pkg/fb-drv/Control +++ b/src/l4/pkg/fb-drv/Control @@ -1,2 +1,2 @@ -requires: libdrivers-lcd x86emu_int10 stdlibs libio-vbus +requires: libdrivers-lcd x86emu_int10 stdlibs libio-vbus libstdc++ libc_be_mem maintainer: adam@os.inf.tu-dresden.de diff --git a/src/l4/pkg/fb-drv/server/src/Makefile b/src/l4/pkg/fb-drv/server/src/Makefile index a8ec0fbf..2b6f0028 100644 --- a/src/l4/pkg/fb-drv/server/src/Makefile +++ b/src/l4/pkg/fb-drv/server/src/Makefile @@ -11,7 +11,7 @@ REQUIRES_LIBS_x86-l4f = x86emu_int10 REQUIRES_LIBS_amd64-l4f = x86emu_int10 REQUIRES_LIBS_arm-l4f = libdrivers-lcd REQUIRES_LIBS_arm64-l4f = libdrivers-lcd -REQUIRES_LIBS = libc_support_misc libio-vbus +REQUIRES_LIBS = libc_support_misc libio-vbus libc_be_mem libstdc++ DEFINES = -DSPLASHNAME=gimp_image \ -DSPLASHNAME_RUN_LENGTH_DECODE=GIMP_IMAGE_RUN_LENGTH_DECODE diff --git a/src/l4/pkg/hello/Control b/src/l4/pkg/hello/Control index 632074ff..f3a22701 100644 --- a/src/l4/pkg/hello/Control +++ b/src/l4/pkg/hello/Control @@ -1,2 +1,2 @@ -requires: stdlibs +requires: stdlibs libstdc++ libc_be_mem Maintainer: adam@os.inf.tu-dresden.de diff --git a/src/l4/pkg/hello/server/src/Makefile b/src/l4/pkg/hello/server/src/Makefile index f1f999ce..91cdbd08 100644 --- a/src/l4/pkg/hello/server/src/Makefile +++ b/src/l4/pkg/hello/server/src/Makefile @@ -4,4 +4,6 @@ L4DIR ?= $(PKGDIR)/../.. TARGET = hello SRC_C = main.c +REQUIRES_LIBS = libc_be_mem libstdc++ + include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/io/io/server/src/Make.rules b/src/l4/pkg/io/io/server/src/Make.rules index 3cd10ab2..d2ae3ead 100644 --- a/src/l4/pkg/io/io/server/src/Make.rules +++ b/src/l4/pkg/io/io/server/src/Make.rules @@ -2,6 +2,8 @@ #MODE := shared TARGET = io +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc + DEFINES-$(CONFIG_L4IO_PCIID_DB) += -DCONFIG_L4IO_PCIID_DB SUBDIRS = drivers diff --git a/src/l4/pkg/ipcbench/Control b/src/l4/pkg/ipcbench/Control index 36edf0b7..6656bde9 100644 --- a/src/l4/pkg/ipcbench/Control +++ b/src/l4/pkg/ipcbench/Control @@ -1,2 +1,2 @@ -requires: stdlibs +requires: stdlibs libstdc++ libc_be_mem Maintainer: adam@l4re.org diff --git a/src/l4/pkg/ipcbench/src/Makefile b/src/l4/pkg/ipcbench/src/Makefile index 696dcc5e..13007b12 100644 --- a/src/l4/pkg/ipcbench/src/Makefile +++ b/src/l4/pkg/ipcbench/src/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../.. 
TARGET = ipcbench ipcbench_parallel \ ipcbench_client ipcbench_server \ syscallbench syscallbench_parallel -REQUIRES_LIBS = libpthread +REQUIRES_LIBS = libc_be_mem libstdc++ libpthread SRC_C_ipcbench = ipcbench.c ipc_common.c SRC_C_ipcbench_parallel = ipcbench_parallel.c ipc_common.c SRC_C_ipcbench_client = ipcclient.c diff --git a/src/l4/pkg/l4re-core/l4re_itas/server/src/Makefile b/src/l4/pkg/l4re-core/l4re_itas/server/src/Makefile index c9c9c535..444308e6 100644 --- a/src/l4/pkg/l4re-core/l4re_itas/server/src/Makefile +++ b/src/l4/pkg/l4re-core/l4re_itas/server/src/Makefile @@ -29,7 +29,7 @@ DEFINES += -DL4_CXX_NO_EXCEPTION_BACKTRACE -DL4_LOADER_RELOC_BASE=$(DEFAULT_RELO REQUIRES_LIBS := cxx_io cxx_libc_io libc_minimal libsupc++_minimal libloader \ libc_minimal_l4re libumalloc - +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc --wrap=aligned_alloc --wrap=calloc CXXFLAGS += $(CXXFLAGS_LOW_LEVEL) include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/l4re-core/l4re_vfs/include/impl/default_ops_impl.h b/src/l4/pkg/l4re-core/l4re_vfs/include/impl/default_ops_impl.h index 6b805549..338b70c9 100644 --- a/src/l4/pkg/l4re-core/l4re_vfs/include/impl/default_ops_impl.h +++ b/src/l4/pkg/l4re-core/l4re_vfs/include/impl/default_ops_impl.h @@ -24,23 +24,26 @@ struct Vfs_init cxx::Static_container > ns_dir; cxx::Static_container > vcon_stream; + // This is part of an ugly hack to avoid calling malloc here. + char fac_items[3*sizeof(Vfs::File_factory_item)]; + Vfs_init() { vfs.construct(); __rtld_l4re_env_posix_vfs_ops = vfs; ns_dir.construct(); auto ns_ptr = cxx::ref_ptr(ns_dir.get()); - vfs->register_file_factory(ns_ptr); + vfs->register_file_factory(ns_ptr, &fac_items[0]); ns_ptr.release(); // prevent deletion of static object ro_file.construct(); auto ro_ptr = cxx::ref_ptr(ro_file.get()); - vfs->register_file_factory(ro_ptr); + vfs->register_file_factory(ro_ptr, &fac_items[sizeof(Vfs::File_factory_item)]); ro_ptr.release(); // prevent deletion of static object vcon_stream.construct(); auto vcon_ptr = cxx::ref_ptr(vcon_stream.get()); - vfs->register_file_factory(vcon_ptr); + vfs->register_file_factory(vcon_ptr, &fac_items[2*sizeof(Vfs::File_factory_item)]); vcon_ptr.release(); // prevent deletion of static object } }; diff --git a/src/l4/pkg/l4re-core/l4re_vfs/include/impl/vfs_impl.h b/src/l4/pkg/l4re-core/l4re_vfs/include/impl/vfs_impl.h index 62e243a8..ccb1d6e9 100644 --- a/src/l4/pkg/l4re-core/l4re_vfs/include/impl/vfs_impl.h +++ b/src/l4/pkg/l4re-core/l4re_vfs/include/impl/vfs_impl.h @@ -134,6 +134,7 @@ public: L4Re::Vfs::File_system_list file_system_list() noexcept override; int register_file_factory(cxx::Ref_ptr f) noexcept override; + int register_file_factory(cxx::Ref_ptr f, void *x) noexcept; int unregister_file_factory(cxx::Ref_ptr f) noexcept override; Ref_ptr get_file_factory(int proto) noexcept override; Ref_ptr get_file_factory(char const *proto_name) noexcept override; @@ -144,14 +145,6 @@ public: void *malloc(size_t size) noexcept override { return Vfs_config::malloc(size); } void free(void *m) noexcept override { Vfs_config::free(m); } -private: - Root_mount_tree _root_mount; - L4Re::Core::Env_dir _root; - Ref_ptr _cwd; - Fd_store fds; - - L4Re::Vfs::File_system *_fs_registry; - struct File_factory_item : cxx::H_list_item_t { cxx::Ref_ptr f; @@ -163,6 +156,14 @@ private: File_factory_item &operator = (File_factory_item const &) = delete; }; +private: + Root_mount_tree _root_mount; + L4Re::Core::Env_dir _root; + Ref_ptr _cwd; + Fd_store fds; + + L4Re::Vfs::File_system *_fs_registry; 
+ cxx::H_list_t _file_factories; l4_addr_t _anon_offset; @@ -272,6 +273,20 @@ Vfs::register_file_factory(cxx::Ref_ptr f) noexcept return 0; } +int +Vfs::register_file_factory(cxx::Ref_ptr f, void *x) noexcept +{ + if (!f) + return -EINVAL; + + if (!x) + return -ENOMEM; + + auto ff = new (x, cxx::Nothrow()) File_factory_item(f); + _file_factories.push_front(ff); + return 0; +} + int Vfs::unregister_file_factory(cxx::Ref_ptr f) noexcept { @@ -740,7 +755,7 @@ Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t page4k_of rm_flags |= Rm::F::In_area; - // Make sure to remove old mappings residing at the respective address + // Make sure to remove old mappings residing at the respective address // range. If none exists, we are fine as well, allowing us to ignore // ENOENT here. err = munmap_regions(start, len); diff --git a/src/l4/pkg/l4re-core/l4re_vfs/lib/src/Makefile b/src/l4/pkg/l4re-core/l4re_vfs/lib/src/Makefile index eda24aca..8f42bbfe 100644 --- a/src/l4/pkg/l4re-core/l4re_vfs/lib/src/Makefile +++ b/src/l4/pkg/l4re-core/l4re_vfs/lib/src/Makefile @@ -14,3 +14,5 @@ CXXFLAGS += -fvisibility=hidden # No exception information as unwinder code might use malloc and friends DEFINES += -DNOT_IN_libc -DL4_NO_RTTI CXXFLAGS += -include libc-symbols.h -fno-exceptions -fno-rtti +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc + diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-simple/alloc.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-simple/alloc.c index 757a05ec..6a2e0cc0 100644 --- a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-simple/alloc.c +++ b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-simple/alloc.c @@ -21,22 +21,22 @@ extern int weak_function __libc_free_aligned(void *ptr) attribute_hidden; #ifdef L_malloc -void *malloc(size_t size) +void *__wrap_malloc(size_t size) { - void *result; + void *result; - if (unlikely(size == 0)) { - size++; - } + if (unlikely(size == 0)) { + size++; + } - /* prevent Undefined Behaviour for pointer arithmetic (substract) of too big pointers - * see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303 - * No need to check for size + sizeof(size_t) integer overflow since we already check for PTRDIFF_MAX - */ - if (unlikely(size > PTRDIFF_MAX)) { - __set_errno(ENOMEM); - return 0; - } + /* prevent Undefined Behaviour for pointer arithmetic (substract) of too big pointers + * see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303 + * No need to check for size + sizeof(size_t) integer overflow since we already check for PTRDIFF_MAX + */ + if (unlikely(size > PTRDIFF_MAX)) { + __set_errno(ENOMEM); + return 0; + } #ifdef __ARCH_USE_MMU__ # define MMAP_FLAGS MAP_PRIVATE | MAP_ANONYMOUS @@ -44,74 +44,74 @@ void *malloc(size_t size) # define MMAP_FLAGS MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED #endif - result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE, - MMAP_FLAGS, 0, 0); - if (result == MAP_FAILED) { - __set_errno(ENOMEM); - return 0; - } - * (size_t *) result = size; - return(result + sizeof(size_t)); + result = mmap((void *) 0, size + sizeof(size_t), PROT_READ | PROT_WRITE, + MMAP_FLAGS, 0, 0); + if (result == MAP_FAILED) { + __set_errno(ENOMEM); + return 0; + } + * (size_t *) result = size; + return(result + sizeof(size_t)); } #endif #ifdef L_calloc void * calloc(size_t nmemb, size_t lsize) { - void *result; - size_t size=lsize * nmemb; + void *result; + size_t size=lsize * nmemb; - /* guard vs integer 
overflow, but allow nmemb - * to fall through and call malloc(0) */ - if (nmemb && lsize != (size / nmemb)) { - __set_errno(ENOMEM); - return NULL; - } - result = malloc(size); + /* guard vs integer overflow, but allow nmemb + * to fall through and call malloc(0) */ + if (nmemb && lsize != (size / nmemb)) { + __set_errno(ENOMEM); + return NULL; + } + result = malloc(size); #ifndef __ARCH_USE_MMU__ - /* mmap'd with MAP_UNINITIALIZED, we have to blank memory ourselves */ - if (result != NULL) { - memset(result, 0, size); - } + /* mmap'd with MAP_UNINITIALIZED, we have to blank memory ourselves */ + if (result != NULL) { + memset(result, 0, size); + } #endif - return result; + return result; } #endif #ifdef L_realloc -void *realloc(void *ptr, size_t size) +void *__wrap_realloc(void *ptr, size_t size) { - void *newptr = NULL; + void *newptr = NULL; - if (!ptr) - return malloc(size); - if (!size) { - free(ptr); - return malloc(0); - } + if (!ptr) + return malloc(size); + if (!size) { + free(ptr); + return malloc(0); + } - newptr = malloc(size); - if (newptr) { - size_t old_size = *((size_t *) (ptr - sizeof(size_t))); - memcpy(newptr, ptr, (old_size < size ? old_size : size)); - free(ptr); - } - return newptr; + newptr = malloc(size); + if (newptr) { + size_t old_size = *((size_t *) (ptr - sizeof(size_t))); + memcpy(newptr, ptr, (old_size < size ? old_size : size)); + free(ptr); + } + return newptr; } #endif #ifdef L_free -void free(void *ptr) +void __wrap_free(void *ptr) { - if (unlikely(ptr == NULL)) - return; - if (unlikely(__libc_free_aligned != NULL)) { - if (__libc_free_aligned(ptr)) - return; - } - ptr -= sizeof(size_t); - munmap(ptr, * (size_t *) ptr + sizeof(size_t)); + if (unlikely(ptr == NULL)) + return; + if (unlikely(__libc_free_aligned != NULL)) { + if (__libc_free_aligned(ptr)) + return; + } + ptr -= sizeof(size_t); + munmap(ptr, * (size_t *) ptr + sizeof(size_t)); } #endif @@ -119,84 +119,84 @@ void free(void *ptr) #include __UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); -#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock) -#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock) +#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock) +#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock) /* List of blocks allocated with memalign or valloc */ struct alignlist { - struct alignlist *next; - __ptr_t aligned; /* The address that memaligned returned. */ - __ptr_t exact; /* The address that malloc returned. */ + struct alignlist *next; + __ptr_t aligned; /* The address that memaligned returned. */ + __ptr_t exact; /* The address that malloc returned. */ }; static struct alignlist *_aligned_blocks; /* Return memory to the heap. 
*/ int __libc_free_aligned(void *ptr) { - struct alignlist *l; + struct alignlist *l; - if (ptr == NULL) - return 0; + if (ptr == NULL) + return 0; - __MALLOC_LOCK; - for (l = _aligned_blocks; l != NULL; l = l->next) { - if (l->aligned == ptr) { - /* Mark the block as free */ - l->aligned = NULL; - ptr = l->exact; - ptr -= sizeof(size_t); - munmap(ptr, * (size_t *) ptr + sizeof(size_t)); - return 1; - } - } - __MALLOC_UNLOCK; - return 0; + __MALLOC_LOCK; + for (l = _aligned_blocks; l != NULL; l = l->next) { + if (l->aligned == ptr) { + /* Mark the block as free */ + l->aligned = NULL; + ptr = l->exact; + ptr -= sizeof(size_t); + munmap(ptr, * (size_t *) ptr + sizeof(size_t)); + return 1; + } + } + __MALLOC_UNLOCK; + return 0; } void * memalign (size_t alignment, size_t size) { - void * result; - unsigned long int adj; + void * result; + unsigned long int adj; - if (unlikely(size > PTRDIFF_MAX)) { - __set_errno(ENOMEM); - return NULL; - } + if (unlikely(size > PTRDIFF_MAX)) { + __set_errno(ENOMEM); + return NULL; + } - if (unlikely((size + alignment - 1 < size) && (alignment != 0))) { - __set_errno(ENOMEM); - return NULL; - } + if (unlikely((size + alignment - 1 < size) && (alignment != 0))) { + __set_errno(ENOMEM); + return NULL; + } - result = malloc (size + alignment - 1); - if (result == NULL) - return NULL; + result = malloc (size + alignment - 1); + if (result == NULL) + return NULL; - adj = (unsigned long int) ((unsigned long int) ((char *) result - (char *) NULL)) % alignment; - if (adj != 0) { - struct alignlist *l; - __MALLOC_LOCK; - for (l = _aligned_blocks; l != NULL; l = l->next) - if (l->aligned == NULL) - /* This slot is free. Use it. */ - break; - if (l == NULL) { - l = (struct alignlist *) malloc (sizeof (struct alignlist)); - if (l == NULL) { - free(result); - result = NULL; - goto DONE; - } - l->next = _aligned_blocks; - _aligned_blocks = l; - } - l->exact = result; - result = l->aligned = (char *) result + alignment - adj; + adj = (unsigned long int) ((unsigned long int) ((char *) result - (char *) NULL)) % alignment; + if (adj != 0) { + struct alignlist *l; + __MALLOC_LOCK; + for (l = _aligned_blocks; l != NULL; l = l->next) + if (l->aligned == NULL) + /* This slot is free. Use it. */ + break; + if (l == NULL) { + l = (struct alignlist *) malloc (sizeof (struct alignlist)); + if (l == NULL) { + free(result); + result = NULL; + goto DONE; + } + l->next = _aligned_blocks; + _aligned_blocks = l; + } + l->exact = result; + result = l->aligned = (char *) result + alignment - adj; DONE: - __MALLOC_UNLOCK; - } + __MALLOC_UNLOCK; + } - return result; + return result; } libc_hidden_def(memalign) #endif diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/free.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/free.c index f3602cf4..d8904b55 100644 --- a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/free.c +++ b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/free.c @@ -42,38 +42,38 @@ static int __malloc_trim(size_t pad, mstate av) if (extra > 0) { - /* - Only proceed if end of memory is where we last set it. - This avoids problems if there were foreign sbrk calls. - */ - current_brk = (char*)(MORECORE(0)); - if (current_brk == (char*)(av->top) + top_size) { + /* + Only proceed if end of memory is where we last set it. + This avoids problems if there were foreign sbrk calls. 
+ */ + current_brk = (char*)(MORECORE(0)); + if (current_brk == (char*)(av->top) + top_size) { - /* - Attempt to release memory. We ignore MORECORE return value, - and instead call again to find out where new end of memory is. - This avoids problems if first call releases less than we asked, - of if failure somehow altered brk value. (We could still - encounter problems if it altered brk in some very bad way, - but the only thing we can do is adjust anyway, which will cause - some downstream failure.) - */ + /* + Attempt to release memory. We ignore MORECORE return value, + and instead call again to find out where new end of memory is. + This avoids problems if first call releases less than we asked, + of if failure somehow altered brk value. (We could still + encounter problems if it altered brk in some very bad way, + but the only thing we can do is adjust anyway, which will cause + some downstream failure.) + */ - MORECORE(-extra); - new_brk = (char*)(MORECORE(0)); + MORECORE(-extra); + new_brk = (char*)(MORECORE(0)); - if (new_brk != (char*)MORECORE_FAILURE) { - released = (long)(current_brk - new_brk); + if (new_brk != (char*)MORECORE_FAILURE) { + released = (long)(current_brk - new_brk); - if (released != 0) { - /* Success. Adjust top. */ - av->sbrked_mem -= released; - set_head(av->top, (top_size - released) | PREV_INUSE); - check_malloc_state(); - return 1; - } - } - } + if (released != 0) { + /* Success. Adjust top. */ + av->sbrked_mem -= released; + set_head(av->top, (top_size - released) | PREV_INUSE); + check_malloc_state(); + return 1; + } + } + } } return 0; } @@ -129,8 +129,8 @@ static void malloc_init_state(mstate av) /* Establish circular links for normal bins */ for (i = 1; i < NBINS; ++i) { - bin = bin_at(av,i); - bin->fd = bin->bk = bin; + bin = bin_at(av,i); + bin->fd = bin->bk = bin; } av->top_pad = DEFAULT_TOP_PAD; @@ -195,80 +195,80 @@ void attribute_hidden __malloc_consolidate(mstate av) */ if (av->max_fast != 0) { - clear_fastchunks(av); + clear_fastchunks(av); - unsorted_bin = unsorted_chunks(av); + unsorted_bin = unsorted_chunks(av); - /* - Remove each chunk from fast bin and consolidate it, placing it - then in unsorted bin. Among other reasons for doing this, - placing in unsorted bin avoids needing to calculate actual bins - until malloc is sure that chunks aren't immediately going to be - reused anyway. - */ + /* + Remove each chunk from fast bin and consolidate it, placing it + then in unsorted bin. Among other reasons for doing this, + placing in unsorted bin avoids needing to calculate actual bins + until malloc is sure that chunks aren't immediately going to be + reused anyway. 
+ */ - maxfb = &(av->fastbins[fastbin_index(av->max_fast)]); - fb = &(av->fastbins[0]); - do { - if ( (p = *fb) != 0) { - *fb = 0; + maxfb = &(av->fastbins[fastbin_index(av->max_fast)]); + fb = &(av->fastbins[0]); + do { + if ( (p = *fb) != 0) { + *fb = 0; - do { + do { CHECK_PTR(p); - check_inuse_chunk(p); - nextp = REVEAL_PTR(&p->fd, p->fd); + check_inuse_chunk(p); + nextp = REVEAL_PTR(&p->fd, p->fd); - /* Slightly streamlined version of consolidation code in free() */ - size = p->size & ~PREV_INUSE; - nextchunk = chunk_at_offset(p, size); - nextsize = chunksize(nextchunk); + /* Slightly streamlined version of consolidation code in free() */ + size = p->size & ~PREV_INUSE; + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); - if (!prev_inuse(p)) { - prevsize = p->prev_size; - size += prevsize; - p = chunk_at_offset(p, -((long) prevsize)); - unlink(p, bck, fwd); - } + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } - if (nextchunk != av->top) { - nextinuse = inuse_bit_at_offset(nextchunk, nextsize); - set_head(nextchunk, nextsize); + if (nextchunk != av->top) { + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + set_head(nextchunk, nextsize); - if (!nextinuse) { - size += nextsize; - unlink(nextchunk, bck, fwd); - } + if (!nextinuse) { + size += nextsize; + unlink(nextchunk, bck, fwd); + } - first_unsorted = unsorted_bin->fd; - unsorted_bin->fd = p; - first_unsorted->bk = p; + first_unsorted = unsorted_bin->fd; + unsorted_bin->fd = p; + first_unsorted->bk = p; - set_head(p, size | PREV_INUSE); - p->bk = unsorted_bin; - p->fd = first_unsorted; - set_foot(p, size); - } + set_head(p, size | PREV_INUSE); + p->bk = unsorted_bin; + p->fd = first_unsorted; + set_foot(p, size); + } - else { - size += nextsize; - set_head(p, size | PREV_INUSE); - av->top = p; - } + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + } - } while ( (p = nextp) != 0); + } while ( (p = nextp) != 0); - } - } while (fb++ != maxfb); + } + } while (fb++ != maxfb); } else { - malloc_init_state(av); - check_malloc_state(); + malloc_init_state(av); + check_malloc_state(); } } /* ------------------------------ free ------------------------------ */ -void free(void* mem) +void __wrap_free(void* mem) { mstate av; @@ -284,7 +284,7 @@ void free(void* mem) /* free(0) has no effect */ if (mem == NULL) - return; + return; __MALLOC_LOCK; av = get_malloc_state(); @@ -301,16 +301,16 @@ void free(void* mem) if ((unsigned long)(size) <= (unsigned long)(av->max_fast) #if TRIM_FASTBINS - /* If TRIM_FASTBINS set, don't place chunks - bordering top into fastbins */ - && (chunk_at_offset(p, size) != av->top) + /* If TRIM_FASTBINS set, don't place chunks + bordering top into fastbins */ + && (chunk_at_offset(p, size) != av->top) #endif ) { - set_fastchunks(av); - fb = &(av->fastbins[fastbin_index(size)]); - p->fd = PROTECT_PTR(&p->fd, *fb); - *fb = p; + set_fastchunks(av); + fb = &(av->fastbins[fastbin_index(size)]); + p->fd = PROTECT_PTR(&p->fd, *fb); + *fb = p; } /* @@ -318,82 +318,82 @@ void free(void* mem) */ else if (!chunk_is_mmapped(p)) { - set_anychunks(av); + set_anychunks(av); - nextchunk = chunk_at_offset(p, size); - nextsize = chunksize(nextchunk); + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); - /* consolidate backward */ - if (!prev_inuse(p)) { - prevsize = p->prev_size; - size += prevsize; - p = chunk_at_offset(p, -((long) prevsize)); - unlink(p, bck, 
fwd); - } + /* consolidate backward */ + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } - if (nextchunk != av->top) { - /* get and clear inuse bit */ - nextinuse = inuse_bit_at_offset(nextchunk, nextsize); - set_head(nextchunk, nextsize); + if (nextchunk != av->top) { + /* get and clear inuse bit */ + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + set_head(nextchunk, nextsize); - /* consolidate forward */ - if (!nextinuse) { - unlink(nextchunk, bck, fwd); - size += nextsize; - } + /* consolidate forward */ + if (!nextinuse) { + unlink(nextchunk, bck, fwd); + size += nextsize; + } - /* - Place the chunk in unsorted chunk list. Chunks are - not placed into regular bins until after they have - been given one chance to be used in malloc. - */ + /* + Place the chunk in unsorted chunk list. Chunks are + not placed into regular bins until after they have + been given one chance to be used in malloc. + */ - bck = unsorted_chunks(av); - fwd = bck->fd; - p->bk = bck; - p->fd = fwd; - bck->fd = p; - fwd->bk = p; + bck = unsorted_chunks(av); + fwd = bck->fd; + p->bk = bck; + p->fd = fwd; + bck->fd = p; + fwd->bk = p; - set_head(p, size | PREV_INUSE); - set_foot(p, size); + set_head(p, size | PREV_INUSE); + set_foot(p, size); - check_free_chunk(p); - } + check_free_chunk(p); + } - /* - If the chunk borders the current high end of memory, - consolidate into top - */ + /* + If the chunk borders the current high end of memory, + consolidate into top + */ - else { - size += nextsize; - set_head(p, size | PREV_INUSE); - av->top = p; - check_chunk(p); - } + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + check_chunk(p); + } - /* - If freeing a large space, consolidate possibly-surrounding - chunks. Then, if the total unused topmost memory exceeds trim - threshold, ask malloc_trim to reduce top. + /* + If freeing a large space, consolidate possibly-surrounding + chunks. Then, if the total unused topmost memory exceeds trim + threshold, ask malloc_trim to reduce top. - Unless max_fast is 0, we don't know if there are fastbins - bordering top, so we cannot tell for sure whether threshold - has been reached unless fastbins are consolidated. But we - don't want to consolidate on each free. As a compromise, - consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD - is reached. - */ + Unless max_fast is 0, we don't know if there are fastbins + bordering top, so we cannot tell for sure whether threshold + has been reached unless fastbins are consolidated. But we + don't want to consolidate on each free. As a compromise, + consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD + is reached. 
+ */ - if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { - if (have_fastchunks(av)) - __malloc_consolidate(av); + if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { + if (have_fastchunks(av)) + __malloc_consolidate(av); - if ((unsigned long)(chunksize(av->top)) >= - (unsigned long)(av->trim_threshold)) - __malloc_trim(av->top_pad, av); - } + if ((unsigned long)(chunksize(av->top)) >= + (unsigned long)(av->trim_threshold)) + __malloc_trim(av->top_pad, av); + } } /* @@ -405,13 +405,13 @@ void free(void* mem) */ else { - size_t offset = p->prev_size; - av->n_mmaps--; - av->mmapped_mem -= (size + offset); - munmap((char*)p - offset, size + offset); + size_t offset = p->prev_size; + av->n_mmaps--; + av->mmapped_mem -= (size + offset); + munmap((char*)p - offset, size + offset); } __MALLOC_UNLOCK; } /* glibc compatibilty */ -weak_alias(free, __libc_free) +weak_alias(__wrap_free, __libc_free) diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/malloc.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/malloc.c index a43b971a..b2aaac71 100644 --- a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/malloc.c +++ b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/malloc.c @@ -16,6 +16,7 @@ #include "malloc.h" +void __wrap_free(void *); __UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); @@ -74,30 +75,30 @@ void __do_check_chunk(mchunkptr p) if (!chunk_is_mmapped(p)) { - /* Has legal address ... */ - if (p != av->top) { - if (contiguous(av)) { - assert(((char*)p) >= min_address); - assert(((char*)p + sz) <= ((char*)(av->top))); - } - } - else { - /* top size is always at least MINSIZE */ - assert((unsigned long)(sz) >= MINSIZE); - /* top predecessor always marked inuse */ - assert(prev_inuse(p)); - } + /* Has legal address ... */ + if (p != av->top) { + if (contiguous(av)) { + assert(((char*)p) >= min_address); + assert(((char*)p + sz) <= ((char*)(av->top))); + } + } + else { + /* top size is always at least MINSIZE */ + assert((unsigned long)(sz) >= MINSIZE); + /* top predecessor always marked inuse */ + assert(prev_inuse(p)); + } } else { - /* address is outside main heap */ - if (contiguous(av) && av->top != initial_top(av)) { - assert(((char*)p) < min_address || ((char*)p) > max_address); - } - /* chunk is page-aligned */ - assert(((p->prev_size + sz) & (av->pagesize-1)) == 0); - /* mem is aligned */ - assert(aligned_OK(chunk2mem(p))); + /* address is outside main heap */ + if (contiguous(av) && av->top != initial_top(av)) { + assert(((char*)p) < min_address || ((char*)p) > max_address); + } + /* chunk is page-aligned */ + assert(((p->prev_size + sz) & (av->pagesize-1)) == 0); + /* mem is aligned */ + assert(aligned_OK(chunk2mem(p))); } } @@ -119,20 +120,20 @@ void __do_check_free_chunk(mchunkptr p) /* Unless a special marker, must have OK fields */ if ((unsigned long)(sz) >= MINSIZE) { - assert((sz & MALLOC_ALIGN_MASK) == 0); - assert(aligned_OK(chunk2mem(p))); - /* ... matching footer field */ - assert(next->prev_size == sz); - /* ... and is fully consolidated */ - assert(prev_inuse(p)); - assert (next == av->top || inuse(next)); + assert((sz & MALLOC_ALIGN_MASK) == 0); + assert(aligned_OK(chunk2mem(p))); + /* ... matching footer field */ + assert(next->prev_size == sz); + /* ... and is fully consolidated */ + assert(prev_inuse(p)); + assert (next == av->top || inuse(next)); - /* ... 
and has minimally sane links */ - assert(p->fd->bk == p); - assert(p->bk->fd == p); + /* ... and has minimally sane links */ + assert(p->fd->bk == p); + assert(p->bk->fd == p); } else /* markers are always of size (sizeof(size_t)) */ - assert(sz == (sizeof(size_t))); + assert(sz == (sizeof(size_t))); } /* Properties of inuse chunks */ @@ -143,7 +144,7 @@ void __do_check_inuse_chunk(mchunkptr p) __do_check_chunk(p); if (chunk_is_mmapped(p)) - return; /* mmapped chunks have no next/prev */ + return; /* mmapped chunks have no next/prev */ /* Check whether it claims to be in use ... */ assert(inuse(p)); @@ -155,18 +156,18 @@ void __do_check_inuse_chunk(mchunkptr p) if an inuse chunk borders them and debug is on, it's worth doing them. */ if (!prev_inuse(p)) { - /* Note that we cannot even look at prev unless it is not inuse */ - mchunkptr prv = prev_chunk(p); - assert(next_chunk(prv) == p); - __do_check_free_chunk(prv); + /* Note that we cannot even look at prev unless it is not inuse */ + mchunkptr prv = prev_chunk(p); + assert(next_chunk(prv) == p); + __do_check_free_chunk(prv); } if (next == av->top) { - assert(prev_inuse(next)); - assert(chunksize(next) >= MINSIZE); + assert(prev_inuse(next)); + assert(chunksize(next) >= MINSIZE); } else if (!inuse(next)) - __do_check_free_chunk(next); + __do_check_free_chunk(next); } /* Properties of chunks recycled from fastbins */ @@ -240,7 +241,7 @@ void __do_check_malloc_state(void) /* cannot run remaining checks until fully initialized */ if (av->top == 0 || av->top == initial_top(av)) - return; + return; /* pagesize is a power of 2 */ assert((av->pagesize & (av->pagesize-1)) == 0); @@ -253,64 +254,64 @@ void __do_check_malloc_state(void) max_fast_bin = fastbin_index(av->max_fast); for (i = 0; (unsigned int)i < NFASTBINS; ++i) { - p = av->fastbins[i]; + p = av->fastbins[i]; - /* all bins past max_fast are empty */ - if (i > max_fast_bin) - assert(p == 0); + /* all bins past max_fast are empty */ + if (i > max_fast_bin) + assert(p == 0); - while (p != 0) { - CHECK_PTR(p); - /* each chunk claims to be inuse */ - __do_check_inuse_chunk(p); - total += chunksize(p); - /* chunk belongs in this bin */ - assert(fastbin_index(chunksize(p)) == i); - p = REVEAL_PTR(&p->fd, p->fd); - } + while (p != 0) { + CHECK_PTR(p); + /* each chunk claims to be inuse */ + __do_check_inuse_chunk(p); + total += chunksize(p); + /* chunk belongs in this bin */ + assert(fastbin_index(chunksize(p)) == i); + p = REVEAL_PTR(&p->fd, p->fd); + } } if (total != 0) - assert(have_fastchunks(av)); + assert(have_fastchunks(av)); else if (!have_fastchunks(av)) - assert(total == 0); + assert(total == 0); /* check normal bins */ for (i = 1; i < NBINS; ++i) { - b = bin_at(av,i); + b = bin_at(av,i); - /* binmap is accurate (except for bin 1 == unsorted_chunks) */ - if (i >= 2) { - binbit = get_binmap(av,i); - empty = last(b) == b; - if (!binbit) - assert(empty); - else if (!empty) - assert(binbit); - } + /* binmap is accurate (except for bin 1 == unsorted_chunks) */ + if (i >= 2) { + binbit = get_binmap(av,i); + empty = last(b) == b; + if (!binbit) + assert(empty); + else if (!empty) + assert(binbit); + } - for (p = last(b); p != b; p = p->bk) { - /* each chunk claims to be free */ - __do_check_free_chunk(p); - size = chunksize(p); - total += size; - if (i >= 2) { - /* chunk belongs in bin */ - assert(bin_index(size) == i); - /* lists are sorted */ - if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) { - assert(p->bk == b || - (unsigned long)chunksize(p->bk) >= - (unsigned 
long)chunksize(p)); - } - } - /* chunk is followed by a legal chain of inuse chunks */ - for (q = next_chunk(p); - (q != av->top && inuse(q) && - (unsigned long)(chunksize(q)) >= MINSIZE); - q = next_chunk(q)) - __do_check_inuse_chunk(q); - } + for (p = last(b); p != b; p = p->bk) { + /* each chunk claims to be free */ + __do_check_free_chunk(p); + size = chunksize(p); + total += size; + if (i >= 2) { + /* chunk belongs in bin */ + assert(bin_index(size) == i); + /* lists are sorted */ + if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) { + assert(p->bk == b || + (unsigned long)chunksize(p->bk) >= + (unsigned long)chunksize(p)); + } + } + /* chunk is followed by a legal chain of inuse chunks */ + for (q = next_chunk(p); + (q != av->top && inuse(q) && + (unsigned long)(chunksize(q)) >= MINSIZE); + q = next_chunk(q)) + __do_check_inuse_chunk(q); + } } /* top chunk is OK */ @@ -323,13 +324,13 @@ void __do_check_malloc_state(void) assert(av->n_mmaps <= av->max_n_mmaps); assert((unsigned long)(av->sbrked_mem) <= - (unsigned long)(av->max_sbrked_mem)); + (unsigned long)(av->max_sbrked_mem)); assert((unsigned long)(av->mmapped_mem) <= - (unsigned long)(av->max_mmapped_mem)); + (unsigned long)(av->max_mmapped_mem)); assert((unsigned long)(av->max_total_mem) >= - (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem)); + (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem)); } #endif @@ -375,9 +376,9 @@ static void* __malloc_alloc(size_t nb, mstate av) */ if (have_fastchunks(av)) { - assert(in_smallbin_range(nb)); - __malloc_consolidate(av); - return malloc(nb - MALLOC_ALIGN_MASK); + assert(in_smallbin_range(nb)); + __malloc_consolidate(av); + return malloc(nb - MALLOC_ALIGN_MASK); } @@ -389,62 +390,62 @@ static void* __malloc_alloc(size_t nb, mstate av) */ if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) && - (av->n_mmaps < av->n_mmaps_max)) { + (av->n_mmaps < av->n_mmaps_max)) { - char* mm; /* return value from mmap call*/ + char* mm; /* return value from mmap call*/ - /* - Round up size to nearest page. For mmapped chunks, the overhead - is one (sizeof(size_t)) unit larger than for normal chunks, because there - is no following chunk whose prev_size field could be used. - */ - size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; + /* + Round up size to nearest page. For mmapped chunks, the overhead + is one (sizeof(size_t)) unit larger than for normal chunks, because there + is no following chunk whose prev_size field could be used. + */ + size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; - /* Don't try if size wraps around 0 */ - if ((unsigned long)(size) > (unsigned long)(nb)) { + /* Don't try if size wraps around 0 */ + if ((unsigned long)(size) > (unsigned long)(nb)) { - mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE)); + mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE)); - if (mm != (char*)(MORECORE_FAILURE)) { + if (mm != (char*)(MORECORE_FAILURE)) { - /* - The offset to the start of the mmapped region is stored - in the prev_size field of the chunk. This allows us to adjust - returned start address to meet alignment requirements here - and in memalign(), and still be able to compute proper - address argument for later munmap in free() and realloc(). - */ + /* + The offset to the start of the mmapped region is stored + in the prev_size field of the chunk. 
This allows us to adjust + returned start address to meet alignment requirements here + and in memalign(), and still be able to compute proper + address argument for later munmap in free() and realloc(). + */ - front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK; - if (front_misalign > 0) { - correction = MALLOC_ALIGNMENT - front_misalign; - p = (mchunkptr)(mm + correction); - p->prev_size = correction; - set_head(p, (size - correction) |IS_MMAPPED); - } - else { - p = (mchunkptr)mm; - p->prev_size = 0; - set_head(p, size|IS_MMAPPED); - } + front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + correction = MALLOC_ALIGNMENT - front_misalign; + p = (mchunkptr)(mm + correction); + p->prev_size = correction; + set_head(p, (size - correction) |IS_MMAPPED); + } + else { + p = (mchunkptr)mm; + p->prev_size = 0; + set_head(p, size|IS_MMAPPED); + } - /* update statistics */ + /* update statistics */ - if (++av->n_mmaps > av->max_n_mmaps) - av->max_n_mmaps = av->n_mmaps; + if (++av->n_mmaps > av->max_n_mmaps) + av->max_n_mmaps = av->n_mmaps; - sum = av->mmapped_mem += size; - if (sum > (unsigned long)(av->max_mmapped_mem)) - av->max_mmapped_mem = sum; - sum += av->sbrked_mem; - if (sum > (unsigned long)(av->max_total_mem)) - av->max_total_mem = sum; + sum = av->mmapped_mem += size; + if (sum > (unsigned long)(av->max_mmapped_mem)) + av->max_mmapped_mem = sum; + sum += av->sbrked_mem; + if (sum > (unsigned long)(av->max_total_mem)) + av->max_total_mem = sum; - check_chunk(p); + check_chunk(p); - return chunk2mem(p); - } - } + return chunk2mem(p); + } + } } /* Record incoming configuration of top */ @@ -459,8 +460,8 @@ static void* __malloc_alloc(size_t nb, mstate av) * be at least MINSIZE and to have prev_inuse set. */ assert((old_top == initial_top(av) && old_size == 0) || - ((unsigned long) (old_size) >= MINSIZE && - prev_inuse(old_top))); + ((unsigned long) (old_size) >= MINSIZE && + prev_inuse(old_top))); /* Precondition: not enough current space to satisfy nb request */ assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE)); @@ -480,7 +481,7 @@ static void* __malloc_alloc(size_t nb, mstate av) */ if (contiguous(av)) - size -= old_size; + size -= old_size; /* Round to a multiple of page size. 
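[Editor's sketch, illustrative only and not part of the patch.] The mmapped-chunk path above rounds the request up to whole pages and keeps one extra size_t of overhead, because an mmapped chunk has no successor whose prev_size field could be borrowed. The same arithmetic in isolation, with pagemask and MALLOC_ALIGN_MASK assumed to be the values/macros visible in the surrounding file:

    /* illustrative helper; nb is the already-normalized request size */
    static size_t mmap_chunk_size(size_t nb, size_t pagemask)
    {
      return (nb + sizeof(size_t) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
    }
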
@@ -499,7 +500,7 @@ static void* __malloc_alloc(size_t nb, mstate av) */ if (size > 0) - fst_brk = (char*)(MORECORE(size)); + fst_brk = (char*)(MORECORE(size)); /* If have mmap, try using it as a backup when MORECORE fails or @@ -512,234 +513,234 @@ static void* __malloc_alloc(size_t nb, mstate av) if (fst_brk == (char*)(MORECORE_FAILURE)) { - /* Cannot merge with old top, so add its size back in */ - if (contiguous(av)) - size = (size + old_size + pagemask) & ~pagemask; + /* Cannot merge with old top, so add its size back in */ + if (contiguous(av)) + size = (size + old_size + pagemask) & ~pagemask; - /* If we are relying on mmap as backup, then use larger units */ - if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE)) - size = MMAP_AS_MORECORE_SIZE; + /* If we are relying on mmap as backup, then use larger units */ + if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE)) + size = MMAP_AS_MORECORE_SIZE; - /* Don't try if size wraps around 0 */ - if ((unsigned long)(size) > (unsigned long)(nb)) { + /* Don't try if size wraps around 0 */ + if ((unsigned long)(size) > (unsigned long)(nb)) { - fst_brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE)); + fst_brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE)); - if (fst_brk != (char*)(MORECORE_FAILURE)) { + if (fst_brk != (char*)(MORECORE_FAILURE)) { - /* We do not need, and cannot use, another sbrk call to find end */ - snd_brk = fst_brk + size; + /* We do not need, and cannot use, another sbrk call to find end */ + snd_brk = fst_brk + size; - /* Record that we no longer have a contiguous sbrk region. - After the first time mmap is used as backup, we do not - ever rely on contiguous space since this could incorrectly - bridge regions. - */ - set_noncontiguous(av); - } - } + /* Record that we no longer have a contiguous sbrk region. + After the first time mmap is used as backup, we do not + ever rely on contiguous space since this could incorrectly + bridge regions. + */ + set_noncontiguous(av); + } + } } if (fst_brk != (char*)(MORECORE_FAILURE)) { - av->sbrked_mem += size; + av->sbrked_mem += size; - /* - If MORECORE extends previous space, we can likewise extend top size. - */ + /* + If MORECORE extends previous space, we can likewise extend top size. + */ - if (fst_brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) { - set_head(old_top, (size + old_size) | PREV_INUSE); - } + if (fst_brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) { + set_head(old_top, (size + old_size) | PREV_INUSE); + } - /* - Otherwise, make adjustments: + /* + Otherwise, make adjustments: - * If the first time through or noncontiguous, we need to call sbrk - just to find out where the end of memory lies. + * If the first time through or noncontiguous, we need to call sbrk + just to find out where the end of memory lies. - * We need to ensure that all returned chunks from malloc will meet - MALLOC_ALIGNMENT + * We need to ensure that all returned chunks from malloc will meet + MALLOC_ALIGNMENT - * If there was an intervening foreign sbrk, we need to adjust sbrk - request size to account for fact that we will not be able to - combine new space with existing space in old_top. + * If there was an intervening foreign sbrk, we need to adjust sbrk + request size to account for fact that we will not be able to + combine new space with existing space in old_top. - * Almost all systems internally allocate whole pages at a time, in - which case we might as well use the whole last page of request. 
- So we allocate enough more memory to hit a page boundary now, - which in turn causes future contiguous calls to page-align. - */ + * Almost all systems internally allocate whole pages at a time, in + which case we might as well use the whole last page of request. + So we allocate enough more memory to hit a page boundary now, + which in turn causes future contiguous calls to page-align. + */ - else { - front_misalign = 0; - end_misalign = 0; - correction = 0; - aligned_brk = fst_brk; + else { + front_misalign = 0; + end_misalign = 0; + correction = 0; + aligned_brk = fst_brk; - /* - If MORECORE returns an address lower than we have seen before, - we know it isn't really contiguous. This and some subsequent - checks help cope with non-conforming MORECORE functions and - the presence of "foreign" calls to MORECORE from outside of - malloc or by other threads. We cannot guarantee to detect - these in all cases, but cope with the ones we do detect. - */ - if (contiguous(av) && old_size != 0 && fst_brk < old_end) { - set_noncontiguous(av); - } + /* + If MORECORE returns an address lower than we have seen before, + we know it isn't really contiguous. This and some subsequent + checks help cope with non-conforming MORECORE functions and + the presence of "foreign" calls to MORECORE from outside of + malloc or by other threads. We cannot guarantee to detect + these in all cases, but cope with the ones we do detect. + */ + if (contiguous(av) && old_size != 0 && fst_brk < old_end) { + set_noncontiguous(av); + } - /* handle contiguous cases */ - if (contiguous(av)) { + /* handle contiguous cases */ + if (contiguous(av)) { - /* We can tolerate forward non-contiguities here (usually due - to foreign calls) but treat them as part of our space for - stats reporting. */ - if (old_size != 0) - av->sbrked_mem += fst_brk - old_end; + /* We can tolerate forward non-contiguities here (usually due + to foreign calls) but treat them as part of our space for + stats reporting. */ + if (old_size != 0) + av->sbrked_mem += fst_brk - old_end; - /* Guarantee alignment of first new chunk made from this space */ + /* Guarantee alignment of first new chunk made from this space */ - front_misalign = (size_t)chunk2mem(fst_brk) & MALLOC_ALIGN_MASK; - if (front_misalign > 0) { + front_misalign = (size_t)chunk2mem(fst_brk) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { - /* - Skip over some bytes to arrive at an aligned position. - We don't need to specially mark these wasted front bytes. - They will never be accessed anyway because - prev_inuse of av->top (and any chunk created from its start) - is always true after initialization. - */ + /* + Skip over some bytes to arrive at an aligned position. + We don't need to specially mark these wasted front bytes. + They will never be accessed anyway because + prev_inuse of av->top (and any chunk created from its start) + is always true after initialization. + */ - correction = MALLOC_ALIGNMENT - front_misalign; - aligned_brk += correction; - } + correction = MALLOC_ALIGNMENT - front_misalign; + aligned_brk += correction; + } - /* - If this isn't adjacent to existing space, then we will not - be able to merge with old_top space, so must add to 2nd request. - */ + /* + If this isn't adjacent to existing space, then we will not + be able to merge with old_top space, so must add to 2nd request. 
+ */ - correction += old_size; + correction += old_size; - /* Extend the end address to hit a page boundary */ - end_misalign = (size_t)(fst_brk + size + correction); - correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; + /* Extend the end address to hit a page boundary */ + end_misalign = (size_t)(fst_brk + size + correction); + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; - assert(correction >= 0); - snd_brk = (char*)(MORECORE(correction)); + assert(correction >= 0); + snd_brk = (char*)(MORECORE(correction)); - if (snd_brk == (char*)(MORECORE_FAILURE)) { - /* - If can't allocate correction, try to at least find out current - brk. It might be enough to proceed without failing. - */ - correction = 0; - snd_brk = (char*)(MORECORE(0)); - } - else if (snd_brk < fst_brk) { - /* - If the second call gives noncontiguous space even though - it says it won't, the only course of action is to ignore - results of second call, and conservatively estimate where - the first call left us. Also set noncontiguous, so this - won't happen again, leaving at most one hole. + if (snd_brk == (char*)(MORECORE_FAILURE)) { + /* + If can't allocate correction, try to at least find out current + brk. It might be enough to proceed without failing. + */ + correction = 0; + snd_brk = (char*)(MORECORE(0)); + } + else if (snd_brk < fst_brk) { + /* + If the second call gives noncontiguous space even though + it says it won't, the only course of action is to ignore + results of second call, and conservatively estimate where + the first call left us. Also set noncontiguous, so this + won't happen again, leaving at most one hole. - Note that this check is intrinsically incomplete. Because - MORECORE is allowed to give more space than we ask for, - there is no reliable way to detect a noncontiguity - producing a forward gap for the second call. - */ - snd_brk = fst_brk + size; - correction = 0; - set_noncontiguous(av); - } + Note that this check is intrinsically incomplete. Because + MORECORE is allowed to give more space than we ask for, + there is no reliable way to detect a noncontiguity + producing a forward gap for the second call. + */ + snd_brk = fst_brk + size; + correction = 0; + set_noncontiguous(av); + } - } + } - /* handle non-contiguous cases */ - else { - /* MORECORE/mmap must correctly align */ - assert(aligned_OK(chunk2mem(fst_brk))); + /* handle non-contiguous cases */ + else { + /* MORECORE/mmap must correctly align */ + assert(aligned_OK(chunk2mem(fst_brk))); - /* Find out current end of memory */ - if (snd_brk == (char*)(MORECORE_FAILURE)) { - snd_brk = (char*)(MORECORE(0)); - av->sbrked_mem += snd_brk - fst_brk - size; - } - } + /* Find out current end of memory */ + if (snd_brk == (char*)(MORECORE_FAILURE)) { + snd_brk = (char*)(MORECORE(0)); + av->sbrked_mem += snd_brk - fst_brk - size; + } + } - /* Adjust top based on results of second sbrk */ - if (snd_brk != (char*)(MORECORE_FAILURE)) { - av->top = (mchunkptr)aligned_brk; - set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); - av->sbrked_mem += correction; + /* Adjust top based on results of second sbrk */ + if (snd_brk != (char*)(MORECORE_FAILURE)) { + av->top = (mchunkptr)aligned_brk; + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); + av->sbrked_mem += correction; - /* - If not the first time through, we either have a - gap due to foreign sbrk or a non-contiguous region. Insert a - double fencepost at old_top to prevent consolidation with space - we don't own. 
These fenceposts are artificial chunks that are - marked as inuse and are in any case too small to use. We need - two to make sizes and alignments work out. - */ + /* + If not the first time through, we either have a + gap due to foreign sbrk or a non-contiguous region. Insert a + double fencepost at old_top to prevent consolidation with space + we don't own. These fenceposts are artificial chunks that are + marked as inuse and are in any case too small to use. We need + two to make sizes and alignments work out. + */ - if (old_size != 0) { - /* Shrink old_top to insert fenceposts, keeping size a - multiple of MALLOC_ALIGNMENT. We know there is at least - enough space in old_top to do this. - */ - old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK; - set_head(old_top, old_size | PREV_INUSE); + if (old_size != 0) { + /* Shrink old_top to insert fenceposts, keeping size a + multiple of MALLOC_ALIGNMENT. We know there is at least + enough space in old_top to do this. + */ + old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK; + set_head(old_top, old_size | PREV_INUSE); - /* - Note that the following assignments completely overwrite - old_top when old_size was previously MINSIZE. This is - intentional. We need the fencepost, even if old_top otherwise gets - lost. - */ - chunk_at_offset(old_top, old_size )->size = - (sizeof(size_t))|PREV_INUSE; + /* + Note that the following assignments completely overwrite + old_top when old_size was previously MINSIZE. This is + intentional. We need the fencepost, even if old_top otherwise gets + lost. + */ + chunk_at_offset(old_top, old_size )->size = + (sizeof(size_t))|PREV_INUSE; - chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size = - (sizeof(size_t))|PREV_INUSE; + chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size = + (sizeof(size_t))|PREV_INUSE; - /* If possible, release the rest, suppressing trimming. */ - if (old_size >= MINSIZE) { - size_t tt = av->trim_threshold; - av->trim_threshold = (size_t)(-1); - free(chunk2mem(old_top)); - av->trim_threshold = tt; - } - } - } - } + /* If possible, release the rest, suppressing trimming. 
*/ + if (old_size >= MINSIZE) { + size_t tt = av->trim_threshold; + av->trim_threshold = (size_t)(-1); + __wrap_free(chunk2mem(old_top)); + av->trim_threshold = tt; + } + } + } + } - /* Update statistics */ - sum = av->sbrked_mem; - if (sum > (unsigned long)(av->max_sbrked_mem)) - av->max_sbrked_mem = sum; + /* Update statistics */ + sum = av->sbrked_mem; + if (sum > (unsigned long)(av->max_sbrked_mem)) + av->max_sbrked_mem = sum; - sum += av->mmapped_mem; - if (sum > (unsigned long)(av->max_total_mem)) - av->max_total_mem = sum; + sum += av->mmapped_mem; + if (sum > (unsigned long)(av->max_total_mem)) + av->max_total_mem = sum; - check_malloc_state(); + check_malloc_state(); - /* finally, do the allocation */ + /* finally, do the allocation */ - p = av->top; - size = chunksize(p); + p = av->top; + size = chunksize(p); - /* check that one of the above allocation paths succeeded */ - if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { - remainder_size = size - nb; - remainder = chunk_at_offset(p, nb); - av->top = remainder; - set_head(p, nb | PREV_INUSE); - set_head(remainder, remainder_size | PREV_INUSE); - check_malloced_chunk(p, nb); - return chunk2mem(p); - } + /* check that one of the above allocation paths succeeded */ + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { + remainder_size = size - nb; + remainder = chunk_at_offset(p, nb); + av->top = remainder; + set_head(p, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + check_malloced_chunk(p, nb); + return chunk2mem(p); + } } @@ -764,8 +765,8 @@ static int __malloc_largebin_index(unsigned long sz) #if defined(__GNUC__) && defined(i386) __asm__("bsrl %1,%0\n\t" - : "=r" (m) - : "g" (x)); + : "=r" (m) + : "g" (x)); #else { @@ -800,7 +801,7 @@ static int __malloc_largebin_index(unsigned long sz) /* ------------------------------ malloc ------------------------------ */ -void* malloc(size_t bytes) +void* __wrap_malloc(size_t bytes) { mstate av; @@ -843,9 +844,9 @@ void* malloc(size_t bytes) Bypass search if no frees yet */ if (!have_anychunks(av)) { - if (av->max_fast == 0) /* initialization check */ - __malloc_consolidate(av); - goto use_top; + if (av->max_fast == 0) /* initialization check */ + __malloc_consolidate(av); + goto use_top; } /* @@ -853,14 +854,14 @@ void* malloc(size_t bytes) */ if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) { - fb = &(av->fastbins[(fastbin_index(nb))]); - if ( (victim = *fb) != 0) { - CHECK_PTR(victim); - *fb = REVEAL_PTR(&victim->fd, victim->fd); - check_remalloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } + fb = &(av->fastbins[(fastbin_index(nb))]); + if ( (victim = *fb) != 0) { + CHECK_PTR(victim); + *fb = REVEAL_PTR(&victim->fd, victim->fd); + check_remalloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } } /* @@ -872,19 +873,19 @@ void* malloc(size_t bytes) */ if (in_smallbin_range(nb)) { - idx = smallbin_index(nb); - bin = bin_at(av,idx); + idx = smallbin_index(nb); + bin = bin_at(av,idx); - if ( (victim = last(bin)) != bin) { - bck = victim->bk; - set_inuse_bit_at_offset(victim, nb); - bin->bk = bck; - bck->fd = bin; + if ( (victim = last(bin)) != bin) { + bck = victim->bk; + set_inuse_bit_at_offset(victim, nb); + bin->bk = bck; + bck->fd = bin; - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } } /* If this is a large request, consolidate fastbins before continuing. 
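[Editor's note, illustrative only and not part of the patch.] Renaming the allocator entry points to __wrap_malloc/__wrap_free/__wrap_realloc matches the GNU ld --wrap convention: when an object is linked with --wrap=malloc, every undefined reference to malloc is redirected to __wrap_malloc, while a reference to __real_malloc would still resolve to the original symbol. A minimal sketch of that convention; the caller below is hypothetical:

    #include <stdlib.h>                     /* declares malloc() */

    void *__wrap_malloc(size_t size);       /* provided by the patched allocator */

    void *make_buffer(void)                 /* hypothetical caller */
    {
      /* With -Wl,--wrap=malloc the linker rebinds this call to
         __wrap_malloc(32); __real_malloc would name the unwrapped symbol. */
      return malloc(32);
    }
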
@@ -898,9 +899,9 @@ void* malloc(size_t bytes) */ else { - idx = __malloc_largebin_index(nb); - if (have_fastchunks(av)) - __malloc_consolidate(av); + idx = __malloc_largebin_index(nb); + if (have_fastchunks(av)) + __malloc_consolidate(av); } /* @@ -912,85 +913,85 @@ void* malloc(size_t bytes) */ while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) { - bck = victim->bk; - size = chunksize(victim); + bck = victim->bk; + size = chunksize(victim); - /* If a small request, try to use last remainder if it is the - only chunk in unsorted bin. This helps promote locality for - runs of consecutive small requests. This is the only - exception to best-fit, and applies only when there is - no exact fit for a small chunk. - */ + /* If a small request, try to use last remainder if it is the + only chunk in unsorted bin. This helps promote locality for + runs of consecutive small requests. This is the only + exception to best-fit, and applies only when there is + no exact fit for a small chunk. + */ - if (in_smallbin_range(nb) && - bck == unsorted_chunks(av) && - victim == av->last_remainder && - (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { + if (in_smallbin_range(nb) && + bck == unsorted_chunks(av) && + victim == av->last_remainder && + (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) { - /* split and reattach remainder */ - remainder_size = size - nb; - remainder = chunk_at_offset(victim, nb); - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; - av->last_remainder = remainder; - remainder->bk = remainder->fd = unsorted_chunks(av); + /* split and reattach remainder */ + remainder_size = size - nb; + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + av->last_remainder = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); - set_head(victim, nb | PREV_INUSE); - set_head(remainder, remainder_size | PREV_INUSE); - set_foot(remainder, remainder_size); + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } - /* remove from unsorted list */ - unsorted_chunks(av)->bk = bck; - bck->fd = unsorted_chunks(av); + /* remove from unsorted list */ + unsorted_chunks(av)->bk = bck; + bck->fd = unsorted_chunks(av); - /* Take now instead of binning if exact fit */ + /* Take now instead of binning if exact fit */ - if (size == nb) { - set_inuse_bit_at_offset(victim, size); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } + if (size == nb) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } - /* place chunk in bin */ + /* place chunk in bin */ - if (in_smallbin_range(size)) { - victim_index = smallbin_index(size); - bck = bin_at(av, victim_index); - fwd = bck->fd; - } - else { - victim_index = __malloc_largebin_index(size); - bck = bin_at(av, victim_index); - fwd = bck->fd; + if (in_smallbin_range(size)) { + victim_index = smallbin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; + } + else { + victim_index = __malloc_largebin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; - if (fwd != bck) { - /* if smaller than smallest, place first */ - if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) { - fwd = bck; - bck = bck->bk; - } - 
else if ((unsigned long)(size) >= - (unsigned long)(FIRST_SORTED_BIN_SIZE)) { + if (fwd != bck) { + /* if smaller than smallest, place first */ + if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) { + fwd = bck; + bck = bck->bk; + } + else if ((unsigned long)(size) >= + (unsigned long)(FIRST_SORTED_BIN_SIZE)) { - /* maintain large bins in sorted order */ - size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */ - while ((unsigned long)(size) < (unsigned long)(fwd->size)) - fwd = fwd->fd; - bck = fwd->bk; - } - } - } + /* maintain large bins in sorted order */ + size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */ + while ((unsigned long)(size) < (unsigned long)(fwd->size)) + fwd = fwd->fd; + bck = fwd->bk; + } + } + } - mark_bin(av, victim_index); - victim->bk = bck; - victim->fd = fwd; - fwd->bk = victim; - bck->fd = victim; + mark_bin(av, victim_index); + victim->bk = bck; + victim->fd = fwd; + fwd->bk = victim; + bck->fd = victim; } /* @@ -1003,36 +1004,36 @@ void* malloc(size_t bytes) */ if (!in_smallbin_range(nb)) { - bin = bin_at(av, idx); + bin = bin_at(av, idx); - for (victim = last(bin); victim != bin; victim = victim->bk) { - size = chunksize(victim); + for (victim = last(bin); victim != bin; victim = victim->bk) { + size = chunksize(victim); - if ((unsigned long)(size) >= (unsigned long)(nb)) { - remainder_size = size - nb; - unlink(victim, bck, fwd); + if ((unsigned long)(size) >= (unsigned long)(nb)) { + remainder_size = size - nb; + unlink(victim, bck, fwd); - /* Exhaust */ - if (remainder_size < MINSIZE) { - set_inuse_bit_at_offset(victim, size); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } - /* Split */ - else { - remainder = chunk_at_offset(victim, nb); - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; - remainder->bk = remainder->fd = unsorted_chunks(av); - set_head(victim, nb | PREV_INUSE); - set_head(remainder, remainder_size | PREV_INUSE); - set_foot(remainder, remainder_size); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } - } - } + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } + } + } } /* @@ -1052,73 +1053,73 @@ void* malloc(size_t bytes) for (;;) { - /* Skip rest of block if there are no more set bits in this block. */ - if (bit > map || bit == 0) { - do { - if (++block >= BINMAPSIZE) /* out of bins */ - goto use_top; - } while ( (map = av->binmap[block]) == 0); + /* Skip rest of block if there are no more set bits in this block. */ + if (bit > map || bit == 0) { + do { + if (++block >= BINMAPSIZE) /* out of bins */ + goto use_top; + } while ( (map = av->binmap[block]) == 0); - bin = bin_at(av, (block << BINMAPSHIFT)); - bit = 1; - } + bin = bin_at(av, (block << BINMAPSHIFT)); + bit = 1; + } - /* Advance to bin with set bit. There must be one. */ - while ((bit & map) == 0) { - bin = next_bin(bin); - bit <<= 1; - assert(bit != 0); - } + /* Advance to bin with set bit. There must be one. 
*/ + while ((bit & map) == 0) { + bin = next_bin(bin); + bit <<= 1; + assert(bit != 0); + } - /* Inspect the bin. It is likely to be non-empty */ - victim = last(bin); + /* Inspect the bin. It is likely to be non-empty */ + victim = last(bin); - /* If a false alarm (empty bin), clear the bit. */ - if (victim == bin) { - av->binmap[block] = map &= ~bit; /* Write through */ - bin = next_bin(bin); - bit <<= 1; - } + /* If a false alarm (empty bin), clear the bit. */ + if (victim == bin) { + av->binmap[block] = map &= ~bit; /* Write through */ + bin = next_bin(bin); + bit <<= 1; + } - else { - size = chunksize(victim); + else { + size = chunksize(victim); - /* We know the first chunk in this bin is big enough to use. */ - assert((unsigned long)(size) >= (unsigned long)(nb)); + /* We know the first chunk in this bin is big enough to use. */ + assert((unsigned long)(size) >= (unsigned long)(nb)); - remainder_size = size - nb; + remainder_size = size - nb; - /* unlink */ - bck = victim->bk; - bin->bk = bck; - bck->fd = bin; + /* unlink */ + bck = victim->bk; + bin->bk = bck; + bck->fd = bin; - /* Exhaust */ - if (remainder_size < MINSIZE) { - set_inuse_bit_at_offset(victim, size); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } - /* Split */ - else { - remainder = chunk_at_offset(victim, nb); + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; - remainder->bk = remainder->fd = unsorted_chunks(av); - /* advertise as last remainder */ - if (in_smallbin_range(nb)) - av->last_remainder = remainder; + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + /* advertise as last remainder */ + if (in_smallbin_range(nb)) + av->last_remainder = remainder; - set_head(victim, nb | PREV_INUSE); - set_head(remainder, remainder_size | PREV_INUSE); - set_foot(remainder, remainder_size); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; - } - } + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; + } + } } use_top: @@ -1141,15 +1142,15 @@ use_top: size = chunksize(victim); if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) { - remainder_size = size - nb; - remainder = chunk_at_offset(victim, nb); - av->top = remainder; - set_head(victim, nb | PREV_INUSE); - set_head(remainder, remainder_size | PREV_INUSE); + remainder_size = size - nb; + remainder = chunk_at_offset(victim, nb); + av->top = remainder; + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); - check_malloced_chunk(victim, nb); - retval = chunk2mem(victim); - goto DONE; + check_malloced_chunk(victim, nb); + retval = chunk2mem(victim); + goto DONE; } /* If no space in top, relay to handle system-dependent cases */ @@ -1161,4 +1162,4 @@ DONE: } /* glibc compatibilty */ -weak_alias(malloc, __libc_malloc) +weak_alias(__wrap_malloc, __libc_malloc) diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/realloc.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/realloc.c index 1db52d1d..0e24bc6d 100644 --- 
a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/realloc.c +++ b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc-standard/realloc.c @@ -18,7 +18,7 @@ /* ------------------------------ realloc ------------------------------ */ -void* realloc(void* oldmem, size_t bytes) +void* __wrap_realloc(void* oldmem, size_t bytes) { mstate av; @@ -48,10 +48,10 @@ void* realloc(void* oldmem, size_t bytes) /* Check for special cases. */ if (! oldmem) - return malloc(bytes); + return malloc(bytes); if (! bytes) { - free (oldmem); - return NULL; + free (oldmem); + return NULL; } checked_request2size(bytes, nb); @@ -65,117 +65,117 @@ void* realloc(void* oldmem, size_t bytes) if (!chunk_is_mmapped(oldp)) { - if ((unsigned long)(oldsize) >= (unsigned long)(nb)) { - /* already big enough; split below */ - newp = oldp; - newsize = oldsize; - } + if ((unsigned long)(oldsize) >= (unsigned long)(nb)) { + /* already big enough; split below */ + newp = oldp; + newsize = oldsize; + } - else { - next = chunk_at_offset(oldp, oldsize); + else { + next = chunk_at_offset(oldp, oldsize); - /* Try to expand forward into top */ - if (next == av->top && - (unsigned long)(newsize = oldsize + chunksize(next)) >= - (unsigned long)(nb + MINSIZE)) { - set_head_size(oldp, nb); - av->top = chunk_at_offset(oldp, nb); - set_head(av->top, (newsize - nb) | PREV_INUSE); - retval = chunk2mem(oldp); - goto DONE; - } + /* Try to expand forward into top */ + if (next == av->top && + (unsigned long)(newsize = oldsize + chunksize(next)) >= + (unsigned long)(nb + MINSIZE)) { + set_head_size(oldp, nb); + av->top = chunk_at_offset(oldp, nb); + set_head(av->top, (newsize - nb) | PREV_INUSE); + retval = chunk2mem(oldp); + goto DONE; + } - /* Try to expand forward into next chunk; split off remainder below */ - else if (next != av->top && - !inuse(next) && - (unsigned long)(newsize = oldsize + chunksize(next)) >= - (unsigned long)(nb)) { - newp = oldp; - unlink(next, bck, fwd); - } + /* Try to expand forward into next chunk; split off remainder below */ + else if (next != av->top && + !inuse(next) && + (unsigned long)(newsize = oldsize + chunksize(next)) >= + (unsigned long)(nb)) { + newp = oldp; + unlink(next, bck, fwd); + } - /* allocate, copy, free */ - else { - newmem = malloc(nb - MALLOC_ALIGN_MASK); - if (newmem == 0) { - retval = 0; /* propagate failure */ - goto DONE; - } + /* allocate, copy, free */ + else { + newmem = malloc(nb - MALLOC_ALIGN_MASK); + if (newmem == 0) { + retval = 0; /* propagate failure */ + goto DONE; + } - newp = mem2chunk(newmem); - newsize = chunksize(newp); + newp = mem2chunk(newmem); + newsize = chunksize(newp); - /* - Avoid copy if newp is next chunk after oldp. - */ - if (newp == next) { - newsize += oldsize; - newp = oldp; - } - else { - /* - Unroll copy of <= 36 bytes (72 if 8byte sizes) - We know that contents have an odd number of - size_t-sized words; minimally 3. - */ + /* + Avoid copy if newp is next chunk after oldp. + */ + if (newp == next) { + newsize += oldsize; + newp = oldp; + } + else { + /* + Unroll copy of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + size_t-sized words; minimally 3. 
+ */ - copysize = oldsize - (sizeof(size_t)); - s = (size_t*)(oldmem); - d = (size_t*)(newmem); - ncopies = copysize / sizeof(size_t); - assert(ncopies >= 3); + copysize = oldsize - (sizeof(size_t)); + s = (size_t*)(oldmem); + d = (size_t*)(newmem); + ncopies = copysize / sizeof(size_t); + assert(ncopies >= 3); - if (ncopies > 9) - memcpy(d, s, copysize); + if (ncopies > 9) + memcpy(d, s, copysize); - else { - *(d+0) = *(s+0); - *(d+1) = *(s+1); - *(d+2) = *(s+2); - if (ncopies > 4) { - *(d+3) = *(s+3); - *(d+4) = *(s+4); - if (ncopies > 6) { - *(d+5) = *(s+5); - *(d+6) = *(s+6); - if (ncopies > 8) { - *(d+7) = *(s+7); - *(d+8) = *(s+8); - } - } - } - } + else { + *(d+0) = *(s+0); + *(d+1) = *(s+1); + *(d+2) = *(s+2); + if (ncopies > 4) { + *(d+3) = *(s+3); + *(d+4) = *(s+4); + if (ncopies > 6) { + *(d+5) = *(s+5); + *(d+6) = *(s+6); + if (ncopies > 8) { + *(d+7) = *(s+7); + *(d+8) = *(s+8); + } + } + } + } - free(oldmem); - check_inuse_chunk(newp); - retval = chunk2mem(newp); - goto DONE; - } - } - } + free(oldmem); + check_inuse_chunk(newp); + retval = chunk2mem(newp); + goto DONE; + } + } + } - /* If possible, free extra space in old or extended chunk */ + /* If possible, free extra space in old or extended chunk */ - assert((unsigned long)(newsize) >= (unsigned long)(nb)); + assert((unsigned long)(newsize) >= (unsigned long)(nb)); - remainder_size = newsize - nb; + remainder_size = newsize - nb; - if (remainder_size < MINSIZE) { /* not enough extra to split off */ - set_head_size(newp, newsize); - set_inuse_bit_at_offset(newp, newsize); - } - else { /* split remainder */ - remainder = chunk_at_offset(newp, nb); - set_head_size(newp, nb); - set_head(remainder, remainder_size | PREV_INUSE); - /* Mark remainder as inuse so free() won't complain */ - set_inuse_bit_at_offset(remainder, remainder_size); - free(chunk2mem(remainder)); - } + if (remainder_size < MINSIZE) { /* not enough extra to split off */ + set_head_size(newp, newsize); + set_inuse_bit_at_offset(newp, newsize); + } + else { /* split remainder */ + remainder = chunk_at_offset(newp, nb); + set_head_size(newp, nb); + set_head(remainder, remainder_size | PREV_INUSE); + /* Mark remainder as inuse so free() won't complain */ + set_inuse_bit_at_offset(remainder, remainder_size); + free(chunk2mem(remainder)); + } - check_inuse_chunk(newp); - retval = chunk2mem(newp); - goto DONE; + check_inuse_chunk(newp); + retval = chunk2mem(newp); + goto DONE; } /* @@ -183,54 +183,54 @@ void* realloc(void* oldmem, size_t bytes) */ else { - size_t offset = oldp->prev_size; - size_t pagemask = av->pagesize - 1; - char *cp; - unsigned long sum; + size_t offset = oldp->prev_size; + size_t pagemask = av->pagesize - 1; + char *cp; + unsigned long sum; - /* Note the extra (sizeof(size_t)) overhead */ - newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask; + /* Note the extra (sizeof(size_t)) overhead */ + newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask; - /* don't need to remap if still within same page */ - if (oldsize == newsize - offset) { - retval = oldmem; - goto DONE; - } + /* don't need to remap if still within same page */ + if (oldsize == newsize - offset) { + retval = oldmem; + goto DONE; + } - cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1); + cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1); - if (cp != (char*)MORECORE_FAILURE) { + if (cp != (char*)MORECORE_FAILURE) { - newp = (mchunkptr)(cp + offset); - set_head(newp, (newsize - offset)|IS_MMAPPED); + newp = 
(mchunkptr)(cp + offset); + set_head(newp, (newsize - offset)|IS_MMAPPED); - assert(aligned_OK(chunk2mem(newp))); - assert((newp->prev_size == offset)); + assert(aligned_OK(chunk2mem(newp))); + assert((newp->prev_size == offset)); - /* update statistics */ - sum = av->mmapped_mem += newsize - oldsize; - if (sum > (unsigned long)(av->max_mmapped_mem)) - av->max_mmapped_mem = sum; - sum += av->sbrked_mem; - if (sum > (unsigned long)(av->max_total_mem)) - av->max_total_mem = sum; + /* update statistics */ + sum = av->mmapped_mem += newsize - oldsize; + if (sum > (unsigned long)(av->max_mmapped_mem)) + av->max_mmapped_mem = sum; + sum += av->sbrked_mem; + if (sum > (unsigned long)(av->max_total_mem)) + av->max_total_mem = sum; - retval = chunk2mem(newp); - goto DONE; - } + retval = chunk2mem(newp); + goto DONE; + } - /* Note the extra (sizeof(size_t)) overhead. */ - if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t)))) - newmem = oldmem; /* do nothing */ - else { - /* Must alloc, copy, free. */ - newmem = malloc(nb - MALLOC_ALIGN_MASK); - if (newmem != 0) { - memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t))); - free(oldmem); - } - } - retval = newmem; + /* Note the extra (sizeof(size_t)) overhead. */ + if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t)))) + newmem = oldmem; /* do nothing */ + else { + /* Must alloc, copy, free. */ + newmem = malloc(nb - MALLOC_ALIGN_MASK); + if (newmem != 0) { + memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t))); + free(oldmem); + } + } + retval = newmem; } DONE: @@ -239,4 +239,4 @@ void* realloc(void* oldmem, size_t bytes) } /* glibc compatibilty */ -weak_alias(realloc, __libc_realloc) +weak_alias(__wrap_realloc, __libc_realloc) diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/free.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/free.c index 14d11015..496e6bd9 100644 --- a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/free.c +++ b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/free.c @@ -28,9 +28,9 @@ static void __free_to_heap (void *mem, struct heap_free_area **heap #ifdef HEAP_USE_LOCKING - , __UCLIBC_MUTEX_TYPE *heap_lock + , __UCLIBC_MUTEX_TYPE *heap_lock #endif - ) + ) { size_t size; struct heap_free_area *fa; @@ -42,7 +42,7 @@ __free_to_heap (void *mem, struct heap_free_area **heap /* Normal free. */ MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)", - (long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem)); + (long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem)); size = MALLOC_SIZE (mem); mem = MALLOC_BASE (mem); @@ -73,45 +73,45 @@ __free_to_heap (void *mem, struct heap_free_area **heap #ifdef MALLOC_USE_SBRK /* Get the sbrk lock so that the two possible calls to sbrk below - are guaranteed to be contiguous. */ + are guaranteed to be contiguous. */ __malloc_lock_sbrk (); /* When using sbrk, we only shrink the heap from the end. It would - be possible to allow _both_ -- shrinking via sbrk when possible, - and otherwise shrinking via munmap, but this results in holes in - memory that prevent the brk from every growing back down; since - we only ever grow the heap via sbrk, this tends to produce a - continuously growing brk (though the actual memory is unmapped), - which could eventually run out of address space. Note that - `sbrk(0)' shouldn't normally do a system call, so this test is - reasonably cheap. 
*/ + be possible to allow _both_ -- shrinking via sbrk when possible, + and otherwise shrinking via munmap, but this results in holes in + memory that prevent the brk from every growing back down; since + we only ever grow the heap via sbrk, this tends to produce a + continuously growing brk (though the actual memory is unmapped), + which could eventually run out of address space. Note that + `sbrk(0)' shouldn't normally do a system call, so this test is + reasonably cheap. */ if ((void *)end != sbrk (0)) - { - MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)", - start, end, end - start); - __malloc_unlock_sbrk (); - __heap_unlock (heap_lock); - return; - } + { + MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)", + start, end, end - start); + __malloc_unlock_sbrk (); + __heap_unlock (heap_lock); + return; + } #endif MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)", - start, end, end - start); + start, end, end - start); /* Remove FA from the heap. */ __heap_delete (heap, fa); if (__heap_is_empty (heap)) - /* We want to avoid the heap from losing all memory, so reserve - a bit. This test is only a heuristic -- the existance of - another free area, even if it's smaller than - MALLOC_MIN_SIZE, will cause us not to reserve anything. */ - { - /* Put the reserved memory back in the heap; we assume that - MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so - we use the latter unconditionally here. */ - __heap_free (heap, (void *)start, MALLOC_MIN_SIZE); - start += MALLOC_MIN_SIZE; - } + /* We want to avoid the heap from losing all memory, so reserve + a bit. This test is only a heuristic -- the existance of + another free area, even if it's smaller than + MALLOC_MIN_SIZE, will cause us not to reserve anything. */ + { + /* Put the reserved memory back in the heap; we assume that + MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so + we use the latter unconditionally here. */ + __heap_free (heap, (void *)start, MALLOC_MIN_SIZE); + start += MALLOC_MIN_SIZE; + } #ifdef MALLOC_USE_SBRK @@ -126,99 +126,99 @@ __free_to_heap (void *mem, struct heap_free_area **heap # ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__ /* Using the uClinux broken munmap, we have to only munmap blocks - exactly as we got them from mmap, so scan through our list of - mmapped blocks, and return them in order. */ + exactly as we got them from mmap, so scan through our list of + mmapped blocks, and return them in order. */ MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...", - start, end - start); + start, end - start); prev_mmb = 0; mmb = __malloc_mmapped_blocks; while (mmb - && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size) - <= end)) - { - MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]", - (unsigned)mmb, mmb_start, mmb_end - mmb_start); + && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size) + <= end)) + { + MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]", + (unsigned)mmb, mmb_start, mmb_end - mmb_start); - if (mmb_start >= start - /* If the space between START and MMB_START is non-zero, but - too small to return to the heap, we can't unmap MMB. */ - && (start == mmb_start - || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE)) - { - struct malloc_mmb *next_mmb = mmb->next; + if (mmb_start >= start + /* If the space between START and MMB_START is non-zero, but + too small to return to the heap, we can't unmap MMB. 
*/ + && (start == mmb_start + || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE)) + { + struct malloc_mmb *next_mmb = mmb->next; - if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end) - /* There's too little space left at the end to deallocate - this block, so give up. */ - break; + if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end) + /* There's too little space left at the end to deallocate + this block, so give up. */ + break; - MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]", - (unsigned)mmb, mmb_start, mmb_end - mmb_start); + MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]", + (unsigned)mmb, mmb_start, mmb_end - mmb_start); - if (mmb_start != start) - /* We're going to unmap a part of the heap that begins after - start, so put the intervening region back into the heap. */ - { - MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]", - start, mmb_start - start); - __heap_free (heap, (void *)start, mmb_start - start); - } + if (mmb_start != start) + /* We're going to unmap a part of the heap that begins after + start, so put the intervening region back into the heap. */ + { + MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]", + start, mmb_start - start); + __heap_free (heap, (void *)start, mmb_start - start); + } - MALLOC_MMB_DEBUG_INDENT (-1); + MALLOC_MMB_DEBUG_INDENT (-1); - /* Unlink MMB from the list. */ - if (prev_mmb) - prev_mmb->next = next_mmb; - else - __malloc_mmapped_blocks = next_mmb; + /* Unlink MMB from the list. */ + if (prev_mmb) + prev_mmb->next = next_mmb; + else + __malloc_mmapped_blocks = next_mmb; - /* Start searching again from the end of this block. */ - start = mmb_end; + /* Start searching again from the end of this block. */ + start = mmb_end; - /* Release the descriptor block we used. */ - free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock); + /* Release the descriptor block we used. */ + free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock); - /* We have to unlock the heap before we recurse to free the mmb - descriptor, because we might be unmapping from the mmb - heap. */ + /* We have to unlock the heap before we recurse to free the mmb + descriptor, because we might be unmapping from the mmb + heap. */ __heap_unlock (heap_lock); - /* Do the actual munmap. */ - munmap ((void *)mmb_start, mmb_end - mmb_start); + /* Do the actual munmap. */ + munmap ((void *)mmb_start, mmb_end - mmb_start); - __heap_lock (heap_lock); + __heap_lock (heap_lock); # ifdef __UCLIBC_HAS_THREADS__ - /* In a multi-threaded program, it's possible that PREV_MMB has - been invalidated by another thread when we released the - heap lock to do the munmap system call, so just start over - from the beginning of the list. It sucks, but oh well; - it's probably not worth the bother to do better. */ - prev_mmb = 0; - mmb = __malloc_mmapped_blocks; + /* In a multi-threaded program, it's possible that PREV_MMB has + been invalidated by another thread when we released the + heap lock to do the munmap system call, so just start over + from the beginning of the list. It sucks, but oh well; + it's probably not worth the bother to do better. 
*/ + prev_mmb = 0; + mmb = __malloc_mmapped_blocks; # else - mmb = next_mmb; + mmb = next_mmb; # endif - } - else - { - prev_mmb = mmb; - mmb = mmb->next; - } + } + else + { + prev_mmb = mmb; + mmb = mmb->next; + } - MALLOC_MMB_DEBUG_INDENT (-1); - } + MALLOC_MMB_DEBUG_INDENT (-1); + } if (start != end) - /* Hmm, well there's something we couldn't unmap, so put it back - into the heap. */ - { - MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]", - start, end - start); - __heap_free (heap, (void *)start, end - start); - } + /* Hmm, well there's something we couldn't unmap, so put it back + into the heap. */ + { + MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]", + start, end - start); + __heap_free (heap, (void *)start, end - start); + } /* Finally release the lock for good. */ __heap_unlock (heap_lock); @@ -228,34 +228,34 @@ __free_to_heap (void *mem, struct heap_free_area **heap # else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */ /* MEM/LEN may not be page-aligned, so we have to page-align them, - and return any left-over bits on the end to the heap. */ + and return any left-over bits on the end to the heap. */ unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start); unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end); /* We have to be careful that any left-over bits are large enough to - return. Note that we _don't check_ to make sure there's room to - grow/shrink the start/end by another page, we just assume that - the unmap threshold is high enough so that this is always safe - (i.e., it should probably be at least 3 pages). */ + return. Note that we _don't check_ to make sure there's room to + grow/shrink the start/end by another page, we just assume that + the unmap threshold is high enough so that this is always safe + (i.e., it should probably be at least 3 pages). */ if (unmap_start > start) - { - if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE) - unmap_start += MALLOC_PAGE_SIZE; - __heap_free (heap, (void *)start, unmap_start - start); - } + { + if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE) + unmap_start += MALLOC_PAGE_SIZE; + __heap_free (heap, (void *)start, unmap_start - start); + } if (end > unmap_end) - { - if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE) - unmap_end -= MALLOC_PAGE_SIZE; - __heap_free (heap, (void *)unmap_end, end - unmap_end); - } + { + if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE) + unmap_end -= MALLOC_PAGE_SIZE; + __heap_free (heap, (void *)unmap_end, end - unmap_end); + } /* Release the heap lock before we do the system call. */ __heap_unlock (heap_lock); if (unmap_end > unmap_start) - /* Finally, actually unmap the memory. */ - munmap ((void *)unmap_start, unmap_end - unmap_start); + /* Finally, actually unmap the memory. 
*/ + munmap ((void *)unmap_start, unmap_end - unmap_start); # endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */ @@ -266,7 +266,7 @@ __free_to_heap (void *mem, struct heap_free_area **heap } void -free (void *mem) +__wrap_free (void *mem) { free_to_heap (mem, &__malloc_heap, &__malloc_heap_lock); } diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/malloc.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/malloc.c index 8d8ac7ff..7ef839f6 100644 --- a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/malloc.c +++ b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/malloc.c @@ -59,9 +59,9 @@ __UCLIBC_MUTEX_INIT(__malloc_mmb_heap_lock,PTHREAD_RECURSIVE_MUTEX_INITIALIZER_N static void * __malloc_from_heap (size_t size, struct heap_free_area **heap #ifdef HEAP_USE_LOCKING - , __UCLIBC_MUTEX_TYPE *heap_lock + , __UCLIBC_MUTEX_TYPE *heap_lock #endif - ) + ) { void *mem; @@ -82,12 +82,12 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap from the system, add it to the heap, and try again. */ { /* If we're trying to allocate a block bigger than the default - MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */ + MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */ void *block; size_t block_size - = (size < MALLOC_HEAP_EXTEND_SIZE - ? MALLOC_HEAP_EXTEND_SIZE - : MALLOC_ROUND_UP_TO_PAGE_SIZE (size)); + = (size < MALLOC_HEAP_EXTEND_SIZE + ? MALLOC_HEAP_EXTEND_SIZE + : MALLOC_ROUND_UP_TO_PAGE_SIZE (size)); /* Allocate the new heap block. */ #ifdef MALLOC_USE_SBRK @@ -95,24 +95,24 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap __malloc_lock_sbrk (); /* Use sbrk we can, as it's faster than mmap, and guarantees - contiguous allocation. */ + contiguous allocation. */ block = sbrk (block_size); if (likely (block != (void *)-1)) - { - /* Because sbrk can return results of arbitrary - alignment, align the result to a MALLOC_ALIGNMENT boundary. */ - long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT); - if (block != (void *)aligned_block) - /* Have to adjust. We should only have to actually do this - the first time (after which we will have aligned the brk - correctly). */ - { - /* Move the brk to reflect the alignment; our next allocation - should start on exactly the right alignment. */ - sbrk (aligned_block - (long)block); - block = (void *)aligned_block; - } - } + { + /* Because sbrk can return results of arbitrary + alignment, align the result to a MALLOC_ALIGNMENT boundary. */ + long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT); + if (block != (void *)aligned_block) + /* Have to adjust. We should only have to actually do this + the first time (after which we will have aligned the brk + correctly). */ + { + /* Move the brk to reflect the alignment; our next allocation + should start on exactly the right alignment. */ + sbrk (aligned_block - (long)block); + block = (void *)aligned_block; + } + } __malloc_unlock_sbrk (); @@ -121,62 +121,62 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap /* Otherwise, use mmap. 
*/ #ifdef __ARCH_USE_MMU__ block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); #else block = mmap ((void *)0, block_size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED, 0, 0); + MAP_SHARED | MAP_ANONYMOUS | MAP_UNINITIALIZED, 0, 0); #endif #endif /* MALLOC_USE_SBRK */ if (likely (block != (void *)-1)) - { + { #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__) - struct malloc_mmb *mmb, *prev_mmb, *new_mmb; + struct malloc_mmb *mmb, *prev_mmb, *new_mmb; #endif - MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)", - (long)block, (long)block + block_size, block_size); + MALLOC_DEBUG (1, "adding system memory to heap: 0x%lx - 0x%lx (%d bytes)", + (long)block, (long)block + block_size, block_size); - /* Get back the heap lock. */ - __heap_lock (heap_lock); + /* Get back the heap lock. */ + __heap_lock (heap_lock); - /* Put BLOCK into the heap. */ - __heap_free (heap, block, block_size); + /* Put BLOCK into the heap. */ + __heap_free (heap, block, block_size); - MALLOC_DEBUG_INDENT (-1); + MALLOC_DEBUG_INDENT (-1); - /* Try again to allocate. */ - mem = __heap_alloc (heap, &size); + /* Try again to allocate. */ + mem = __heap_alloc (heap, &size); #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__) - /* Insert a record of BLOCK in sorted order into the - __malloc_mmapped_blocks list. */ + /* Insert a record of BLOCK in sorted order into the + __malloc_mmapped_blocks list. */ - new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock); + new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock); - for (prev_mmb = 0, mmb = __malloc_mmapped_blocks; - mmb; - prev_mmb = mmb, mmb = mmb->next) - if (block < mmb->mem) - break; + for (prev_mmb = 0, mmb = __malloc_mmapped_blocks; + mmb; + prev_mmb = mmb, mmb = mmb->next) + if (block < mmb->mem) + break; - new_mmb->next = mmb; - new_mmb->mem = block; - new_mmb->size = block_size; + new_mmb->next = mmb; + new_mmb->mem = block; + new_mmb->size = block_size; - if (prev_mmb) - prev_mmb->next = new_mmb; - else - __malloc_mmapped_blocks = new_mmb; + if (prev_mmb) + prev_mmb->next = new_mmb; + else + __malloc_mmapped_blocks = new_mmb; - MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]", - (unsigned)new_mmb, - (unsigned)new_mmb->mem, block_size); + MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]", + (unsigned)new_mmb, + (unsigned)new_mmb->mem, block_size); #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */ - __heap_unlock (heap_lock); - } + __heap_unlock (heap_lock); + } } if (likely (mem)) @@ -185,7 +185,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap mem = MALLOC_SETUP (mem, size); MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)", - (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem)); + (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem)); } else MALLOC_DEBUG (-1, "malloc: returning 0"); @@ -194,7 +194,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap } void * -malloc (size_t size) +__wrap_malloc (size_t size) { void *mem; #ifdef MALLOC_DEBUGGING diff --git a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/realloc.c b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/realloc.c index bdfb5269..e67623a4 100644 --- a/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/realloc.c +++ 
b/src/l4/pkg/l4re-core/libc/uclibc-ng/contrib/uclibc/libc/stdlib/malloc/realloc.c @@ -21,7 +21,7 @@ void * -realloc (void *mem, size_t new_size) +__wrap_realloc (void *mem, size_t new_size) { size_t size; char *base_mem; @@ -56,7 +56,7 @@ realloc (void *mem, size_t new_size) new_size = HEAP_ADJUST_SIZE (sizeof (struct heap_free_area)); MALLOC_DEBUG (1, "realloc: 0x%lx, %d (base = 0x%lx, total_size = %d)", - (long)mem, new_size, (long)base_mem, size); + (long)mem, new_size, (long)base_mem, size); if (new_size > size) /* Grow the block. */ @@ -68,20 +68,20 @@ realloc (void *mem, size_t new_size) __heap_unlock (&__malloc_heap_lock); if (extra) - /* Record the changed size. */ - MALLOC_SET_SIZE (base_mem, size + extra); + /* Record the changed size. */ + MALLOC_SET_SIZE (base_mem, size + extra); else - /* Our attempts to extend MEM in place failed, just - allocate-and-copy. */ - { - void *new_mem = malloc (new_size - MALLOC_HEADER_SIZE); - if (new_mem) - { - memcpy (new_mem, mem, size - MALLOC_HEADER_SIZE); - free (mem); - } - mem = new_mem; - } + /* Our attempts to extend MEM in place failed, just + allocate-and-copy. */ + { + void *new_mem = malloc (new_size - MALLOC_HEADER_SIZE); + if (new_mem) + { + memcpy (new_mem, mem, size - MALLOC_HEADER_SIZE); + free (mem); + } + mem = new_mem; + } } else if (new_size + MALLOC_REALLOC_MIN_FREE_SIZE <= size) /* Shrink the block. */ @@ -94,7 +94,7 @@ realloc (void *mem, size_t new_size) if (mem) MALLOC_DEBUG (-1, "realloc: returning 0x%lx (base:0x%lx, total_size:%d)", - (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem)); + (long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem)); else MALLOC_DEBUG (-1, "realloc: returning 0"); diff --git a/src/l4/pkg/l4re-core/libc_backends/Control b/src/l4/pkg/l4re-core/libc_backends/Control index f0cdb713..6bb5a21d 100644 --- a/src/l4/pkg/l4re-core/libc_backends/Control +++ b/src/l4/pkg/l4re-core/libc_backends/Control @@ -1,6 +1,6 @@ Provides: libc_be_socket_noop libc_be_l4re libc_support_misc libc_be_fs_noop libc_be_math libc_be_l4refile libinitcwd libc_be_minimal_log_io libmount libc_be_sig - libc_be_sem_noop libc_be_static_heap + libc_be_sem_noop libc_be_static_heap libc_be_mem Requires: l4re libl4re-vfs libc-headers Maintainer: adam@os.inf.tu-dresden.de diff --git a/src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/Makefile b/src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/Makefile new file mode 100644 index 00000000..56bd471d --- /dev/null +++ b/src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/Makefile @@ -0,0 +1,12 @@ +PKGDIR ?= ../.. +L4DIR ?= $(PKGDIR)/../../.. + +TARGET = libc_be_mem.a libc_be_mem.so +LINK_INCR = libc_be_mem.a +PC_FILENAME = libc_be_mem +REQUIRES_LIBS = l4re +SRC_CC = mem.cc + +include $(L4DIR)/mk/lib.mk + +LDFLAGS := $(filter-out -gc-sections,$(LDFLAGS)) diff --git a/src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/mem.cc b/src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/mem.cc new file mode 100644 index 00000000..bc6206cb --- /dev/null +++ b/src/l4/pkg/l4re-core/libc_backends/lib/l4re_mem/mem.cc @@ -0,0 +1,32 @@ +/** + * \file libc_backends/l4re_mem/mem.cc + */ +/* + * (c) 2004-2009 Technische Universität Dresden + * This file is part of TUD:OS and distributed under the terms of the + * GNU Lesser General Public License 2.1. + * Please see the COPYING-LGPL-2.1 file for details. 
+ */ +#include +#include + +void *malloc(size_t size) throw() +{ + void *data = 0; + enter_kdebug("malloc"); + return (void*)data; +} + + +void free(void *p) throw() +{ + if (p) + enter_kdebug("free"); +} + +void *realloc(void *p, size_t size) throw() +{ + void *data = 0; + enter_kdebug("realloc"); + return (void*)data; +} diff --git a/src/l4/pkg/l4re-core/libgcc/contrib/gcc-13/libgcc/unwind-dw2-btree.h b/src/l4/pkg/l4re-core/libgcc/contrib/gcc-13/libgcc/unwind-dw2-btree.h index 488a0e0d..c05812f1 100644 --- a/src/l4/pkg/l4re-core/libgcc/contrib/gcc-13/libgcc/unwind-dw2-btree.h +++ b/src/l4/pkg/l4re-core/libgcc/contrib/gcc-13/libgcc/unwind-dw2-btree.h @@ -28,6 +28,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #include +void *__wrap_malloc(); + // Common logic for version locks. struct version_lock { @@ -402,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner) // No free node available, allocate a new one. struct btree_node *new_node - = (struct btree_node *) (malloc (sizeof (struct btree_node))); + = (struct btree_node *) (__wrap_malloc (sizeof (struct btree_node))); version_lock_initialize_locked_exclusive ( &(new_node->version_lock)); // initialize the node in locked state. new_node->entry_count = 0; diff --git a/src/l4/pkg/l4re-core/libgcc/contrib/gcc-14/libgcc/unwind-dw2-btree.h b/src/l4/pkg/l4re-core/libgcc/contrib/gcc-14/libgcc/unwind-dw2-btree.h index f4902378..82f1f319 100644 --- a/src/l4/pkg/l4re-core/libgcc/contrib/gcc-14/libgcc/unwind-dw2-btree.h +++ b/src/l4/pkg/l4re-core/libgcc/contrib/gcc-14/libgcc/unwind-dw2-btree.h @@ -28,6 +28,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #include +void *__wrap_malloc(); + // Common logic for version locks. struct version_lock { @@ -402,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner) // No free node available, allocate a new one. struct btree_node *new_node - = (struct btree_node *) (malloc (sizeof (struct btree_node))); + = (struct btree_node *) (__wrap_malloc (sizeof (struct btree_node))); version_lock_initialize_locked_exclusive ( &(new_node->version_lock)); // initialize the node in locked state. new_node->entry_count = 0; diff --git a/src/l4/pkg/l4re-core/libgcc/contrib/gcc-15/libgcc/unwind-dw2-btree.h b/src/l4/pkg/l4re-core/libgcc/contrib/gcc-15/libgcc/unwind-dw2-btree.h index 5e817407..9dd2e750 100644 --- a/src/l4/pkg/l4re-core/libgcc/contrib/gcc-15/libgcc/unwind-dw2-btree.h +++ b/src/l4/pkg/l4re-core/libgcc/contrib/gcc-15/libgcc/unwind-dw2-btree.h @@ -28,6 +28,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #include +void *__wrap_malloc(size_t); +void __wrap_free(void *); + // Common logic for version locks. struct version_lock { @@ -362,7 +365,7 @@ btree_destroy (struct btree *t) while (t->free_list) { struct btree_node *next = t->free_list->content.children[0].child; - free (t->free_list); + __wrap_free (t->free_list); t->free_list = next; } } @@ -401,7 +404,7 @@ btree_allocate_node (struct btree *t, bool inner) // No free node available, allocate a new one. struct btree_node *new_node - = (struct btree_node *) malloc (sizeof (struct btree_node)); + = (struct btree_node *) __wrap_malloc (sizeof (struct btree_node)); // Initialize the node in locked state. 
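The new libc_be_mem backend earlier in this hunk group deliberately stubs out malloc, free and realloc: each call traps into the kernel debugger via enter_kdebug() and returns NULL, so any code path that still reaches the libc heap is caught immediately. As a hedged illustration of what such a backend could grow into, the sketch below bump-allocates from a static arena; the arena size, the 16-byte alignment and all names are assumptions of this sketch, not values taken from the patch.

// Editor's sketch only; the patch keeps the backend as a trap.
#include <cstddef>

namespace {
  char arena[1 << 20];          // 1 MiB of static backing store (assumed size)
  std::size_t used;             // bytes handed out so far
}

extern "C" void *malloc(std::size_t size)
{
  size = (size + 15) & ~static_cast<std::size_t>(15); // keep 16-byte alignment
  if (size > sizeof(arena) - used)
    return 0;                   // arena exhausted
  void *p = arena + used;
  used += size;
  return p;
}

extern "C" void free(void *p)
{
  (void)p;                      // a bump allocator never reuses memory
}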
version_lock_initialize_locked_exclusive (&new_node->version_lock); new_node->entry_count = 0; diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-10/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-10/libsupc++/eh_alloc.cc index d4a9988f..e16f0bbc 100644 --- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-10/libsupc++/eh_alloc.cc +++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-10/libsupc++/eh_alloc.cc @@ -37,14 +37,16 @@ #include #if _GLIBCXX_HOSTED -using std::free; -using std::malloc; +//using std::free; +//using std::malloc; +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); using std::memset; #else // In a freestanding environment, these functions may not be available // -- but for now, we assume that they are. -extern "C" void *malloc (std::size_t); -extern "C" void free(void *); +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); extern "C" void *memset (void *, int, std::size_t); #endif @@ -58,19 +60,19 @@ using namespace __cxxabiv1; // just for overhead. #if INT_MAX == 32767 -# define EMERGENCY_OBJ_SIZE 128 -# define EMERGENCY_OBJ_COUNT 16 +# define EMERGENCY_OBJ_SIZE 128 +# define EMERGENCY_OBJ_COUNT 16 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 -# define EMERGENCY_OBJ_SIZE 512 -# define EMERGENCY_OBJ_COUNT 32 +# define EMERGENCY_OBJ_SIZE 512 +# define EMERGENCY_OBJ_COUNT 32 #else -# define EMERGENCY_OBJ_SIZE 1024 -# define EMERGENCY_OBJ_COUNT 64 +# define EMERGENCY_OBJ_SIZE 1024 +# define EMERGENCY_OBJ_COUNT 64 #endif #ifndef __GTHREADS # undef EMERGENCY_OBJ_COUNT -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 #endif namespace __gnu_cxx @@ -85,20 +87,25 @@ namespace { public: pool(); + pool(char*, int); _GLIBCXX_NODISCARD void *allocate (std::size_t); void free (void *); bool in_pool (void *); + bool mem_static; + private: + void init(); + struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; // A single mutex controlling emergency allocations. @@ -119,15 +126,31 @@ namespace // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // to make this tunable. arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT - + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); arena = (char *)malloc (arena_size); + mem_static = false; + + init(); + } + + pool::pool(char * storage, int size) + { + arena_size = size; + arena = storage; + mem_static = true; + + init(); + } + + void pool::init() + { if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - first_free_entry = NULL; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + first_free_entry = NULL; + return; + } // Populate the free-list with a single entry covering the whole arena first_free_entry = reinterpret_cast (arena); @@ -145,46 +168,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). 
size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. free_entry **e; for (e = &first_free_entry; - *e && (*e)->size < size; - e = &(*e)->next) - ; + *e && (*e)->size < size; + e = &(*e)->next) + ; if (!*e) - return NULL; + return NULL; allocated_entry *x; if ((*e)->size - size >= sizeof (free_entry)) - { - // Split block if it is too large. - free_entry *f = reinterpret_cast - (reinterpret_cast (*e) + size); - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - new (f) free_entry; - f->next = next; - f->size = sz - size; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = size; - *e = f; - } + { + // Split block if it is too large. + free_entry *f = reinterpret_cast + (reinterpret_cast (*e) + size); + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + new (f) free_entry; + f->next = next; + f->size = sz - size; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = size; + *e = f; + } else - { - // Exact size match or too small overhead for a free entry. - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = sz; - *e = next; - } + { + // Exact size match or too small overhead for a free entry. + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = sz; + *e = next; + } return &x->data; } @@ -192,74 +215,77 @@ namespace { __gnu_cxx::__scoped_lock sentry(emergency_mutex); allocated_entry *e = reinterpret_cast - (reinterpret_cast (data) - offsetof (allocated_entry, data)); + (reinterpret_cast (data) - offsetof (allocated_entry, data)); std::size_t sz = e->size; if (!first_free_entry - || (reinterpret_cast (e) + sz - < reinterpret_cast (first_free_entry))) - { - // If the free list is empty or the entry is before the - // first element and cannot be merged with it add it as - // the first free entry. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = first_free_entry; - first_free_entry = f; - } + || (reinterpret_cast (e) + sz + < reinterpret_cast (first_free_entry))) + { + // If the free list is empty or the entry is before the + // first element and cannot be merged with it add it as + // the first free entry. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = first_free_entry; + first_free_entry = f; + } else if (reinterpret_cast (e) + sz - == reinterpret_cast (first_free_entry)) - { - // Check if we can merge with the first free entry being right - // after us. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz + first_free_entry->size; - f->next = first_free_entry->next; - first_free_entry = f; - } + == reinterpret_cast (first_free_entry)) + { + // Check if we can merge with the first free entry being right + // after us. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz + first_free_entry->size; + f->next = first_free_entry->next; + first_free_entry = f; + } else - { - // Else search for a free item we can merge with at its end. - free_entry **fe; - for (fe = &first_free_entry; - (*fe)->next - && (reinterpret_cast ((*fe)->next) - > reinterpret_cast (e) + sz); - fe = &(*fe)->next) - ; - // If we can merge the next block into us do so and continue - // with the cases below. 
- if (reinterpret_cast (e) + sz - == reinterpret_cast ((*fe)->next)) - { - sz += (*fe)->next->size; - (*fe)->next = (*fe)->next->next; - } - if (reinterpret_cast (*fe) + (*fe)->size - == reinterpret_cast (e)) - // Merge with the freelist entry. - (*fe)->size += sz; - else - { - // Else put it after it which keeps the freelist sorted. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = (*fe)->next; - (*fe)->next = f; - } - } + { + // Else search for a free item we can merge with at its end. + free_entry **fe; + for (fe = &first_free_entry; + (*fe)->next + && (reinterpret_cast ((*fe)->next) + > reinterpret_cast (e) + sz); + fe = &(*fe)->next) + ; + // If we can merge the next block into us do so and continue + // with the cases below. + if (reinterpret_cast (e) + sz + == reinterpret_cast ((*fe)->next)) + { + sz += (*fe)->next->size; + (*fe)->next = (*fe)->next->next; + } + if (reinterpret_cast (*fe) + (*fe)->size + == reinterpret_cast (e)) + // Merge with the freelist entry. + (*fe)->size += sz; + else + { + // Else put it after it which keeps the freelist sorted. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = (*fe)->next; + (*fe)->next = f; + } + } } bool pool::in_pool (void *ptr) { char *p = reinterpret_cast (ptr); return (p > arena - && p < arena + arena_size); + && p < arena + arena_size); } - pool emergency_pool; + int const emergency_pool_size = EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT + + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception); + char emergency_pool_storage[emergency_pool_size]; + pool emergency_pool{emergency_pool_storage, emergency_pool_size}; } namespace __gnu_cxx @@ -267,10 +293,11 @@ namespace __gnu_cxx void __freeres() { - if (emergency_pool.arena) + // why is this not a destructor? + if (emergency_pool.arena and not emergency_pool.mem_static) { - ::free(emergency_pool.arena); - emergency_pool.arena = 0; + ::free(emergency_pool.arena); + emergency_pool.arena = 0; } } } @@ -281,7 +308,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW void *ret; thrown_size += sizeof (__cxa_refcounted_exception); - ret = malloc (thrown_size); + ret = __wrap_malloc (thrown_size); if (!ret) ret = emergency_pool.allocate (thrown_size); @@ -312,7 +339,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW __cxa_dependent_exception *ret; ret = static_cast<__cxa_dependent_exception*> - (malloc (sizeof (__cxa_dependent_exception))); + (__wrap_malloc(sizeof (__cxa_dependent_exception))); if (!ret) ret = static_cast <__cxa_dependent_exception*> diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-11/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-11/libsupc++/eh_alloc.cc index 294440fd..f71c098f 100644 --- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-11/libsupc++/eh_alloc.cc +++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-11/libsupc++/eh_alloc.cc @@ -37,14 +37,16 @@ #include #if _GLIBCXX_HOSTED -using std::free; -using std::malloc; +//using std::free; +//using std::malloc; +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); using std::memset; #else // In a freestanding environment, these functions may not be available // -- but for now, we assume that they are. 
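For the libstdc++-v3-10 copy of eh_alloc.cc, the patch does more than rename the allocator calls: it gives the pool a second constructor that takes caller-provided storage, sizes a static char array from the EMERGENCY_OBJ_SIZE/EMERGENCY_OBJ_COUNT formula, and marks the pool mem_static so that __freeres() no longer passes the arena to ::free(). This keeps the emergency-pool fallback in __cxa_allocate_exception() usable even when __wrap_malloc cannot return memory yet. The sketch below shows the same hand-static-storage-to-a-pool pattern in isolation; the class and constant names are illustrative, not the libstdc++ ones.

// Editor's sketch of a pool backed by static storage; names are illustrative.
#include <cstddef>

class fixed_pool
{
  char *arena;                  // backing storage, owned by the caller
  std::size_t arena_size;       // usable bytes
public:
  bool mem_static;              // true: never hand the arena to ::free()

  fixed_pool(char *storage, std::size_t size)
  : arena(storage), arena_size(size), mem_static(true) {}

  void *base() const { return arena; }
  std::size_t capacity() const { return arena_size; }
};

// Static-duration storage exists before main() and needs no heap at all.
constexpr std::size_t pool_bytes = 64 * 1024;   // assumed size, not the patch's formula
alignas(void *) char pool_storage[pool_bytes];
fixed_pool emergency_pool_sketch{pool_storage, pool_bytes};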
-extern "C" void *malloc (std::size_t); -extern "C" void free(void *); +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); extern "C" void *memset (void *, int, std::size_t); #endif @@ -58,19 +60,19 @@ using namespace __cxxabiv1; // just for overhead. #if INT_MAX == 32767 -# define EMERGENCY_OBJ_SIZE 128 -# define EMERGENCY_OBJ_COUNT 16 +# define EMERGENCY_OBJ_SIZE 128 +# define EMERGENCY_OBJ_COUNT 16 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 -# define EMERGENCY_OBJ_SIZE 512 -# define EMERGENCY_OBJ_COUNT 32 +# define EMERGENCY_OBJ_SIZE 512 +# define EMERGENCY_OBJ_COUNT 32 #else -# define EMERGENCY_OBJ_SIZE 1024 -# define EMERGENCY_OBJ_COUNT 64 +# define EMERGENCY_OBJ_SIZE 1024 +# define EMERGENCY_OBJ_COUNT 64 #endif #ifndef __GTHREADS # undef EMERGENCY_OBJ_COUNT -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 #endif namespace __gnu_cxx @@ -93,12 +95,12 @@ namespace private: struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; // A single mutex controlling emergency allocations. @@ -119,15 +121,15 @@ namespace // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // to make this tunable. arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT - + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); - arena = (char *)malloc (arena_size); + + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + arena = (char *)__wrap_malloc (arena_size); if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - first_free_entry = NULL; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + first_free_entry = NULL; + return; + } // Populate the free-list with a single entry covering the whole arena first_free_entry = reinterpret_cast (arena); @@ -145,46 +147,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. free_entry **e; for (e = &first_free_entry; - *e && (*e)->size < size; - e = &(*e)->next) - ; + *e && (*e)->size < size; + e = &(*e)->next) + ; if (!*e) - return NULL; + return NULL; allocated_entry *x; if ((*e)->size - size >= sizeof (free_entry)) - { - // Split block if it is too large. - free_entry *f = reinterpret_cast - (reinterpret_cast (*e) + size); - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - new (f) free_entry; - f->next = next; - f->size = sz - size; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = size; - *e = f; - } + { + // Split block if it is too large. 
+ free_entry *f = reinterpret_cast + (reinterpret_cast (*e) + size); + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + new (f) free_entry; + f->next = next; + f->size = sz - size; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = size; + *e = f; + } else - { - // Exact size match or too small overhead for a free entry. - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = sz; - *e = next; - } + { + // Exact size match or too small overhead for a free entry. + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = sz; + *e = next; + } return &x->data; } @@ -192,71 +194,71 @@ namespace { __gnu_cxx::__scoped_lock sentry(emergency_mutex); allocated_entry *e = reinterpret_cast - (reinterpret_cast (data) - offsetof (allocated_entry, data)); + (reinterpret_cast (data) - offsetof (allocated_entry, data)); std::size_t sz = e->size; if (!first_free_entry - || (reinterpret_cast (e) + sz - < reinterpret_cast (first_free_entry))) - { - // If the free list is empty or the entry is before the - // first element and cannot be merged with it add it as - // the first free entry. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = first_free_entry; - first_free_entry = f; - } + || (reinterpret_cast (e) + sz + < reinterpret_cast (first_free_entry))) + { + // If the free list is empty or the entry is before the + // first element and cannot be merged with it add it as + // the first free entry. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = first_free_entry; + first_free_entry = f; + } else if (reinterpret_cast (e) + sz - == reinterpret_cast (first_free_entry)) - { - // Check if we can merge with the first free entry being right - // after us. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz + first_free_entry->size; - f->next = first_free_entry->next; - first_free_entry = f; - } + == reinterpret_cast (first_free_entry)) + { + // Check if we can merge with the first free entry being right + // after us. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz + first_free_entry->size; + f->next = first_free_entry->next; + first_free_entry = f; + } else - { - // Else search for a free item we can merge with at its end. - free_entry **fe; - for (fe = &first_free_entry; - (*fe)->next - && (reinterpret_cast ((*fe)->next) - > reinterpret_cast (e) + sz); - fe = &(*fe)->next) - ; - // If we can merge the next block into us do so and continue - // with the cases below. - if (reinterpret_cast (e) + sz - == reinterpret_cast ((*fe)->next)) - { - sz += (*fe)->next->size; - (*fe)->next = (*fe)->next->next; - } - if (reinterpret_cast (*fe) + (*fe)->size - == reinterpret_cast (e)) - // Merge with the freelist entry. - (*fe)->size += sz; - else - { - // Else put it after it which keeps the freelist sorted. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = (*fe)->next; - (*fe)->next = f; - } - } + { + // Else search for a free item we can merge with at its end. + free_entry **fe; + for (fe = &first_free_entry; + (*fe)->next + && (reinterpret_cast ((*fe)->next) + > reinterpret_cast (e) + sz); + fe = &(*fe)->next) + ; + // If we can merge the next block into us do so and continue + // with the cases below. 
+ if (reinterpret_cast (e) + sz + == reinterpret_cast ((*fe)->next)) + { + sz += (*fe)->next->size; + (*fe)->next = (*fe)->next->next; + } + if (reinterpret_cast (*fe) + (*fe)->size + == reinterpret_cast (e)) + // Merge with the freelist entry. + (*fe)->size += sz; + else + { + // Else put it after it which keeps the freelist sorted. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = (*fe)->next; + (*fe)->next = f; + } + } } bool pool::in_pool (void *ptr) { char *p = reinterpret_cast (ptr); return (p > arena - && p < arena + arena_size); + && p < arena + arena_size); } pool emergency_pool; @@ -269,8 +271,8 @@ namespace __gnu_cxx { if (emergency_pool.arena) { - ::free(emergency_pool.arena); - emergency_pool.arena = 0; + ::free(emergency_pool.arena); + emergency_pool.arena = 0; } } } @@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW void *ret; thrown_size += sizeof (__cxa_refcounted_exception); - ret = malloc (thrown_size); + ret = __wrap_malloc (thrown_size); if (!ret) ret = emergency_pool.allocate (thrown_size); @@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW __cxa_dependent_exception *ret; ret = static_cast<__cxa_dependent_exception*> - (malloc (sizeof (__cxa_dependent_exception))); + (__wrap_malloc (sizeof (__cxa_dependent_exception))); if (!ret) ret = static_cast <__cxa_dependent_exception*> diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-12/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-12/libsupc++/eh_alloc.cc index c85b9aed..db944d83 100644 --- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-12/libsupc++/eh_alloc.cc +++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-12/libsupc++/eh_alloc.cc @@ -37,14 +37,16 @@ #include #if _GLIBCXX_HOSTED -using std::free; -using std::malloc; +//using std::free; +//using std::malloc; +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); using std::memset; #else // In a freestanding environment, these functions may not be available // -- but for now, we assume that they are. -extern "C" void *malloc (std::size_t); -extern "C" void free(void *); +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); extern "C" void *memset (void *, int, std::size_t); #endif @@ -58,19 +60,19 @@ using namespace __cxxabiv1; // just for overhead. #if INT_MAX == 32767 -# define EMERGENCY_OBJ_SIZE 128 -# define EMERGENCY_OBJ_COUNT 16 +# define EMERGENCY_OBJ_SIZE 128 +# define EMERGENCY_OBJ_COUNT 16 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 -# define EMERGENCY_OBJ_SIZE 512 -# define EMERGENCY_OBJ_COUNT 32 +# define EMERGENCY_OBJ_SIZE 512 +# define EMERGENCY_OBJ_COUNT 32 #else -# define EMERGENCY_OBJ_SIZE 1024 -# define EMERGENCY_OBJ_COUNT 64 +# define EMERGENCY_OBJ_SIZE 1024 +# define EMERGENCY_OBJ_COUNT 64 #endif #ifndef __GTHREADS # undef EMERGENCY_OBJ_COUNT -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 #endif namespace __gnu_cxx @@ -93,12 +95,12 @@ namespace private: struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; // A single mutex controlling emergency allocations. 
@@ -119,15 +121,15 @@ namespace // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // to make this tunable. arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT - + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); - arena = (char *)malloc (arena_size); + + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + arena = (char *)__wrap_malloc (arena_size); if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - first_free_entry = NULL; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + first_free_entry = NULL; + return; + } // Populate the free-list with a single entry covering the whole arena first_free_entry = reinterpret_cast (arena); @@ -145,46 +147,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. free_entry **e; for (e = &first_free_entry; - *e && (*e)->size < size; - e = &(*e)->next) - ; + *e && (*e)->size < size; + e = &(*e)->next) + ; if (!*e) - return NULL; + return NULL; allocated_entry *x; if ((*e)->size - size >= sizeof (free_entry)) - { - // Split block if it is too large. - free_entry *f = reinterpret_cast - (reinterpret_cast (*e) + size); - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - new (f) free_entry; - f->next = next; - f->size = sz - size; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = size; - *e = f; - } + { + // Split block if it is too large. + free_entry *f = reinterpret_cast + (reinterpret_cast (*e) + size); + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + new (f) free_entry; + f->next = next; + f->size = sz - size; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = size; + *e = f; + } else - { - // Exact size match or too small overhead for a free entry. - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = sz; - *e = next; - } + { + // Exact size match or too small overhead for a free entry. + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = sz; + *e = next; + } return &x->data; } @@ -192,71 +194,71 @@ namespace { __gnu_cxx::__scoped_lock sentry(emergency_mutex); allocated_entry *e = reinterpret_cast - (reinterpret_cast (data) - offsetof (allocated_entry, data)); + (reinterpret_cast (data) - offsetof (allocated_entry, data)); std::size_t sz = e->size; if (!first_free_entry - || (reinterpret_cast (e) + sz - < reinterpret_cast (first_free_entry))) - { - // If the free list is empty or the entry is before the - // first element and cannot be merged with it add it as - // the first free entry. 
- free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = first_free_entry; - first_free_entry = f; - } + || (reinterpret_cast (e) + sz + < reinterpret_cast (first_free_entry))) + { + // If the free list is empty or the entry is before the + // first element and cannot be merged with it add it as + // the first free entry. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = first_free_entry; + first_free_entry = f; + } else if (reinterpret_cast (e) + sz - == reinterpret_cast (first_free_entry)) - { - // Check if we can merge with the first free entry being right - // after us. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz + first_free_entry->size; - f->next = first_free_entry->next; - first_free_entry = f; - } + == reinterpret_cast (first_free_entry)) + { + // Check if we can merge with the first free entry being right + // after us. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz + first_free_entry->size; + f->next = first_free_entry->next; + first_free_entry = f; + } else - { - // Else search for a free item we can merge with at its end. - free_entry **fe; - for (fe = &first_free_entry; - (*fe)->next - && (reinterpret_cast ((*fe)->next) - > reinterpret_cast (e) + sz); - fe = &(*fe)->next) - ; - // If we can merge the next block into us do so and continue - // with the cases below. - if (reinterpret_cast (e) + sz - == reinterpret_cast ((*fe)->next)) - { - sz += (*fe)->next->size; - (*fe)->next = (*fe)->next->next; - } - if (reinterpret_cast (*fe) + (*fe)->size - == reinterpret_cast (e)) - // Merge with the freelist entry. - (*fe)->size += sz; - else - { - // Else put it after it which keeps the freelist sorted. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = (*fe)->next; - (*fe)->next = f; - } - } + { + // Else search for a free item we can merge with at its end. + free_entry **fe; + for (fe = &first_free_entry; + (*fe)->next + && (reinterpret_cast ((*fe)->next) + > reinterpret_cast (e) + sz); + fe = &(*fe)->next) + ; + // If we can merge the next block into us do so and continue + // with the cases below. + if (reinterpret_cast (e) + sz + == reinterpret_cast ((*fe)->next)) + { + sz += (*fe)->next->size; + (*fe)->next = (*fe)->next->next; + } + if (reinterpret_cast (*fe) + (*fe)->size + == reinterpret_cast (e)) + // Merge with the freelist entry. + (*fe)->size += sz; + else + { + // Else put it after it which keeps the freelist sorted. 
+ free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = (*fe)->next; + (*fe)->next = f; + } + } } bool pool::in_pool (void *ptr) { char *p = reinterpret_cast (ptr); return (p > arena - && p < arena + arena_size); + && p < arena + arena_size); } pool emergency_pool; @@ -269,8 +271,8 @@ namespace __gnu_cxx { if (emergency_pool.arena) { - ::free(emergency_pool.arena); - emergency_pool.arena = 0; + ::free(emergency_pool.arena); + emergency_pool.arena = 0; } } } @@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW void *ret; thrown_size += sizeof (__cxa_refcounted_exception); - ret = malloc (thrown_size); + ret = __wrap_malloc (thrown_size); if (!ret) ret = emergency_pool.allocate (thrown_size); @@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW __cxa_dependent_exception *ret; ret = static_cast<__cxa_dependent_exception*> - (malloc (sizeof (__cxa_dependent_exception))); + (__wrap_malloc (sizeof (__cxa_dependent_exception))); if (!ret) ret = static_cast <__cxa_dependent_exception*> diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-13/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-13/libsupc++/eh_alloc.cc index ff98b04b..23b13efe 100644 --- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-13/libsupc++/eh_alloc.cc +++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-13/libsupc++/eh_alloc.cc @@ -73,14 +73,16 @@ // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE. #if _GLIBCXX_HOSTED -using std::free; -using std::malloc; +//using std::free; +//using std::malloc; using std::memset; +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); #else // In a freestanding environment, these functions may not be available // -- but for now, we assume that they are. -extern "C" void *malloc (std::size_t); -extern "C" void free(void *); +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); extern "C" void *memset (void *, int, std::size_t); #endif @@ -91,16 +93,16 @@ using namespace __cxxabiv1; // N.B. sizeof(std::bad_alloc) == sizeof(void*) // and sizeof(std::runtime_error) == 2 * sizeof(void*) // and sizeof(std::system_error) == 4 * sizeof(void*). -#define EMERGENCY_OBJ_SIZE 6 +#define EMERGENCY_OBJ_SIZE 6 #ifdef __GTHREADS // Assume that the number of concurrent exception objects scales with the // processor word size, i.e., 16-bit systems are not likely to have hundreds // of threads all simultaneously throwing on OOM conditions. -# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) +# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__) #else -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 # define MAX_OBJ_COUNT 64 #endif @@ -153,12 +155,12 @@ namespace private: struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; #if _GLIBCXX_HOSTED @@ -176,7 +178,7 @@ namespace // to implement in_pool. 
#ifdef _GLIBCXX_EH_POOL_STATIC static constexpr std::size_t arena_size - = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); + = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); alignas(void*) char arena[arena_size]; #else char *arena = nullptr; @@ -201,48 +203,48 @@ namespace #endif const std::string_view ns_name = "glibcxx.eh_pool"; std::pair tunables[]{ - {"obj_size", 0}, {"obj_count", obj_count} + {"obj_size", 0}, {"obj_count", obj_count} }; while (str) - { - if (*str == ':') - ++str; + { + if (*str == ':') + ++str; - if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) - && str[ns_name.size()] == '.') - { - str += ns_name.size() + 1; - for (auto& t : tunables) - if (!t.first.compare(0, t.first.size(), str, t.first.size()) - && str[t.first.size()] == '=') - { - str += t.first.size() + 1; - char* end; - unsigned long val = strtoul(str, &end, 0); - if ((*end == ':' || *end == '\0') && val <= INT_MAX) - t.second = val; - str = end; - break; - } - } - str = strchr(str, ':'); - } + if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) + && str[ns_name.size()] == '.') + { + str += ns_name.size() + 1; + for (auto& t : tunables) + if (!t.first.compare(0, t.first.size(), str, t.first.size()) + && str[t.first.size()] == '=') + { + str += t.first.size() + 1; + char* end; + unsigned long val = strtoul(str, &end, 0); + if ((*end == ':' || *end == '\0') && val <= INT_MAX) + t.second = val; + str = end; + break; + } + } + str = strchr(str, ':'); + } obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero. if (tunables[0].second != 0) - obj_size = tunables[0].second; + obj_size = tunables[0].second; #endif // HOSTED #endif // NOT_FOR_L4 arena_size = buffer_size_in_bytes(obj_count, obj_size); if (arena_size == 0) - return; - arena = (char *)malloc (arena_size); + return; + arena = (char *)__wrap_malloc (arena_size); if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + return; + } #endif // STATIC // Populate the free-list with a single entry covering the whole arena @@ -261,46 +263,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. free_entry **e; for (e = &first_free_entry; - *e && (*e)->size < size; - e = &(*e)->next) - ; + *e && (*e)->size < size; + e = &(*e)->next) + ; if (!*e) - return NULL; + return NULL; allocated_entry *x; if ((*e)->size - size >= sizeof (free_entry)) - { - // Split block if it is too large. - free_entry *f = reinterpret_cast - (reinterpret_cast (*e) + size); - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - new (f) free_entry; - f->next = next; - f->size = sz - size; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = size; - *e = f; - } + { + // Split block if it is too large. 
+ free_entry *f = reinterpret_cast + (reinterpret_cast (*e) + size); + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + new (f) free_entry; + f->next = next; + f->size = sz - size; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = size; + *e = f; + } else - { - // Exact size match or too small overhead for a free entry. - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = sz; - *e = next; - } + { + // Exact size match or too small overhead for a free entry. + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = sz; + *e = next; + } return &x->data; } @@ -308,64 +310,64 @@ namespace { __scoped_lock sentry(emergency_mutex); allocated_entry *e = reinterpret_cast - (reinterpret_cast (data) - offsetof (allocated_entry, data)); + (reinterpret_cast (data) - offsetof (allocated_entry, data)); std::size_t sz = e->size; if (!first_free_entry - || (reinterpret_cast (e) + sz - < reinterpret_cast (first_free_entry))) - { - // If the free list is empty or the entry is before the - // first element and cannot be merged with it add it as - // the first free entry. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = first_free_entry; - first_free_entry = f; - } + || (reinterpret_cast (e) + sz + < reinterpret_cast (first_free_entry))) + { + // If the free list is empty or the entry is before the + // first element and cannot be merged with it add it as + // the first free entry. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = first_free_entry; + first_free_entry = f; + } else if (reinterpret_cast (e) + sz - == reinterpret_cast (first_free_entry)) - { - // Check if we can merge with the first free entry being right - // after us. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz + first_free_entry->size; - f->next = first_free_entry->next; - first_free_entry = f; - } + == reinterpret_cast (first_free_entry)) + { + // Check if we can merge with the first free entry being right + // after us. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz + first_free_entry->size; + f->next = first_free_entry->next; + first_free_entry = f; + } else - { - // Else search for a free item we can merge with at its end. - free_entry **fe; - for (fe = &first_free_entry; - (*fe)->next - && (reinterpret_cast (e) + sz - > reinterpret_cast ((*fe)->next)); - fe = &(*fe)->next) - ; - // If we can merge the next block into us do so and continue - // with the cases below. - if (reinterpret_cast (e) + sz - == reinterpret_cast ((*fe)->next)) - { - sz += (*fe)->next->size; - (*fe)->next = (*fe)->next->next; - } - if (reinterpret_cast (*fe) + (*fe)->size - == reinterpret_cast (e)) - // Merge with the freelist entry. - (*fe)->size += sz; - else - { - // Else put it after it which keeps the freelist sorted. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = (*fe)->next; - (*fe)->next = f; - } - } + { + // Else search for a free item we can merge with at its end. + free_entry **fe; + for (fe = &first_free_entry; + (*fe)->next + && (reinterpret_cast (e) + sz + > reinterpret_cast ((*fe)->next)); + fe = &(*fe)->next) + ; + // If we can merge the next block into us do so and continue + // with the cases below. 
+ if (reinterpret_cast (e) + sz + == reinterpret_cast ((*fe)->next)) + { + sz += (*fe)->next->size; + (*fe)->next = (*fe)->next->next; + } + if (reinterpret_cast (*fe) + (*fe)->size + == reinterpret_cast (e)) + // Merge with the freelist entry. + (*fe)->size += sz; + else + { + // Else put it after it which keeps the freelist sorted. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = (*fe)->next; + (*fe)->next = f; + } + } } inline bool pool::in_pool (void *ptr) const noexcept @@ -386,8 +388,8 @@ namespace __gnu_cxx #ifndef _GLIBCXX_EH_POOL_STATIC if (emergency_pool.arena) { - ::free(emergency_pool.arena); - emergency_pool.arena = 0; + ::free(emergency_pool.arena); + emergency_pool.arena = 0; } #endif } @@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept { thrown_size += sizeof (__cxa_refcounted_exception); - void *ret = malloc (thrown_size); + void *ret = __wrap_malloc (thrown_size); #if USE_POOL if (!ret) @@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept extern "C" __cxa_dependent_exception* __cxxabiv1::__cxa_allocate_dependent_exception() noexcept { - void *ret = malloc (sizeof (__cxa_dependent_exception)); + void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception)); #if USE_POOL if (!ret) diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-14/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-14/libsupc++/eh_alloc.cc index a1a163a5..12c9e576 100644 --- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-14/libsupc++/eh_alloc.cc +++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-14/libsupc++/eh_alloc.cc @@ -73,14 +73,16 @@ // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE. #if _GLIBCXX_HOSTED -using std::free; -using std::malloc; +//using std::free; +//using std::malloc; +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); using std::memset; #else // In a freestanding environment, these functions may not be available // -- but for now, we assume that they are. -extern "C" void *malloc (std::size_t); -extern "C" void free(void *); +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); extern "C" void *memset (void *, int, std::size_t); #endif @@ -91,16 +93,16 @@ using namespace __cxxabiv1; // N.B. sizeof(std::bad_alloc) == sizeof(void*) // and sizeof(std::runtime_error) == 2 * sizeof(void*) // and sizeof(std::system_error) == 4 * sizeof(void*). -#define EMERGENCY_OBJ_SIZE 6 +#define EMERGENCY_OBJ_SIZE 6 #ifdef __GTHREADS // Assume that the number of concurrent exception objects scales with the // processor word size, i.e., 16-bit systems are not likely to have hundreds // of threads all simultaneously throwing on OOM conditions. -# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) +# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__) #else -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 # define MAX_OBJ_COUNT 64 #endif @@ -153,12 +155,12 @@ namespace private: struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; #if _GLIBCXX_HOSTED @@ -176,7 +178,7 @@ namespace // to implement in_pool. 
#ifdef _GLIBCXX_EH_POOL_STATIC static constexpr std::size_t arena_size - = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); + = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); alignas(void*) char arena[arena_size]; #else char *arena = nullptr; @@ -201,48 +203,48 @@ namespace #endif const std::string_view ns_name = "glibcxx.eh_pool"; std::pair tunables[]{ - {"obj_size", 0}, {"obj_count", obj_count} + {"obj_size", 0}, {"obj_count", obj_count} }; while (str) - { - if (*str == ':') - ++str; + { + if (*str == ':') + ++str; - if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) - && str[ns_name.size()] == '.') - { - str += ns_name.size() + 1; - for (auto& t : tunables) - if (!t.first.compare(0, t.first.size(), str, t.first.size()) - && str[t.first.size()] == '=') - { - str += t.first.size() + 1; - char* end; - unsigned long val = strtoul(str, &end, 0); - if ((*end == ':' || *end == '\0') && val <= INT_MAX) - t.second = val; - str = end; - break; - } - } - str = strchr(str, ':'); - } + if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) + && str[ns_name.size()] == '.') + { + str += ns_name.size() + 1; + for (auto& t : tunables) + if (!t.first.compare(0, t.first.size(), str, t.first.size()) + && str[t.first.size()] == '=') + { + str += t.first.size() + 1; + char* end; + unsigned long val = strtoul(str, &end, 0); + if ((*end == ':' || *end == '\0') && val <= INT_MAX) + t.second = val; + str = end; + break; + } + } + str = strchr(str, ':'); + } obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero. if (tunables[0].second != 0) - obj_size = tunables[0].second; + obj_size = tunables[0].second; #endif // HOSTED #endif // NOT_FOR_L4 arena_size = buffer_size_in_bytes(obj_count, obj_size); if (arena_size == 0) - return; - arena = (char *)malloc (arena_size); + return; + arena = (char *)__wrap_malloc (arena_size); if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + return; + } #endif // STATIC // Populate the free-list with a single entry covering the whole arena @@ -261,46 +263,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. free_entry **e; for (e = &first_free_entry; - *e && (*e)->size < size; - e = &(*e)->next) - ; + *e && (*e)->size < size; + e = &(*e)->next) + ; if (!*e) - return NULL; + return NULL; allocated_entry *x; if ((*e)->size - size >= sizeof (free_entry)) - { - // Split block if it is too large. - free_entry *f = reinterpret_cast - (reinterpret_cast (*e) + size); - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - new (f) free_entry; - f->next = next; - f->size = sz - size; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = size; - *e = f; - } + { + // Split block if it is too large. 
+ free_entry *f = reinterpret_cast + (reinterpret_cast (*e) + size); + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + new (f) free_entry; + f->next = next; + f->size = sz - size; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = size; + *e = f; + } else - { - // Exact size match or too small overhead for a free entry. - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = sz; - *e = next; - } + { + // Exact size match or too small overhead for a free entry. + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = sz; + *e = next; + } return &x->data; } @@ -308,64 +310,64 @@ namespace { __scoped_lock sentry(emergency_mutex); allocated_entry *e = reinterpret_cast - (reinterpret_cast (data) - offsetof (allocated_entry, data)); + (reinterpret_cast (data) - offsetof (allocated_entry, data)); std::size_t sz = e->size; if (!first_free_entry - || (reinterpret_cast (e) + sz - < reinterpret_cast (first_free_entry))) - { - // If the free list is empty or the entry is before the - // first element and cannot be merged with it add it as - // the first free entry. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = first_free_entry; - first_free_entry = f; - } + || (reinterpret_cast (e) + sz + < reinterpret_cast (first_free_entry))) + { + // If the free list is empty or the entry is before the + // first element and cannot be merged with it add it as + // the first free entry. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = first_free_entry; + first_free_entry = f; + } else if (reinterpret_cast (e) + sz - == reinterpret_cast (first_free_entry)) - { - // Check if we can merge with the first free entry being right - // after us. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz + first_free_entry->size; - f->next = first_free_entry->next; - first_free_entry = f; - } + == reinterpret_cast (first_free_entry)) + { + // Check if we can merge with the first free entry being right + // after us. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz + first_free_entry->size; + f->next = first_free_entry->next; + first_free_entry = f; + } else - { - // Else search for a free item we can merge with at its end. - free_entry **fe; - for (fe = &first_free_entry; - (*fe)->next - && (reinterpret_cast (e) + sz - > reinterpret_cast ((*fe)->next)); - fe = &(*fe)->next) - ; - // If we can merge the next block into us do so and continue - // with the cases below. - if (reinterpret_cast (e) + sz - == reinterpret_cast ((*fe)->next)) - { - sz += (*fe)->next->size; - (*fe)->next = (*fe)->next->next; - } - if (reinterpret_cast (*fe) + (*fe)->size - == reinterpret_cast (e)) - // Merge with the freelist entry. - (*fe)->size += sz; - else - { - // Else put it after it which keeps the freelist sorted. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = (*fe)->next; - (*fe)->next = f; - } - } + { + // Else search for a free item we can merge with at its end. + free_entry **fe; + for (fe = &first_free_entry; + (*fe)->next + && (reinterpret_cast (e) + sz + > reinterpret_cast ((*fe)->next)); + fe = &(*fe)->next) + ; + // If we can merge the next block into us do so and continue + // with the cases below. 
+ if (reinterpret_cast (e) + sz + == reinterpret_cast ((*fe)->next)) + { + sz += (*fe)->next->size; + (*fe)->next = (*fe)->next->next; + } + if (reinterpret_cast (*fe) + (*fe)->size + == reinterpret_cast (e)) + // Merge with the freelist entry. + (*fe)->size += sz; + else + { + // Else put it after it which keeps the freelist sorted. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = (*fe)->next; + (*fe)->next = f; + } + } } inline bool pool::in_pool (void *ptr) const noexcept @@ -386,8 +388,8 @@ namespace __gnu_cxx #ifndef _GLIBCXX_EH_POOL_STATIC if (emergency_pool.arena) { - ::free(emergency_pool.arena); - emergency_pool.arena = 0; + ::free(emergency_pool.arena); + emergency_pool.arena = 0; } #endif } @@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept { thrown_size += sizeof (__cxa_refcounted_exception); - void *ret = malloc (thrown_size); + void *ret = __wrap_malloc (thrown_size); #if USE_POOL if (!ret) @@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept extern "C" __cxa_dependent_exception* __cxxabiv1::__cxa_allocate_dependent_exception() noexcept { - void *ret = malloc (sizeof (__cxa_dependent_exception)); + void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception)); #if USE_POOL if (!ret) diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-15/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-15/libsupc++/eh_alloc.cc index 302bc6f1..12c9e576 100644 --- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-15/libsupc++/eh_alloc.cc +++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-15/libsupc++/eh_alloc.cc @@ -1,5 +1,5 @@ // -*- C++ -*- Allocate exception objects. -// Copyright (C) 2001-2025 Free Software Foundation, Inc. +// Copyright (C) 2001-2024 Free Software Foundation, Inc. // // This file is part of GCC. // @@ -73,14 +73,16 @@ // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE. #if _GLIBCXX_HOSTED -using std::free; -using std::malloc; +//using std::free; +//using std::malloc; +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); using std::memset; #else // In a freestanding environment, these functions may not be available // -- but for now, we assume that they are. -extern "C" void *malloc (std::size_t); -extern "C" void free(void *); +extern "C" void *__wrap_malloc (std::size_t); +extern "C" void __wrap_free(void *); extern "C" void *memset (void *, int, std::size_t); #endif @@ -91,16 +93,16 @@ using namespace __cxxabiv1; // N.B. sizeof(std::bad_alloc) == sizeof(void*) // and sizeof(std::runtime_error) == 2 * sizeof(void*) // and sizeof(std::system_error) == 4 * sizeof(void*). -#define EMERGENCY_OBJ_SIZE 6 +#define EMERGENCY_OBJ_SIZE 6 #ifdef __GTHREADS // Assume that the number of concurrent exception objects scales with the // processor word size, i.e., 16-bit systems are not likely to have hundreds // of threads all simultaneously throwing on OOM conditions. 
-# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) +# define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__) # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__) #else -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 # define MAX_OBJ_COUNT 64 #endif @@ -153,12 +155,12 @@ namespace private: struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; #if _GLIBCXX_HOSTED @@ -176,7 +178,7 @@ namespace // to implement in_pool. #ifdef _GLIBCXX_EH_POOL_STATIC static constexpr std::size_t arena_size - = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); + = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE); alignas(void*) char arena[arena_size]; #else char *arena = nullptr; @@ -201,48 +203,48 @@ namespace #endif const std::string_view ns_name = "glibcxx.eh_pool"; std::pair tunables[]{ - {"obj_size", 0}, {"obj_count", obj_count} + {"obj_size", 0}, {"obj_count", obj_count} }; while (str) - { - if (*str == ':') - ++str; + { + if (*str == ':') + ++str; - if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) - && str[ns_name.size()] == '.') - { - str += ns_name.size() + 1; - for (auto& t : tunables) - if (!t.first.compare(0, t.first.size(), str, t.first.size()) - && str[t.first.size()] == '=') - { - str += t.first.size() + 1; - char* end; - unsigned long val = strtoul(str, &end, 0); - if ((*end == ':' || *end == '\0') && val <= INT_MAX) - t.second = val; - str = end; - break; - } - } - str = strchr(str, ':'); - } + if (!ns_name.compare(0, ns_name.size(), str, ns_name.size()) + && str[ns_name.size()] == '.') + { + str += ns_name.size() + 1; + for (auto& t : tunables) + if (!t.first.compare(0, t.first.size(), str, t.first.size()) + && str[t.first.size()] == '=') + { + str += t.first.size() + 1; + char* end; + unsigned long val = strtoul(str, &end, 0); + if ((*end == ':' || *end == '\0') && val <= INT_MAX) + t.second = val; + str = end; + break; + } + } + str = strchr(str, ':'); + } obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero. if (tunables[0].second != 0) - obj_size = tunables[0].second; + obj_size = tunables[0].second; #endif // HOSTED #endif // NOT_FOR_L4 arena_size = buffer_size_in_bytes(obj_count, obj_size); if (arena_size == 0) - return; - arena = (char *)malloc (arena_size); + return; + arena = (char *)__wrap_malloc (arena_size); if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + return; + } #endif // STATIC // Populate the free-list with a single entry covering the whole arena @@ -261,46 +263,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. 
      free_entry **e;
      for (e = &first_free_entry;
-          *e && (*e)->size < size;
-          e = &(*e)->next)
-       ;
+          *e && (*e)->size < size;
+          e = &(*e)->next)
+       ;
      if (!*e)
-       return NULL;
+       return NULL;
      allocated_entry *x;
      if ((*e)->size - size >= sizeof (free_entry))
-       {
-         // Split block if it is too large.
-         free_entry *f = reinterpret_cast <free_entry *>
-           (reinterpret_cast <char *> (*e) + size);
-         std::size_t sz = (*e)->size;
-         free_entry *next = (*e)->next;
-         new (f) free_entry;
-         f->next = next;
-         f->size = sz - size;
-         x = reinterpret_cast <allocated_entry *> (*e);
-         new (x) allocated_entry;
-         x->size = size;
-         *e = f;
-       }
+       {
+         // Split block if it is too large.
+         free_entry *f = reinterpret_cast <free_entry *>
+           (reinterpret_cast <char *> (*e) + size);
+         std::size_t sz = (*e)->size;
+         free_entry *next = (*e)->next;
+         new (f) free_entry;
+         f->next = next;
+         f->size = sz - size;
+         x = reinterpret_cast <allocated_entry *> (*e);
+         new (x) allocated_entry;
+         x->size = size;
+         *e = f;
+       }
      else
-       {
-         // Exact size match or too small overhead for a free entry.
-         std::size_t sz = (*e)->size;
-         free_entry *next = (*e)->next;
-         x = reinterpret_cast <allocated_entry *> (*e);
-         new (x) allocated_entry;
-         x->size = sz;
-         *e = next;
-       }
+       {
+         // Exact size match or too small overhead for a free entry.
+         std::size_t sz = (*e)->size;
+         free_entry *next = (*e)->next;
+         x = reinterpret_cast <allocated_entry *> (*e);
+         new (x) allocated_entry;
+         x->size = sz;
+         *e = next;
+       }
      return &x->data;
    }

@@ -308,64 +310,64 @@ namespace
    {
      __scoped_lock sentry(emergency_mutex);
      allocated_entry *e = reinterpret_cast <allocated_entry *>
-       (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
+       (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
      std::size_t sz = e->size;
      if (!first_free_entry
-         || (reinterpret_cast <char *> (e) + sz
-             < reinterpret_cast <char *> (first_free_entry)))
-       {
-         // If the free list is empty or the entry is before the
-         // first element and cannot be merged with it add it as
-         // the first free entry.
-         free_entry *f = reinterpret_cast <free_entry *> (e);
-         new (f) free_entry;
-         f->size = sz;
-         f->next = first_free_entry;
-         first_free_entry = f;
-       }
+         || (reinterpret_cast <char *> (e) + sz
+             < reinterpret_cast <char *> (first_free_entry)))
+       {
+         // If the free list is empty or the entry is before the
+         // first element and cannot be merged with it add it as
+         // the first free entry.
+         free_entry *f = reinterpret_cast <free_entry *> (e);
+         new (f) free_entry;
+         f->size = sz;
+         f->next = first_free_entry;
+         first_free_entry = f;
+       }
      else if (reinterpret_cast <char *> (e) + sz
-              == reinterpret_cast <char *> (first_free_entry))
-       {
-         // Check if we can merge with the first free entry being right
-         // after us.
-         free_entry *f = reinterpret_cast <free_entry *> (e);
-         new (f) free_entry;
-         f->size = sz + first_free_entry->size;
-         f->next = first_free_entry->next;
-         first_free_entry = f;
-       }
+              == reinterpret_cast <char *> (first_free_entry))
+       {
+         // Check if we can merge with the first free entry being right
+         // after us.
+         free_entry *f = reinterpret_cast <free_entry *> (e);
+         new (f) free_entry;
+         f->size = sz + first_free_entry->size;
+         f->next = first_free_entry->next;
+         first_free_entry = f;
+       }
      else
-       {
-         // Else search for a free item we can merge with at its end.
-         free_entry **fe;
-         for (fe = &first_free_entry;
-              (*fe)->next
-              && (reinterpret_cast <char *> (e) + sz
-                  > reinterpret_cast <char *> ((*fe)->next));
-              fe = &(*fe)->next)
-           ;
-         // If we can merge the next block into us do so and continue
-         // with the cases below.
-         if (reinterpret_cast <char *> (e) + sz
-             == reinterpret_cast <char *> ((*fe)->next))
-           {
-             sz += (*fe)->next->size;
-             (*fe)->next = (*fe)->next->next;
-           }
-         if (reinterpret_cast <char *> (*fe) + (*fe)->size
-             == reinterpret_cast <char *> (e))
-           // Merge with the freelist entry.
-           (*fe)->size += sz;
-         else
-           {
-             // Else put it after it which keeps the freelist sorted.
-             free_entry *f = reinterpret_cast <free_entry *> (e);
-             new (f) free_entry;
-             f->size = sz;
-             f->next = (*fe)->next;
-             (*fe)->next = f;
-           }
-       }
+       {
+         // Else search for a free item we can merge with at its end.
+         free_entry **fe;
+         for (fe = &first_free_entry;
+              (*fe)->next
+              && (reinterpret_cast <char *> (e) + sz
+                  > reinterpret_cast <char *> ((*fe)->next));
+              fe = &(*fe)->next)
+           ;
+         // If we can merge the next block into us do so and continue
+         // with the cases below.
+         if (reinterpret_cast <char *> (e) + sz
+             == reinterpret_cast <char *> ((*fe)->next))
+           {
+             sz += (*fe)->next->size;
+             (*fe)->next = (*fe)->next->next;
+           }
+         if (reinterpret_cast <char *> (*fe) + (*fe)->size
+             == reinterpret_cast <char *> (e))
+           // Merge with the freelist entry.
+           (*fe)->size += sz;
+         else
+           {
+             // Else put it after it which keeps the freelist sorted.
+             free_entry *f = reinterpret_cast <free_entry *> (e);
+             new (f) free_entry;
+             f->size = sz;
+             f->next = (*fe)->next;
+             (*fe)->next = f;
+           }
+       }
    }

  inline bool
  pool::in_pool (void *ptr) const noexcept
@@ -386,8 +388,8 @@ namespace __gnu_cxx
 #ifndef _GLIBCXX_EH_POOL_STATIC
    if (emergency_pool.arena)
      {
-       ::free(emergency_pool.arena);
-       emergency_pool.arena = 0;
+       ::free(emergency_pool.arena);
+       emergency_pool.arena = 0;
      }
 #endif
  }
@@ -399,7 +401,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
 {
  thrown_size += sizeof (__cxa_refcounted_exception);

-  void *ret = malloc (thrown_size);
+  void *ret = __wrap_malloc (thrown_size);

 #if USE_POOL
  if (!ret)
@@ -431,7 +433,7 @@ __cxxabiv1::__cxa_free_exception(void *vptr) noexcept
 extern "C" __cxa_dependent_exception*
 __cxxabiv1::__cxa_allocate_dependent_exception() noexcept
 {
-  void *ret = malloc (sizeof (__cxa_dependent_exception));
+  void *ret = __wrap_malloc (sizeof (__cxa_dependent_exception));

 #if USE_POOL
  if (!ret)
diff --git a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-9/libsupc++/eh_alloc.cc b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-9/libsupc++/eh_alloc.cc
index 005c28db..b184e012 100644
--- a/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-9/libsupc++/eh_alloc.cc
+++ b/src/l4/pkg/l4re-core/libstdc++-v3/contrib/libstdc++-v3-9/libsupc++/eh_alloc.cc
@@ -37,14 +37,16 @@
 #include <new>

 #if _GLIBCXX_HOSTED
-using std::free;
-using std::malloc;
+//using std::free;
+//using std::malloc;
+extern "C" void *__wrap_malloc (std::size_t);
+extern "C" void __wrap_free(void *);
 using std::memset;
 #else
 // In a freestanding environment, these functions may not be available
 // -- but for now, we assume that they are.
-extern "C" void *malloc (std::size_t);
-extern "C" void free(void *);
+extern "C" void *__wrap_malloc (std::size_t);
+extern "C" void __wrap_free(void *);
 extern "C" void *memset (void *, int, std::size_t);
 #endif

@@ -58,19 +60,19 @@ using namespace __cxxabiv1;
 // just for overhead.

#if INT_MAX == 32767 -# define EMERGENCY_OBJ_SIZE 128 -# define EMERGENCY_OBJ_COUNT 16 +# define EMERGENCY_OBJ_SIZE 128 +# define EMERGENCY_OBJ_COUNT 16 #elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647 -# define EMERGENCY_OBJ_SIZE 512 -# define EMERGENCY_OBJ_COUNT 32 +# define EMERGENCY_OBJ_SIZE 512 +# define EMERGENCY_OBJ_COUNT 32 #else -# define EMERGENCY_OBJ_SIZE 1024 -# define EMERGENCY_OBJ_COUNT 64 +# define EMERGENCY_OBJ_SIZE 1024 +# define EMERGENCY_OBJ_COUNT 64 #endif #ifndef __GTHREADS # undef EMERGENCY_OBJ_COUNT -# define EMERGENCY_OBJ_COUNT 4 +# define EMERGENCY_OBJ_COUNT 4 #endif namespace __gnu_cxx @@ -93,12 +95,12 @@ namespace private: struct free_entry { - std::size_t size; - free_entry *next; + std::size_t size; + free_entry *next; }; struct allocated_entry { - std::size_t size; - char data[] __attribute__((aligned)); + std::size_t size; + char data[] __attribute__((aligned)); }; // A single mutex controlling emergency allocations. @@ -119,15 +121,15 @@ namespace // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE environment // to make this tunable. arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT - + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); - arena = (char *)malloc (arena_size); + + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception)); + arena = (char *)__wrap_malloc (arena_size); if (!arena) - { - // If the allocation failed go without an emergency pool. - arena_size = 0; - first_free_entry = NULL; - return; - } + { + // If the allocation failed go without an emergency pool. + arena_size = 0; + first_free_entry = NULL; + return; + } // Populate the free-list with a single entry covering the whole arena first_free_entry = reinterpret_cast (arena); @@ -145,46 +147,46 @@ namespace // And we need to at least hand out objects of the size of // a freelist entry. if (size < sizeof (free_entry)) - size = sizeof (free_entry); + size = sizeof (free_entry); // And we need to align objects we hand out to the maximum // alignment required on the target (this really aligns the // tail which will become a new freelist entry). size = ((size + __alignof__ (allocated_entry::data) - 1) - & ~(__alignof__ (allocated_entry::data) - 1)); + & ~(__alignof__ (allocated_entry::data) - 1)); // Search for an entry of proper size on the freelist. free_entry **e; for (e = &first_free_entry; - *e && (*e)->size < size; - e = &(*e)->next) - ; + *e && (*e)->size < size; + e = &(*e)->next) + ; if (!*e) - return NULL; + return NULL; allocated_entry *x; if ((*e)->size - size >= sizeof (free_entry)) - { - // Split block if it is too large. - free_entry *f = reinterpret_cast - (reinterpret_cast (*e) + size); - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - new (f) free_entry; - f->next = next; - f->size = sz - size; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = size; - *e = f; - } + { + // Split block if it is too large. + free_entry *f = reinterpret_cast + (reinterpret_cast (*e) + size); + std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + new (f) free_entry; + f->next = next; + f->size = sz - size; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = size; + *e = f; + } else - { - // Exact size match or too small overhead for a free entry. - std::size_t sz = (*e)->size; - free_entry *next = (*e)->next; - x = reinterpret_cast (*e); - new (x) allocated_entry; - x->size = sz; - *e = next; - } + { + // Exact size match or too small overhead for a free entry. 
+ std::size_t sz = (*e)->size; + free_entry *next = (*e)->next; + x = reinterpret_cast (*e); + new (x) allocated_entry; + x->size = sz; + *e = next; + } return &x->data; } @@ -192,71 +194,71 @@ namespace { __gnu_cxx::__scoped_lock sentry(emergency_mutex); allocated_entry *e = reinterpret_cast - (reinterpret_cast (data) - offsetof (allocated_entry, data)); + (reinterpret_cast (data) - offsetof (allocated_entry, data)); std::size_t sz = e->size; if (!first_free_entry - || (reinterpret_cast (e) + sz - < reinterpret_cast (first_free_entry))) - { - // If the free list is empty or the entry is before the - // first element and cannot be merged with it add it as - // the first free entry. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = first_free_entry; - first_free_entry = f; - } + || (reinterpret_cast (e) + sz + < reinterpret_cast (first_free_entry))) + { + // If the free list is empty or the entry is before the + // first element and cannot be merged with it add it as + // the first free entry. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = first_free_entry; + first_free_entry = f; + } else if (reinterpret_cast (e) + sz - == reinterpret_cast (first_free_entry)) - { - // Check if we can merge with the first free entry being right - // after us. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz + first_free_entry->size; - f->next = first_free_entry->next; - first_free_entry = f; - } + == reinterpret_cast (first_free_entry)) + { + // Check if we can merge with the first free entry being right + // after us. + free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz + first_free_entry->size; + f->next = first_free_entry->next; + first_free_entry = f; + } else - { - // Else search for a free item we can merge with at its end. - free_entry **fe; - for (fe = &first_free_entry; - (*fe)->next - && (reinterpret_cast ((*fe)->next) - > reinterpret_cast (e) + sz); - fe = &(*fe)->next) - ; - // If we can merge the next block into us do so and continue - // with the cases below. - if (reinterpret_cast (e) + sz - == reinterpret_cast ((*fe)->next)) - { - sz += (*fe)->next->size; - (*fe)->next = (*fe)->next->next; - } - if (reinterpret_cast (*fe) + (*fe)->size - == reinterpret_cast (e)) - // Merge with the freelist entry. - (*fe)->size += sz; - else - { - // Else put it after it which keeps the freelist sorted. - free_entry *f = reinterpret_cast (e); - new (f) free_entry; - f->size = sz; - f->next = (*fe)->next; - (*fe)->next = f; - } - } + { + // Else search for a free item we can merge with at its end. + free_entry **fe; + for (fe = &first_free_entry; + (*fe)->next + && (reinterpret_cast ((*fe)->next) + > reinterpret_cast (e) + sz); + fe = &(*fe)->next) + ; + // If we can merge the next block into us do so and continue + // with the cases below. + if (reinterpret_cast (e) + sz + == reinterpret_cast ((*fe)->next)) + { + sz += (*fe)->next->size; + (*fe)->next = (*fe)->next->next; + } + if (reinterpret_cast (*fe) + (*fe)->size + == reinterpret_cast (e)) + // Merge with the freelist entry. + (*fe)->size += sz; + else + { + // Else put it after it which keeps the freelist sorted. 
+ free_entry *f = reinterpret_cast (e); + new (f) free_entry; + f->size = sz; + f->next = (*fe)->next; + (*fe)->next = f; + } + } } bool pool::in_pool (void *ptr) { char *p = reinterpret_cast (ptr); return (p > arena - && p < arena + arena_size); + && p < arena + arena_size); } pool emergency_pool; @@ -269,8 +271,8 @@ namespace __gnu_cxx { if (emergency_pool.arena) { - ::free(emergency_pool.arena); - emergency_pool.arena = 0; + ::free(emergency_pool.arena); + emergency_pool.arena = 0; } } } @@ -281,7 +283,7 @@ __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) _GLIBCXX_NOTHROW void *ret; thrown_size += sizeof (__cxa_refcounted_exception); - ret = malloc (thrown_size); + ret = __wrap_malloc (thrown_size); if (!ret) ret = emergency_pool.allocate (thrown_size); @@ -312,7 +314,7 @@ __cxxabiv1::__cxa_allocate_dependent_exception() _GLIBCXX_NOTHROW __cxa_dependent_exception *ret; ret = static_cast<__cxa_dependent_exception*> - (malloc (sizeof (__cxa_dependent_exception))); + (__wrap_malloc (sizeof (__cxa_dependent_exception))); if (!ret) ret = static_cast <__cxa_dependent_exception*> diff --git a/src/l4/pkg/l4re-core/libumalloc/lib/src/malloc.cc b/src/l4/pkg/l4re-core/libumalloc/lib/src/malloc.cc index f61eaa32..507ceef3 100644 --- a/src/l4/pkg/l4re-core/libumalloc/lib/src/malloc.cc +++ b/src/l4/pkg/l4re-core/libumalloc/lib/src/malloc.cc @@ -994,6 +994,8 @@ static void *realloc(void *ptr, size_t size) } // namespace umalloc +L4_BEGIN_DECLS + /** * Standard-compliant malloc implementation. * @@ -1001,7 +1003,7 @@ static void *realloc(void *ptr, size_t size) * * \return Valid allocated memory or nullptr if the allocation failed. */ -void *malloc(size_t size) noexcept +void *__wrap_malloc(size_t size) noexcept { auto ptr = umalloc::alloc(size); if (!ptr) @@ -1018,7 +1020,7 @@ void *malloc(size_t size) noexcept * * \return Valid allocated memory or nullptr if the allocation failed. */ -void *aligned_alloc(size_t alignment, size_t size) noexcept +void *__wrap_aligned_alloc(size_t alignment, size_t size) noexcept { auto ptr = umalloc::alloc(size, alignment); if (!ptr) @@ -1032,7 +1034,7 @@ void *aligned_alloc(size_t alignment, size_t size) noexcept * * \param ptr Previously allocated valid memory. */ -void free(void *ptr) noexcept +void __wrap_free(void *ptr) noexcept { if (ptr) umalloc::dealloc(ptr); @@ -1046,7 +1048,7 @@ void free(void *ptr) noexcept * * \return Valid allocated memory or nullptr if the allocation failed. */ -void *calloc(size_t nmemb, size_t size) noexcept +void *__wrap_calloc(size_t nmemb, size_t size) noexcept { // Avoid multiplication overflow. if ((size > 0) && (nmemb > std::numeric_limits::max() / size)) @@ -1073,7 +1075,7 @@ void *calloc(size_t nmemb, size_t size) noexcept * \return Valid reallocated memory or nullptr if the reallocation failed. * (in which case the previously allocated memory is not touched). */ -void *realloc(void *ptr, size_t size) noexcept +void *__wrap_realloc(void *ptr, size_t size) noexcept { if (!ptr) return malloc(size); @@ -1084,3 +1086,5 @@ void *realloc(void *ptr, size_t size) noexcept return ptr; } + +L4_END_DECLS diff --git a/src/l4/pkg/l4re-core/lua/examples/interpr/Makefile b/src/l4/pkg/l4re-core/lua/examples/interpr/Makefile index bb4ba4cf..6f4a7bb5 100644 --- a/src/l4/pkg/l4re-core/lua/examples/interpr/Makefile +++ b/src/l4/pkg/l4re-core/lua/examples/interpr/Makefile @@ -4,7 +4,7 @@ L4DIR ?= $(PKGDIR)/../../.. 
TARGET = lua SRC_C = lua.c REQUIRES_LIBS = lua libc_support_misc libc_be_fs_noop libc_be_sig - +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc vpath %.c $(PKGDIR)/lib/contrib/src include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/l4re-core/lua/lib/build++/Makefile b/src/l4/pkg/l4re-core/lua/lib/build++/Makefile index 64a7b6c3..cab7f2af 100644 --- a/src/l4/pkg/l4re-core/lua/lib/build++/Makefile +++ b/src/l4/pkg/l4re-core/lua/lib/build++/Makefile @@ -10,6 +10,8 @@ $(GENERAL_D_LOC): $(PKGDIR)/lib/build/Makefile PKGNAME_DIRNAME := lua-c++ +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc + # Difference to other version # WARNINGS EXCEPTION: CFLAGS is used here for C++ files (instead of the usual diff --git a/src/l4/pkg/l4re-core/moe/server/src/Makefile b/src/l4/pkg/l4re-core/moe/server/src/Makefile index 9c792e1d..56737b69 100644 --- a/src/l4/pkg/l4re-core/moe/server/src/Makefile +++ b/src/l4/pkg/l4re-core/moe/server/src/Makefile @@ -25,6 +25,7 @@ CAN_PIE_arm := y CAN_PIE_arm64 := y BID_CAN_PIE = $(CAN_PIE_$(ARCH)) +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc --wrap=aligned_alloc --wrap=calloc REQUIRES_LIBS := libkproxy libloader libsigma0 \ cxx_io cxx_libc_io libsupc++_minimal \ libc_minimal libc_minimal_l4re libumalloc diff --git a/src/l4/pkg/l4re-core/ned/ned-prompt/src/Makefile b/src/l4/pkg/l4re-core/ned/ned-prompt/src/Makefile index 2dcf8dbc..580e955c 100644 --- a/src/l4/pkg/l4re-core/ned/ned-prompt/src/Makefile +++ b/src/l4/pkg/l4re-core/ned/ned-prompt/src/Makefile @@ -6,4 +6,6 @@ SRC_CC = ned-prompt.cc REQUIRES_LIBS := readline DEPENDS_PKGS := readline +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc + include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/l4re-core/ned/server/src/Makefile b/src/l4/pkg/l4re-core/ned/server/src/Makefile index c63a96c7..09c2cfc6 100644 --- a/src/l4/pkg/l4re-core/ned/server/src/Makefile +++ b/src/l4/pkg/l4re-core/ned/server/src/Makefile @@ -14,5 +14,6 @@ SRC_CC += lua_sleep.cc REQUIRES_LIBS := libloader lua++ libc_support_misc cxx_libc_io cxx_io DEFAULT_HEAP_SIZE := 0x20000 +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/libunwind/server/src/Makefile b/src/l4/pkg/libunwind/server/src/Makefile index a3cec495..8e37c03e 100644 --- a/src/l4/pkg/libunwind/server/src/Makefile +++ b/src/l4/pkg/libunwind/server/src/Makefile @@ -4,5 +4,6 @@ L4DIR ?= $(PKGDIR)/../.. 
TARGET = backtracer SRC_CC = backtracer.cc REQUIRES_LIBS = stdlibs libunwind libstdc++ +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/loader/server/src/Makefile b/src/l4/pkg/loader/server/src/Makefile index c19a2ee4..87ed115c 100644 --- a/src/l4/pkg/loader/server/src/Makefile +++ b/src/l4/pkg/loader/server/src/Makefile @@ -10,5 +10,6 @@ LDFLAGS += #CPPFLAGS += -fPIC REQUIRES_LIBS := libloader libkproxy cxx_libc_io cxx_io +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/mag/Control b/src/l4/pkg/mag/Control index ad97681b..786dba64 100644 --- a/src/l4/pkg/mag/Control +++ b/src/l4/pkg/mag/Control @@ -1,4 +1,4 @@ provides: libmag mag-input-libinput mag-input-event mag-client_fb mag-mag_client mag-session_manager requires: l4re libc stdlibs-sh input l4util mag-gfx libstdc++ - lua++ + lua++ libc_be_mem Maintainer: warg@os.inf.tu-dresden.de diff --git a/src/l4/pkg/mag/server/src/Makefile b/src/l4/pkg/mag/server/src/Makefile index 9d944ce4..08ede247 100644 --- a/src/l4/pkg/mag/server/src/Makefile +++ b/src/l4/pkg/mag/server/src/Makefile @@ -15,7 +15,7 @@ STATIC_PLUGINS += mag-input-event STATIC_PLUGINS += mag-client_fb STATIC_PLUGINS += mag-mag_client -REQUIRES_LIBS:= libsupc++ libdl mag-gfx lua++ cxx_libc_io cxx_io +REQUIRES_LIBS:= libsupc++ libdl mag-gfx lua++ cxx_libc_io cxx_io libc_be_mem libstdc++ REQUIRES_LIBS += $(STATIC_PLUGINS) #LDFLAGS += --export-dynamic diff --git a/src/l4/pkg/rtc/Control b/src/l4/pkg/rtc/Control index 578bd04f..b556bb32 100644 --- a/src/l4/pkg/rtc/Control +++ b/src/l4/pkg/rtc/Control @@ -1,4 +1,4 @@ provides: rtc rtc_libc_be -requires: stdlibs libio cxx_libc_io cxx_io libstdc++ +requires: stdlibs libio cxx_libc_io cxx_io libstdc++ libc_be_mem optional: drivers-frst i2c-server Maintainer: adam.lackorzynski@kernkonzept.com diff --git a/src/l4/pkg/rtc/server/src/Makefile b/src/l4/pkg/rtc/server/src/Makefile index f6a06034..2f4d23e9 100644 --- a/src/l4/pkg/rtc/server/src/Makefile +++ b/src/l4/pkg/rtc/server/src/Makefile @@ -9,6 +9,6 @@ SRC_CC_arm64-l4f = pl031.cc SRC_CC = main.cc SRC_CC-$(CONFIG_RTC_DS3231) += ds3231.cc SRC_CC-$(CONFIG_RTC_PCF85063A) += pcf85063a.cc -REQUIRES_LIBS = libio cxx_libc_io cxx_io libstdc++ +REQUIRES_LIBS = libio cxx_libc_io cxx_io libc_be_mem libstdc++ include $(L4DIR)/mk/prog.mk diff --git a/src/l4/pkg/virtio-net-switch/server/switch/Makefile b/src/l4/pkg/virtio-net-switch/server/switch/Makefile index a88a6203..b640ea29 100644 --- a/src/l4/pkg/virtio-net-switch/server/switch/Makefile +++ b/src/l4/pkg/virtio-net-switch/server/switch/Makefile @@ -5,6 +5,7 @@ TARGET = l4vio_switch REQUIRES_LIBS = libstdc++ l4virtio REQUIRES_LIBS-$(CONFIG_VNS_IXL) += ixl +LDFLAGS += --wrap=malloc --wrap=free --wrap=realloc SRC_CC-$(CONFIG_VNS_PORT_FILTER) += filter.cc
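
Note (editorial, not part of the patch): the LDFLAGS additions above rely on the GNU ld
option --wrap.  With --wrap=malloc in effect, every undefined reference to malloc in the
wrapped objects is resolved against __wrap_malloc, and __real_malloc refers to the
original definition.  The following stand-alone sketch only illustrates that mechanism;
the file name and the logging are hypothetical and not taken from this patch:

  /* wrap.c -- build with: cc main.c wrap.c -Wl,--wrap=malloc -Wl,--wrap=free */
  #include <stddef.h>
  #include <stdio.h>

  /* With --wrap=malloc/--wrap=free, __real_* resolves to the original libc symbols. */
  extern void *__real_malloc(size_t size);
  extern void  __real_free(void *ptr);

  /* Every call to malloc() in wrapped objects is redirected here by the linker. */
  void *__wrap_malloc(size_t size)
  {
    void *p = __real_malloc(size);
    fprintf(stderr, "malloc(%zu) -> %p\n", size, p);
    return p;
  }

  /* Every call to free() in wrapped objects is redirected here by the linker. */
  void __wrap_free(void *ptr)
  {
    fprintf(stderr, "free(%p)\n", ptr);
    __real_free(ptr);
  }

In this patch the wrappers do not forward to __real_* symbols; they are themselves the
allocator entry points (see the __wrap_malloc/__wrap_free definitions in libumalloc's
malloc.cc above), which is why the eh_alloc.cc changes declare and call __wrap_malloc
directly instead of malloc.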