l4re-base-25.08.0

This commit is contained in:
2025-09-12 15:55:45 +02:00
commit d959eaab98
37938 changed files with 9382688 additions and 0 deletions

3
src/l4/pkg/shmc/Control Normal file
View File

@@ -0,0 +1,3 @@
provides: shmc shmc_ringbuf
requires: l4re_c
Maintainer: adam@os.inf.tu-dresden.de

4
src/l4/pkg/shmc/Makefile Normal file
View File

@@ -0,0 +1,4 @@
PKGDIR = .
L4DIR ?= $(PKGDIR)/../..
include $(L4DIR)/mk/subdir.mk

View File

@@ -0,0 +1 @@
INPUT += l4/shmc/shmc.h l4/shmc/ringbuf.h

View File

@@ -0,0 +1,4 @@
PKGDIR = ..
L4DIR ?= $(PKGDIR)/../..
include $(L4DIR)/mk/include.mk

View File

@@ -0,0 +1,179 @@
/*
* (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
* Alexander Warg <warg@os.inf.tu-dresden.de>
* economic rights: Technische Universität Dresden (Germany)
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
/**
* \internal
* \file
*/
#pragma once
#ifndef __INCLUDED_FROM_L4SHMC_H__
#error Do not include l4/shm/internal.h directly, use l4/shm/shm.h!
#endif
#include <l4/sys/irq.h>
#include <l4/util/atomic.h>
L4_CV L4_INLINE long
l4shmc_wait_any(l4shmc_signal_t **p)
{
  /* Blocking wait for any signal: timeout variant with an infinite timeout. */
  long r = l4shmc_wait_any_to(L4_IPC_NEVER, p);
  return r;
}
L4_CV L4_INLINE long
l4shmc_wait_any_try(l4shmc_signal_t **p)
{
  /* Non-blocking probe for any signal: zero timeout in both directions. */
  long r = l4shmc_wait_any_to(L4_IPC_BOTH_TIMEOUT_0, p);
  return r;
}
L4_CV L4_INLINE long
l4shmc_wait_signal_try(l4shmc_signal_t *s)
{
  /* Non-blocking probe for one specific signal. */
  long r = l4shmc_wait_signal_to(s, L4_IPC_BOTH_TIMEOUT_0);
  return r;
}
L4_CV L4_INLINE long
l4shmc_wait_signal(l4shmc_signal_t *s)
{
  /* Blocking wait for one specific signal. */
  long r = l4shmc_wait_signal_to(s, L4_IPC_NEVER);
  return r;
}
L4_CV L4_INLINE long
l4shmc_wait_chunk_try(l4shmc_chunk_t *p)
{
  /* Non-blocking probe for the chunk's signal. */
  long r = l4shmc_wait_chunk_to(p, L4_IPC_BOTH_TIMEOUT_0);
  return r;
}
L4_CV L4_INLINE long
l4shmc_wait_chunk(l4shmc_chunk_t *p)
{
  /* Blocking wait on the chunk's signal. */
  long r = l4shmc_wait_chunk_to(p, L4_IPC_NEVER);
  return r;
}
L4_CV L4_INLINE long
l4shmc_get_chunk(l4shmc_area_t *shmarea, char const *chunk_name,
                 l4shmc_chunk_t *chunk)
{
  /* Look up a chunk by name without waiting (timeout argument 0). */
  long r = l4shmc_get_chunk_to(shmarea, chunk_name, 0, chunk);
  return r;
}
L4_CV L4_INLINE long
l4shmc_chunk_ready(l4shmc_chunk_t *chunk, l4_umword_t size)
{
  /* Publish 'size' bytes of payload to the consumer. The size store must
   * be globally visible before the status flips to READY, hence the full
   * memory barrier between the two stores — do not reorder. */
  chunk->_chunk->_size = size;
  __sync_synchronize();
  chunk->_chunk->_status = L4SHMC_CHUNK_READY;
  return L4_EOK;
}
L4_CV L4_INLINE long
l4shmc_trigger(l4shmc_signal_t *s)
{
  /* Fire the IRQ behind the signal and convert the result tag into an
   * IPC error code (0 on success). */
  l4_msgtag_t tag = l4_irq_trigger(s->_sigcap);
  return l4_ipc_error(tag, l4_utcb());
}
L4_CV L4_INLINE long
l4shmc_chunk_ready_sig(l4shmc_chunk_t *chunk, l4_umword_t size)
{
  /* Mark the chunk ready, then notify the consumer via its signal. */
  (void)l4shmc_chunk_ready(chunk, size); /* always returns L4_EOK */
  return l4shmc_trigger(chunk->_sig);
}
L4_CV L4_INLINE void *
l4shmc_chunk_ptr(l4shmc_chunk_t const *p)
{
  /* The chunk descriptor records its position as an offset relative to
   * the locally mapped base address of the shared area. */
  l4_addr_t base = (l4_addr_t)p->_shm->_local_addr;
  l4shmc_chunk_desc_t *desc
    = (l4shmc_chunk_desc_t *)(base + p->_chunk->_offset);
  return (void *)desc->payload;
}
L4_CV L4_INLINE l4shmc_signal_t *
l4shmc_chunk_signal(l4shmc_chunk_t const *chunk)
{
  /* Accessor: the signal object associated with this chunk. */
  return chunk->_sig;
}
L4_CV L4_INLINE l4_cap_idx_t
l4shmc_signal_cap(l4shmc_signal_t const *signal)
{
  /* Accessor: the IRQ capability backing this signal
   * (the one l4shmc_trigger() fires). */
  return signal->_sigcap;
}
L4_CV L4_INLINE long
l4shmc_chunk_size(l4shmc_chunk_t const *p)
{
  /* The size word lives in shared memory and may be corrupted by a
   * faulty or malicious peer; reject values exceeding the capacity. */
  l4_umword_t announced = p->_chunk->_size;
  return (announced > p->_capacity) ? -L4_EIO : (long)announced;
}
L4_CV L4_INLINE long
l4shmc_chunk_capacity(l4shmc_chunk_t const *p)
{
  /* Accessor: maximum payload size this chunk can hold
   * (upper bound checked by l4shmc_chunk_size()). */
  return p->_capacity;
}
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take(l4shmc_chunk_t *chunk)
{
  /* Atomically claim a CLEAR chunk for writing; fails if the chunk is
   * currently in any other state. */
  int won = l4util_cmpxchg(&chunk->_chunk->_status,
                           L4SHMC_CHUNK_CLEAR, L4SHMC_CHUNK_BUSY_WRITING);
  return won ? L4_EOK : -L4_EPERM;
}
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take_for_writing(l4shmc_chunk_t *chunk)
{
  /* Alias for l4shmc_chunk_try_to_take(): claim a CLEAR chunk for writing. */
  long r = l4shmc_chunk_try_to_take(chunk);
  return r;
}
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take_for_overwriting(l4shmc_chunk_t *chunk)
{
  /* Atomically re-claim a READY (not yet consumed) chunk for writing. */
  int won = l4util_cmpxchg(&chunk->_chunk->_status,
                           L4SHMC_CHUNK_READY, L4SHMC_CHUNK_BUSY_WRITING);
  return won ? L4_EOK : -L4_EBUSY;
}
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take_for_reading(l4shmc_chunk_t *chunk)
{
  /* Atomically claim a READY chunk for reading. */
  int won = l4util_cmpxchg(&chunk->_chunk->_status,
                           L4SHMC_CHUNK_READY, L4SHMC_CHUNK_BUSY_READING);
  return won ? L4_EOK : -L4_EBUSY;
}
L4_CV L4_INLINE long
l4shmc_chunk_consumed(l4shmc_chunk_t *chunk)
{
  /* Hand the chunk back to the producer by marking it CLEAR again. */
  chunk->_chunk->_status = L4SHMC_CHUNK_CLEAR;
  return L4_EOK;
}
L4_CV L4_INLINE long
l4shmc_is_chunk_ready(l4shmc_chunk_t const *chunk)
{
  /* Non-zero iff the producer has published data (status == READY). */
  return chunk->_chunk->_status == L4SHMC_CHUNK_READY;
}
L4_CV L4_INLINE long
l4shmc_is_chunk_clear(l4shmc_chunk_t const *chunk)
{
  /* Non-zero iff the chunk is free for the producer (status == CLEAR). */
  return chunk->_chunk->_status == L4SHMC_CHUNK_CLEAR;
}
L4_CV L4_INLINE long
l4shmc_check_magic(l4shmc_chunk_t const *chunk)
{
  /* Non-zero iff the chunk descriptor carries the expected magic value
   * (cheap corruption check on the shared descriptor). */
  return chunk->_chunk->_magic == L4SHMC_CHUNK_MAGIC;
}

View File

@@ -0,0 +1,348 @@
/*
* (c) 2010 Björn Döbel <doebel@os.inf.tu-dresden.de>
* economic rights: Technische Universität Dresden (Germany)
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
/**
* \file
*/
#pragma once
#include <l4/shmc/shmc.h>
#include <l4/util/assert.h>
#include <l4/sys/compiler.h>
#include <l4/sys/thread.h>
L4_BEGIN_DECLS
/**
* \defgroup api_l4shm_ringbuf L4SHM-based ring buffer implementation
*
* The library provides a non-locking (strictly 1:1) shared-memory-based ring
* buffer implementation based on the L4SHM library. It requires an already
* allocated L4SHM area to be attached to sender and receiver. It will allocate
* an SHM chunk within this area and provides functions to produce data and
* consume data in FIFO order from the ring buffer.
*
* The sender side of the buffer needs to be initialized *before* the receiver
* side, because allocation of the SHM chunk and the necessary signals is done
* on the sender side and the receiver initialization tries to attach to these
* objects.
*/
/**
* \defgroup api_l4shm_ringbuf_sender Sender
* \ingroup api_l4shm_ringbuf
*
* \defgroup api_l4shm_ringbuf_receiver Receiver
* \ingroup api_l4shm_ringbuf
*
* \defgroup api_l4shm_ringbuf_internal Internal
* \ingroup api_l4shm_ringbuf
*/
/*
* Turn on ringbuf poisoning. This will add magic values to the ringbuf
* header as well as each packet header and check that these values are
* valid all the time.
*/
#define L4SHMC_RINGBUF_POISONING 1
/**
 * Head field of a ring buffer.
 *
 * This structure lives at the very start of the shared memory chunk and is
 * read and written by both parties — its layout is part of the shared
 * format, so fields must not be reordered or resized.
 *
 * \ingroup api_l4shm_ringbuf_internal
 */
typedef struct
{
  volatile l4_uint32_t lock;  ///< spin-lock word; holds an enum lock_content value
  unsigned data_size;         ///< size of the data[] area — presumably set at init; TODO confirm
#if L4SHMC_RINGBUF_POISONING
  char magic1;                ///< poison byte guarding against corruption
#endif
  unsigned next_read; ///< offset to next read packet
  unsigned next_write; ///< offset to next write packet
#if L4SHMC_RINGBUF_POISONING
  char magic2;                ///< poison byte guarding against corruption
#endif
  unsigned bytes_filled; ///< bytes filled in buffer
  unsigned sender_waits; ///< sender waiting?
#if L4SHMC_RINGBUF_POISONING
  char magic3;                ///< poison byte guarding against corruption
#endif
  char data[]; ///< tail pointer -> data
} l4shmc_ringbuf_head_t;
/**
 * Ring buffer
 *
 * Local (per-party) bookkeeping for one ring buffer; unlike
 * l4shmc_ringbuf_head_t this structure does NOT live in shared memory.
 *
 * \ingroup api_l4shm_ringbuf_internal
 */
typedef struct
{
  l4shmc_area_t *_area; ///< L4SHM area this buffer is located in
  l4_cap_idx_t _owner; ///< owner (attached to send/recv signal)
  l4shmc_chunk_t _chunk; ///< chunk descriptor
  unsigned _size; ///< chunk size // XXX do we need this?
  char *_chunkname; ///< name of the ring buffer chunk
  char *_signame; ///< base name of the ring buffer signals
  l4shmc_ringbuf_head_t *_addr; ///< pointer to ring buffer head
  l4shmc_signal_t _signal_full; ///< "full" signal - triggered when data is produced
  l4shmc_signal_t _signal_empty; ///< "empty" signal - triggered when data is consumed
} l4shmc_ringbuf_t;
/**
* Get ring buffer head pointer.
* \param ringbuf l4shmc_ringbuf_t struct
*
* \ingroup api_l4shm_ringbuf_internal
*/
#define L4SHMC_RINGBUF_HEAD(ringbuf) ((l4shmc_ringbuf_head_t*)((ringbuf)->_addr))
/**
* Get ring buffer data pointer.
* \param ringbuf l4shmc_ringbuf_t struct
*
* \ingroup api_l4shm_ringbuf_internal
*/
#define L4SHMC_RINGBUF_DATA(ringbuf) (L4SHMC_RINGBUF_HEAD(ringbuf)->data)
/**
* Get size of data area.
* \param ringbuf l4shmc_ringbuf_t struct
*
* \ingroup api_l4shm_ringbuf_internal
*/
#define L4SHMC_RINGBUF_DATA_SIZE(ringbuf) ((ringbuf)->_size - sizeof(l4shmc_ringbuf_head_t))
/*
 * Values stored in l4shmc_ringbuf_head_t::lock. The two real states sit
 * strictly between the sentinels so that the lock/unlock helpers can
 * assert the word has not been trashed by a misbehaving peer.
 */
enum lock_content
{
  lock_cont_min = 4, ///< lower sentinel, never stored as a state
  locked = 5, ///< lock is held
  unlocked = 6, ///< lock is free
  lock_cont_max = 7, ///< upper sentinel, never stored as a state
};
static L4_CV inline void l4shmc_rb_lock(l4shmc_ringbuf_head_t *head)
{
  /* Spin lock over shared memory: atomically move the lock word from
   * 'unlocked' to 'locked', yielding the CPU while the peer holds it.
   * NOTE(review): l4util_cmpxchg32 is declared in <l4/util/atomic.h>,
   * which this header does not include directly — verify it is pulled
   * in via <l4/shmc/shmc.h>. */
  ASSERT_NOT_NULL(head);
  ASSERT_ASSERT(head->lock > lock_cont_min);
  ASSERT_ASSERT(head->lock < lock_cont_max);
  for (;;)
    {
      if (l4util_cmpxchg32(&head->lock, unlocked, locked))
        return;
      l4_thread_yield();
    }
}
static L4_CV inline void l4shmc_rb_unlock(l4shmc_ringbuf_head_t *head)
{
  /* Release the spin lock taken by l4shmc_rb_lock(). The asserts check
   * that the lock word still holds a sane enum lock_content value. */
  ASSERT_NOT_NULL(head);
  ASSERT_ASSERT(head->lock > lock_cont_min);
  ASSERT_ASSERT(head->lock < lock_cont_max);
  head->lock = unlocked;
}
/******************
* Initialization *
******************/
/**
* Initialize a ring buffer by creating an SHMC chunk and the
* corresponding signals. This needs to be done by one of the
* participating parties when setting up communication channel.
*
* \pre area has been attached using l4shmc_attach().
*
* \param buf pointer to ring buffer struct
* \param area pointer to SHMC area
* \param chunk_name name of SHMC chunk to create in area
* \param signal_name base name for SHMC signals to create
* \param size chunk size
*
* \return 0 on success, error otherwise
*
*/
L4_CV int l4shmc_rb_init_buffer(l4shmc_ringbuf_t *buf, l4shmc_area_t *area,
char const *chunk_name,
char const *signal_name, unsigned size);
/**
* De-init a ring buffer.
*
* \param buf pointer to ring buffer struct
*/
L4_CV void l4shmc_rb_deinit_buffer(l4shmc_ringbuf_t *buf);
/***************************
* RINGBUF SENDER *
***************************/
/**
* Attach to sender signal of a ring buffer.
*
* Attach owner to the sender-side signal of a ring buffer, which
* is triggered whenever new space has been freed in the buffer for
* the sender to write to.
*
* This is split from initialization, because you may not know the
* owner cap when initializing the buffer.
*
* \param buf pointer to ring buffer struct
* \param signal_name signal base name
* \param owner owner thread
*
* \return 0 on success, error otherwise
*/
L4_CV int l4shmc_rb_attach_sender(l4shmc_ringbuf_t *buf, char const *signal_name,
l4_cap_idx_t owner);
/**
* Allocate a packet of a given size within the ring buffer.
*
* This packet may wrap around at the end of the buffer. Users need
* to be aware of that.
*
* \param head ring buffer head pointer
* \param psize packet size
*
* \return valid address on success
* \retval NULL if not enough space available
*/
L4_CV char *l4shmc_rb_sender_alloc_packet(l4shmc_ringbuf_head_t *head,
unsigned psize);
/**
* Copy data into a previously allocated packet.
*
* This function is wrap-around aware.
*
* \param buf pointer to ring buffer struct
* \param addr valid destination (allocate with alloc_packet())
* \param data data source
* \param dsize data size
*/
L4_CV void l4shmc_rb_sender_put_data(l4shmc_ringbuf_t *buf, char *addr,
char *data, unsigned dsize);
/**
* Copy in packet from an external data source.
*
* This is the function you'll want to use. Just pass it a buffer
* pointer and let the lib do the work.
*
* \param buf pointer to ring buffer struct
* \param data valid buffer
* \param size data size
* \param block_if_necessary bool: block if buffer currently full
*
* \retval 0 on success
* \retval -L4_ENOMEM if block == false and no space available
*/
L4_CV int l4shmc_rb_sender_next_copy_in(l4shmc_ringbuf_t *buf, char *data,
unsigned size, int block_if_necessary);
/**
* Tell the consumer that new data is available.
*
* \param buf pointer to ring buffer struct
*/
L4_CV void l4shmc_rb_sender_commit_packet(l4shmc_ringbuf_t *buf);
/***************************
* RINGBUF RECEIVER *
***************************/
/**
* Initialize receive buffer.
*
* Initialize the receiver-side of a ring buffer. This requires the underlying
* SHMC chunk and the corresponding signals to be valid already (read: to be
* initialized by the sender).
*
* \pre chunk & signals have been created and initialized by the sender side
*
* \param buf pointer to ring buffer struct
* \param area pointer to SHMC area
 * \param chunk_name name of the existing SHMC chunk (created by the sender) to attach to
 * \param signal_name base name of the existing SHMC signals to attach to
*
* \return 0 on success, error otherwise
*/
L4_CV int l4shmc_rb_init_receiver(l4shmc_ringbuf_t *buf, l4shmc_area_t *area,
char const *chunk_name,
char const *signal_name);
/**
* Attach to receiver signal of a ring buffer.
*
* Attach owner to the receiver-side signal of a ring buffer, which
* is triggered whenever new data has been produced.
*
* This is split from initialization, because you may not know the
* owner cap when initializing the buffer.
*
* \param buf pointer to ring buffer struct
* \param owner owner thread
*/
L4_CV void l4shmc_rb_attach_receiver(l4shmc_ringbuf_t *buf, l4_cap_idx_t owner);
/**
* Check if (and optionally block until) new data is ready.
*
* \param buf pointer to ring buffer struct
* \param blocking block if data is not available immediately
*
* Returns immediately, if data is available.
*
* \return 0 success, data available, != 0 otherwise
*/
L4_CV int l4shmc_rb_receiver_wait_for_data(l4shmc_ringbuf_t *buf, int blocking);
/**
* Copy data out of the buffer.
*
* \param head ring buffer head pointer
* \param target valid target buffer
* \param[in,out] tsize size of target buffer (must be >= packet size!);
* contains the real data size
* \return 0 on success, negative error otherwise
*/
L4_CV int l4shmc_rb_receiver_copy_out(l4shmc_ringbuf_head_t *head, char *target,
unsigned *tsize);
/**
* Notify producer that space is available.
*
* \param buf pointer to ring buffer struct
*/
L4_CV void l4shmc_rb_receiver_notify_done(l4shmc_ringbuf_t *buf);
/**
* Have a look at the ring buffer and see which size the next
* packet to be read has. Does not modify anything.
*
* \return size of next buffer or -1 if no data available
*/
L4_CV int l4shmc_rb_receiver_read_next_size(l4shmc_ringbuf_head_t *head);
L4_END_DECLS

View File

@@ -0,0 +1,543 @@
/*
* Copyright (c) 2011 Stefan Fritsch <stefan_fritsch@genua.de>
* Christian Ehrhardt <christian_ehrhardt@genua.de>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#pragma once
#include <string.h>

#include <l4/shmc/shmc.h>
#include <l4/sys/err.h>
#include <l4/sys/compiler.h>
L4_BEGIN_DECLS
/* sizeof(struct l4shm_buf_pkt_head) must be power of two: all ring
 * offsets are kept aligned to it (checked by the static_assert in
 * l4shm_buf_chunk_init). */
struct l4shm_buf_pkt_head
{
  unsigned long size; /**< payload length of the packet that follows */
};
/*
 * Per-ring header, located at the start of the shared chunk; read and
 * written concurrently by both parties (hence the volatile members).
 */
struct l4shm_buf_chunk_head
{
  /** end of ring content */
  volatile unsigned long next_offs_to_write;
  /** start of ring content */
  volatile unsigned long next_offs_to_read;
  /** ring buffer full */
  volatile unsigned long writer_blocked;
  /** The packet buffers. Flexible array member (standard C99/C11
   * replacement for the former GNU zero-length array pkg[0]; identical
   * layout and sizeof). The ring payload follows this header directly
   * in shared memory. */
  volatile struct l4shm_buf_pkt_head pkg[];
};
/**
 * Return the size of the largest packet that currently fits into
 * the given chunk.
 * @param chunk The chunk.
 * @param chunksize The size of the data area of the chunk (maintained
 *     outside of the shared memory area).
 * It is the caller's responsibility to ensure that it is the sender of
 * this chunk.
 */
L4_INLINE unsigned long l4shm_buf_chunk_tx_free(struct l4shm_buf_chunk_head *chunk,
                                                unsigned long chunksize)
{
  /* Snapshot both volatile offsets once; the reader may advance
   * next_offs_to_read concurrently. */
  unsigned long roff = chunk->next_offs_to_read;
  unsigned long woff = chunk->next_offs_to_write;
  unsigned long space;

  /* Offsets outside the ring indicate corruption by the peer. */
  if (woff >= chunksize || roff >= chunksize)
    return 0;
  if (woff < roff)
    space = roff - woff;
  else
    space = chunksize - (woff - roff);
  /* Reserve room for the packet head plus the trailing '0' head the
   * receiver uses to detect the end of the ring content. */
  if (space < 2 * sizeof(struct l4shm_buf_pkt_head))
    return 0;
  return space - 2 * sizeof(struct l4shm_buf_pkt_head);
}
/** Round v up to the next multiple of boundary (boundary must be a
 * power of two). */
L4_INLINE l4_umword_t l4shmc_align(l4_umword_t v, l4_umword_t boundary)
{
  l4_umword_t mask = boundary - 1;
  return (v + mask) & ~mask;
}
L4_INLINE volatile struct l4shm_buf_pkt_head *l4shmc_align_ptr_ph(volatile struct l4shm_buf_pkt_head *v)
{
return (struct l4shm_buf_pkt_head *)l4shmc_align((l4_umword_t)v, sizeof(struct l4shm_buf_pkt_head));
}
/** Align a ring offset up to the packet-head size. */
L4_INLINE l4_umword_t l4shmc_align_off_ph(l4_umword_t v)
{
  return l4shmc_align(v, sizeof(struct l4shm_buf_pkt_head));
}
/**
 * Initialize a chunk.
 * Note that the entire chunk structure lives in shared memory.
 * @param chunk The chunk structure.
 * @param chunksize The size of the chunk including the l4shm_buf_chunk_head
 *     structure. This value is not maintained inside the chunk head
 *     but used to check alignment requirements.
 * @return Zero if initialization was successful, a negative error code
 *     if initialization failed.
 */
L4_INLINE int l4shm_buf_chunk_init(struct l4shm_buf_chunk_head *chunk,
                                   unsigned long chunksize)
{
  /* All ring offsets are maintained in units of the packet head size,
   * which therefore must be a power of two. */
  static_assert(0 == ( sizeof(struct l4shm_buf_pkt_head) &
                       (sizeof(struct l4shm_buf_pkt_head)-1) ),
                "sizeof(struct l4shm_buf_pkt_head) not power of 2");
  if (chunk->pkg != l4shmc_align_ptr_ph(chunk->pkg))
    return -L4_EINVAL;
  /* From here on, chunksize denotes the payload area only. */
  chunksize -= sizeof(struct l4shm_buf_chunk_head);
  if (chunksize != l4shmc_align_off_ph(chunksize))
    return -L4_EINVAL;
  /* A leading zero-sized packet head marks the ring as empty. */
  chunk->pkg[0].size = 0;
  chunk->next_offs_to_write = 0;
  chunk->next_offs_to_read = 0;
  chunk->writer_blocked = 0;
  return 0;
}
/**
 * Add part of a packet to the given chunk. The data is only copied
 * into the area of the chunk reserved for the sender. It is not made
 * available for the receiver. Use l4shm_buf_chunk_tx_complete for this.
 * It is the caller's responsibility to ensure that it is the sender
 * on this chunk.
 * @param ch The chunk.
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @param buf The packet data.
 * @param poffset The offset of this part within the packet. The caller must
 *     make sure that it only commits complete packets.
 * @param len The length of the packet data.
 * @return Zero if the packet was added to the chunk, a negative error
 *     code otherwise. In particular -L4_EAGAIN means that insufficient
 *     space was available in the ring.
 */
L4_INLINE int l4shm_buf_chunk_tx_part(struct l4shm_buf_chunk_head *ch,
                                      unsigned long chunksize, const void *buf,
                                      unsigned long poffset, unsigned long len)
{
  unsigned long woffset, nextoffset, r, part_len, totallen;
  int blocked = 0;

  if (len == 0)
    return 0;
  /* Reject lengths/offsets that can never fit; also guards the unsigned
   * arithmetic below against wrap-around. */
  if (poffset > chunksize || len > chunksize)
    return -L4_ENOMEM;
  totallen = poffset + len;
  if (totallen > chunksize - 2*sizeof(struct l4shm_buf_pkt_head))
    return -L4_ENOMEM;

retry:
  /* Re-read the shared offsets freshly on every attempt. */
  __sync_synchronize();
  woffset = ch->next_offs_to_write;
  if (woffset >= chunksize || woffset != l4shmc_align_off_ph(woffset))
    return -L4_EIO;
  nextoffset = l4shmc_align_off_ph(woffset + totallen
                                   + sizeof(struct l4shm_buf_pkt_head));
  r = ch->next_offs_to_read;
  if (r >= chunksize)
    return -L4_EIO;
  if (r <= woffset)
    r += chunksize;
  /* Don't use all space, L4Linux needs an additional '0' chunk head after
   * the chunk. Therefore we need an additional struct l4shm_buf_pkt_head.
   */
  if (nextoffset + sizeof(struct l4shm_buf_pkt_head) > r)
    {
      /*
       * If there is insufficient space set writer_blocked and
       * retry. This is necessary to avoid a race where we set
       * writer blocked after the peer emptied the buffer. We don't
       * set writer_blocked in the first try to avoid spurious interrupts
       * triggered by the reader due to writer_blocked.
       */
      if (blocked)
        return -L4_EAGAIN;
      ch->writer_blocked = 1;
      blocked = 1;
      goto retry;
    }
  ch->writer_blocked = 0;
  /* Copy the part, wrapping around at the end of the ring if needed. */
  woffset += sizeof(struct l4shm_buf_pkt_head) + poffset;
  woffset %= chunksize;
  if (woffset + len > chunksize)
    part_len = chunksize - woffset;
  else
    part_len = len;
  memcpy(((unsigned char *)ch->pkg)+woffset, buf, part_len);
  if (len != part_len)
    memcpy((void*)ch->pkg, (char const *)buf + part_len, len - part_len);
  return 0;
}
/**
 * Complete the transmission of a packet. This function fills in the
 * packet head in the shm buffer. The packet data must already be present.
 * Use l4shm_buf_chunk_tx_part to copy packet data.
 * @param ch The chunk.
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @param pkglen The total length of the packet.
 * @return Zero in case of success or a negative error code.
 */
L4_INLINE int l4shm_buf_chunk_tx_complete(struct l4shm_buf_chunk_head *ch,
                                          unsigned long chunksize, size_t pkglen)
{
  unsigned long offset, nextoffset, r;
  volatile struct l4shm_buf_pkt_head *ph, *next_ph;

  if (pkglen == 0)
    return 0;
  if (pkglen > chunksize - 2*sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  offset = ch->next_offs_to_write;
  if (offset >= chunksize || offset != l4shmc_align_off_ph(offset))
    return -L4_EIO;
  ph = ch->pkg + (offset / sizeof(struct l4shm_buf_pkt_head));
  nextoffset = l4shmc_align_off_ph(offset + pkglen + sizeof(struct l4shm_buf_pkt_head));
  /* Re-validate free space against the (possibly advanced) read offset. */
  r = ch->next_offs_to_read;
  if (r >= chunksize)
    return -L4_EIO;
  if (r <= offset)
    r += chunksize;
  /* Don't use all space, L4Linux needs an additional '0' chunk head after
   * the chunk. Therefore we need an additional struct l4shm_buf_pkt_head.
   */
  if (nextoffset + sizeof(struct l4shm_buf_pkt_head) > r)
    return -L4_EIO;
  nextoffset %= chunksize;
  /* For L4Linux compatibility: a zero-sized head terminates the ring. */
  next_ph = ch->pkg + (nextoffset / sizeof(struct l4shm_buf_pkt_head));
  next_ph->size = 0;
  /* The payload and the terminating head must be visible before the
   * packet size is published — do not reorder across this barrier. */
  __sync_synchronize();
  ph->size = pkglen;
  ch->next_offs_to_write = nextoffset;
  return 0;
}
/**
 * Add a complete packet to the given chunk.
 * It is the caller's responsibility to ensure that it is the sender
 * on this chunk, and to wake up the receiver after adding packets or
 * when detecting insufficient space in the buffer.
 * @param ch The chunk.
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @param buf The packet data.
 * @param pkt_size The length of the packet data.
 * @return Zero if the packet was added to the chunk, a negative error
 *     code otherwise. In particular -L4_EAGAIN means that insufficient
 *     space was available in the ring.
 */
L4_INLINE int l4shm_buf_chunk_tx(struct l4shm_buf_chunk_head *ch,
                                 unsigned long chunksize,
                                 const void *buf, unsigned long pkt_size)
{
  /* Copy the payload in, then publish it to the receiver. */
  int ret = l4shm_buf_chunk_tx_part(ch, chunksize, buf, 0, pkt_size);
  if (ret == 0)
    ret = l4shm_buf_chunk_tx_complete(ch, chunksize, pkt_size);
  return ret;
}
/**
 * Return the length of the next packet in the chunk.
 * @param ch The chunk.
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @return Zero if the chunk is empty, the length of the first
 *     packet in the chunk or a negative value in case of an error.
 */
L4_INLINE int l4shm_buf_chunk_rx_len(struct l4shm_buf_chunk_head *ch,
                                     unsigned long chunksize)
{
  /* Snapshot the volatile offsets once. */
  unsigned long offset = ch->next_offs_to_read;
  unsigned long woffset = ch->next_offs_to_write;
  long space = woffset - offset;
  unsigned long pkt_size;
  volatile struct l4shm_buf_pkt_head *ph;

  /* Offsets outside the ring indicate corruption by the peer. */
  if (offset >= chunksize)
    return -L4_EIO;
  if (woffset >= chunksize)
    return -L4_EIO;
  if (space == 0)
    return 0;
  if (space < 0)
    space += chunksize; /* ring content wraps around the end */
  if (space < (int)sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  ph = ch->pkg + (offset / sizeof(struct l4shm_buf_pkt_head));
  pkt_size = ph->size;
  /* The announced size must fit inside the occupied part of the ring. */
  if (pkt_size > (unsigned long)space - sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  return pkt_size;
}
/**
 * Drop the first packet in the chunk. The packet must have non-zero length.
 * @param ch The chunk.
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @return Zero if the packet could be dropped, a negative value in case
 *     of an error.
 */
L4_INLINE int l4shm_buf_chunk_rx_drop(struct l4shm_buf_chunk_head *ch,
                                      unsigned long chunksize)
{
  /* Snapshot the volatile offsets once. */
  unsigned long offset = ch->next_offs_to_read;
  unsigned long woffset = ch->next_offs_to_write;
  long space = woffset - offset;
  unsigned long pkt_size;
  volatile struct l4shm_buf_pkt_head *ph;

  /* Same validation as l4shm_buf_chunk_rx_len(). */
  if (offset >= chunksize)
    return -L4_EIO;
  if (woffset >= chunksize)
    return -L4_EIO;
  if (space == 0)
    return -L4_ENOENT;
  if (space < 0)
    space += chunksize;
  if (space < (int)sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  ph = ch->pkg + (offset / sizeof(struct l4shm_buf_pkt_head));
  pkt_size = ph->size;
  if (pkt_size > (unsigned long)space - sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  /* Advance the read offset past head + payload, re-aligned and wrapped. */
  offset = l4shmc_align_off_ph(offset + sizeof(struct l4shm_buf_pkt_head) + pkt_size);
  offset %= chunksize;
  ch->next_offs_to_read = offset;
  /* Make the freed space visible to the (possibly blocked) writer. */
  __sync_synchronize();
  return 0;
}
/**
 * Copy part of a packet from the shm chunk to a buffer. The caller
 * must make sure that the packet contains enough data and that the
 * buffer is long enough to receive the data.
 * @param ch The chunk. The data in the chunk is not modified!
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @param poffset The offset of the part to copy within the packet.
 *     It is an error if this offset is beyond the length of the packet.
 * @param len The amount to copy. It is an error if the packet is shorter
 *     than offset+len bytes.
 * @param buf The target buffer. The caller must make sure that the buffer
 *     can hold len bytes.
 * @return Zero or a negative error code.
 */
L4_INLINE int l4shm_buf_chunk_rx_part(const struct l4shm_buf_chunk_head *ch,
                                      unsigned long chunksize,
                                      unsigned long poffset,
                                      unsigned long len, void *buf)
{
  /* Snapshot the volatile offsets once. */
  unsigned long roffset = ch->next_offs_to_read;
  unsigned long woffset = ch->next_offs_to_write;
  long space = woffset - roffset;
  volatile const struct l4shm_buf_pkt_head *ph;
  unsigned long part_len, pkt_size;

  if (roffset >= chunksize)
    return -L4_EIO;
  if (woffset >= chunksize)
    return -L4_EIO;
  if (space < 0)
    space += chunksize;
  if (space < (int)sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  ph = ch->pkg + (roffset / sizeof(struct l4shm_buf_pkt_head));
  pkt_size = ph->size;
  if (pkt_size > (unsigned long)space - sizeof(struct l4shm_buf_pkt_head))
    return -L4_EIO;
  /* Backward compatibility (pkt_size == 0 means no more packets) */
  if (pkt_size == 0)
    return -L4_ENOENT;
  /* Requested range must lie entirely within the packet. */
  if (poffset >= pkt_size || len > pkt_size || poffset + len > pkt_size)
    return -L4_ENOENT;
  /* Copy out, wrapping around at the end of the ring if needed. */
  roffset += poffset + sizeof(struct l4shm_buf_pkt_head);
  roffset %= chunksize;
  if (roffset + len > chunksize)
    part_len = chunksize - roffset;
  else
    part_len = len;
  memcpy(buf, ((unsigned char *)ch->pkg)+roffset, part_len);
  if (part_len != len)
    memcpy((char*)buf + part_len, (unsigned char *)ch->pkg, len - part_len);
  return 0;
}
/**
 * Remove a packet from the given chunk.
 * It is the caller's responsibility to ensure that it is the receiver
 * on this chunk, and to wake up a potentially blocked sender after
 * removing data from the buffer.
 * @param ch The chunk.
 * @param chunksize The size of the payload data in the chunk, i.e.
 *     excluding the leading l4shm_buf_chunk_head structure.
 * @param buf The packet buffer.
 * @param buf_size The length of the packet buffer.
 * @return The size of the received packet, zero if no packet was available
 *     and a negative error code otherwise.
 */
L4_INLINE int l4shm_buf_chunk_rx(struct l4shm_buf_chunk_head *ch,
                                 unsigned long chunksize,
                                 void *buf, unsigned long buf_size)
{
  int len, ret;

  len = l4shm_buf_chunk_rx_len(ch, chunksize);
  if (len <= 0)
    return len;
  if ((int)buf_size < len)
    return -L4_ENOMEM;
  ret = l4shm_buf_chunk_rx_part(ch, chunksize, 0, len, buf);
  if (ret < 0)
    return ret;
  ret = l4shm_buf_chunk_rx_drop(ch, chunksize);
  if (ret < 0)
    return ret;
  /* Bug fix: the documented contract returns the packet size on success.
   * Previously the (always zero) result of rx_drop was returned, making a
   * successful receive indistinguishable from an empty ring. */
  return len;
}
/*
 * Various wrappers for l4shm_buf_chunk_* functions using a single l4shm_buf struct
 */
struct l4shm_buf
{
  struct l4shm_buf_chunk_head *tx_head; /**< transmit ring (this side sends) */
  struct l4shm_buf_chunk_head *rx_head; /**< receive ring (this side receives) */
  unsigned long tx_ring_size; /**< payload size of the tx ring (excl. head) */
  unsigned long rx_ring_size; /**< payload size of the rx ring (excl. head) */
};
/**
 * Initialize an l4shm_buf structure with pre-allocated Rx/Tx rings
 * @param sb A pre-allocated l4shm_buf structure to initialize.
 * @param rx_chunk The receive ring for this thread.
 * @param rx_size The size of the receive ring including the l4shm_buf_chunk_head.
 * @param tx_chunk The transmit ring for this thread.
 * @param tx_size The size of the transmit ring including the l4shm_buf_chunk_head.
 * @return Zero if successful, a negative error code if initialization
 * failed (normally due to bad alignment).
 * This library will ensure that this thread only adds data to the tx
 * ring and only removes data from the rx ring.
 * It is possible to do initialization in two steps, by calling l4shm_buf_init()
 * twice, once with rx_chunk == NULL and once with tx_chunk == NULL.
 * NOTE(review): with the two-step scheme the second call resets the head
 * pointer set up by the first call back to NULL (see the else branches
 * below), and the *_ring_size field of a NULL ring is left untouched —
 * verify the intended calling convention against actual users.
 */
L4_INLINE int l4shm_buf_init(struct l4shm_buf *sb, void *rx_chunk, unsigned long rx_size,
                             void *tx_chunk, unsigned long tx_size)
{
  if (tx_chunk)
    {
      if (l4shm_buf_chunk_init((struct l4shm_buf_chunk_head *)tx_chunk, tx_size))
        return -L4_EINVAL;
      sb->tx_head = (struct l4shm_buf_chunk_head *)tx_chunk;
      /* Stored sizes always denote the payload area only. */
      sb->tx_ring_size = tx_size - sizeof(struct l4shm_buf_chunk_head);
    }
  else
    {
      sb->tx_head = NULL;
    }
  if (rx_chunk)
    {
      if (l4shm_buf_chunk_init((struct l4shm_buf_chunk_head *)rx_chunk, rx_size))
        return -L4_EINVAL;
      sb->rx_head = (struct l4shm_buf_chunk_head *)rx_chunk;
      sb->rx_ring_size = rx_size - sizeof(struct l4shm_buf_chunk_head);
    }
  else if (!tx_chunk)
    {
      /* Neither ring given: nothing to initialize. */
      return -L4_EINVAL;
    }
  else
    {
      sb->rx_head = NULL;
    }
  return 0;
}
/** Length of the next packet in this side's receive ring. */
L4_INLINE int l4shm_buf_rx_len(struct l4shm_buf *sb)
{
  struct l4shm_buf_chunk_head *ring = sb->rx_head;
  return l4shm_buf_chunk_rx_len(ring, sb->rx_ring_size);
}
/** Drop the first packet from this side's receive ring. */
L4_INLINE int l4shm_buf_rx_drop(struct l4shm_buf *sb)
{
  struct l4shm_buf_chunk_head *ring = sb->rx_head;
  return l4shm_buf_chunk_rx_drop(ring, sb->rx_ring_size);
}
/** Copy part of the first packet out of this side's receive ring. */
L4_INLINE int l4shm_buf_rx_part(struct l4shm_buf *sb, unsigned long poffset,
                                unsigned long len, char *buf)
{
  struct l4shm_buf_chunk_head *ring = sb->rx_head;
  return l4shm_buf_chunk_rx_part(ring, sb->rx_ring_size, poffset, len, buf);
}
/** Largest packet that currently fits into this side's transmit ring. */
L4_INLINE unsigned long l4shm_buf_tx_free(struct l4shm_buf *sb)
{
  struct l4shm_buf_chunk_head *ring = sb->tx_head;
  return l4shm_buf_chunk_tx_free(ring, sb->tx_ring_size);
}
/** Add a complete packet to this side's transmit ring. */
L4_INLINE int l4shm_buf_tx(struct l4shm_buf *sb, const void *buf,
                           unsigned long pkt_size)
{
  struct l4shm_buf_chunk_head *ring = sb->tx_head;
  return l4shm_buf_chunk_tx(ring, sb->tx_ring_size, buf, pkt_size);
}
/** Remove the next packet from this side's receive ring into buf. */
L4_INLINE int l4shm_buf_rx(struct l4shm_buf *sb, void *buf,
                           unsigned long buf_size)
{
  struct l4shm_buf_chunk_head *ring = sb->rx_head;
  return l4shm_buf_chunk_rx(ring, sb->rx_ring_size, buf, buf_size);
}
/** Stage part of a packet into this side's transmit ring. */
L4_INLINE int l4shm_buf_tx_part(struct l4shm_buf *sb, char const *buf,
                                unsigned long poffset, unsigned long len)
{
  struct l4shm_buf_chunk_head *ring = sb->tx_head;
  return l4shm_buf_chunk_tx_part(ring, sb->tx_ring_size, buf, poffset, len);
}
/** Publish a previously staged packet in this side's transmit ring. */
L4_INLINE int l4shm_buf_tx_complete(struct l4shm_buf *sb, size_t pktlen)
{
  struct l4shm_buf_chunk_head *ring = sb->tx_head;
  return l4shm_buf_chunk_tx_complete(ring, sb->tx_ring_size, pktlen);
}
L4_END_DECLS

View File

@@ -0,0 +1,685 @@
/**
* \file
* Shared memory library header file.
*/
/*
* (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
* Björn Döbel <doebel@os.inf.tu-dresden.de>
* economic rights: Technische Universität Dresden (Germany)
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
#pragma once
#include <l4/sys/compiler.h>
#include <l4/sys/linkage.h>
#include <l4/sys/types.h>
#include <l4/sys/err.h>
/**
* \defgroup api_l4shm Shared Memory Library
*
* L4SHM provides a shared memory infrastructure that establishes a
* shared memory area between multiple parties and uses a fast notification
* mechanism.
*
* A shared memory area consists of chunks and signals. A chunk is a
* defined chunk of memory within the memory area with a maximum size. A
* chunk is filled (written) by a producer and read by a consumer. When a
* producer has finished writing to the chunk it signals a data ready
* notification to the consumer.
*
* A consumer attaches to a chunk and waits for the producer to fill the
* chunk. After reading out the chunk it marks the chunk free again.
*
* A shared memory area can have multiple chunks.
*
* The interface is divided in three roles.
* - The master role, responsible for setting up a shared memory area.
* - A producer, generating data into a chunk
* - A consumer, receiving data.
*
* A signal can be connected with a chunk or can be used independently
* (e.g. for multiple chunks).
*
* \example examples/libs/shmc/prodcons.c
* Simple shared memory example.
*
*/
/**
* \defgroup api_l4shmc_chunk Chunks
* \ingroup api_l4shm
*
* \defgroup api_l4shmc_chunk_prod Producer
* \ingroup api_l4shmc_chunk
*
* \defgroup api_l4shmc_chunk_cons Consumer
* \ingroup api_l4shmc_chunk
*
* \defgroup api_l4shmc_signal Signals
* \ingroup api_l4shm
*
* \defgroup api_l4shmc_signal_prod Producer
* \ingroup api_l4shmc_signal
*
* \defgroup api_l4shmc_signal_cons Consumer
* \ingroup api_l4shmc_signal
*/
#define __INCLUDED_FROM_L4SHMC_H__
#include <l4/shmc/types.h>
L4_BEGIN_DECLS
/**
* Create a shared memory area.
* \ingroup api_l4shm
*
* \param shmc_name Name of the shared memory area.
*
* \retval 0 Success.
* \retval -L4_ENOMEM The requested size is too big.
* \retval -L4_ENOENT No valid capability with the name of the shared memory
* area found.
* \retval <0 Errors from l4re_rm_attach or l4re_ns_register_obj_srv.
*/
L4_CV long
l4shmc_create(char const *shmc_name);
/**
* Attach to a shared memory area.
* \ingroup api_l4shm
*
* \param shmc_name Name of the shared memory area.
* \param[out] shmarea Pointer to shared memory area descriptor to be filled
* with information for the shared memory area.
*
* On success, the data in 'shmarea' contains a client number which can be used
 * to mutually agree on client initialization:
* - l4shmc_get_client_nr() returns the client number stored in 'shmarea'. The
* first attached client will get 0 and this number is increased for each
* attached client.
* - l4shmc_mark_client_initialized() tells other clients that this client has
* finished its initialization.
* - l4shmc_get_initialized_clients() returns the bitmap of initialized clients
* attached to this shared memory.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_attach(char const *shmc_name, l4shmc_area_t *shmarea);
/**
* Determine the client number of the shared memory region.
* \ingroup api_l4shm
*
* \param shmarea The shared memory area.
*
* \return client number.
*/
L4_CV long
l4shmc_get_client_nr(l4shmc_area_t const *shmarea);
/**
* Mark this shared memory client as 'initialized'.
* \ingroup api_l4shm
*
* The corresponding bit is set in the `_clients_init_done` bitmask. The
* bitmask can be fetched with #l4shmc_get_initialized_clients().
*
* \param shmarea The shared memory area.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_mark_client_initialized(l4shmc_area_t *shmarea);
/**
* Fetch the `_clients_init_done` bitmask of the shared memory area.
* \ingroup api_l4shm
*
* \param shmarea The shared memory area.
* \param[out] bitmask The bitmask describing which clients are initialized.
*
* \retval 0 Success.
* \retval <0 Error.
*
* \see #l4shmc_mark_client_initialized(), #l4shmc_get_client_nr()
*/
L4_CV long
l4shmc_get_initialized_clients(l4shmc_area_t *shmarea, l4_umword_t *bitmask);
/**
* Add a chunk in the shared memory area.
* \ingroup api_l4shmc_chunk
*
* \param shmarea The shared memory area to put the chunk in.
* \param chunk_name Name of the chunk.
* \param chunk_capacity Capacity for payload of the chunk in bytes.
* \param[out] chunk Chunk structure to fill in.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_add_chunk(l4shmc_area_t *shmarea, char const *chunk_name,
l4_umword_t chunk_capacity, l4shmc_chunk_t *chunk);
/**
* Add a signal for the shared memory area.
* \ingroup api_l4shmc_signal
*
* \param shmarea The shared memory area.
* \param signal_name Name of the signal.
* \param[out] signal Signal structure to fill in.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_add_signal(l4shmc_area_t *shmarea, char const *signal_name,
l4shmc_signal_t *signal);
/**
* Trigger a signal.
* \ingroup api_l4shmc_signal_prod
*
* \param signal Signal to trigger.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_trigger(l4shmc_signal_t *signal);
/**
* Try to mark chunk busy.
* \ingroup api_l4shmc_chunk_prod
*
* \param chunk chunk to mark.
*
* \retval 0 Chunk could be taken.
* \retval <0 Chunk could not be taken, try again.
*/
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take(l4shmc_chunk_t *chunk);
/**
* Try to mark chunk busy writing.
* \ingroup api_l4shmc_chunk_prod
*
* This function is actually an alias for #l4shmc_chunk_try_to_take().
*
* \param chunk chunk to mark busy writing.
*
* \retval 0 Chunk could be taken and can be written.
* \retval <0 Chunk could not be taken, try again.
*/
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take_for_writing(l4shmc_chunk_t *chunk);
/**
* Try to mark the chunk busy writing after it was ready for reading.
* \ingroup api_l4shmc_chunk_prod
*
* \param chunk chunk to mark busy writing.
*
* This function is used by the producer to overwrite a message if the consumer
* did not read the message within an expected time. This function can only be
* used if the consumer uses #l4shmc_chunk_try_to_take_for_reading() before
* reading the chunk.
*
* \retval 0 Chunk could be taken and can be written.
* \retval <0 Chunk could not be taken, try again.
*/
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take_for_overwriting(l4shmc_chunk_t *chunk);
/**
* Try to mark chunk busy reading.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk chunk to mark busy reading.
*
* \retval 0 Chunk could be taken and can be read.
* \retval <0 Chunk could not be taken, try again.
*/
L4_CV L4_INLINE long
l4shmc_chunk_try_to_take_for_reading(l4shmc_chunk_t *chunk);
/**
* Mark chunk as filled (ready).
* \ingroup api_l4shmc_chunk_prod
*
* \param chunk chunk.
* \param size Size of data in the chunk, in bytes.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_chunk_ready(l4shmc_chunk_t *chunk, l4_umword_t size);
/**
* Mark chunk as filled (ready) and signal consumer.
* \ingroup api_l4shmc_chunk_prod
*
* \param chunk chunk.
* \param size Size of data in the chunk, in bytes.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_chunk_ready_sig(l4shmc_chunk_t *chunk, l4_umword_t size);
/**
* Get chunk out of shared memory area.
* \ingroup api_l4shmc_chunk
*
* \param shmarea Shared memory area.
* \param chunk_name Name of the chunk.
* \param[out] chunk Chunk data structure to fill.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_get_chunk(l4shmc_area_t *shmarea, char const *chunk_name,
l4shmc_chunk_t *chunk);
/**
* Get chunk out of shared memory area, with timeout.
* \ingroup api_l4shmc_chunk
*
* \param shmarea Shared memory area.
* \param chunk_name Name of the chunk.
* \param timeout_ms Timeout in milliseconds to wait for the chunk to
* appear in the shared memory area.
* \param[out] chunk Chunk data structure to fill.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_get_chunk_to(l4shmc_area_t *shmarea, char const *chunk_name,
l4_umword_t timeout_ms, l4shmc_chunk_t *chunk);
/**
* Iterate over names of all existing chunks
* \ingroup api_l4shmc_chunk
*
* \param shmarea Shared memory area.
* \param chunk_name Where the name of the current chunk will be stored
* \param offs 0 to start iteration, return value of previous
* call to l4shmc_iterate_chunk() to get next chunk
*
* \retval 0 No more chunks available.
* \retval <0 Error.
* \retval >0 Iterator value for the next call.
*/
L4_CV long
l4shmc_iterate_chunk(l4shmc_area_t const *shmarea, char const **chunk_name,
long offs);
/**
* Attach to signal.
* \ingroup api_l4shmc_signal
*
* \param shmarea Shared memory area.
* \param signal_name Name of the signal.
* \param thread Thread capability index to attach the signal to.
* \param[out] signal Signal data structure to fill.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_attach_signal(l4shmc_area_t *shmarea, char const *signal_name,
l4_cap_idx_t thread, l4shmc_signal_t *signal);
/**
* Get signal object from the shared memory area.
* \ingroup api_l4shmc_signal
*
* \param shmarea Shared memory area.
* \param signal_name Name of the signal.
* \param[out] signal Signal data structure to fill.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_get_signal(l4shmc_area_t *shmarea, char const *signal_name,
l4shmc_signal_t *signal);
/**
* Enable a signal.
* \ingroup api_l4shmc_signal_cons
*
* \param signal Signal to enable.
*
* \retval 0 Success.
* \retval <0 Error.
*
* A signal must be enabled before waiting when the consumer waits on any
* signal. Enabling is not needed if the consumer waits for a specific
* signal or chunk.
*/
L4_CV long
l4shmc_enable_signal(l4shmc_signal_t *signal);
/**
* Enable a signal connected with a chunk.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk to enable.
*
* \retval 0 Success.
* \retval <0 Error.
*
* A signal must be enabled before waiting when the consumer waits on any
* signal. Enabling is not needed if the consumer waits for a specific
* signal or chunk.
*/
L4_CV long
l4shmc_enable_chunk(l4shmc_chunk_t *chunk);
/**
* Wait on any signal.
* \ingroup api_l4shmc_signal_cons
*
* \param[out] retsignal Signal received.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_wait_any(l4shmc_signal_t **retsignal);
/**
* Check whether any waited signal has an event pending.
* \ingroup api_l4shmc_signal_cons
*
* \param[out] retsignal Signal that has the event pending if any.
*
* \retval 0 Success.
* \retval <0 Error.
*
* The return code indicates whether an event was pending or not. Success
 * means an event was pending; if a receive timeout error is returned, no
* event was pending.
*/
L4_CV L4_INLINE long
l4shmc_wait_any_try(l4shmc_signal_t **retsignal);
/**
* Wait for any signal with timeout.
* \ingroup api_l4shmc_signal_cons
*
* \param timeout Timeout.
* \param[out] retsignal Signal that has the event pending if any.
*
* \retval 0 Success.
* \retval <0 Error.
*
* The return code indicates whether an event was pending or not. Success
 * means an event was pending; if a receive timeout error is returned, no
* event was pending.
*/
L4_CV long
l4shmc_wait_any_to(l4_timeout_t timeout, l4shmc_signal_t **retsignal);
/**
* Wait on a specific signal.
* \ingroup api_l4shmc_signal_cons
*
* \param signal Signal to wait for.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_wait_signal(l4shmc_signal_t *signal);
/**
* Wait on a specific signal, with timeout.
* \ingroup api_l4shmc_signal_cons
*
* \param signal Signal to wait for.
* \param timeout Timeout.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_wait_signal_to(l4shmc_signal_t *signal, l4_timeout_t timeout);
/**
* Check whether a specific signal has an event pending.
* \ingroup api_l4shmc_signal_cons
*
* \param signal Signal to check.
*
* \retval 0 Success.
* \retval <0 Error.
*
* The return code indicates whether an event was pending or not. Success
 * means an event was pending; if a receive timeout error is returned, no
* event was pending.
*/
L4_CV L4_INLINE long
l4shmc_wait_signal_try(l4shmc_signal_t *signal);
/**
* Wait on a specific chunk.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk to wait for.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_wait_chunk(l4shmc_chunk_t *chunk);
/**
* Check whether a specific chunk has an event pending, with timeout.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk to check.
* \param timeout Timeout.
*
* \retval 0 Success.
* \retval <0 Error.
*
* The return code indicates whether an event was pending or not. Success
 * means an event was pending; if a receive timeout error is returned, no
* event was pending.
*/
L4_CV long
l4shmc_wait_chunk_to(l4shmc_chunk_t *chunk, l4_timeout_t timeout);
/**
* Check whether a specific chunk has an event pending.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk to check.
*
* \retval 0 Success.
* \retval <0 Error.
*
* The return code indicates whether an event was pending or not. Success
 * means an event was pending; if a receive timeout error is returned, no
* event was pending.
*/
L4_CV L4_INLINE long
l4shmc_wait_chunk_try(l4shmc_chunk_t *chunk);
/**
* Mark a chunk as free.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk to mark as free.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV L4_INLINE long
l4shmc_chunk_consumed(l4shmc_chunk_t *chunk);
/**
* Connect a signal with a chunk.
* \ingroup api_l4shm
*
* \param chunk Chunk to attach the signal to.
* \param signal Signal to attach.
*
* \retval 0 Success.
* \retval <0 Error.
*/
L4_CV long
l4shmc_connect_chunk_signal(l4shmc_chunk_t *chunk, l4shmc_signal_t *signal);
/**
* Check whether data is available.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk to check.
*
* \retval !=0 Data is available.
* \retval 0 No data available.
*/
L4_CV L4_INLINE long
l4shmc_is_chunk_ready(l4shmc_chunk_t const *chunk);
/**
* Check whether chunk is free.
* \ingroup api_l4shmc_chunk_prod
*
* \param chunk Chunk to check.
*
* \retval !=0 Chunk is clear.
* \retval 0 Chunk is not clear.
*/
L4_CV L4_INLINE long
l4shmc_is_chunk_clear(l4shmc_chunk_t const *chunk);
/**
* Get data pointer to chunk.
* \ingroup api_l4shmc_chunk
*
* \param chunk Chunk.
*
* \return Chunk pointer.
*/
L4_CV L4_INLINE void *
l4shmc_chunk_ptr(l4shmc_chunk_t const *chunk);
/**
* Get current size of a chunk.
* \ingroup api_l4shmc_chunk_cons
*
* \param chunk Chunk.
*
* \return Current size of the chunk in bytes.
*/
L4_CV L4_INLINE long
l4shmc_chunk_size(l4shmc_chunk_t const *chunk);
/**
* Get capacity of a chunk.
* \ingroup api_l4shmc_chunk
*
* \param chunk Chunk.
*
* \return Capacity of the chunk in bytes.
*/
L4_CV L4_INLINE long
l4shmc_chunk_capacity(l4shmc_chunk_t const *chunk);
/**
* Get the registered signal of a chunk.
* \ingroup api_l4shmc_chunk
*
* \param chunk Chunk.
*
* \retval 0 No signal has been registered with this chunk.
* \retval !=0 Pointer to signal otherwise.
*/
L4_CV L4_INLINE l4shmc_signal_t *
l4shmc_chunk_signal(l4shmc_chunk_t const *chunk);
/**
* Get the signal capability of a signal.
* \ingroup api_l4shmc_signal
*
* \param signal Signal.
*
* \return Capability of the signal object.
*/
L4_CV L4_INLINE l4_cap_idx_t
l4shmc_signal_cap(l4shmc_signal_t const *signal);
/**
* Check magic value of a chunk.
* \ingroup api_l4shmc_signal
*
* \param chunk Chunk.
*
* \retval 0 Magic value is not valid.
* \retval >0 Chunk is OK, the magic value is valid.
*/
L4_CV L4_INLINE long
l4shmc_check_magic(l4shmc_chunk_t const *chunk);
/**
* Get size of shared memory area.
* \ingroup api_l4shm
*
* \param shmarea Shared memory area.
*
* \retval >0 Size of the shared memory area.
* \retval <0 Error.
*/
L4_CV long
l4shmc_area_size(l4shmc_area_t const *shmarea);
/**
* Get free size of shared memory area. To get the max size to
* pass to l4shmc_add_chunk, subtract l4shmc_chunk_overhead().
* \ingroup api_l4shm
*
* \param shmarea Shared memory area.
*
* \return Size of the shared memory area.
*/
L4_CV long
l4shmc_area_size_free(l4shmc_area_t const *shmarea);
/**
* Get memory overhead per area that is not available for chunks
* \ingroup api_l4shm
*
* \return Size of the overhead in bytes.
*/
L4_CV long
l4shmc_area_overhead(void);
/**
* Get memory overhead required in addition to the chunk capacity
* for adding one chunk
* \ingroup api_l4shm
*
* \return Size of the overhead in bytes.
*/
L4_CV long
l4shmc_chunk_overhead(void);
#include <l4/shmc/internal.h>
L4_END_DECLS

View File

@@ -0,0 +1,80 @@
/*
* (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>
* economic rights: Technische Universität Dresden (Germany)
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
/**
* \internal
* \file
*/
#pragma once
#ifndef __INCLUDED_FROM_L4SHMC_H__
#error Do not include l4/shm/types.h directly, use l4/shm/shm.h!
#endif
#include <l4/re/c/dataspace.h>
enum
{
L4SHMC_NAME_SIZE = 15,
L4SHMC_NAME_STRINGLEN = L4SHMC_NAME_SIZE + 1,
L4SHMC_CHUNK_NAME_SIZE = 15,
L4SHMC_CHUNK_NAME_STRINGLEN = L4SHMC_CHUNK_NAME_SIZE + 1,
L4SHMC_SIGNAL_NAME_SIZE = 15,
L4SHMC_SIGNAL_NAME_STRINGLEN = L4SHMC_SIGNAL_NAME_SIZE + 1,
/** The chunk does not contain any data and is owned by nobody. */
L4SHMC_CHUNK_CLEAR = 0,
/** The chunk is currently being written/owned by a producer. */
L4SHMC_CHUNK_BUSY_WRITING = 1,
/** The chunk contains data for a consumer and must not be written. */
L4SHMC_CHUNK_READY = 2,
/** The chunk is currently being read/owned by a consumer. */
L4SHMC_CHUNK_BUSY_READING = 3,
L4SHMC_CHUNK_MAGIC = 0xfedcba98,
};
/* l4shmc_chunk_desc_t is shared among address spaces */
/* private: This data structure is hidden for the clients */
typedef struct
{
l4_umword_t _magic; // magic
l4_addr_t _offset; // offset of chunk in shm-area
l4_umword_t _capacity; // capacity in bytes of chunk
l4_umword_t _size; // size of current payload
l4_umword_t _status; // status of chunk
l4_umword_t _signals_to_attach;
char _name[L4SHMC_CHUNK_NAME_STRINGLEN]; // name of chunk
l4_addr_t _next; // next chunk in shm-area, as absolute offset
char payload[];
} l4shmc_chunk_desc_t;
/* l4shmc_area_t is local to one address space */
/* private: This data structure is hidden for the clients */
typedef struct
{
l4re_ds_t _shm_ds;
void *_local_addr;
char _name[L4SHMC_NAME_STRINGLEN];
l4_umword_t _size;
l4_umword_t _client_nr;
} l4shmc_area_t;
/* l4shmc_signal_t is local to one address space */
typedef struct
{
l4_cap_idx_t _sigcap;
} l4shmc_signal_t;
/* l4shmc_chunk_t is local to one address space */
typedef struct
{
l4shmc_chunk_desc_t *_chunk;
l4shmc_area_t *_shm;
l4shmc_signal_t *_sig;
l4_umword_t _capacity;
} l4shmc_chunk_t;

View File

@@ -0,0 +1,8 @@
PKGDIR = ..
L4DIR ?= $(PKGDIR)/../..
TARGET = src ringbuf
include $(L4DIR)/mk/subdir.mk
ringbuf: src

View File

@@ -0,0 +1,13 @@
PKGDIR ?= ../..
L4DIR ?= $(PKGDIR)/../..
include $(L4DIR)/mk/Makeconf
# sparc: undefined reference to `__atomic_fetch_or_4'
SYSTEMS = $(filter-out sparc-l4f,$(SYSTEMS_ABI))
TARGET = lib4shmc_ringbuf.a lib4shmc_ringbuf.so
PC_FILENAME = shmc_ringbuf
SRC_C = ringbuf.c
REQUIRES_LIBS = shmc
include $(L4DIR)/mk/lib.mk

View File

@@ -0,0 +1,538 @@
/*
* (c) 2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
* Björn Döbel <doebel@os.inf.tu-dresden.de>
* economic rights: Technische Universität Dresden (Germany)
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <l4/shmc/shmc.h>
#include <l4/shmc/ringbuf.h>
#include <l4/util/assert.h>
#include <l4/sys/debugger.h>
#include <l4/sys/kdebug.h>
/***************************
* *
* GENERIC RINGBUF STUFF *
* *
***************************/
typedef struct
{
#if L4SHMC_RINGBUF_POISONING
char mag1;
#endif
unsigned size;
#if L4SHMC_RINGBUF_POISONING
char mag2;
#endif
} size_cookie_t;
#define SIZE_COOKIE_MAGIC1 (char)0xDE
#define SIZE_COOKIE_MAGIC2 (char)0xAD
#define EXTRACT_SIZE(size_cookie) (((size_cookie_t*)(size_cookie))->size)
#if L4SHMC_RINGBUF_POISONING
#define SIZE_COOKIE_INITIALIZER(size) (size_cookie_t){ SIZE_COOKIE_MAGIC1, (size), SIZE_COOKIE_MAGIC2 }
#else
#define SIZE_COOKIE_INITIALIZER(size) (size_cookie_t){(size)}
#endif
#if L4SHMC_RINGBUF_POISONING
#define ASSERT_COOKIE(c) \
ASSERT_EQUAL(((size_cookie_t*)(c))->mag1, SIZE_COOKIE_MAGIC1); \
ASSERT_EQUAL(((size_cookie_t*)(c))->mag2, SIZE_COOKIE_MAGIC2);
#else
#define ASSERT_COOKIE(c) do {} while(0)
#endif
#define BUF_HEAD_MAGIC1 (char)0xAB
#define BUF_HEAD_MAGIC2 (char)0xCD
#define BUF_HEAD_MAGIC3 (char)0xEF
#if L4SHMC_RINGBUF_POISONING
#define ASSERT_MAGIC(buf) \
ASSERT_EQUAL(L4SHMC_RINGBUF_HEAD(buf)->magic1, BUF_HEAD_MAGIC1); \
ASSERT_EQUAL(L4SHMC_RINGBUF_HEAD(buf)->magic2, BUF_HEAD_MAGIC2); \
ASSERT_EQUAL(L4SHMC_RINGBUF_HEAD(buf)->magic3, BUF_HEAD_MAGIC3);
#else
#define ASSERT_MAGIC(buf) do {} while(0)
#endif
/* Debug helper: dump the state of a shared ring buffer header to stdout.
 * NOTE(review): data_size/bytes_filled are printed with %d while
 * next_read/next_write use %x; the field types are declared in ringbuf.h
 * (not visible here) -- confirm the conversion specifiers match. */
static void log_ringbuf_head(l4shmc_ringbuf_head_t *head)
{
  ASSERT_NOT_NULL(head);
  printf(" head @ %p, lock state '%s', data size %d\n", head,
         head->lock == locked ? "LOCKED" : "unlocked",
         head->data_size);
  printf(" next_rd %x next_wr %x filled %d, data @ %p\n",
         head->next_read, head->next_write, head->bytes_filled,
         head->data);
}
/* Debug helper: dump the local ring buffer descriptor (area, owner,
 * chunk/signal names) and then its shared header. */
static void log_ringbuf(l4shmc_ringbuf_t *buf)
{
  ASSERT_NOT_NULL(buf);
  printf("buf @ %p, area %p (%s), size %d\n", buf, buf->_area,
         buf->_area->_name, buf->_size);
  printf(" owner %lx, name %s, signal %s\n", buf->_owner,
         buf->_chunkname, buf->_signame);
  log_ringbuf_head(L4SHMC_RINGBUF_HEAD(buf));
}
/* Bind a ring buffer descriptor to its shared memory area and keep a
 * private copy of the chunk name.
 * NOTE(review): strdup() may return NULL on OOM; the result is not
 * checked here or at the use sites -- verify callers tolerate this. */
static void l4shmc_rb_generic_init(l4shmc_area_t *area,
                                   char const *chunk_name,
                                   l4shmc_ringbuf_t *buf)
{
  ASSERT_NOT_NULL(area);
  ASSERT_NOT_NULL(chunk_name);
  ASSERT_NOT_NULL(buf);
  buf->_area = area;
  buf->_chunkname = strdup(chunk_name); // freed in l4shmc_rb_generic_deinit()
}

/* Keep a private copy of the base signal name
 * (freed in l4shmc_rb_generic_signal_deinit()). */
static void l4shmc_rb_generic_signal_init(l4shmc_ringbuf_t *buf, char const *sig_name)
{
  ASSERT_NOT_NULL(buf);
  buf->_signame = strdup(sig_name);
}

/* Release the chunk-name copy taken in l4shmc_rb_generic_init(). */
static void l4shmc_rb_generic_deinit(l4shmc_ringbuf_t *buf)
{
  free(buf->_chunkname);
}

/* Release the signal-name copy taken in l4shmc_rb_generic_signal_init(). */
static void l4shmc_rb_generic_signal_deinit(l4shmc_ringbuf_t *buf)
{
  free(buf->_signame);
}
/* Resolve the receiver-side objects inside the shared area: the data
 * chunk plus the two signals derived from the base signal name
 * ("<name>_rx" waited on by the receiver, "<name>_tx" waited on by the
 * sender). Any lookup failure aborts via ASSERT_OK. */
static void l4shmc_rb_receiver_get_signals(l4shmc_ringbuf_t *buf)
{
  ASSERT_NOT_NULL(buf);
  int err;
  static const int SIGNAME_SIZE = 40;
  char s1[SIGNAME_SIZE];
  snprintf(s1, sizeof(s1), "%s_rx", buf->_signame);
  char s2[SIGNAME_SIZE];
  snprintf(s2, sizeof(s2), "%s_tx", buf->_signame);
  err = l4shmc_get_chunk(buf->_area, buf->_chunkname, &buf->_chunk);
  ASSERT_OK(err);
  err = l4shmc_get_signal(buf->_area, s1, &buf->_signal_empty);
  ASSERT_OK(err);
  err = l4shmc_get_signal(buf->_area, s2, &buf->_signal_full);
  ASSERT_OK(err);
  // Debug aid: print the kernel debugger IDs of both signal caps.
  printf("RCV: signal RX %lx, TX %lx\n",
         l4_debugger_global_id(l4shmc_signal_cap(&buf->_signal_empty)),
         l4_debugger_global_id(l4shmc_signal_cap(&buf->_signal_full)));
}
void l4shmc_rb_init_header(l4shmc_ringbuf_head_t *head);
/* Reset a shared ring buffer header to the empty state: lock released,
 * read/write cursors at offset 0, nothing filled, no sender waiting.
 * Does NOT set data_size -- the caller fills that in afterwards
 * (see l4shmc_rb_init_buffer()). */
/*static */void l4shmc_rb_init_header(l4shmc_ringbuf_head_t *head)
{
  ASSERT_NOT_NULL(head);
  head->lock = unlocked;
  head->next_read = 0;
  head->next_write = 0;
  head->bytes_filled = 0;
  head->sender_waits = 0;
#if L4SHMC_RINGBUF_POISONING
  // Magic bytes checked by ASSERT_MAGIC in debug builds.
  head->magic1 = BUF_HEAD_MAGIC1;
  head->magic2 = BUF_HEAD_MAGIC2;
  head->magic3 = BUF_HEAD_MAGIC3;
#endif
}
/***************************
* *
* RINGBUF RECEIVER *
* *
***************************/
/* Initialize the receiver side of a ring buffer: bind to the area,
 * resolve the chunk and signal pair by name, and cache the chunk's
 * capacity and payload address. Always returns 0; lookup failures
 * abort via ASSERT_OK inside the helpers. */
L4_CV int l4shmc_rb_init_receiver(l4shmc_ringbuf_t *buf, l4shmc_area_t *area,
                                  char const *chunk_name,
                                  char const *signal_name)
{
  l4shmc_rb_generic_init(area, chunk_name, buf);
  l4shmc_rb_generic_signal_init(buf, signal_name);
  l4shmc_rb_receiver_get_signals(buf);
  // The receiver learns the buffer size from the chunk the sender created.
  buf->_size = l4shmc_chunk_capacity(&buf->_chunk);
  printf("RCV: buf size %d\n", buf->_size);
  buf->_addr = l4shmc_chunk_ptr(&buf->_chunk);
  log_ringbuf(buf);
  return 0;
}
/* Attach the receiver's "<name>_rx" signal to the given thread so that
 * thread receives data-ready notifications. 'owner' becomes the ring
 * buffer's owning thread. Failure aborts via ASSERT_OK. */
L4_CV void l4shmc_rb_attach_receiver(l4shmc_ringbuf_t *buf, l4_cap_idx_t owner)
{
  ASSERT_NOT_NULL(buf);
  ASSERT_VALID(owner);
  ASSERT_MAGIC(buf);
  int err;
  static const int SIGNAME_SIZE = 40;
  char s1[SIGNAME_SIZE];
  snprintf(s1, sizeof(s1), "%s_rx", buf->_signame);
  buf->_owner = owner;
  printf("RCV: attaching to signal %lx (%lx)\n",
         l4_debugger_global_id(l4shmc_signal_cap(&buf->_signal_empty)),
         l4shmc_signal_cap(&buf->_signal_empty));
  err = l4shmc_attach_signal(buf->_area, s1, buf->_owner, &buf->_signal_empty);
#if 0
  printf(" ... attached: %d\n", err);
#endif
  ASSERT_OK(err);
}
/* Wait for the sender's data-ready notification.
 * If 'block' is non-zero, wait forever; otherwise just poll once.
 * Returns the result of l4shmc_wait_signal_to() (0 on success,
 * negative IPC error otherwise, e.g. timeout when polling). */
L4_CV int l4shmc_rb_receiver_wait_for_data(l4shmc_ringbuf_t *buf, int block)
{
  ASSERT_NOT_NULL(buf);
  ASSERT_MAGIC(buf);
  l4_timeout_t timeout = block ? L4_IPC_NEVER : L4_IPC_BOTH_TIMEOUT_0;
  return l4shmc_wait_signal_to(&buf->_signal_empty, timeout);
}
/*
 * Copy the next pending packet out of the ring buffer into 'target'.
 *
 * head:   shared ring buffer header.
 * target: destination buffer.
 * tsize:  in: capacity of 'target' in bytes; out: actual packet size.
 *
 * Returns 0 on success, -L4_ENOENT if the ring is empty, -L4_EINVAL if
 * 'target' is too small for the pending packet. The ring lock is held
 * only inside this function.
 */
L4_CV int l4shmc_rb_receiver_copy_out(l4shmc_ringbuf_head_t *head,
                                      char *target, unsigned *tsize)
{
  ASSERT_NOT_NULL(head);
  ASSERT_NOT_NULL(target);
  ASSERT_NOT_NULL(tsize);
  l4shmc_rb_lock(head);
  // users (e.g., L4Lx) may call this function directly w/o
  // checking whether data is available. In this case, simply
  // return an error here.
  if (head->bytes_filled == 0)
    {
      l4shmc_rb_unlock(head);
      return -L4_ENOENT;
    }
  char *addr = head->data + head->next_read;
  char *max_addr = head->data + head->data_size;
  unsigned size_in_buffer = EXTRACT_SIZE(addr);
  ASSERT_COOKIE(addr);
  if (*tsize < size_in_buffer)
    {
      printf("tsize %u, psize %u\n", *tsize, size_in_buffer);
      printf("addr = %p\n", (void *)addr);
      // BUGFIX: this error path previously returned while still holding
      // the ring lock, deadlocking every subsequent ring operation.
      l4shmc_rb_unlock(head);
      return -L4_EINVAL;
    }
  ASSERT_GREATER_EQ(*tsize, size_in_buffer);
  *tsize = size_in_buffer;
  addr += sizeof(size_cookie_t); // skip the size cookie; payload follows
  ASSERT_GREATER_EQ(max_addr, addr);
  if (addr + *tsize >= max_addr)
    {
      // Packet wraps around the end of the ring: copy in two pieces.
      unsigned diff = head->data + head->data_size - addr;
      memcpy(target, addr, diff);
      memcpy(target + diff, head->data, *tsize - diff);
    }
  else
    memcpy(target, addr, *tsize);
  head->next_read += (*tsize + sizeof(size_cookie_t));
  // wrap around?
  if (head->next_read >= head->data_size)
    head->next_read %= head->data_size;
  // if there's no space for a length field, the sender will
  // have wrapped until beginning of buffer already
  if (head->next_read + sizeof(size_cookie_t) >= head->data_size)
    head->next_read = 0;
  head->bytes_filled -= (*tsize + sizeof(size_cookie_t));
  ASSERT_GREATER_EQ(head->data_size, head->bytes_filled);
  l4shmc_rb_unlock(head);
  return 0;
}
/* Receiver-side completion notification: if the sender flagged that it
 * is waiting for free space, clear the flag and trigger the sender-side
 * signal (_signal_full). Flag test and clear happen under the ring lock
 * so the sender cannot re-raise the flag in between.
 * NOTE(review): the disabled debug printf below mentions _signal_empty,
 * but the code triggers _signal_full -- the printf text is stale. */
L4_CV void l4shmc_rb_receiver_notify_done(l4shmc_ringbuf_t *buf)
{
  ASSERT_NOT_NULL(buf);
  l4shmc_rb_lock(L4SHMC_RINGBUF_HEAD(buf));
  if (L4SHMC_RINGBUF_HEAD(buf)->sender_waits)
    {
      L4SHMC_RINGBUF_HEAD(buf)->sender_waits = 0;
#if 0
      printf("RCV: TRIGGER %lx (%lx)\n", buf->_signal_empty._sigcap,
             l4_debugger_global_id(buf->_signal_empty._sigcap));
#endif
      l4shmc_trigger(&buf->_signal_full);
    }
  l4shmc_rb_unlock(L4SHMC_RINGBUF_HEAD(buf));
}
/* Peek at the size of the next pending packet without consuming it.
 * Returns the payload size in bytes, or -1 if the ring is empty. */
L4_CV int l4shmc_rb_receiver_read_next_size(l4shmc_ringbuf_head_t *head)
{
  ASSERT_NOT_NULL(head);
  int result = -1;
  l4shmc_rb_lock(head);
  if (head->bytes_filled > 0)
    {
      char *pkt = head->data + head->next_read;
      ASSERT_COOKIE(pkt);
      result = EXTRACT_SIZE(pkt);
    }
  l4shmc_rb_unlock(head);
  return result;
}
/***************************
* *
* RINGBUF SENDER *
* *
***************************/
/* Register the ring buffer's signal pair in the shared area:
 * "<name>_rx" (data ready, receiver waits) and "<name>_tx" (space
 * available, sender waits). Failures abort via ASSERT_OK.
 * NOTE(review): the names are built from buf->_signame, not from the
 * 'signal_name' parameter, which is only NULL-checked -- the caller
 * passed the same string to l4shmc_rb_generic_signal_init() earlier. */
static L4_CV void l4shmc_rb_sender_add_signals(l4shmc_ringbuf_t *buf,
                                               char const *signal_name)
{
  int err;
  ASSERT_NOT_NULL(buf);
  ASSERT_NOT_NULL(signal_name);
  static const int SIGNAME_SIZE = 40;
  char s1[SIGNAME_SIZE];
  char s2[SIGNAME_SIZE];
  snprintf(s1, sizeof(s1), "%s_rx", buf->_signame);
  snprintf(s2, sizeof(s2), "%s_tx", buf->_signame);
  err = l4shmc_add_signal(buf->_area, s1, &buf->_signal_empty);
  ASSERT_OK(err);
  err = l4shmc_add_signal(buf->_area, s2, &buf->_signal_full);
  ASSERT_OK(err);
  printf("SND: signal RX %lx, TX %lx\n",
         l4_debugger_global_id(l4shmc_signal_cap(&buf->_signal_empty)),
         l4_debugger_global_id(l4shmc_signal_cap(&buf->_signal_full)));
}
/* Sender side: create the backing chunk ('size' payload bytes plus the
 * shared ring header), register the signal pair and initialize the
 * header. Always returns 0; failures abort via ASSERT_OK. */
L4_CV int l4shmc_rb_init_buffer(l4shmc_ringbuf_t *buf, l4shmc_area_t *area,
                                char const *chunk_name,
                                char const *signal_name, unsigned size)
{
  int err;
  l4shmc_rb_generic_init(area, chunk_name, buf);
  l4shmc_rb_generic_signal_init(buf, signal_name);
  // The chunk must hold the shared header in front of the payload.
  buf->_size = size + sizeof(l4shmc_ringbuf_head_t);
  printf("add_chunk: area %p, name '%s', size %d\n", buf->_area,
         buf->_chunkname, buf->_size);
  err = l4shmc_add_chunk(buf->_area, buf->_chunkname, buf->_size, &buf->_chunk);
  ASSERT_OK(err);
  l4shmc_rb_sender_add_signals(buf, signal_name);
  buf->_addr = l4shmc_chunk_ptr(&buf->_chunk);
  l4shmc_rb_init_header(L4SHMC_RINGBUF_HEAD(buf));
  // data_size is the payload capacity only (header excluded).
  L4SHMC_RINGBUF_HEAD(buf)->data_size = size;
  log_ringbuf(buf);
  return 0;
}
/* Attach the sender's "<name>_tx" signal (space-available notification)
 * to the given thread. 'owner' becomes the ring buffer's owning thread.
 * Returns the (asserted-OK) result of l4shmc_attach_signal(). */
L4_CV int l4shmc_rb_attach_sender(l4shmc_ringbuf_t *buf, char const *signal_name,
                                  l4_cap_idx_t owner)
{
  ASSERT_NOT_NULL(buf);
  ASSERT_VALID(owner);
  int err;
  static const int SIGNAME_SIZE = 40;
  char signame[SIGNAME_SIZE];
  snprintf(signame, sizeof(signame), "%s_tx", signal_name);
  buf->_owner = owner;
  printf("SND: attaching to signal %lx (%lx)\n",
         l4_debugger_global_id(l4shmc_signal_cap(&buf->_signal_full)),
         l4shmc_signal_cap(&buf->_signal_full));
  printf(" buf @ %p\n", L4SHMC_RINGBUF_HEAD(buf));
  err = l4shmc_attach_signal(buf->_area, signame, buf->_owner, &buf->_signal_full);
  ASSERT_OK(err);
  return err;
}
/* Tear down a ring buffer descriptor: release the name copies that were
 * allocated at init time (the shared chunk itself is not removed). */
L4_CV void l4shmc_rb_deinit_buffer(l4shmc_ringbuf_t *buf)
{
  ASSERT_NOT_NULL(buf);
  l4shmc_rb_generic_signal_deinit(buf);
  l4shmc_rb_generic_deinit(buf);
}
/*
 * Reserve space for a packet of 'size' payload bytes in the ring.
 *
 * head: shared ring buffer header.
 * size: payload size in bytes (a size cookie is added internally).
 *
 * Returns a pointer to the payload area (just past the size cookie), or
 * NULL if there is not enough free space; in that case sender_waits is
 * raised so the receiver will trigger the sender's signal once space
 * becomes available.
 */
L4_CV char *l4shmc_rb_sender_alloc_packet(l4shmc_ringbuf_head_t *head, unsigned size)
{
  ASSERT_NOT_NULL(head);
  char *ret = NULL;
  int psize = size + sizeof(size_cookie_t); // need space to store packet size
  l4shmc_rb_lock(head);
  unsigned W = head->next_write;
  unsigned B = head->bytes_filled;
  int diff = head->data_size - B;
  ASSERT_GREATER_EQ(diff, 0);
  // Was a 'while' loop whose body unconditionally jumped out -- a plain
  // 'if' expresses the actual control flow.
  if (diff < psize)
    {
      head->sender_waits = 1;
      goto out;
    }
  // calculate pointer from offset and store packet len
  ret = head->data + W;
  *(size_cookie_t*)ret = SIZE_COOKIE_INITIALIZER(psize - sizeof(size_cookie_t));
  ret += sizeof(size_cookie_t);
  // make sure that there is at least enough space left at the end of
  // the ring so that we can store a whole packet length field
  // (sizeof(size_cookie_t))
  W += psize;
  // already larger
  if (W >= head->data_size)
    W %= head->data_size;
  // space does not fit another length field - step to beginning
  if (W + sizeof(size_cookie_t) >= head->data_size)
    W = 0;
  head->next_write = W;
  head->bytes_filled += psize;
  ASSERT_GREATER_EQ(head->data_size, head->bytes_filled);
out:
  l4shmc_rb_unlock(head);
  return ret;
}
/* Allocate ring space for 'size' bytes and copy 'data' in.
 * Returns 0 on success, -L4_ENOMEM if no space is available and
 * block_if_necessary is 0.
 * NOTE(review): the wait on _signal_full is compiled out (#if 0), so
 * with block_if_necessary != 0 a full ring makes this loop busy-spin
 * until the receiver frees space -- confirm this is intended. */
L4_CV int l4shmc_rb_sender_next_copy_in(l4shmc_ringbuf_t *buf, char *data,
                                        unsigned size, int block_if_necessary)
{
  ASSERT_NOT_NULL(buf);
  ASSERT_NOT_NULL(data);
  ASSERT_MAGIC(buf);
  char *addr = 0;
#if 0
  printf("%s: %p %p %d %d\n", __func__, buf, data, size, block_if_necessary);
#endif
  // Retry allocation until it succeeds or we are not allowed to block.
  while (((addr = l4shmc_rb_sender_alloc_packet(L4SHMC_RINGBUF_HEAD(buf), size)) == 0) &&
         block_if_necessary)
    {
#if 0
      printf("%s: wait(%lx (%lx))\n", __func__, buf->_signal_full._sigcap,
             l4_debugger_global_id(buf->_signal_full._sigcap));
      l4shmc_wait_signal(&buf->_signal_full);
#endif
    }
  if (addr)
    {
      l4shmc_rb_sender_put_data(buf, addr, data, size);
      return 0;
    }
  return -L4_ENOMEM;
}
/* Copy 'dsize' bytes from 'data' into ring space previously reserved by
 * l4shmc_rb_sender_alloc_packet(). 'addr' must be the pointer returned
 * by that call; a packet crossing the end of the ring is copied in two
 * pieces (wrap-around). */
L4_CV void l4shmc_rb_sender_put_data(l4shmc_ringbuf_t *buf, char *addr, char *data,
                                     unsigned dsize)
{
  ASSERT_NOT_NULL(buf);
  ASSERT_NOT_NULL(addr);
  ASSERT_NOT_NULL(data);
  ASSERT_MAGIC(buf);
  char *max_addr = L4SHMC_RINGBUF_DATA(buf) + L4SHMC_RINGBUF_DATA_SIZE(buf);
  if (max_addr < addr)
    {
      // addr beyond the data area indicates corruption; dump state
      // before the assertion below fires.
      printf("ERROR: max %p, addr %p\n", (void*)max_addr, addr);
      printf(" DATA %p, DATA_SIZE %lx\n",
             L4SHMC_RINGBUF_DATA(buf),
             (unsigned long)L4SHMC_RINGBUF_DATA_SIZE(buf));
      log_ringbuf(buf);
    }
  ASSERT_GREATER_EQ(max_addr, addr);
  if (addr + dsize > max_addr)
    {
      // Wrap-around: fill up to the end, continue at the ring start.
      l4_addr_t diff = (l4_addr_t)max_addr - (l4_addr_t)addr;
      memcpy(addr, data, diff);
      memcpy(L4SHMC_RINGBUF_DATA(buf), data + diff, dsize - diff);
    }
  else
    memcpy(addr, data, dsize);
}
/* Publish the packet staged via alloc/put: trigger the receiver's
 * data-ready signal. Trigger failure aborts via ASSERT_OK. */
L4_CV void l4shmc_rb_sender_commit_packet(l4shmc_ringbuf_t *buf)
{
  ASSERT_NOT_NULL(buf);
  long rc __attribute__((unused)) = l4shmc_trigger(&buf->_signal_empty);
  ASSERT_OK(rc);
}

View File

@@ -0,0 +1,12 @@
PKGDIR ?= ../..
L4DIR ?= $(PKGDIR)/../..
include $(L4DIR)/mk/Makeconf
# sparc: undefined reference to `__atomic_fetch_or_4'
SYSTEMS = $(filter-out sparc-l4f,$(SYSTEMS_ABI))
TARGET = lib4shmc.a lib4shmc.so
SRC_C = shmc.c
REQUIRES_LIBS = l4re_c l4re_c-util
include $(L4DIR)/mk/lib.mk

View File

@@ -0,0 +1,435 @@
/*
* (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
* Alexander Warg <warg@os.inf.tu-dresden.de>
* economic rights: Technische Universität Dresden (Germany)
* This file is part of TUD:OS and distributed under the terms of the
* GNU Lesser General Public License 2.1.
* Please see the COPYING-LGPL-2.1 file for details.
*/
#include <l4/shmc/shmc.h>
#include <l4/sys/err.h>
#include <l4/sys/rcv_endpoint.h>
#include <l4/re/env.h>
#include <l4/re/c/rm.h>
#include <l4/sys/debugger.h>
#include <l4/util/util.h>
#include <l4/util/atomic.h>
#include <string.h>
#include <stdio.h>
/* Head of a shared memory data memory, which has a size of multiple pages
* No task local data must be in here (pointers, caps)
*/
typedef struct
{
l4_umword_t lock; // lock for handling chunks
l4_umword_t _next_client_nr; // number of next client to attach
l4_umword_t _clients_init_done; // bitmask of initialized clients
l4_addr_t _first_chunk; // offset to first chunk
} shared_mem_t;
enum
{
SHMAREA_LOCK_FREE = 0,
SHMAREA_LOCK_TAKEN,
};
enum
{
MAX_SIZE = (~0UL) >> 1,
};
/* Translate a chunk offset within the shared area into a local pointer
 * to its descriptor. */
static inline l4shmc_chunk_desc_t *
chunk_get(l4_addr_t o, void const *shm_local_addr)
{
  l4_addr_t base = (l4_addr_t)shm_local_addr;
  return (l4shmc_chunk_desc_t *)(base + o);
}
/* Validate a shared memory area named by an environment capability.
 * The dataspace is mapped only temporarily to confirm it is usable and
 * is detached again before returning; no header fields are written --
 * see the comment below for why. */
L4_CV long
l4shmc_create(char const *shm_name)
{
  shared_mem_t *s;
  l4re_ds_t shm_ds;
  l4re_ds_size_t shm_size;
  long r;
  shm_ds = l4re_env_get_cap(shm_name);
  if (l4_is_invalid_cap(shm_ds))
    return -L4_ENOENT;
  shm_size = l4re_ds_size(shm_ds);
  if (shm_size > MAX_SIZE)
    return -L4_ENOMEM;
  // The area must be mappable read-write by all parties.
  if ((l4re_ds_flags(shm_ds) & L4_FPAGE_RW) != L4_FPAGE_RW)
    return -L4_EPERM;
  if ((r = l4re_rm_attach((void **)&s, shm_size,
                          L4RE_RM_F_SEARCH_ADDR | L4RE_RM_F_RW,
                          shm_ds | L4_CAP_FPAGE_RW,
                          0, L4_PAGESHIFT)))
    return r;
  // Do NOT initialize any members! We assume that the passed dataspace has
  // been zero-initialized. Certain setups assume that both sides may create
  // the shared memory area.
  //
  // Here we assume
  // - _first_chunk = 0UL, and
  // - lock = SHMAREA_LOCK_FREE.
  l4re_rm_detach_unmap((l4_addr_t)s, L4RE_THIS_TASK_CAP);
  return L4_EOK;
}
/**
 * Attach to a shared memory area named by an environment capability.
 *
 * Maps the dataspace into the local address space and atomically claims
 * the next free client number from the shared header.
 *
 * \param      shm_name  Name of the dataspace capability.
 * \param[out] shmarea   Area descriptor to initialize.
 * \return L4_EOK on success, negative error code otherwise.
 */
L4_CV long
l4shmc_attach(char const *shm_name, l4shmc_area_t *shmarea)
{
  l4re_ds_t shm_ds;
  l4re_ds_size_t shm_size;
  shared_mem_t *shm_addr;
  long ret;

  strncpy(shmarea->_name, shm_name, sizeof(shmarea->_name));
  shmarea->_name[sizeof(shmarea->_name) - 1] = 0; // strncpy may not terminate
  shmarea->_local_addr = 0;

  shm_ds = l4re_env_get_cap(shm_name);
  if (l4_is_invalid_cap(shm_ds))
    return -L4_ENOENT;

  if ((l4re_ds_flags(shm_ds) & L4_FPAGE_RW) != L4_FPAGE_RW)
    return -L4_EPERM;

  shm_size = l4re_ds_size(shm_ds);
  if (shm_size > MAX_SIZE)
    return -L4_ENOMEM;

  shmarea->_shm_ds = shm_ds;
  shmarea->_size = shm_size;

  /* Fixed misplaced parenthesis: previously the '< 0' was applied inside
   * the assignment ('ret = attach(...) < 0'), so on failure 'ret' became
   * the boolean 1 and a positive bogus value was returned instead of the
   * attach error code. */
  ret = l4re_rm_attach(&shmarea->_local_addr, shmarea->_size,
                       L4RE_RM_F_SEARCH_ADDR | L4RE_RM_F_RW,
                       shmarea->_shm_ds | L4_CAP_FPAGE_RW,
                       0, L4_PAGESHIFT);
  if (ret < 0)
    return ret;

  shm_addr = (shared_mem_t *)shmarea->_local_addr;
  /* Atomically claim this task's client number. */
  shmarea->_client_nr = __atomic_fetch_add(&shm_addr->_next_client_nr, 1UL,
                                           __ATOMIC_SEQ_CST);
  return L4_EOK;
}
/**
 * Mark the calling client as initialized in the shared bitmask.
 *
 * \param shmarea  Attached shared memory area.
 * \return L4_EOK on success, -L4_EINVAL if this client's number does
 *         not fit into the l4_umword_t bitmask.
 */
L4_CV long
l4shmc_mark_client_initialized(l4shmc_area_t *shmarea)
{
  shared_mem_t *hdr = (shared_mem_t *)shmarea->_local_addr;
  l4_umword_t bit = shmarea->_client_nr;

  /* Only as many clients as the mask has bits can be tracked. */
  if (bit >= sizeof(l4_umword_t) * 8)
    return -L4_EINVAL;

  __atomic_or_fetch(&hdr->_clients_init_done, 1UL << bit, __ATOMIC_SEQ_CST);
  return L4_EOK;
}
/** Return the client number assigned to this task at attach time. */
L4_CV long
l4shmc_get_client_nr(l4shmc_area_t const *shmarea)
{
  long nr = shmarea->_client_nr;
  return nr;
}
/**
 * Read the bitmask of clients that have marked themselves initialized.
 *
 * \param      shmarea  Attached shared memory area.
 * \param[out] bitmask  Receives the current initialization bitmask.
 * \return L4_EOK.
 */
L4_CV long
l4shmc_get_initialized_clients(l4shmc_area_t *shmarea, l4_umword_t *bitmask)
{
  shared_mem_t *hdr = (shared_mem_t *)shmarea->_local_addr;
  *bitmask = __atomic_load_n(&hdr->_clients_init_done, __ATOMIC_SEQ_CST);
  return L4_EOK;
}
/** Number of bytes at the start of an area consumed by the shared header. */
L4_CV long
l4shmc_area_overhead(void)
{
  return (long)sizeof(shared_mem_t);
}
/** Return the total size of the area's backing dataspace in bytes. */
L4_CV long
l4shmc_area_size(l4shmc_area_t const *shmarea)
{
  l4re_ds_size_t sz = l4re_ds_size(shmarea->_shm_ds);
  return sz;
}
/** Per-chunk bookkeeping cost in bytes. */
L4_CV long
l4shmc_chunk_overhead(void)
{
  /* l4shmc_add_chunk() rounds the requested capacity up to l4_addr_t
   * alignment, so account for that padding on top of the descriptor. */
  return (long)(sizeof(l4shmc_chunk_desc_t) + sizeof(l4_addr_t));
}
/* Return the offset of the chunk following 'offs' (0 == start of the
 * area, i.e. return the first chunk), validating the on-shm link so a
 * corrupt or malicious peer cannot make us read out of bounds.
 *
 * Returns the (positive) offset of the next chunk, 0 at the end of the
 * list, or a negative error code when the link is inconsistent.
 */
static long next_chunk(l4shmc_area_t const *shmarea, l4_addr_t offs)
{
  shared_mem_t *shm_addr = (shared_mem_t *)shmarea->_local_addr;
  volatile l4shmc_chunk_desc_t *p;
  l4_addr_t next;
  if (offs == 0)
    next = shm_addr->_first_chunk;  // list head lives in the area header
  else
    {
      p = chunk_get(offs, shmarea->_local_addr);
      next = p->_next;
    }
  if (next == 0)  // end of list reached
    return 0;
  /* The link must stay inside the area, leave room for a full
   * descriptor, and move strictly forward (chunks are appended in
   * ascending order, so this also rules out cycles). */
  if (next >= shmarea->_size || next + sizeof(*p) >= shmarea->_size || next <= offs)
    return -L4_EIO;
  /* Chunks are always placed at l4_addr_t granularity. */
  if (next % sizeof(l4_addr_t) != 0)
    return -L4_EINVAL;
  p = chunk_get(next, shmarea->_local_addr);
  if (p->_magic != L4SHMC_CHUNK_MAGIC)  // descriptor sanity check
    return -L4_EIO;
  return next;
}
/**
 * Iterate over the chunks of an area.
 *
 * Pass 0 to start the iteration, then pass the previous return value to
 * continue.
 *
 * \param      shmarea     Attached shared memory area.
 * \param[out] chunk_name  Receives the name of the found chunk.
 * \param      offs        Previous result, or 0 for the first chunk.
 * \return Positive offset of the next chunk (with *chunk_name set),
 *         0 at the end of the list, or a negative error code.
 */
L4_CV long
l4shmc_iterate_chunk(l4shmc_area_t const *shmarea, char const **chunk_name,
                     long offs)
{
  if (offs < 0)
    return -L4_EINVAL;

  long next = next_chunk(shmarea, offs);
  if (next > 0)
    *chunk_name = chunk_get(next, shmarea->_local_addr)->_name;

  return next;
}
/**
 * Allocate a new chunk named 'chunk_name' with at least 'chunk_capacity'
 * payload bytes at the end of the area's chunk list.
 *
 * Serializes against concurrent l4shmc_add_chunk() calls from any client
 * via the spin lock in the shared header.
 *
 * \param      shmarea         Attached shared memory area.
 * \param      chunk_name      Unique name for the new chunk.
 * \param      chunk_capacity  Requested payload capacity in bytes.
 * \param[out] chunk           Local chunk handle to initialize.
 * \return L4_EOK on success; -L4_EEXIST if the name is taken,
 *         -L4_ENOMEM if the chunk does not fit, -L4_EIO on corruption.
 */
L4_CV long
l4shmc_add_chunk(l4shmc_area_t *shmarea, char const *chunk_name,
                 l4_umword_t chunk_capacity, l4shmc_chunk_t *chunk)
{
  shared_mem_t *shm_addr = (shared_mem_t *)shmarea->_local_addr;
  l4shmc_chunk_desc_t *p = NULL;
  l4shmc_chunk_desc_t *prev = NULL;
  l4_addr_t offs = 0;
  long ret;
  // Round the capacity up to l4_addr_t alignment
  // (see l4shmc_chunk_overhead()).
  chunk_capacity = (chunk_capacity + sizeof(l4_addr_t) - 1) & ~(sizeof(l4_addr_t) - 1);
  // Reject capacities with the top bit set; offsets must remain
  // representable as positive longs.
  if (chunk_capacity >> (sizeof(chunk_capacity) * 8 - 1))
    return -L4_ENOMEM;
  // Take the area-global chunk lock, spinning with 1ms sleeps.
  while (!l4util_cmpxchg(&shm_addr->lock, SHMAREA_LOCK_FREE,
                         SHMAREA_LOCK_TAKEN))
    l4_sleep(1);
  // Compiler barrier: keep shared-memory accesses after lock acquisition.
  asm volatile ("" : : : "memory");
  // Walk to the end of the list, checking for a name collision on the way.
  while ((ret = next_chunk(shmarea, offs)) > 0)
    {
      p = chunk_get(ret, shmarea->_local_addr);
      if (strcmp(p->_name, chunk_name) == 0)
        {
          ret = -L4_EEXIST;
          goto out_free_lock;
        }
      offs = ret;
    }
  if (ret < 0) // chunk list is corrupt
    goto out_free_lock;
  if (offs == 0)
    offs = sizeof(shared_mem_t); // empty list: first chunk follows the header
  else
    {
      // Place the new chunk directly behind the last one ('p' still
      // points at it); validate the computed end does not wrap or
      // leave the area.
      l4_addr_t n = p->_offset + p->_capacity + sizeof(*p);
      if (n <= offs || n >= shmarea->_size)
        {
          ret = -L4_EIO;
          goto out_free_lock;
        }
      offs = n;
      prev = p;
    }
  if (offs + chunk_capacity + sizeof(*p) > (unsigned long)shmarea->_size)
    {
      ret = -L4_ENOMEM;
      goto out_free_lock; // no more free memory in this shm
    }
  // Initialize the new descriptor before making it visible.
  p = chunk_get(offs, shmarea->_local_addr);
  p->_offset = offs;
  p->_next = 0;
  p->_capacity = chunk_capacity;
  p->_size = 0;
  p->_status = L4SHMC_CHUNK_CLEAR;
  p->_magic = L4SHMC_CHUNK_MAGIC;
  strncpy(p->_name, chunk_name, sizeof(p->_name));
  p->_name[sizeof(p->_name) - 1] = 0; // strncpy may not terminate
  // Ensure that other CPUs have correct data before inserting chunk
  __sync_synchronize();
  // Publish the chunk by linking it into the list.
  if (prev)
    prev->_next = offs;
  else
    shm_addr->_first_chunk = offs;
  __sync_synchronize();
  shm_addr->lock = SHMAREA_LOCK_FREE; // release the lock
  // Fill in the caller's task-local handle.
  chunk->_chunk = p;
  chunk->_shm = shmarea;
  chunk->_sig = NULL;
  chunk->_capacity = chunk_capacity;
  return L4_EOK;
out_free_lock:
  shm_addr->lock = SHMAREA_LOCK_FREE;
  return ret;
}
/**
 * Return the number of bytes still available for new chunks.
 *
 * Fixed: the previous version subtracted only the *offset* of the last
 * chunk from the area size, counting the last chunk's descriptor and
 * payload (and, for an empty list, the shared header) as free — which
 * overestimated what l4shmc_add_chunk() can actually allocate. Now the
 * end of the used region is computed the same way l4shmc_add_chunk()
 * places the next chunk.
 *
 * \param shmarea  Attached shared memory area.
 * \return Free bytes (>= 0), or a negative error code if the chunk
 *         list is corrupt.
 */
L4_CV long
l4shmc_area_size_free(l4shmc_area_t const *shmarea)
{
  long ret;
  l4_addr_t offs = 0;

  /* Walk to the last chunk in the list. */
  while ((ret = next_chunk(shmarea, offs)) > 0)
    offs = ret;
  if (ret < 0)
    return ret;

  if (offs == 0)
    offs = sizeof(shared_mem_t); /* empty list: only the header is used */
  else
    {
      /* Used region ends behind the last chunk's descriptor + payload. */
      l4shmc_chunk_desc_t *p = chunk_get(offs, shmarea->_local_addr);
      offs = p->_offset + p->_capacity + sizeof(*p);
    }

  ret = shmarea->_size - offs;
  return ret > 0 ? ret : 0;
}
/* Adding a signal is currently identical to looking one up in the
 * environment; signals are provided externally as IRQ capabilities. */
L4_CV long
l4shmc_add_signal(l4shmc_area_t *shmarea, char const *signal_name,
                  l4shmc_signal_t *signal)
{
  long r = l4shmc_get_signal(shmarea, signal_name, signal);
  return r;
}
/**
 * Look up the chunk named 'chunk_name', retrying for up to 'timeout_ms'
 * milliseconds (polling in 100ms steps) in case the peer has not
 * created it yet.
 *
 * \param      shmarea     Attached shared memory area.
 * \param      chunk_name  Name of the chunk to find.
 * \param      timeout_ms  Polling timeout in milliseconds (0 == try once).
 * \param[out] chunk       Local chunk handle, initialized on success.
 * \return L4_EOK on success, -L4_ENOENT on timeout, -L4_EIO or other
 *         negative code if the chunk list is corrupt.
 */
L4_CV long
l4shmc_get_chunk_to(l4shmc_area_t *shmarea, char const *chunk_name,
                    l4_umword_t timeout_ms, l4shmc_chunk_t *chunk)
{
  // KIP clock ticks in microseconds.
  // NOTE(review): 'timeout_ms * 1000' can wrap for very large timeouts;
  // presumably callers pass moderate values — confirm.
  l4_kernel_clock_t try_until = l4_kip_clock(l4re_kip()) + (timeout_ms * 1000);
  long ret;
  do
    {
      l4_addr_t offs = 0;
      // Linear scan of the chunk list for a matching name.
      while ((ret = next_chunk(shmarea, offs)) > 0)
        {
          l4shmc_chunk_desc_t *p;
          offs = ret;
          p = chunk_get(offs, shmarea->_local_addr);
          if (!strcmp(p->_name, chunk_name))
            { // found it!
              chunk->_shm = shmarea;
              chunk->_chunk = p;
              chunk->_sig = NULL;
              chunk->_capacity = p->_capacity;
              // Guard against a corrupt descriptor claiming more
              // space than the area provides.
              if ( chunk->_capacity > shmarea->_size
                  || chunk->_capacity + offs > shmarea->_size)
                return -L4_EIO;
              return L4_EOK;
            }
        }
      if (ret < 0) // chunk list corrupt
        return ret;
      if (!timeout_ms) // single-shot lookup requested
        break;
      l4_sleep(100); // poll interval
    }
  while (l4_kip_clock(l4re_kip()) < try_until);
  return -L4_ENOENT;
}
/**
 * Look up a signal and bind its IRQ to a receiving thread.
 *
 * The signal pointer itself is used as the IPC label, so that
 * l4shmc_wait_any_to() can recover the signal from an incoming wait.
 *
 * \param      shmarea      Attached shared memory area.
 * \param      signal_name  Name of the IRQ capability in the environment.
 * \param      thread       Thread that shall receive the signal.
 * \param[out] signal       Signal handle to initialize.
 * \return L4_EOK on success, negative error code otherwise.
 */
L4_CV long
l4shmc_attach_signal(l4shmc_area_t *shmarea, char const *signal_name,
                     l4_cap_idx_t thread, l4shmc_signal_t *signal)
{
  long err = l4shmc_get_signal(shmarea, signal_name, signal);
  if (err < 0)
    return err;

  err = l4_error(l4_rcv_ep_bind_thread(signal->_sigcap, thread,
                                       (l4_umword_t)signal));
  return err < 0 ? err : L4_EOK;
}
/**
 * Resolve a signal name to its IRQ capability from the environment.
 *
 * \param      shmarea      Unused for now.
 * \param      signal_name  Name of the IRQ capability.
 * \param[out] signal       Signal handle to initialize.
 * \return L4_EOK on success, -L4_ENOENT if the name does not resolve.
 */
L4_CV long
l4shmc_get_signal(l4shmc_area_t *shmarea, char const *signal_name,
                  l4shmc_signal_t *signal)
{
  (void)shmarea; // maybe link signal to shmarea?

  l4_cap_idx_t cap = l4re_env_get_cap(signal_name);
  if (l4_is_invalid_cap(cap))
    return -L4_ENOENT;

  signal->_sigcap = cap;
  return L4_EOK;
}
/** Associate a signal with a chunk; subsequent chunk waits use it. */
L4_CV long
l4shmc_connect_chunk_signal(l4shmc_chunk_t *chunk, l4shmc_signal_t *signal)
{
  chunk->_sig = signal;  // task-local link only, nothing shared is touched
  return L4_EOK;
}
/** Unmask the signal's IRQ so the bound thread can receive it. */
L4_CV long
l4shmc_enable_signal(l4shmc_signal_t *s)
{
  long err = l4_error(l4_irq_unmask(s->_sigcap));
  return err;
}
/** Enable the signal previously connected to this chunk. */
L4_CV long
l4shmc_enable_chunk(l4shmc_chunk_t *p)
{
  l4shmc_signal_t *sig = p->_sig;
  return l4shmc_enable_signal(sig);
}
/**
 * Wait for any bound signal with the given timeout.
 *
 * \param      timeout  IPC timeout for the open wait.
 * \param[out] p        Receives the signal that fired.
 * \return L4_EOK on success, IPC error code otherwise.
 */
L4_CV long
l4shmc_wait_any_to(l4_timeout_t timeout, l4shmc_signal_t **p)
{
  l4_umword_t label;
  l4_msgtag_t tag = l4_ipc_wait(l4_utcb(), &label, timeout);
  long err = l4_ipc_error(tag, l4_utcb());
  if (err)
    return err;

  /* The label was set to the signal pointer at bind time. */
  *p = (l4shmc_signal_t *)label;
  return L4_EOK;
}
/**
 * Wait for one specific signal with the given timeout.
 *
 * \param s        Signal to wait for.
 * \param timeout  IPC timeout for the receive.
 * \return L4_EOK on success, IPC error code otherwise.
 */
L4_CV long
l4shmc_wait_signal_to(l4shmc_signal_t *s, l4_timeout_t timeout)
{
  l4_msgtag_t tag = l4_irq_receive(s->_sigcap, timeout);
  long err = l4_ipc_error(tag, l4_utcb());
  return err ? err : L4_EOK;
}
/** Wait on the signal connected to this chunk with the given timeout. */
L4_CV long
l4shmc_wait_chunk_to(l4shmc_chunk_t *p, l4_timeout_t to)
{
  l4shmc_signal_t *sig = p->_sig;
  return l4shmc_wait_signal_to(sig, to);
}