ringbuffer: support allocating memory on the ring buffer before sending
commit 687908b1e9
parent c5150d16b2
@@ -181,6 +181,50 @@ BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
                                  size_t xItemSize,
                                  BaseType_t *pxHigherPriorityTaskWoken);

/**
 * @brief   Acquire memory from the ring buffer to be written to by an external
 *          source and to be sent later.
 *
 * Attempts to acquire a buffer for an item to be sent into the ring buffer.
 * This function blocks until enough free space is available or until it
 * times out.
 *
 * The item, as well as any items acquired (``SendAcquire``) or sent (``Send``)
 * after it, cannot be read from the ring buffer until this item is actually
 * sent into the ring buffer.
 *
 * @param[in]   xRingbuffer     Ring buffer to allocate the memory from
 * @param[out]  ppvItem         Double pointer to the acquired memory (set to NULL if no memory was acquired)
 * @param[in]   xItemSize       Size of the item to acquire
 * @param[in]   xTicksToWait    Ticks to wait for room in the ring buffer
 *
 * @note Currently only applicable to no-split ring buffers. The actual amount
 *       of memory the item occupies is rounded up to the nearest 32-bit
 *       aligned size, so that all items are always stored 32-bit aligned.
 *
 * @return
 *      - pdTRUE if succeeded
 *      - pdFALSE on timeout or when the item is larger than the maximum permissible size of the buffer
 */
BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait);

/**
 * @brief   Send an item into the ring buffer that was previously acquired with
 *          ``xRingbufferSendAcquire``.
 *
 * @param[in]   xRingbuffer     Ring buffer to insert the item into
 * @param[in]   pvItem          Pointer to the item in acquired memory to insert
 *
 * @note Only applicable to no-split ring buffers. Only call this for items
 *       acquired via ``xRingbufferSendAcquire``.
 *
 * @return
 *      - pdTRUE if succeeded
 *      - pdFALSE if it failed for some reason
 */
BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem);

/**
 * @brief   Retrieve an item from the ring buffer
 *
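
A minimal usage sketch of the acquire/complete flow added by this commit (illustrative only, not part of the diff): a producer task reserves a slot, has an external source fill it in place, then marks it as sent, while a consumer uses the existing receive/return API. The names producer_task, consumer_task, fill_from_external_source, the task names and stack sizes, and the buffer and item sizes are assumptions chosen for the example.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/ringbuf.h"

//Hypothetical stand-in for an external source (e.g. a peripheral driver)
//that writes the payload directly into the acquired slot.
static void fill_from_external_source(uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        buf[i] = (uint8_t)i;
    }
}

static void producer_task(void *arg)
{
    RingbufHandle_t rb = (RingbufHandle_t)arg;
    for (;;) {
        void *slot = NULL;
        //Reserve space in the no-split ring buffer; the item stays invisible
        //to readers until xRingbufferSendComplete() is called.
        if (xRingbufferSendAcquire(rb, &slot, 64, pdMS_TO_TICKS(100)) == pdTRUE) {
            fill_from_external_source((uint8_t *)slot, 64);
            xRingbufferSendComplete(rb, slot);  //Item becomes readable now
        }
        vTaskDelay(pdMS_TO_TICKS(10));
    }
}

static void consumer_task(void *arg)
{
    RingbufHandle_t rb = (RingbufHandle_t)arg;
    for (;;) {
        size_t item_size;
        uint8_t *item = (uint8_t *)xRingbufferReceive(rb, &item_size, portMAX_DELAY);
        if (item != NULL) {
            //...process item_size bytes...
            vRingbufferReturnItem(rb, item);    //Free the slot for the producer
        }
    }
}

void app_main(void)
{
    //Acquire/complete is only supported for no-split buffers
    RingbufHandle_t rb = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
    configASSERT(rb != NULL);
    xTaskCreate(producer_task, "rb_prod", 2048, rb, 5, NULL);
    xTaskCreate(consumer_task, "rb_cons", 2048, rb, 5, NULL);
}
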
@@ -914,6 +914,81 @@ RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
}
#endif

BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(ppvItem != NULL || xItemSize == 0);
    //Currently only supported in no-split buffers
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    *ppvItem = NULL;
    if (xItemSize > pxRingbuffer->xMaxItemSize) {
        return pdFALSE;     //Data will never ever fit in the queue.
    }
    if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
        return pdTRUE;      //Sending 0 bytes to byte buffer has no effect
    }

    //Attempt to acquire space for the item
    BaseType_t xReturn = pdFALSE;
    BaseType_t xReturnSemaphore = pdFALSE;
    TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
    TickType_t xTicksRemaining = xTicksToWait;
    while (xTicksRemaining <= xTicksToWait) {   //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
        //Block until more free space becomes available or timeout
        if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
            xReturn = pdFALSE;
            break;
        }

        //Semaphore obtained, check if item can fit
        portENTER_CRITICAL(&pxRingbuffer->mux);
        if (pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
            //Item will fit, acquire the memory for it
            *ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
            xReturn = pdTRUE;
            //Check if the free semaphore should be returned to allow other tasks to send
            if (prvGetFreeSize(pxRingbuffer) > 0) {
                xReturnSemaphore = pdTRUE;
            }
            portEXIT_CRITICAL(&pxRingbuffer->mux);
            break;
        }
        //Item doesn't fit, adjust ticks and take the semaphore again
        if (xTicksToWait != portMAX_DELAY) {
            xTicksRemaining = xTicksEnd - xTaskGetTickCount();
        }
        portEXIT_CRITICAL(&pxRingbuffer->mux);
        /*
         * Gap between the critical section and re-acquiring of the semaphore. If
         * the semaphore is given now, priority inversion might occur (see docs)
         */
    }

    if (xReturnSemaphore == pdTRUE) {
        xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));  //Give back semaphore so other tasks can acquire
    }
    return xReturn;
}

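/*
 * Editorial note, not part of this commit: the wait loop above relies on
 * unsigned tick arithmetic for its timeout. TickType_t is unsigned, so once
 * xTaskGetTickCount() passes xTicksEnd, the subtraction wraps around to a very
 * large value, the `xTicksRemaining <= xTicksToWait` check fails, and the loop
 * ends. A rough sketch of the same pattern, with hypothetical values:
 *
 *     TickType_t xTicksToWait = 100;
 *     TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
 *     TickType_t xTicksRemaining = xTicksToWait;
 *     while (xTicksRemaining <= xTicksToWait) {
 *         //...wait up to xTicksRemaining ticks for the resource...
 *         xTicksRemaining = xTicksEnd - xTaskGetTickCount();  //wraps to a huge value after the deadline
 *     }
 */
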
BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);
    configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
    portEXIT_CRITICAL(&pxRingbuffer->mux);

    xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
    return pdTRUE;
}

BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
                           const void *pvItem,
                           size_t xItemSize,
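
The acquire/complete split also changes what a reader can observe: as the new header comment states, items acquired or sent after an acquired slot only become readable once that earlier slot has been completed. A small sketch of that ordering (illustrative only; the function name acquire_ordering_demo, the buffer size, and the item sizes are assumptions, not from this commit):

#include "freertos/FreeRTOS.h"
#include "freertos/ringbuf.h"

static void acquire_ordering_demo(void)
{
    RingbufHandle_t rb = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
    configASSERT(rb != NULL);

    //Two slots are acquired back to back from the no-split buffer.
    void *first = NULL;
    void *second = NULL;
    xRingbufferSendAcquire(rb, &first, 32, portMAX_DELAY);
    xRingbufferSendAcquire(rb, &second, 32, portMAX_DELAY);

    //Completing only the later slot does not make anything readable yet,
    //because the earlier acquired slot still gates the read side.
    xRingbufferSendComplete(rb, second);
    size_t len;
    void *item = xRingbufferReceive(rb, &len, 0);
    configASSERT(item == NULL);

    //Once the earlier slot is completed, items become readable in order.
    xRingbufferSendComplete(rb, first);
    item = xRingbufferReceive(rb, &len, 0);     //first item, 32 bytes
    configASSERT(item != NULL && len == 32);
    vRingbufferReturnItem(rb, item);

    item = xRingbufferReceive(rb, &len, 0);     //second item, 32 bytes
    configASSERT(item != NULL && len == 32);
    vRingbufferReturnItem(rb, item);

    vRingbufferDelete(rb);
}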