diff --git a/components/esp32/include/esp_wifi.h b/components/esp32/include/esp_wifi.h index 12378f3346..80ced5dc61 100644 --- a/components/esp32/include/esp_wifi.h +++ b/components/esp32/include/esp_wifi.h @@ -191,14 +191,14 @@ esp_err_t esp_wifi_disconnect(void); esp_err_t esp_wifi_clear_fast_connect(void); /** - * @brief Kick the all station or associated id equals to aid + * @brief deauthenticate all stations or associated id equals to aid * - * @param uint16_t aid : when aid is 0, kick all stations, otherwise kick station whose associated id is aid + * @param uint16_t aid : when aid is 0, deauthenticate all stations, otherwise deauthenticate station whose associated id is aid * * @return ESP_OK : succeed * @return others : fail */ -esp_err_t esp_wifi_kick_station(uint16_t aid); +esp_err_t esp_wifi_deauth_sta(uint16_t aid); /** * @brief Scan all available APs. @@ -235,19 +235,30 @@ esp_err_t esp_wifi_scan_stop(void); * @return ESP_OK : succeed * @return others : fail */ -esp_err_t esp_wifi_get_ap_num(uint16_t *number); +esp_err_t esp_wifi_scan_get_ap_num(uint16_t *number); /** * @brief Get AP list found in last scan * - * @param uint16_t *number : as input param, it stores max AP number ap_list can hold, as output param, it store + * @param uint16_t *number : as input param, it stores max AP number ap_records can hold, as output param, it store the actual AP number this API returns - * @param wifi_ap_list_t *ap_list : a list to hold the found APs + * @param wifi_ap_record_t *ap_records: wifi_ap_record_t array to hold the found APs * * @return ESP_OK : succeed * @return others : fail */ -esp_err_t esp_wifi_get_ap_list(uint16_t *number, wifi_ap_list_t *ap_list); +esp_err_t esp_wifi_scan_get_ap_records(uint16_t *number, wifi_ap_record_t *ap_records); + + +/** + * @brief Get information of AP associated with ESP32 station + * + * @param wifi_ap_record_t *ap_info: the wifi_ap_record_t to hold station assocated AP + * + * @return ESP_OK : succeed + * @return others : fail + */ +esp_err_t esp_wifi_sta_get_ap_info(wifi_ap_record_t *ap_info); /** * @brief Set current power save type @@ -471,14 +482,13 @@ esp_err_t esp_wifi_get_config(wifi_interface_t ifx, wifi_config_t *conf); * * @attention SSC only API * - * @param struct station_info **station : station list + * @param wifi_sta_list_t *sta: station list * * @return ESP_OK : succeed * @return others : fail */ -esp_err_t esp_wifi_get_station_list(struct station_info **station); +esp_err_t esp_wifi_ap_get_sta_list(wifi_sta_list_t *sta); -esp_err_t esp_wifi_free_station_list(void); /** * @brief Set the WiFi API configuration storage type diff --git a/components/esp32/include/esp_wifi_internal.h b/components/esp32/include/esp_wifi_internal.h new file mode 100644 index 0000000000..217d5f6d1f --- /dev/null +++ b/components/esp32/include/esp_wifi_internal.h @@ -0,0 +1,80 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
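A minimal usage sketch of the renamed esp_wifi.h calls above (an editorial illustration, not part of the patch). Only declarations visible in this diff are used (esp_wifi_scan_get_ap_num(), esp_wifi_scan_get_ap_records(), esp_wifi_ap_get_sta_list(), esp_wifi_deauth_sta(), wifi_ap_record_t, wifi_sta_list_t); esp_wifi_scan_start(), ESP_ERROR_CHECK() and printf() are assumed to be available from the rest of ESP-IDF, and only fields shown in this change (rssi, authmode, num, mac) are printed.

    #include <stdio.h>
    #include "esp_err.h"
    #include "esp_wifi.h"

    /* Sketch: scan for APs with the renamed scan APIs, then list and
       deauthenticate the stations associated with the soft-AP. */
    static void wifi_demo_renamed_apis(void)
    {
        /* Blocking scan with the default configuration; esp_wifi_scan_start()
           is assumed to behave as elsewhere in ESP-IDF. */
        ESP_ERROR_CHECK( esp_wifi_scan_start(NULL, true) );

        uint16_t ap_num = 0;
        ESP_ERROR_CHECK( esp_wifi_scan_get_ap_num(&ap_num) );          /* was esp_wifi_get_ap_num() */
        printf("%u APs found\n", ap_num);

        wifi_ap_record_t records[16];
        uint16_t count = sizeof(records) / sizeof(records[0]);          /* in: capacity, out: records returned */
        ESP_ERROR_CHECK( esp_wifi_scan_get_ap_records(&count, records) ); /* was esp_wifi_get_ap_list() */
        for (uint16_t i = 0; i < count; i++) {
            printf("AP %u: rssi=%d authmode=%d\n", i, records[i].rssi, records[i].authmode);
        }

        /* Soft-AP side: the new wifi_sta_list_t replaces the station_info
           linked list, so no esp_wifi_free_station_list() call is needed. */
        wifi_sta_list_t sta_list;
        ESP_ERROR_CHECK( esp_wifi_ap_get_sta_list(&sta_list) );         /* was esp_wifi_get_station_list() */
        for (int i = 0; i < sta_list.num; i++) {
            const uint8_t *m = sta_list.sta[i].mac;
            printf("sta %d: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   i, m[0], m[1], m[2], m[3], m[4], m[5]);
        }

        ESP_ERROR_CHECK( esp_wifi_deauth_sta(0) );                      /* aid 0: deauthenticate all stations */
    }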
+ +/* + * All the APIs declared here are internal only APIs, it can only be used by + * espressif internal modules, such as SSC, LWIP, TCPIP adapter etc, espressif + * customers are not recommended to use them. + * + * If someone really want to use specified APIs declared in here, please contact + * espressif AE/developer to make sure you know the limitations or risk of + * the API, otherwise you may get unexpected behavior!!! + * + */ + + +#ifndef __ESP_WIFI_INTERNAL_H__ +#define __ESP_WIFI_INTERNAL_H__ + +#include +#include +#include "freertos/FreeRTOS.h" +#include "freertos/queue.h" +#include "rom/queue.h" +#include "esp_err.h" +#include "esp_wifi_types.h" +#include "esp_event.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief get whether the wifi driver is allowed to transmit data or not + * + * @param none + * + * @return true : upper layer should stop to transmit data to wifi driver + * @return false : upper layer can transmit data to wifi driver + */ +bool esp_wifi_internal_tx_is_stop(void); + +/** + * @brief free the rx buffer which allocated by wifi driver + * + * @param void* buffer: rx buffer pointer + * + * @return nonoe + */ +void esp_wifi_internal_free_rx_buffer(void* buffer); + +/** + * @brief transmit the buffer via wifi driver + * + * @attention1 TODO should modify the return type from bool to int + * + * @param wifi_interface_t wifi_if : wifi interface id + * @param void *buffer : the buffer to be tansmit + * @param u16_t len : the length of buffer + * + * @return True : success transmit the buffer to wifi driver + * False : failed to transmit the buffer to wifi driver + */ +bool esp_wifi_internal_tx(wifi_interface_t wifi_if, void *buffer, u16_t len); + +#ifdef __cplusplus +} +#endif + +#endif /* __ESP_WIFI_H__ */ diff --git a/components/esp32/include/esp_wifi_types.h b/components/esp32/include/esp_wifi_types.h index b3474769e8..0ea719a65b 100644 --- a/components/esp32/include/esp_wifi_types.h +++ b/components/esp32/include/esp_wifi_types.h @@ -109,7 +109,7 @@ typedef struct { wifi_second_chan_t second; /**< second channel of AP */ int8_t rssi; /**< signal strength of AP */ wifi_auth_mode_t authmode; /**< authmode of AP */ -} wifi_ap_list_t; +} wifi_ap_record_t; typedef enum { WIFI_PS_NONE, /**< No power save */ @@ -150,10 +150,15 @@ typedef union { wifi_sta_config_t sta; /**< configuration of STA */ } wifi_config_t; -struct station_info { - STAILQ_ENTRY(station_info) next; - uint8_t bssid[6]; -}; +typedef struct { + uint8_t mac[6]; /**< mac address of sta that associated with ESP32 soft-AP */ +}wifi_sta_info_t; + +#define ESP_WIFI_MAX_CONN_NUM (8+2) /**< max number of sta the eSP32 soft-AP can connect */ +typedef struct { + wifi_sta_info_t sta[ESP_WIFI_MAX_CONN_NUM]; /**< station list */ + uint8_t num; /**< number of station that associated with ESP32 soft-AP */ +}wifi_sta_list_t; typedef enum { WIFI_STORAGE_FLASH, /**< all configuration will strore in both memory and flash */ diff --git a/components/esp32/lib b/components/esp32/lib index b9561aa5db..12b3435fc0 160000 --- a/components/esp32/lib +++ b/components/esp32/lib @@ -1 +1 @@ -Subproject commit b9561aa5db15443d11f8bb5aefdfc5da540d8f2d +Subproject commit 12b3435fc0cd04efc249d52d71efb1cdecda50f8 diff --git a/components/freertos/Kconfig b/components/freertos/Kconfig index 881c7616f3..e821d7445b 100644 --- a/components/freertos/Kconfig +++ b/components/freertos/Kconfig @@ -172,6 +172,14 @@ menuconfig ENABLE_MEMORY_DEBUG help Enable this option to show malloc heap block and memory crash detect +config 
FREERTOS_ISR_STACKSIZE + int "ISR stack size" + range 1536 32768 + default 1536 + help + The interrupt handlers have their own stack. The size of the stack can be defined here. + Each processor has its own stack, so the total size occupied will be twice this. + menuconfig FREERTOS_DEBUG_INTERNALS bool "Debug FreeRTOS internals" default n @@ -197,13 +205,6 @@ config FREERTOS_PORTMUX_DEBUG_RECURSIVE If enabled, additional debug information will be printed for recursive portMUX usage. -config FREERTOS_ISR_STACKSIZE - int "ISR stack size" - range 1536 32768 - default 1536 - help - The interrupt handlers have their own stack. The size of the stack can be defined here. - Each processor has its own stack, so the total size occupied will be twice this. endif # FREERTOS_DEBUG_INTERNALS diff --git a/components/freertos/include/freertos/FreeRTOS.h b/components/freertos/include/freertos/FreeRTOS.h index 04b39b65e9..f6c9aa497d 100644 --- a/components/freertos/include/freertos/FreeRTOS.h +++ b/components/freertos/include/freertos/FreeRTOS.h @@ -74,6 +74,7 @@ * Include the generic headers required for the FreeRTOS port being used. */ #include +#include "sys/reent.h" /* * If stdint.h cannot be located then: @@ -739,6 +740,20 @@ extern "C" { #define portTICK_TYPE_IS_ATOMIC 0 #endif +#ifndef configSUPPORT_STATIC_ALLOCATION + /* Defaults to 0 for backward compatibility. */ + #define configSUPPORT_STATIC_ALLOCATION 0 +#endif + +#ifndef configSUPPORT_DYNAMIC_ALLOCATION + /* Defaults to 1 for backward compatibility. */ + #define configSUPPORT_DYNAMIC_ALLOCATION 1 +#endif + +#if( ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 0 ) ) + #error configSUPPORT_STATIC_ALLOCATION and configSUPPORT_DYNAMIC_ALLOCATION cannot both be 0, but can both be 1. +#endif + #if( portTICK_TYPE_IS_ATOMIC == 0 ) /* Either variables of tick type cannot be read atomically, or portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when @@ -791,6 +806,153 @@ V8 if desired. */ #define configESP32_PER_TASK_DATA 1 #endif +/* + * In line with software engineering best practice, FreeRTOS implements a strict + * data hiding policy, so the real structures used by FreeRTOS to maintain the + * state of tasks, queues, semaphores, etc. are not accessible to the application + * code. However, if the application writer wants to statically allocate such + * an object then the size of the object needs to be know. Dummy structures + * that are guaranteed to have the same size and alignment requirements of the + * real objects are used for this purpose. The dummy list and list item + * structures below are used for inclusion in such a dummy structure. + */ +struct xSTATIC_LIST_ITEM +{ + TickType_t xDummy1; + void *pvDummy2[ 4 ]; +}; +typedef struct xSTATIC_LIST_ITEM StaticListItem_t; + +/* See the comments above the struct xSTATIC_LIST_ITEM definition. */ +struct xSTATIC_MINI_LIST_ITEM +{ + TickType_t xDummy1; + void *pvDummy2[ 2 ]; +}; +typedef struct xSTATIC_MINI_LIST_ITEM StaticMiniListItem_t; + +/* See the comments above the struct xSTATIC_LIST_ITEM definition. */ +typedef struct xSTATIC_LIST +{ + UBaseType_t uxDummy1; + void *pvDummy2; + StaticMiniListItem_t xDummy3; +} StaticList_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the Task structure used internally by + * FreeRTOS is not accessible to application code. 
However, if the application + * writer wants to statically allocate the memory required to create a task then + * the size of the task object needs to be know. The StaticTask_t structure + * below is provided for this purpose. Its sizes and alignment requirements are + * guaranteed to match those of the genuine structure, no matter which + * architecture is being used, and no matter how the values in FreeRTOSConfig.h + * are set. Its contents are somewhat obfuscated in the hope users will + * recognise that it would be unwise to make direct use of the structure members. + */ +typedef struct xSTATIC_TCB +{ + void *pxDummy1; + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xDummy2; + #endif + StaticListItem_t xDummy3[ 2 ]; + UBaseType_t uxDummy5; + void *pxDummy6; + uint8_t ucDummy7[ configMAX_TASK_NAME_LEN ]; + UBaseType_t uxDummyCoreId; + #if ( portSTACK_GROWTH > 0 ) + void *pxDummy8; + #endif + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxDummy9; + uint32_t OldInterruptState; + #endif + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy10[ 2 ]; + #endif + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxDummy12[ 2 ]; + #endif + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + void *pxDummy14; + #endif + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void *pvDummy15[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) + void *pvDummyLocalStorageCallBack[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + #endif + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulDummy16; + #endif + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + struct _reent xDummy17; + #endif + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + uint32_t ulDummy18; + uint32_t ucDummy19; + #endif + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t uxDummy20; + #endif + +} StaticTask_t; + +/* + * In line with software engineering best practice, especially when supplying a + * library that is likely to change in future versions, FreeRTOS implements a + * strict data hiding policy. This means the Queue structure used internally by + * FreeRTOS is not accessible to application code. However, if the application + * writer wants to statically allocate the memory required to create a queue + * then the size of the queue object needs to be know. The StaticQueue_t + * structure below is provided for this purpose. Its sizes and alignment + * requirements are guaranteed to match those of the genuine structure, no + * matter which architecture is being used, and no matter how the values in + * FreeRTOSConfig.h are set. Its contents are somewhat obfuscated in the hope + * users will recognise that it would be unwise to make direct use of the + * structure members. 
+ */ +typedef struct xSTATIC_QUEUE +{ + void *pvDummy1[ 3 ]; + + union + { + void *pvDummy2; + UBaseType_t uxDummy2; + } u; + + StaticList_t xDummy3[ 2 ]; + UBaseType_t uxDummy4[ 3 ]; + BaseType_t ucDummy5[ 2 ]; + + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucDummy6; + #endif + + #if ( configUSE_QUEUE_SETS == 1 ) + void *pvDummy7; + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxDummy8; + uint8_t ucDummy9; + #endif + + struct { + volatile uint32_t mux; + #ifdef CONFIG_FREERTOS_PORTMUX_DEBUG + const char *lastLockedFn; + int lastLockedLine; + #endif + } mux; + +} StaticQueue_t; +typedef StaticQueue_t StaticSemaphore_t; + #ifdef __cplusplus } #endif diff --git a/components/freertos/include/freertos/FreeRTOSConfig.h b/components/freertos/include/freertos/FreeRTOSConfig.h index 158b40668d..393fc849e0 100644 --- a/components/freertos/include/freertos/FreeRTOSConfig.h +++ b/components/freertos/include/freertos/FreeRTOSConfig.h @@ -251,6 +251,8 @@ #define configUSE_NEWLIB_REENTRANT 1 +#define configSUPPORT_DYNAMIC_ALLOCATION 1 + /* Test FreeRTOS timers (with timer task) and more. */ /* Some files don't compile if this flag is disabled */ #define configUSE_TIMERS 1 diff --git a/components/freertos/include/freertos/portable.h b/components/freertos/include/freertos/portable.h index 9bf4dad99d..9ed378a8ab 100644 --- a/components/freertos/include/freertos/portable.h +++ b/components/freertos/include/freertos/portable.h @@ -200,7 +200,7 @@ void vPortYieldOtherCore( BaseType_t coreid) PRIVILEGED_FUNCTION; #endif /* Multi-core: get current core ID */ -inline uint32_t xPortGetCoreID() { +static inline uint32_t xPortGetCoreID() { int id; asm volatile( "rsr.prid %0\n" diff --git a/components/freertos/include/freertos/portmacro.h b/components/freertos/include/freertos/portmacro.h index 5e2386d721..f20a4a1e26 100644 --- a/components/freertos/include/freertos/portmacro.h +++ b/components/freertos/include/freertos/portmacro.h @@ -234,7 +234,7 @@ static inline unsigned portENTER_CRITICAL_NESTED() { unsigned state = XTOS_SET_I * *bitwise inverse* of the old mem if the mem wasn't written. This doesn't seem to happen on the * ESP32, though. (Would show up directly if it did because the magic wouldn't match.) */ -inline void uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set) { +static inline void uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set) { __asm__ __volatile__( "WSR %2,SCOMPARE1 \n" "ISYNC \n" diff --git a/components/freertos/include/freertos/queue.h b/components/freertos/include/freertos/queue.h index 2095c59b0c..876f1a1b30 100644 --- a/components/freertos/include/freertos/queue.h +++ b/components/freertos/include/freertos/queue.h @@ -170,7 +170,95 @@ typedef void * QueueSetMemberHandle_t; * \defgroup xQueueCreate xQueueCreate * \ingroup QueueManagement */ -#define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( uxQueueLength, uxItemSize, queueQUEUE_TYPE_BASE ) +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xQueueCreate( uxQueueLength, uxItemSize ) xQueueGenericCreate( ( uxQueueLength ), ( uxItemSize ), ( queueQUEUE_TYPE_BASE ) ) +#endif + +/** + * queue. h + *
+ QueueHandle_t xQueueCreateStatic(
+							  UBaseType_t uxQueueLength,
+							  UBaseType_t uxItemSize,
+							  uint8_t *pucQueueStorageBuffer,
+							  StaticQueue_t *pxQueueBuffer
+						  );
+ * 
+ * + * Creates a new queue instance, and returns a handle by which the new queue + * can be referenced. + * + * Internally, within the FreeRTOS implementation, queues use two blocks of + * memory. The first block is used to hold the queue's data structures. The + * second block is used to hold items placed into the queue. If a queue is + * created using xQueueCreate() then both blocks of memory are automatically + * dynamically allocated inside the xQueueCreate() function. (see + * http://www.freertos.org/a00111.html). If a queue is created using + * xQueueCreateStatic() then the application writer must provide the memory that + * will get used by the queue. xQueueCreateStatic() therefore allows a queue to + * be created without using any dynamic memory allocation. + * + * http://www.FreeRTOS.org/Embedded-RTOS-Queues.html + * + * @param uxQueueLength The maximum number of items that the queue can contain. + * + * @param uxItemSize The number of bytes each item in the queue will require. + * Items are queued by copy, not by reference, so this is the number of bytes + * that will be copied for each posted item. Each item on the queue must be + * the same size. + * + * @param pucQueueStorageBuffer If uxItemSize is not zero then + * pucQueueStorageBuffer must point to a uint8_t array that is at least large + * enough to hold the maximum number of items that can be in the queue at any + * one time - which is ( uxQueueLength * uxItemsSize ) bytes. If uxItemSize is + * zero then pucQueueStorageBuffer can be NULL. + * + * @param pxQueueBuffer Must point to a variable of type StaticQueue_t, which + * will be used to hold the queue's data structure. + * + * @return If the queue is created then a handle to the created queue is + * returned. If pxQueueBuffer is NULL then NULL is returned. + * + * Example usage: +
+ struct AMessage
+ {
+	char ucMessageID;
+	char ucData[ 20 ];
+ };
+
+ #define QUEUE_LENGTH 10
+ #define ITEM_SIZE sizeof( uint32_t )
+
+ // xQueueBuffer will hold the queue structure.
+ StaticQueue_t xQueueBuffer;
+
+ // ucQueueStorage will hold the items posted to the queue.  Must be at least
+ // [(queue length) * ( queue item size)] bytes long.
+ uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
+
+ void vATask( void *pvParameters )
+ {
+ QueueHandle_t xQueue1;
+
+	// Create a queue capable of containing 10 uint32_t values.
+	xQueue1 = xQueueCreateStatic( QUEUE_LENGTH, // The number of items the queue can hold.
+							ITEM_SIZE,	  // The size of each item in the queue
+							&( ucQueueStorage[ 0 ] ), // The buffer that will hold the items in the queue.
+							&xQueueBuffer ); // The buffer that will hold the queue structure.
+
+	// The queue is guaranteed to be created successfully as no dynamic memory
+	// allocation is used.  Therefore xQueue1 is now a handle to a valid queue.
+
+	// ... Rest of task code.
+ }
+ 
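A short editorial follow-on (not in the original header): the statically created queue is then used with the normal send/receive calls, for example from the same task, assuming the standard xQueueSend()/xQueueReceive() semantics:

    uint32_t ulValueToSend = 123, ulReceived;

    // Items are queued by copy, so the local variable can be reused afterwards.
    xQueueSend( xQueue1, &ulValueToSend, ( TickType_t ) 0 );

    if( xQueueReceive( xQueue1, &ulReceived, ( TickType_t ) 10 ) == pdTRUE )
    {
        // ulReceived now holds 123.
    }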
+ * \defgroup xQueueCreateStatic xQueueCreateStatic + * \ingroup QueueManagement + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ /** * queue. h @@ -1479,7 +1567,9 @@ BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTi * these functions directly. */ QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION; +QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) PRIVILEGED_FUNCTION; void* xQueueGetMutexHolder( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; /* @@ -1538,10 +1628,22 @@ BaseType_t xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) PRIVILEGED_FUNCTION #endif /* - * Generic version of the queue creation function, which is in turn called by - * any queue, semaphore or mutex creation function or macro. + * Generic version of the function used to creaet a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. */ -QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif + +/* + * Generic version of the function used to creaet a queue using dynamic memory + * allocation. This is called by other functions and macros that create other + * RTOS objects that use the queue structure as their base. + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +#endif /* * Queue sets provide a mechanism to allow a task to block (pend) on a read diff --git a/components/freertos/include/freertos/semphr.h b/components/freertos/include/freertos/semphr.h index 5866ab1ec5..6343d0190a 100644 --- a/components/freertos/include/freertos/semphr.h +++ b/components/freertos/include/freertos/semphr.h @@ -128,19 +128,37 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary * \ingroup Semaphores */ -#define vSemaphoreCreateBinary( xSemaphore ) \ - { \ - ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ - if( ( xSemaphore ) != NULL ) \ - { \ - ( void ) xSemaphoreGive( ( xSemaphore ) ); \ - } \ - } +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define vSemaphoreCreateBinary( xSemaphore ) \ + { \ + ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ + if( ( xSemaphore ) != NULL ) \ + { \ + ( void ) xSemaphoreGive( ( xSemaphore ) ); \ + } \ + } +#endif /** * semphr. h *
SemaphoreHandle_t xSemaphoreCreateBinary( void )
* + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * * The old vSemaphoreCreateBinary() macro is now deprecated in favour of this * xSemaphoreCreateBinary() function. Note that binary semaphores created using * the vSemaphoreCreateBinary() macro are created in a state such that the @@ -182,7 +200,68 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary * \ingroup Semaphores */ -#define xSemaphoreCreateBinary() xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinary() xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer )
+ * + * Creates a new binary semaphore instance, and returns a handle by which the + * new semaphore can be referenced. + * + * NOTE: In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a binary semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, binary semaphores use a block + * of memory, in which the semaphore structure is stored. If a binary semaphore + * is created using xSemaphoreCreateBinary() then the required memory is + * automatically dynamically allocated inside the xSemaphoreCreateBinary() + * function. (see http://www.freertos.org/a00111.html). If a binary semaphore + * is created using xSemaphoreCreateBinaryStatic() then the application writer + * must provide the memory. xSemaphoreCreateBinaryStatic() therefore allows a + * binary semaphore to be created without using any dynamic memory allocation. + * + * This type of semaphore can be used for pure synchronisation between tasks or + * between an interrupt and a task. The semaphore need not be given back once + * obtained, so one task/interrupt can continuously 'give' the semaphore while + * another continuously 'takes' the semaphore. For this reason this type of + * semaphore does not use a priority inheritance mechanism. For an alternative + * that does use priority inheritance see xSemaphoreCreateMutex(). + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the semaphore is created then a handle to the created semaphore is + * returned. If pxSemaphoreBuffer is NULL then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore = NULL;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // Semaphore cannot be used before a call to xSemaphoreCreateBinaryStatic().
+    // The semaphore's data structures will be placed in the xSemaphoreBuffer
+    // variable, the address of which is passed into the function.  The
+    // function's parameter is not NULL, so the function will not attempt any
+    // dynamic memory allocation, and therefore the function will not
+    // return NULL.
+    xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreBuffer );
+
+    // Rest of task code goes here.
+ }
+ 
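A further sketch (an editorial addition, not in the header itself) of the "interrupt gives, task takes" pattern described above, reusing the statically created xSemaphore from the example; xSemaphoreGiveFromISR() and xSemaphoreTake() are the standard FreeRTOS calls and are assumed unchanged by this patch:

    // ISR side: signal the waiting task.  Requesting the context switch is
    // port-specific (portYIELD_FROM_ISR / portEND_SWITCHING_ISR) and omitted here.
    void vAnInterruptHandler( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
        ( void ) xHigherPriorityTaskWoken;
    }

    // Task side: block until the ISR gives the semaphore.
    void vWaitingTask( void *pvParameters )
    {
        for( ;; )
        {
            if( xSemaphoreTake( xSemaphore, portMAX_DELAY ) == pdTRUE )
            {
                // Handle the event signalled from the interrupt.
            }
        }
    }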
+ * \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateBinaryStatic( pxStaticSemaphore ) xQueueGenericCreateStatic( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticSemaphore, queueQUEUE_TYPE_BINARY_SEMAPHORE ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ /** * semphr. h @@ -652,9 +731,18 @@ typedef QueueHandle_t SemaphoreHandle_t; * Macro that implements a mutex semaphore by using the existing queue * mechanism. * - * Mutexes created using this macro can be accessed using the xSemaphoreTake() + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and - * xSemaphoreGiveRecursive() macros should not be used. + * xSemaphoreGiveRecursive() macros must not be used. * * This type of semaphore uses a priority inheritance mechanism so a task * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the @@ -667,8 +755,9 @@ typedef QueueHandle_t SemaphoreHandle_t; * semaphore and another always 'takes' the semaphore) and from within interrupt * service routines. * - * @return xSemaphore Handle to the created mutex semaphore. Should be of type - * SemaphoreHandle_t. + * @return If the mutex was successfully created then a handle to the created + * semaphore is returned. If there was not enough heap to allocate the mutex + * data structures then NULL is returned. * * Example usage:
@@ -690,19 +779,93 @@ typedef QueueHandle_t SemaphoreHandle_t;
  * \defgroup vSemaphoreCreateMutex vSemaphoreCreateMutex
  * \ingroup Semaphores
  */
-#define xSemaphoreCreateMutex() xQueueCreateMutex( queueQUEUE_TYPE_MUTEX )
+#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
+	#define xSemaphoreCreateMutex() xQueueCreateMutex( queueQUEUE_TYPE_MUTEX )
+#endif
+
+/**
+ * semphr. h
+ * 
SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ * + * Creates a new mutex type semaphore instance, and returns a handle by which + * the new mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, mutex semaphores use a block + * of memory, in which the mutex structure is stored. If a mutex is created + * using xSemaphoreCreateMutex() then the required memory is automatically + * dynamically allocated inside the xSemaphoreCreateMutex() function. (see + * http://www.freertos.org/a00111.html). If a mutex is created using + * xSemaphoreCreateMutexStatic() then the application writer must provided the + * memory. xSemaphoreCreateMutexStatic() therefore allows a mutex to be created + * without using any dynamic memory allocation. + * + * Mutexes created using this function can be accessed using the xSemaphoreTake() + * and xSemaphoreGive() macros. The xSemaphoreTakeRecursive() and + * xSemaphoreGiveRecursive() macros must not be used. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will be used to hold the mutex's data structure, removing the need for + * the memory to be allocated dynamically. + * + * @return If the mutex was successfully created then a handle to the created + * mutex is returned. If pxMutexBuffer was NULL then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // A mutex cannot be used before it has been created.  xMutexBuffer is
+    // into xSemaphoreCreateMutexStatic() so no dynamic memory allocation is
+    // attempted.
+    xSemaphore = xSemaphoreCreateMutexStatic( &xMutexBuffer );
+
+    // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+    // so there is no need to check it.
+ }
+ 
+ * \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic + * \ingroup Semaphores + */ + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateMutexStatic( pxMutexBuffer ) xQueueCreateMutexStatic( queueQUEUE_TYPE_MUTEX, ( pxMutexBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ /** * semphr. h *
SemaphoreHandle_t xSemaphoreCreateRecursiveMutex( void )
* - * Macro that implements a recursive mutex by using the existing queue - * mechanism. + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexs use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. * * Mutexes created using this macro can be accessed using the * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The - * xSemaphoreTake() and xSemaphoreGive() macros should not be used. + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. * * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex * doesn't become available again until the owner has called @@ -745,14 +908,104 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup vSemaphoreCreateMutex vSemaphoreCreateMutex * \ingroup Semaphores */ -#define xSemaphoreCreateRecursiveMutex() xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX ) +#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) + #define xSemaphoreCreateRecursiveMutex() xQueueCreateMutex( queueQUEUE_TYPE_RECURSIVE_MUTEX ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateRecursiveMutexStatic( StaticSemaphore_t *pxMutexBuffer )
+ * + * Creates a new recursive mutex type semaphore instance, and returns a handle + * by which the new recursive mutex can be referenced. + * + * Internally, within the FreeRTOS implementation, recursive mutexs use a block + * of memory, in which the mutex structure is stored. If a recursive mutex is + * created using xSemaphoreCreateRecursiveMutex() then the required memory is + * automatically dynamically allocated inside the + * xSemaphoreCreateRecursiveMutex() function. (see + * http://www.freertos.org/a00111.html). If a recursive mutex is created using + * xSemaphoreCreateRecursiveMutexStatic() then the application writer must + * provide the memory that will get used by the mutex. + * xSemaphoreCreateRecursiveMutexStatic() therefore allows a recursive mutex to + * be created without using any dynamic memory allocation. + * + * Mutexes created using this macro can be accessed using the + * xSemaphoreTakeRecursive() and xSemaphoreGiveRecursive() macros. The + * xSemaphoreTake() and xSemaphoreGive() macros must not be used. + * + * A mutex used recursively can be 'taken' repeatedly by the owner. The mutex + * doesn't become available again until the owner has called + * xSemaphoreGiveRecursive() for each successful 'take' request. For example, + * if a task successfully 'takes' the same mutex 5 times then the mutex will + * not be available to any other task until it has also 'given' the mutex back + * exactly five times. + * + * This type of semaphore uses a priority inheritance mechanism so a task + * 'taking' a semaphore MUST ALWAYS 'give' the semaphore back once the + * semaphore it is no longer required. + * + * Mutex type semaphores cannot be used from within interrupt service routines. + * + * See xSemaphoreCreateBinary() for an alternative implementation that can be + * used for pure synchronisation (where one task or interrupt always 'gives' the + * semaphore and another always 'takes' the semaphore) and from within interrupt + * service routines. + * + * @param pxMutexBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the recursive mutex's data structure, + * removing the need for the memory to be allocated dynamically. + * + * @return If the recursive mutex was successfully created then a handle to the + * created recursive mutex is returned. If pxMutexBuffer was NULL then NULL is + * returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xMutexBuffer;
+
+ void vATask( void * pvParameters )
+ {
+    // A recursive semaphore cannot be used before it is created.  Here a
+    // recursive mutex is created using xSemaphoreCreateRecursiveMutexStatic().
+    // The address of xMutexBuffer is passed into the function, and will hold
+    // the mutex's data structure - so no dynamic memory allocation will be
+    // attempted.
+    xSemaphore = xSemaphoreCreateRecursiveMutexStatic( &xMutexBuffer );
+
+    // As no dynamic memory allocation was performed, xSemaphore cannot be NULL,
+    // so there is no need to check it.
+ }
+ 
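A brief editorial sketch (not in the original header) of the recursive take/give behaviour described above, using the xSemaphore created in the example; the nesting rules are those of the standard xSemaphoreTakeRecursive()/xSemaphoreGiveRecursive() macros:

    void vAFunction( void )
    {
        // Take the recursive mutex twice; it stays owned by this task until it
        // has been given back the same number of times.
        xSemaphoreTakeRecursive( xSemaphore, portMAX_DELAY );
        xSemaphoreTakeRecursive( xSemaphore, portMAX_DELAY );

        xSemaphoreGiveRecursive( xSemaphore );   // still held after this give
        xSemaphoreGiveRecursive( xSemaphore );   // now available to other tasks
    }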
+ * \defgroup xSemaphoreCreateRecursiveMutexStatic xSemaphoreCreateRecursiveMutexStatic + * \ingroup Semaphores + */ +#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_RECURSIVE_MUTEXES == 1 ) ) + #define xSemaphoreCreateRecursiveMutexStatic( pxStaticSemaphore ) xQueueCreateMutexStatic( queueQUEUE_TYPE_RECURSIVE_MUTEX, pxStaticSemaphore ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ /** * semphr. h *
SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount )
* - * Macro that creates a counting semaphore by using the existing - * queue mechanism. + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer can + * instead optionally provide the memory that will get used by the counting + * semaphore. xSemaphoreCreateCountingStatic() therefore allows a counting + * semaphore to be created without using any dynamic memory allocation. * * Counting semaphores are typically used for two things: * @@ -808,7 +1061,94 @@ typedef QueueHandle_t SemaphoreHandle_t; * \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting * \ingroup Semaphores */ -#define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount ) xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) ) +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + #define xSemaphoreCreateCounting( uxMaxCount, uxInitialCount ) xQueueCreateCountingSemaphore( ( uxMaxCount ), ( uxInitialCount ) ) +#endif + +/** + * semphr. h + *
SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer )
+ * + * Creates a new counting semaphore instance, and returns a handle by which the + * new counting semaphore can be referenced. + * + * In many usage scenarios it is faster and more memory efficient to use a + * direct to task notification in place of a counting semaphore! + * http://www.freertos.org/RTOS-task-notifications.html + * + * Internally, within the FreeRTOS implementation, counting semaphores use a + * block of memory, in which the counting semaphore structure is stored. If a + * counting semaphore is created using xSemaphoreCreateCounting() then the + * required memory is automatically dynamically allocated inside the + * xSemaphoreCreateCounting() function. (see + * http://www.freertos.org/a00111.html). If a counting semaphore is created + * using xSemaphoreCreateCountingStatic() then the application writer must + * provide the memory. xSemaphoreCreateCountingStatic() therefore allows a + * counting semaphore to be created without using any dynamic memory allocation. + * + * Counting semaphores are typically used for two things: + * + * 1) Counting events. + * + * In this usage scenario an event handler will 'give' a semaphore each time + * an event occurs (incrementing the semaphore count value), and a handler + * task will 'take' a semaphore each time it processes an event + * (decrementing the semaphore count value). The count value is therefore + * the difference between the number of events that have occurred and the + * number that have been processed. In this case it is desirable for the + * initial count value to be zero. + * + * 2) Resource management. + * + * In this usage scenario the count value indicates the number of resources + * available. To obtain control of a resource a task must first obtain a + * semaphore - decrementing the semaphore count value. When the count value + * reaches zero there are no free resources. When a task finishes with the + * resource it 'gives' the semaphore back - incrementing the semaphore count + * value. In this case it is desirable for the initial count value to be + * equal to the maximum count value, indicating that all resources are free. + * + * @param uxMaxCount The maximum count value that can be reached. When the + * semaphore reaches this value it can no longer be 'given'. + * + * @param uxInitialCount The count value assigned to the semaphore when it is + * created. + * + * @param pxSemaphoreBuffer Must point to a variable of type StaticSemaphore_t, + * which will then be used to hold the semaphore's data structure, removing the + * need for the memory to be allocated dynamically. + * + * @return If the counting semaphore was successfully created then a handle to + * the created counting semaphore is returned. If pxSemaphoreBuffer was NULL + * then NULL is returned. + * + * Example usage: +
+ SemaphoreHandle_t xSemaphore;
+ StaticSemaphore_t xSemaphoreBuffer;
+
+ void vATask( void * pvParameters )
+ {
+ SemaphoreHandle_t xSemaphore = NULL;
+
+    // Counting semaphore cannot be used before they have been created.  Create
+    // a counting semaphore using xSemaphoreCreateCountingStatic().  The max
+    // value to which the semaphore can count is 10, and the initial value
+    // assigned to the count will be 0.  The address of xSemaphoreBuffer is
+    // passed in and will be used to hold the semaphore structure, so no dynamic
+    // memory allocation will be used.
+    xSemaphore = xSemaphoreCreateCountingStatic( 10, 0, &xSemaphoreBuffer );
+
+    // No memory allocation was attempted so xSemaphore cannot be NULL, so there
+    // is no need to check its value.
+ }
+ 
+ * \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic + * \ingroup Semaphores + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreCreateCountingStatic( uxMaxCount, uxInitialCount, pxSemaphoreBuffer ) xQueueCreateCountingSemaphoreStatic( ( uxMaxCount ), ( uxInitialCount ), ( pxSemaphoreBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ /** * semphr. h diff --git a/components/freertos/include/freertos/task.h b/components/freertos/include/freertos/task.h index 9f3f3d659e..71749273c3 100644 --- a/components/freertos/include/freertos/task.h +++ b/components/freertos/include/freertos/task.h @@ -177,6 +177,7 @@ typedef struct xTASK_STATUS UBaseType_t uxCurrentPriority; /* The priority at which the task was running (may be inherited) when the structure was populated. */ UBaseType_t uxBasePriority; /* The priority to which the task will return if the task's current priority has been inherited to avoid unbounded priority inversion when obtaining a mutex. Only valid if configUSE_MUTEXES is defined as 1 in FreeRTOSConfig.h. */ uint32_t ulRunTimeCounter; /* The total run time allocated to the task so far, as defined by the run time stats clock. See http://www.freertos.org/rtos-run-time-stats.html. Only valid when configGENERATE_RUN_TIME_STATS is defined as 1 in FreeRTOSConfig.h. */ + StackType_t *pxStackBase; /* Points to the lowest address of the task's stack area. */ uint16_t usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ } TaskStatus_t; @@ -281,8 +282,19 @@ is used in assert() statements. */ );
* * Create a new task and add it to the list of tasks that are ready to run. - * On multicore environments, this will give no specific affinity to the task. - * Use xTaskCreatePinnedToCore to give affinity. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * See xTaskCreateStatic() for a version that does not use any dynamic memory + * allocation. * * xTaskCreate() can only be used to create a task that has unrestricted * access to the entire microcontroller memory map. Systems that include MPU @@ -350,8 +362,139 @@ is used in assert() statements. */ * \defgroup xTaskCreate xTaskCreate * \ingroup Tasks */ -#define xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ) xTaskGenericCreate( ( pvTaskCode ), ( pcName ), ( usStackDepth ), ( pvParameters ), ( uxPriority ), ( pxCreatedTask ), ( NULL ), ( NULL ), tskNO_AFFINITY ) -#define xTaskCreatePinnedToCore( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, xCoreID ) xTaskGenericCreate( ( pvTaskCode ), ( pcName ), ( usStackDepth ), ( pvParameters ), ( uxPriority ), ( pxCreatedTask ), ( NULL ), ( NULL ), xCoreID ) +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint16_t usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + const BaseType_t xCoreID); + +#define xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask ) xTaskCreatePinnedToCore( ( pvTaskCode ), ( pcName ), ( usStackDepth ), ( pvParameters ), ( uxPriority ), ( pxCreatedTask ), tskNO_AFFINITY ) +#endif + +/** + * task. h + *
+ TaskHandle_t xTaskCreateStatic( TaskFunction_t pvTaskCode,
+								 const char * const pcName,
+								 uint32_t ulStackDepth,
+								 void *pvParameters,
+								 UBaseType_t uxPriority,
+								 StackType_t *pxStackBuffer,
+								 StaticTask_t *pxTaskBuffer,
+                                 const BaseType_t xCoreID );
+ + * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * http://www.freertos.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * @param pvTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. The maximum length of the string is defined by + * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. + * + * @param ulStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task will run. + * + * @param pxStackBuffer Must point to a StackType_t array that has at least + * ulStackDepth indexes - the array will then be used as the task's stack, + * removing the need for the stack to be allocated dynamically. + * + * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will + * then be used to hold the task's data structures, removing the need for the + * memory to be allocated dynamically. + * + * @return If neither pxStackBuffer or pxTaskBuffer are NULL, then the task will + * be created and pdPASS is returned. If either pxStackBuffer or pxTaskBuffer + * are NULL then the task will not be created and + * errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY is returned. + * + * Example usage: +
+
+    // Dimensions the buffer that the task being created will use as its stack.
+    // NOTE:  This is the number of words the stack will hold, not the number of
+    // bytes.  For example, if each stack item is 32-bits, and this is set to 100,
+    // then 400 bytes (100 * 32-bits) will be allocated.
+    #define STACK_SIZE 200
+
+    // Structure that will hold the TCB of the task being created.
+    StaticTask_t xTaskBuffer;
+
+    // Buffer that the task being created will use as its stack.  Note this is
+    // an array of StackType_t variables.  The size of StackType_t is dependent on
+    // the RTOS port.
+    StackType_t xStack[ STACK_SIZE ];
+
+    // Function that implements the task being created.
+    void vTaskCode( void * pvParameters )
+    {
+        // The parameter value is expected to be 1 as 1 is passed in the
+        // pvParameters value in the call to xTaskCreateStatic().
+        configASSERT( ( uint32_t ) pvParameters == 1UL );
+
+        for( ;; )
+        {
+            // Task code goes here.
+        }
+    }
+
+    // Function that creates a task.
+    void vOtherFunction( void )
+    {
+        TaskHandle_t xHandle = NULL;
+
+        // Create the task without using any dynamic memory allocation.
+        xHandle = xTaskCreateStatic(
+                      vTaskCode,       // Function that implements the task.
+                      "NAME",          // Text name for the task.
+                      STACK_SIZE,      // Stack size in words, not bytes.
+                      ( void * ) 1,    // Parameter passed into the task.
+                      tskIDLE_PRIORITY,// Priority at which the task is created.
+                      xStack,          // Array to use as the task's stack.
+                      &xTaskBuffer );  // Variable to hold the task's data structure.
+
+        // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have
+        // been created, and xHandle will be the task's handle.  Use the handle
+        // to suspend the task.
+        vTaskSuspend( xHandle );
+    }
+   
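    // Editorial sketch, not part of the original example: this port also declares
    // xTaskCreateStaticPinnedToCore() (see below), which takes the same arguments
    // plus a core ID so the statically created task can be given CPU affinity.
    void vOtherFunctionPinned( void )
    {
        TaskHandle_t xHandle = NULL;

        // Same buffers and parameters as above, but the task is pinned to core 1.
        xHandle = xTaskCreateStaticPinnedToCore(
                      vTaskCode,        // Function that implements the task.
                      "NAME",           // Text name for the task.
                      STACK_SIZE,       // Stack size in words, not bytes.
                      ( void * ) 1,     // Parameter passed into the task.
                      tskIDLE_PRIORITY, // Priority at which the task is created.
                      xStack,           // Array to use as the task's stack.
                      &xTaskBuffer,     // Variable to hold the task's data structure.
                      1 );              // xCoreID: run the task only on core 1.

        vTaskSuspend( xHandle );
    }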
+ * \defgroup xTaskCreateStatic xTaskCreateStatic + * \ingroup Tasks + */ +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + const BaseType_t xCoreID ); + +#define xTaskCreateStatic( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxStackBuffer, pxTaskBuffer ) xTaskCreateStaticPinnedToCore( ( pvTaskCode ), ( pcName ), ( usStackDepth ), ( pvParameters ), ( uxPriority ), ( pxStackBuffer ), ( pxTaskBuffer ), tskNO_AFFINITY ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ /** * task. h @@ -420,7 +563,9 @@ TaskHandle_t xHandle; * \defgroup xTaskCreateRestricted xTaskCreateRestricted * \ingroup Tasks */ -#define xTaskCreateRestricted( x, pxCreatedTask ) xTaskGenericCreate( ((x)->pvTaskCode), ((x)->pcName), ((x)->usStackDepth), ((x)->pvParameters), ((x)->uxPriority), (pxCreatedTask), ((x)->puxStackBuffer), ((x)->xRegions) ) +#if( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif /** * task. h @@ -1968,12 +2113,6 @@ void vTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTIO */ BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; -/* - * Generic version of the task creation function which is in turn called by the - * xTaskCreate() and xTaskCreateRestricted() macros. - */ -BaseType_t xTaskGenericCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask, StackType_t * const puxStackBuffer, const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - /* * Get the uxTCBNumber assigned to the task referenced by the xTask parameter. */ diff --git a/components/freertos/queue.c b/components/freertos/queue.c index 168f09f1a1..f404a243e6 100644 --- a/components/freertos/queue.c +++ b/components/freertos/queue.c @@ -158,15 +158,19 @@ typedef struct QueueDefinition UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ - #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxQueueNumber; - uint8_t ucQueueType; + #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( configUSE_QUEUE_SETS == 1 ) struct QueueDefinition *pxQueueSetContainer; #endif + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxQueueNumber; + uint8_t ucQueueType; + #endif + portMUX_TYPE mux; } xQUEUE; @@ -238,6 +242,21 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; #endif +/* + * Called after a Queue_t structure has been allocated either statically or + * dynamically to fill in the structure's members. 
+ */ +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; + +/* + * Mutexes are a special type of queue. When a mutex is created, first the + * queue is created, then prvInitialiseMutex() is called to configure the queue + * as a mutex. + */ +#if( configUSE_MUTEXES == 1 ) + static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION; +#endif + BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue ) { Queue_t * const pxQueue = ( Queue_t * ) xQueue; @@ -293,132 +312,165 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue; } /*-----------------------------------------------------------*/ -QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) -{ -Queue_t *pxNewQueue; -size_t xQueueSizeInBytes; -QueueHandle_t xReturn = NULL; -int8_t *pcAllocatedBuffer; +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) + QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + /* The StaticQueue_t structure and the queue storage area must be + supplied. */ + configASSERT( pxStaticQueue != NULL ); + + /* A queue storage area should be provided if the item size is not 0, and + should not be provided if the item size is 0. */ + configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) ); + configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) ); + + #if( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + variable of type StaticQueue_t or StaticSemaphore_t equals the size of + the real queue and semaphore structures. */ + volatile size_t xSize = sizeof( StaticQueue_t ); + configASSERT( xSize == sizeof( Queue_t ) ); + } + #endif /* configASSERT_DEFINED */ + + /* The address of a statically allocated queue was passed in, use it. + The address of a statically allocated storage area was also passed in + but is already set. */ + pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + + if( pxNewQueue != NULL ) + { + #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Queues can be allocated wither statically or dynamically, so + note this queue was allocated statically in case the queue is + later deleted. */ + pxNewQueue->ucStaticallyAllocated = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + size_t xQueueSizeInBytes; + uint8_t *pucQueueStorage; + + configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); + + if( uxItemSize == ( UBaseType_t ) 0 ) + { + /* There is not going to be a queue storage area. 
*/ + xQueueSizeInBytes = ( size_t ) 0; + } + else + { + /* Allocate enough space to hold the maximum number of items that + can be in the queue at any time. */ + xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + + pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); + + if( pxNewQueue != NULL ) + { + /* Jump past the queue structure to find the location of the queue + storage area. */ + pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t ); + + #if( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + /* Queues can be created either statically or dynamically, so + note this task was created dynamically in case it is later + deleted. */ + pxNewQueue->ucStaticallyAllocated = pdFALSE; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + + prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue ); + } + + return pxNewQueue; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) +{ /* Remove compiler warnings about unused parameters should configUSE_TRACE_FACILITY not be set to 1. */ ( void ) ucQueueType; - configASSERT( uxQueueLength > ( UBaseType_t ) 0 ); - if( uxItemSize == ( UBaseType_t ) 0 ) { - /* There is not going to be a queue storage area. */ - xQueueSizeInBytes = ( size_t ) 0; + /* No RAM was allocated for the queue storage area, but PC head cannot + be set to NULL because NULL is used as a key to say the queue is used as + a mutex. Therefore just set pcHead to point to the queue as a benign + value that is known to be within the memory map. */ + pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; } else { - /* The queue is one byte longer than asked for to make wrap checking - easier/faster. */ - xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + /* Set the head to the start of the queue storage area. */ + pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage; } - /* Allocate the new queue structure and storage area. */ - pcAllocatedBuffer = ( int8_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); + /* Initialise the queue members as described where the queue type is + defined. */ + pxNewQueue->uxLength = uxQueueLength; + pxNewQueue->uxItemSize = uxItemSize; + ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); - if( pcAllocatedBuffer != NULL ) + #if ( configUSE_TRACE_FACILITY == 1 ) { - pxNewQueue = ( Queue_t * ) pcAllocatedBuffer; /*lint !e826 MISRA The buffer cannot be to small because it was dimensioned by sizeof( Queue_t ) + xQueueSizeInBytes. */ - - if( uxItemSize == ( UBaseType_t ) 0 ) - { - /* No RAM was allocated for the queue storage area, but PC head - cannot be set to NULL because NULL is used as a key to say the queue - is used as a mutex. Therefore just set pcHead to point to the queue - as a benign value that is known to be within the memory map. */ - pxNewQueue->pcHead = ( int8_t * ) pxNewQueue; - } - else - { - /* Jump past the queue structure to find the location of the queue - storage area - adding the padding bytes to get a better alignment. 
*/ - pxNewQueue->pcHead = pcAllocatedBuffer + sizeof( Queue_t ); - } - - /* Initialise the queue members as described above where the queue type - is defined. */ - pxNewQueue->uxLength = uxQueueLength; - pxNewQueue->uxItemSize = uxItemSize; - ( void ) xQueueGenericReset( pxNewQueue, pdTRUE ); - - #if ( configUSE_TRACE_FACILITY == 1 ) - { - pxNewQueue->ucQueueType = ucQueueType; - } - #endif /* configUSE_TRACE_FACILITY */ - - #if( configUSE_QUEUE_SETS == 1 ) - { - pxNewQueue->pxQueueSetContainer = NULL; - } - #endif /* configUSE_QUEUE_SETS */ - - traceQUEUE_CREATE( pxNewQueue ); - xReturn = pxNewQueue; + pxNewQueue->ucQueueType = ucQueueType; } - else + #endif /* configUSE_TRACE_FACILITY */ + + #if( configUSE_QUEUE_SETS == 1 ) { - mtCOVERAGE_TEST_MARKER(); + pxNewQueue->pxQueueSetContainer = NULL; } + #endif /* configUSE_QUEUE_SETS */ - configASSERT( xReturn ); - - return xReturn; + traceQUEUE_CREATE( pxNewQueue ); } /*-----------------------------------------------------------*/ -#if ( configUSE_MUTEXES == 1 ) +#if( configUSE_MUTEXES == 1 ) - QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + static void prvInitialiseMutex( Queue_t *pxNewQueue ) { - Queue_t *pxNewQueue; - - /* Prevent compiler warnings about unused parameters if - configUSE_TRACE_FACILITY does not equal 1. */ - ( void ) ucQueueType; - - /* Allocate the new queue structure. */ - pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) ); if( pxNewQueue != NULL ) { - /* Information required for priority inheritance. */ + /* The queue create function will set all the queue structure members + correctly for a generic queue, but this function is creating a + mutex. Overwrite those members that need to be set differently - + in particular the information required for priority inheritance. */ pxNewQueue->pxMutexHolder = NULL; pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX; - /* Queues used as a mutex no data is actually copied into or out - of the queue. */ - pxNewQueue->pcWriteTo = NULL; - pxNewQueue->u.pcReadFrom = NULL; + /* In case this is a recursive mutex. */ + pxNewQueue->u.uxRecursiveCallCount = 0; - /* Each mutex has a length of 1 (like a binary semaphore) and - an item size of 0 as nothing is actually copied into or out - of the mutex. */ - pxNewQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; - pxNewQueue->uxLength = ( UBaseType_t ) 1U; - pxNewQueue->uxItemSize = ( UBaseType_t ) 0U; - - #if ( configUSE_TRACE_FACILITY == 1 ) - { - pxNewQueue->ucQueueType = ucQueueType; - } - #endif - - #if ( configUSE_QUEUE_SETS == 1 ) - { - pxNewQueue->pxQueueSetContainer = NULL; - } - #endif - - /* Ensure the event queues start with the correct state. 
*/ - vListInitialise( &( pxNewQueue->xTasksWaitingToSend ) ); - vListInitialise( &( pxNewQueue->xTasksWaitingToReceive ) ); - - vPortCPUInitializeMutex(&pxNewQueue->mux); + vPortCPUInitializeMutex(&pxNewQueue->mux); traceCREATE_MUTEX( pxNewQueue ); @@ -429,8 +481,41 @@ int8_t *pcAllocatedBuffer; { traceCREATE_MUTEX_FAILED(); } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType ) + { + Queue_t *pxNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType ); + prvInitialiseMutex( pxNewQueue ); + + return pxNewQueue; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue ) + { + Queue_t *pxNewQueue; + const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0; + + /* Prevent compiler warnings about unused parameters if + configUSE_TRACE_FACILITY does not equal 1. */ + ( void ) ucQueueType; + + pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType ); + prvInitialiseMutex( pxNewQueue ); - configASSERT( pxNewQueue ); return pxNewQueue; } @@ -565,7 +650,35 @@ int8_t *pcAllocatedBuffer; #endif /* configUSE_RECURSIVE_MUTEXES */ /*-----------------------------------------------------------*/ -#if ( configUSE_COUNTING_SEMAPHORES == 1 ) +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue ) + { + QueueHandle_t xHandle; + + configASSERT( uxMaxCount != 0 ); + configASSERT( uxInitialCount <= uxMaxCount ); + + xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE ); + + if( xHandle != NULL ) + { + ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount; + + traceCREATE_COUNTING_SEMAPHORE(); + } + else + { + traceCREATE_COUNTING_SEMAPHORE_FAILED(); + } + + return xHandle; + } + +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount ) { @@ -591,7 +704,7 @@ int8_t *pcAllocatedBuffer; return xHandle; } -#endif /* configUSE_COUNTING_SEMAPHORES */ +#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ /*-----------------------------------------------------------*/ BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition ) @@ -1685,7 +1798,33 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue; vQueueUnregisterQueue( pxQueue ); } #endif - vPortFree( pxQueue ); + + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) + { + /* The queue 
can only have been allocated dynamically - free it + again. */ + vPortFree( pxQueue ); + } + #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + { + /* The queue could have been allocated statically or dynamically, so + check before attempting to free the memory. */ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE ) + { + vPortFree( pxQueue ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #else + { + /* The queue must have been statically allocated, so is not going to be + deleted. Avoid compiler warnings about the unused parameter. */ + ( void ) pxQueue; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ } /*-----------------------------------------------------------*/ @@ -2263,7 +2402,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue; #endif /* configUSE_TIMERS */ /*-----------------------------------------------------------*/ -#if ( configUSE_QUEUE_SETS == 1 ) +#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength ) { diff --git a/components/freertos/tasks.c b/components/freertos/tasks.c index 82b97fe268..3cdca4d433 100644 --- a/components/freertos/tasks.c +++ b/components/freertos/tasks.c @@ -85,7 +85,6 @@ task.h is included from an application file. */ #include "StackMacros.h" #include "portmacro.h" #include "semphr.h" -#include "sys/reent.h" /* Lint e961 and e750 are suppressed as a MISRA exception justified because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the @@ -140,6 +139,26 @@ typedef enum eNotified } eNotifyValue; +/* Sometimes the FreeRTOSConfig.h settings only allow a task to be created using +dynamically allocated RAM, in which case when any task is deleted it is known +that both the task's stack and TCB need to be freed. Sometimes the +FreeRTOSConfig.h settings only allow a task to be created using statically +allocated RAM, in which case when any task is deleted it is known that neither +the task's stack or TCB should be freed. Sometimes the FreeRTOSConfig.h +settings allow a task to be created using either statically or dynamically +allocated RAM, in which case a member of the TCB is used to record whether the +stack and/or TCB were allocated statically or dynamically, so when a task is +deleted the RAM that was allocated dynamically is freed again and no attempt is +made to free the RAM that was allocated statically. +tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE is only true if it is possible for a +task to be created using either statically or dynamically allocated RAM. Note +that if portUSING_MPU_WRAPPERS is 1 then a protected task can be created with +a statically allocated stack and a dynamically allocated TCB. */ +#define tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE ( ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) || ( portUSING_MPU_WRAPPERS == 1 ) ) +#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 ) +#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 ) +#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 ) + /* * Task control block. A task control block (TCB) is allocated for each task, * and stores task state information, including a pointer to the task's context @@ -151,7 +170,6 @@ typedef struct tskTaskControlBlock #if ( portUSING_MPU_WRAPPERS == 1 ) xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. 
*/ - BaseType_t xUsingStaticallyAllocatedStack; /* Set to pdTRUE if the stack is a statically allocated array, and pdFALSE if the stack is dynamically allocated. */ #endif ListItem_t xGenericListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ @@ -211,6 +229,12 @@ typedef struct tskTaskControlBlock volatile eNotifyValue eNotifyState; #endif + /* See the comments above the definition of + tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + #endif + } tskTCB; /* The old tskTCB name is maintained above then typedefed to the new TCB_t name @@ -459,12 +483,6 @@ to its original value when it is released. */ /* File private functions. --------------------------------*/ -/* - * Utility to ready a TCB for a given task. Mainly just copies the parameters - * into the TCB structure. - */ -static void prvInitialiseTCBVariables( TCB_t * const pxTCB, const char * const pcName, UBaseType_t uxPriority, const MemoryRegion_t * const xRegions, const uint16_t usStackDepth, const BaseType_t xCoreID ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - /** * Utility task that simply returns pdTRUE if the task referenced by xTask is * currently in the Suspended state, or pdFALSE if the task referenced by xTask @@ -519,12 +537,6 @@ static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; */ static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTimeToWake ) PRIVILEGED_FUNCTION; -/* - * Allocates memory from the heap for a TCB and associated stack. Checks the - * allocation was successful. - */ -static TCB_t *prvAllocateTCBAndStack( const uint16_t usStackDepth, StackType_t * const puxStackBuffer ) PRIVILEGED_FUNCTION; - /* * Fills an TaskStatus_t structure with information on each task that is * referenced from the pxList list (which may be a ready list, a delayed list, @@ -581,6 +593,26 @@ static void prvResetNextTaskUnblockTime( void ); #endif +/* + * Called after a Task_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/* + * Called after a new task has been created and initialised to place the task + * under the control of the scheduler. 
+ */ +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID ) PRIVILEGED_FUNCTION; + + /*-----------------------------------------------------------*/ @@ -612,111 +644,408 @@ void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority ) } } +#if( configSUPPORT_STATIC_ALLOCATION == 1 ) -BaseType_t xTaskGenericCreate( TaskFunction_t pxTaskCode, const char * const pcName, const uint16_t usStackDepth, void * const pvParameters, UBaseType_t uxPriority, TaskHandle_t * const pxCreatedTask, StackType_t * const puxStackBuffer, const MemoryRegion_t * const xRegions, const BaseType_t xCoreID) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ -{ -BaseType_t xReturn; -TCB_t * pxNewTCB; -StackType_t *pxTopOfStack; -BaseType_t i; - configASSERT( pxTaskCode ); - configASSERT( ( ( uxPriority & ( ~portPRIVILEGE_BIT ) ) < configMAX_PRIORITIES ) ); - configASSERT( (xCoreID>=0 && xCoreIDxUsingStaticallyAllocatedStack = pdTRUE; - } - else - { - /* The stack was allocated dynamically. Note this so it can be - deleted again if the task is deleted. */ - pxNewTCB->xUsingStaticallyAllocatedStack = pdFALSE; - } - #endif /* portUSING_MPU_WRAPPERS == 1 */ + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + configASSERT( (xCoreID>=0 && xCoreIDpxStack + ( usStackDepth - ( uint16_t ) 1 ); - pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ( portPOINTER_SIZE_TYPE ) ~portBYTE_ALIGNMENT_MASK ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */ + /* The memory used for the task's TCB and stack are passed into this + function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; - /* Check the alignment of the calculated top of stack is correct. */ - configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID ); + prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if( portUSING_MPU_WRAPPERS == 1 ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != NULL ) + { + /* Allocate space for the TCB. Where the memory comes from depends + on the implementation of the port malloc function and whether or + not static allocation is being used. 
*/ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + /* Tasks can be created statically or dynamically, so note + this task had a statically allocated stack in case it is + later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions, + tskNO_AFFINITY ); + + prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint16_t usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + const BaseType_t xCoreID ) + { + TCB_t *pxNewTCB; + BaseType_t xReturn; + + /* If the stack grows down then allocate the stack then the TCB so the stack + does not grow into the TCB. Likewise if the stack grows up then allocate + the TCB then the stack. */ + #if( portSTACK_GROWTH > 0 ) + { + /* Allocate space for the TCB. Where the memory comes from depends on + the implementation of the port malloc function and whether or not static + allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Allocate space for the stack used by the task being created. + The base of the stack memory stored in the TCB so the task can + be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + if( pxNewTCB->pxStack == NULL ) + { + /* Could not allocate the stack. Delete the allocated TCB. */ + vPortFree( pxNewTCB ); + pxNewTCB = NULL; + } + } } #else /* portSTACK_GROWTH */ { - pxTopOfStack = pxNewTCB->pxStack; + StackType_t *pxStack; - /* Check the alignment of the stack buffer is correct. */ - configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + /* Allocate space for the stack used by the task being created. */ + pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - /* If we want to use stack checking on architectures that use - a positive stack growth direction then we also need to store the - other extreme of the stack space. */ - pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( usStackDepth - 1 ); + if( pxStack != NULL ) + { + /* Allocate space for the TCB. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e961 MISRA exception as the casts are only redundant for some paths. */ + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxStack; + } + else + { + /* The stack cannot be used as the TCB was not created. Free + it again. 
*/ + vPortFree( pxStack ); + } + } + else + { + pxNewTCB = NULL; + } } #endif /* portSTACK_GROWTH */ - /* Setup the newly allocated TCB with the initial state of the task. */ - prvInitialiseTCBVariables( pxNewTCB, pcName, uxPriority, xRegions, usStackDepth, xCoreID ); + if( pxNewTCB != NULL ) + { + #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + task was created dynamically in case it is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ - /* Initialize the TCB stack to look as if the task was already running, - but had been interrupted by the scheduler. The return address is set - to the start of the task function. Once the stack has been initialised - the top of stack variable is updated. */ - #if( portUSING_MPU_WRAPPERS == 1 ) - { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID ); + prvAddNewTaskToReadyList( pxNewTCB, pxTaskCode, xCoreID ); + xReturn = pdPASS; } - #else /* portUSING_MPU_WRAPPERS */ + else { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; } - #endif /* portUSING_MPU_WRAPPERS */ - if( ( void * ) pxCreatedTask != NULL ) + return xReturn; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t *pxNewTCB, + const MemoryRegion_t * const xRegions, const BaseType_t xCoreID ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ +StackType_t *pxTopOfStack; +UBaseType_t x; + + #if( portUSING_MPU_WRAPPERS == 1 ) + /* Should the task be created in privileged mode? */ + BaseType_t xRunPrivileged; + if( ( uxPriority & portPRIVILEGE_BIT ) != 0U ) { - /* Pass the TCB out - in an anonymous way. The calling function/ - task can use this as a handle to delete the task later if - required.*/ - *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + xRunPrivileged = pdTRUE; + } + else + { + xRunPrivileged = pdFALSE; + } + uxPriority &= ~portPRIVILEGE_BIT; + #endif /* portUSING_MPU_WRAPPERS == 1 */ + + /* Avoid dependency on memset() if it is not required. */ + #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) + { + /* Fill the stack with a known value to assist debugging. */ + ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) ); + } + #endif /* ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) ) */ + + /* Calculate the top of stack address. This depends on whether the stack + grows from high memory to low (as per the 80x86) or vice versa. + portSTACK_GROWTH is used to make the result positive or negative as required + by the port. 
*/ + #if( portSTACK_GROWTH < 0 ) + { + pxTopOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. */ + + /* Check the alignment of the calculated top of stack is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + } + #else /* portSTACK_GROWTH */ + { + pxTopOfStack = pxNewTCB->pxStack; + + /* Check the alignment of the stack buffer is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + /* The other extreme of the stack space is required if stack checking is + performed. */ + pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + } + #endif /* portSTACK_GROWTH */ + + /* Store the task name in the TCB. */ + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + configMAX_TASK_NAME_LEN characters just in case the memory after the + string is not accessible (extremely unlikely). */ + if( pcName[ x ] == 0x00 ) + { + break; } else { mtCOVERAGE_TEST_MARKER(); } + } - /* Ensure interrupts don't access the task lists while they are being - updated. */ - taskENTER_CRITICAL(&xTaskQueueMutex); + /* Ensure the name string is terminated in the case that the string length + was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + + /* This is used as an array index so must ensure it's not too large. First + remove the privilege bit if one is present. */ + if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxNewTCB->uxPriority = uxPriority; + pxNewTCB->xCoreID = xCoreID; + #if ( configUSE_MUTEXES == 1 ) + { + pxNewTCB->uxBasePriority = uxPriority; + pxNewTCB->uxMutexesHeld = 0; + } + #endif /* configUSE_MUTEXES */ + + vListInitialiseItem( &( pxNewTCB->xGenericListItem ) ); + vListInitialiseItem( &( pxNewTCB->xEventListItem ) ); + + /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get + back to the containing TCB from a generic item in a list. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xGenericListItem ), pxNewTCB ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB ); + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + { + pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U; + } + #endif /* portCRITICAL_NESTING_IN_TCB */ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + { + pxNewTCB->pxTaskTag = NULL; + } + #endif /* configUSE_APPLICATION_TASK_TAG */ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxNewTCB->ulRunTimeCounter = 0UL; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth ); + } + #else + { + /* Avoid compiler warning about unreferenced parameter. */ + ( void ) xRegions; + } + #endif + + #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + { + for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ ) { - uxCurrentNumberOfTasks++; + pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL; + #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1) + pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL; + #endif + } + } + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + pxNewTCB->ulNotifiedValue = 0; + pxNewTCB->eNotifyState = eNotWaitingNotification; + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Initialise this task's Newlib reent structure. */ + _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + } + #endif + + #if( INCLUDE_xTaskAbortDelay == 1 ) + { + pxNewTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Initialize the TCB stack to look as if the task was already running, + but had been interrupted by the scheduler. The return address is set + to the start of the task function. Once the stack has been initialised + the top of stack variable is updated. */ + #if( portUSING_MPU_WRAPPERS == 1 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portUSING_MPU_WRAPPERS */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portUSING_MPU_WRAPPERS */ + + if( ( void * ) pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. The handle can be used to + change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, const BaseType_t xCoreID ) +{ + BaseType_t i; + + /* Ensure interrupts don't access the task lists while the lists are being + updated. */ + taskENTER_CRITICAL(&xTaskQueueMutex); + { + uxCurrentNumberOfTasks++; + if( pxCurrentTCB[ xPortGetCoreID() ] == NULL ) + { + /* There are no other tasks, or all the other tasks are in + the suspended state - make this the current task. */ + pxCurrentTCB[ xPortGetCoreID() ] = pxNewTCB; + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) { /* This is the first task to be created so do the preliminary @@ -724,6 +1053,16 @@ BaseType_t i; fails, but we will report the failure. */ prvInitialiseTaskLists(); } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* If the scheduler is not already running, make this task the + current task if it is the highest priority task to be created + so far. */ if( xSchedulerRunning == pdFALSE ) { /* Scheduler isn't running yet. We need to determine on which CPU to run this task. 
*/ @@ -733,7 +1072,7 @@ BaseType_t i; if (xCoreID == tskNO_AFFINITY || xCoreID == i) { /* Schedule if nothing is scheduled yet, or overwrite a task of lower prio. */ - if ( pxCurrentTCB[i] == NULL || pxCurrentTCB[i]->uxPriority <= uxPriority ) + if ( pxCurrentTCB[i] == NULL || pxCurrentTCB[i]->uxPriority <= pxNewTCB->uxPriority ) { #if portFIRST_TASK_HOOK if ( i == 0) { @@ -751,44 +1090,44 @@ BaseType_t i; { mtCOVERAGE_TEST_MARKER(); } - - uxTaskNumber++; - - #if ( configUSE_TRACE_FACILITY == 1 ) - { - /* Add a counter into the TCB for tracing only. */ - pxNewTCB->uxTCBNumber = uxTaskNumber; - } - #endif /* configUSE_TRACE_FACILITY */ - traceTASK_CREATE( pxNewTCB ); - - prvAddTaskToReadyList( pxNewTCB ); - - xReturn = pdPASS; - portSETUP_TCB( pxNewTCB ); } - taskEXIT_CRITICAL(&xTaskQueueMutex); - } - else - { - xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; - traceTASK_CREATE_FAILED(); - } - if( xReturn == pdPASS ) + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. */ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); + + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + } + taskEXIT_CRITICAL(&xTaskQueueMutex); + + if( xSchedulerRunning != pdFALSE ) { - if( xSchedulerRunning != pdFALSE ) + /* Scheduler is running. If the created task is of a higher priority than an executing task + then it should run now. + ToDo: This only works for the current core. If a task is scheduled on an other processor, + the other processor will keep running the task it's working on, and only switch to the newer + task on a timer interrupt. */ + //No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires. + if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < pxNewTCB->uxPriority ) { /* Scheduler is running. If the created task is of a higher priority than an executing task then it should run now. No mux here, uxPriority is mostly atomic and there's not really any harm if this check misfires. */ - if( tskCAN_RUN_HERE( xCoreID ) && pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < uxPriority ) + if( tskCAN_RUN_HERE( xCoreID ) && pxCurrentTCB[ xPortGetCoreID() ]->uxPriority < pxNewTCB->uxPriority ) { taskYIELD_IF_USING_PREEMPTION(); } else if( xCoreID != xPortGetCoreID() ) { - taskYIELD_OTHER_CORE(xCoreID, uxPriority); + taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority); } else { @@ -800,8 +1139,10 @@ BaseType_t i; mtCOVERAGE_TEST_MARKER(); } } - - return xReturn; + else + { + mtCOVERAGE_TEST_MARKER(); + } } /*-----------------------------------------------------------*/ @@ -3010,120 +3351,6 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) #endif /* configUSE_TICKLESS_IDLE */ /*-----------------------------------------------------------*/ -static void prvInitialiseTCBVariables( TCB_t * const pxTCB, const char * const pcName, UBaseType_t uxPriority, const MemoryRegion_t * const xRegions, const uint16_t usStackDepth, const BaseType_t xCoreID ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ -{ -UBaseType_t x; - - /* Store the task name in the TCB. */ - for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) - { - pxTCB->pcTaskName[ x ] = pcName[ x ]; - - /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than - configMAX_TASK_NAME_LEN characters just in case the memory after the - string is not accessible (extremely unlikely). 
*/ - if( pcName[ x ] == 0x00 ) - { - break; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - - /* Ensure the name string is terminated in the case that the string length - was greater or equal to configMAX_TASK_NAME_LEN. */ - pxTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; - - /* This is used as an array index so must ensure it's not too large. First - remove the privilege bit if one is present. */ - if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) - { - uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - pxTCB->uxPriority = uxPriority; - pxTCB->xCoreID = xCoreID; - #if ( configUSE_MUTEXES == 1 ) - { - pxTCB->uxBasePriority = uxPriority; - pxTCB->uxMutexesHeld = 0; - } - #endif /* configUSE_MUTEXES */ - - vListInitialiseItem( &( pxTCB->xGenericListItem ) ); - vListInitialiseItem( &( pxTCB->xEventListItem ) ); - - /* Set the pxTCB as a link back from the ListItem_t. This is so we can get - back to the containing TCB from a generic item in a list. */ - listSET_LIST_ITEM_OWNER( &( pxTCB->xGenericListItem ), pxTCB ); - - /* Event lists are always in priority order. */ - listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - listSET_LIST_ITEM_OWNER( &( pxTCB->xEventListItem ), pxTCB ); - - #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - { - pxTCB->uxCriticalNesting = ( UBaseType_t ) 0U; - } - #endif /* portCRITICAL_NESTING_IN_TCB */ - - #if ( configUSE_APPLICATION_TASK_TAG == 1 ) - { - pxTCB->pxTaskTag = NULL; - } - #endif /* configUSE_APPLICATION_TASK_TAG */ - - #if ( configGENERATE_RUN_TIME_STATS == 1 ) - { - pxTCB->ulRunTimeCounter = 0UL; - } - #endif /* configGENERATE_RUN_TIME_STATS */ - - #if ( portUSING_MPU_WRAPPERS == 1 ) - { - vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, pxTCB->pxStack, usStackDepth ); - } - #else /* portUSING_MPU_WRAPPERS */ - { - ( void ) xRegions; - ( void ) usStackDepth; - } - #endif /* portUSING_MPU_WRAPPERS */ - - #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) - { - for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ ) - { - pxTCB->pvThreadLocalStoragePointers[ x ] = NULL; - #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) - pxTCB->pvThreadLocalStoragePointersDelCallback[ x ] = (TlsDeleteCallbackFunction_t)NULL; - #endif - } - } - #endif - - - #if ( configUSE_TASK_NOTIFICATIONS == 1 ) - { - pxTCB->ulNotifiedValue = 0; - pxTCB->eNotifyState = eNotWaitingNotification; - } - #endif - - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - { - /* Initialise this task's Newlib reent structure. */ - _REENT_INIT_PTR( ( &( pxTCB->xNewLib_reent ) ) ); - } - #endif /* configUSE_NEWLIB_REENTRANT */ -} -/*-----------------------------------------------------------*/ #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) @@ -3319,81 +3546,6 @@ static void prvAddCurrentTaskToDelayedList( const BaseType_t xCoreID, const Tick } /*-----------------------------------------------------------*/ -static TCB_t *prvAllocateTCBAndStack( const uint16_t usStackDepth, StackType_t * const puxStackBuffer ) -{ -TCB_t *pxNewTCB; - - /* If the stack grows down then allocate the stack then the TCB so the stack - does not grow into the TCB. Likewise if the stack grows up then allocate - the TCB then the stack. */ - #if( portSTACK_GROWTH > 0 ) - { - /* Allocate space for the TCB. 
Where the memory comes from depends on - the implementation of the port malloc function. */ - pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); - - if( pxNewTCB != NULL ) - { - /* Allocate space for the stack used by the task being created. - The base of the stack memory stored in the TCB so the task can - be deleted later if required. */ - pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocAligned( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ), puxStackBuffer ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - - if( pxNewTCB->pxStack == NULL ) - { - /* Could not allocate the stack. Delete the allocated TCB. */ - vPortFree( pxNewTCB ); - pxNewTCB = NULL; - } - } - } - #else /* portSTACK_GROWTH */ - { - StackType_t *pxStack; - - /* Allocate space for the stack used by the task being created. */ - pxStack = ( StackType_t * ) pvPortMallocAligned( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ), puxStackBuffer ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ - - if( pxStack != NULL ) - { - /* Allocate space for the TCB. Where the memory comes from depends - on the implementation of the port malloc function. */ - pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); - - if( pxNewTCB != NULL ) - { - /* Store the stack location in the TCB. */ - pxNewTCB->pxStack = pxStack; - } - else - { - /* The stack cannot be used as the TCB was not created. Free it - again. */ - vPortFree( pxStack ); - } - } - else - { - pxNewTCB = NULL; - } - } - #endif /* portSTACK_GROWTH */ - - if( pxNewTCB != NULL ) - { - /* Avoid dependency on memset() if it is not required. */ - #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) - { - /* Just to help debugging. */ - ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) usStackDepth * sizeof( StackType_t ) ); - } - #endif /* ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) ) ) */ - } - - return pxNewTCB; -} -/*-----------------------------------------------------------*/ - BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) { TCB_t *pxTCB; @@ -3561,22 +3713,40 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) } #endif /* configUSE_NEWLIB_REENTRANT */ - #if( portUSING_MPU_WRAPPERS == 1 ) + #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) { - /* Only free the stack if it was allocated dynamically in the first - place. */ - if( pxTCB->xUsingStaticallyAllocatedStack == pdFALSE ) + /* The task can only have been allocated dynamically - free both + the stack and TCB. */ + vPortFreeAligned( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 ) + { + /* The task could have been allocated statically or dynamically, so + check what was statically allocated before trying to free the + memory. */ + if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ) { + /* Both the stack and TCB were allocated dynamically, so both + must be freed. */ vPortFreeAligned( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + /* Only the stack was statically allocated, so the TCB is the + only memory that must be freed. 
*/ + vPortFree( pxTCB ); + } + else + { + /* Neither the stack nor the TCB were allocated dynamically, so + nothing needs to be freed. */ + configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ) + mtCOVERAGE_TEST_MARKER(); } } - #else - { - vPortFreeAligned( pxTCB->pxStack ); - } - #endif - - vPortFree( pxTCB ); + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ } #endif /* INCLUDE_vTaskDelete */ @@ -3978,7 +4148,9 @@ is not running. Re-enabling the scheduler will re-enable the interrupts instead function is executing. */ uxArraySize = uxCurrentNumberOfTasks; - /* Allocate an array index for each task. */ + /* Allocate an array index for each task. NOTE! if + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); if( pxTaskStatusArray != NULL ) @@ -4018,7 +4190,8 @@ is not running. Re-enabling the scheduler will re-enable the interrupts instead pcWriteBuffer += strlen( pcWriteBuffer ); } - /* Free the array again. */ + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. */ vPortFree( pxTaskStatusArray ); } else @@ -4077,7 +4250,9 @@ is not running. Re-enabling the scheduler will re-enable the interrupts instead function is executing. */ uxArraySize = uxCurrentNumberOfTasks; - /* Allocate an array index for each task. */ + /* Allocate an array index for each task. NOTE! If + configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + equate to NULL. */ pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); if( pxTaskStatusArray != NULL ) @@ -4143,7 +4318,8 @@ is not running. Re-enabling the scheduler will re-enable the interrupts instead mtCOVERAGE_TEST_MARKER(); } - /* Free the array again. */ + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + is 0 then vPortFree() will be #defined to nothing. */ vPortFree( pxTaskStatusArray ); } else diff --git a/components/freertos/xtensa_vectors.S b/components/freertos/xtensa_vectors.S index 7c2fc29607..37802c34e8 100644 --- a/components/freertos/xtensa_vectors.S +++ b/components/freertos/xtensa_vectors.S @@ -96,7 +96,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used. 
Please change this when the tcb structure is changed */ -#define TASKTCB_XCOREID_OFFSET (0x3C+configMAX_TASK_NAME_LEN+3)&~3 +#define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3 .extern pxCurrentTCB /* Enable stack backtrace across exception/interrupt - see below */ diff --git a/components/lwip/api/api_lib.c b/components/lwip/api/api_lib.c index c38c760811..ecebf4f813 100755 --- a/components/lwip/api/api_lib.c +++ b/components/lwip/api/api_lib.c @@ -55,11 +55,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - #define API_MSG_VAR_REF(name) API_VAR_REF(name) #define API_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct api_msg, name) #define API_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct api_msg, MEMP_API_MSG, name) @@ -178,8 +173,8 @@ netconn_delete(struct netconn *conn) return err; } -#if !LWIP_THREAD_SAFE - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("netconn_delete - free conn\n")); +#if !ESP_THREAD_SAFE + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("netconn_delete - free conn\n")); netconn_free(conn); #endif @@ -502,7 +497,7 @@ netconn_recv_data(struct netconn *conn, void **new_buf) #endif /* LWIP_TCP && (LWIP_UDP || LWIP_RAW) */ #if (LWIP_UDP || LWIP_RAW) { -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE if (buf == NULL){ API_EVENT(conn, NETCONN_EVT_RCVMINUS, 0); return ERR_CLSD; @@ -710,17 +705,7 @@ netconn_write_partly(struct netconn *conn, const void *dataptr, size_t size, } dontblock = netconn_is_nonblocking(conn) || (apiflags & NETCONN_DONTBLOCK); -#ifdef LWIP_ESP8266 - -#ifdef FOR_XIAOMI - if (dontblock && bytes_written) { -#else - if (dontblock && !bytes_written) { -#endif - -#else if (dontblock && !bytes_written) { -#endif /* This implies netconn_write() cannot be used for non-blocking send, since it has no way to return the number of bytes written. */ return ERR_VAL; diff --git a/components/lwip/api/api_msg.c b/components/lwip/api/api_msg.c index e8e967ef40..d504bfb877 100755 --- a/components/lwip/api/api_msg.c +++ b/components/lwip/api/api_msg.c @@ -55,10 +55,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - /* netconns are polled once per second (e.g. continue write on memory error) */ #define NETCONN_TCP_POLL_INTERVAL 2 @@ -314,8 +310,8 @@ poll_tcp(void *arg, struct tcp_pcb *pcb) if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) { /* If the queued byte- or pbuf-count drops below the configured low-water limit, let select mark this pcb as writable again. */ - if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && - (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT(conn->pcb.tcp)) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT(conn->pcb.tcp))) { conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE; API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0); } @@ -348,8 +344,8 @@ sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len) /* If the queued byte- or pbuf-count drops below the configured low-water limit, let select mark this pcb as writable again. 
*/ - if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) && - (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) { + if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT(conn->pcb.tcp) && + (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT(conn->pcb.tcp)))) { conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE; API_EVENT(conn, NETCONN_EVT_SENDPLUS, len); } @@ -1216,16 +1212,7 @@ lwip_netconn_do_connect(void *m) if (msg->conn->state == NETCONN_CONNECT) { msg->err = ERR_ALREADY; } else if (msg->conn->state != NETCONN_NONE) { - -#ifdef LWIP_ESP8266 - if( msg->conn->pcb.tcp->state == ESTABLISHED ) msg->err = ERR_ISCONN; - else - msg->err = ERR_ALREADY; -#else - msg->err = ERR_ISCONN; -#endif - } else { setup_tcp(msg->conn); msg->err = tcp_connect(msg->conn->pcb.tcp, API_EXPR_REF(msg->msg.bc.ipaddr), @@ -1540,8 +1527,8 @@ err_mem: and let poll_tcp check writable space to mark the pcb writable again */ API_EVENT(conn, NETCONN_EVT_SENDMINUS, len); conn->flags |= NETCONN_FLAG_CHECK_WRITESPACE; - } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT) || - (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT)) { + } else if ((tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT(conn->pcb.tcp)) || + (tcp_sndqueuelen(conn->pcb.tcp) >= TCP_SNDQUEUELOWAT(conn->pcb.tcp))) { /* The queued byte- or pbuf-count exceeds the configured low-water limit, let select mark this pcb as non-writable. */ API_EVENT(conn, NETCONN_EVT_SENDMINUS, len); @@ -1646,14 +1633,7 @@ lwip_netconn_do_write(void *m) if (lwip_netconn_do_writemore(msg->conn, 0) != ERR_OK) { LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE); UNLOCK_TCPIP_CORE(); - -#ifdef LWIP_ESP8266 -//#if 0 - sys_arch_sem_wait( LWIP_API_MSG_SND_SEM(msg), 0); -#else - sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); -#endif - + sys_arch_sem_wait(LWIP_API_MSG_SEM(msg), 0); LOCK_TCPIP_CORE(); LWIP_ASSERT("state!", msg->conn->state != NETCONN_WRITE); } diff --git a/components/lwip/api/lwip_debug.c b/components/lwip/api/lwip_debug.c index 1e5fed40d3..d73a23e1a3 100644 --- a/components/lwip/api/lwip_debug.c +++ b/components/lwip/api/lwip_debug.c @@ -48,6 +48,9 @@ static void dbg_lwip_tcp_pcb_one_show(struct tcp_pcb* pcb) printf("rttest=%d rtseq=%d sa=%d sv=%d\n", pcb->rttest, pcb->rtseq, pcb->sa, pcb->sv); printf("rto=%d nrtx=%d\n", pcb->rto, pcb->nrtx); printf("dupacks=%d lastack=%d\n", pcb->dupacks, pcb->lastack); +#if ESP_PER_SOC_TCP_WND + printf("per_soc_window=%d per_soc_snd_buf=%d\n", pcb->per_soc_tcp_wnd, pcb->per_soc_tcp_snd_buf); +#endif printf("cwnd=%d ssthreash=%d\n", pcb->cwnd, pcb->ssthresh); printf("snd_next=%d snd_wl1=%d snd_wl2=%d\n", pcb->snd_nxt, pcb->snd_wl1, pcb->snd_wl2); printf("snd_lbb=%d snd_wnd=%d snd_wnd_max=%d\n", pcb->snd_lbb, pcb->snd_wnd, pcb->snd_wnd_max); diff --git a/components/lwip/api/netbuf.c b/components/lwip/api/netbuf.c index 6c6dc69ccd..9ab76a4638 100755 --- a/components/lwip/api/netbuf.c +++ b/components/lwip/api/netbuf.c @@ -45,11 +45,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /** * Create (allocate) and initialize a new netbuf. * The netbuf doesn't yet contain a packet buffer! 
diff --git a/components/lwip/api/netdb.c b/components/lwip/api/netdb.c index 8fd3f41861..65510f55e9 100755 --- a/components/lwip/api/netdb.c +++ b/components/lwip/api/netdb.c @@ -47,11 +47,6 @@ #include #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /** helper struct for gethostbyname_r to access the char* buffer */ struct gethostbyname_r_helper { ip_addr_t *addr_list[2]; diff --git a/components/lwip/api/sockets.c b/components/lwip/api/sockets.c index 350847b57c..455d007ea7 100755 --- a/components/lwip/api/sockets.c +++ b/components/lwip/api/sockets.c @@ -61,11 +61,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /* If the netconn API is not required publicly, then we include the necessary files here to get the implementation */ #if !LWIP_NETCONN @@ -216,7 +211,7 @@ struct lwip_sock { /** last error that occurred on this socket (in fact, all our errnos fit into an u8_t) */ u8_t err; -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE /* lock is used to protect state/ref field, however this lock is not a perfect lock, e.g * taskA and taskB can access sock X, then taskA freed sock X, before taskB detect * this, taskC reuse sock X, then when taskB try to access sock X, problem may happen. @@ -239,7 +234,7 @@ struct lwip_sock { SELWAIT_T select_waiting; }; -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE #define LWIP_SOCK_OPEN 0 #define LWIP_SOCK_CLOSING 1 @@ -247,25 +242,25 @@ struct lwip_sock { #define LWIP_SOCK_LOCK(sock) \ do{\ - /*LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("l\n"));*/\ + /*LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("l\n"));*/\ sys_mutex_lock(&sock->lock);\ - /*LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("l ok\n"));*/\ + /*LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("l ok\n"));*/\ }while(0) #define LWIP_SOCK_UNLOCK(sock) \ do{\ sys_mutex_unlock(&sock->lock);\ - /*LWIP_DEBUGF(THREAD_SAFE_DEBUG1, ("unl\n"));*/\ + /*LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG1, ("unl\n"));*/\ }while(0) #define LWIP_FREE_SOCK(sock) \ do{\ if(sock->conn && NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP){\ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("LWIP_FREE_SOCK:free tcp sock\n"));\ + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("LWIP_FREE_SOCK:free tcp sock\n"));\ free_socket(sock, 1);\ } else {\ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("LWIP_FREE_SOCK:free non-tcp sock\n"));\ + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("LWIP_FREE_SOCK:free non-tcp sock\n"));\ free_socket(sock, 0);\ }\ }while(0) @@ -273,7 +268,7 @@ do{\ #define LWIP_SET_CLOSE_FLAG() \ do{\ LWIP_SOCK_LOCK(__sock);\ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("mark sock closing\n"));\ + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("mark sock closing\n"));\ __sock->state = LWIP_SOCK_CLOSING;\ LWIP_SOCK_UNLOCK(__sock);\ }while(0) @@ -291,7 +286,7 @@ do{\ LWIP_SOCK_LOCK(__sock);\ __sock->ref ++;\ if (__sock->state != LWIP_SOCK_OPEN) {\ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("LWIP_API_LOCK:soc is %d, return\n", __sock->state));\ + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("LWIP_API_LOCK:soc is %d, return\n", __sock->state));\ __sock->ref --;\ LWIP_SOCK_UNLOCK(__sock);\ return -1;\ @@ -306,12 +301,12 @@ do{\ __sock->ref --;\ if (__sock->state == LWIP_SOCK_CLOSING) {\ if (__sock->ref == 0){\ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("LWIP_API_UNLOCK:ref 0, free __sock\n"));\ + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("LWIP_API_UNLOCK:ref 0, free __sock\n"));\ LWIP_FREE_SOCK(__sock);\ LWIP_SOCK_UNLOCK(__sock);\ return __ret;\ }\ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("LWIP_API_UNLOCK: soc state is closing, 
return\n"));\ + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("LWIP_API_UNLOCK: soc state is closing, return\n"));\ LWIP_SOCK_UNLOCK(__sock);\ return __ret;\ }\ @@ -387,11 +382,9 @@ static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, static void lwip_socket_drop_registered_memberships(int s); #endif /* LWIP_IGMP */ -#ifdef LWIP_ESP8266 - -/* Since esp_wifi_tx_is_stop/system_get_free_heap_size are not an public wifi API, so extern them here*/ -extern size_t system_get_free_heap_size(void); -extern bool esp_wifi_tx_is_stop(void); +#if ESP_LWIP +#include "esp_wifi_internal.h" +#include "esp_system.h" /* Please be notified that this flow control is just a workaround for fixing wifi Q full issue. * Under UDP/TCP pressure test, we found that the sockets may cause wifi tx queue full if the socket @@ -402,9 +395,9 @@ extern bool esp_wifi_tx_is_stop(void); */ static inline void esp32_tx_flow_ctrl(void) { - uint8_t _wait_delay = 0; + uint8_t _wait_delay = 1; - while ((system_get_free_heap_size() < HEAP_HIGHWAT) || esp_wifi_tx_is_stop()){ + while ((system_get_free_heap_size() < HEAP_HIGHWAT) || esp_wifi_internal_tx_is_stop()){ vTaskDelay(_wait_delay/portTICK_RATE_MS); if (_wait_delay < 64) _wait_delay *= 2; } @@ -416,7 +409,7 @@ static inline void esp32_tx_flow_ctrl(void) /** The global array of available sockets */ static struct lwip_sock sockets[NUM_SOCKETS]; -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE static bool sockets_init_flag = false; #endif /** The global list of tasks waiting for select */ @@ -427,13 +420,7 @@ static volatile int select_cb_ctr; /** Table to quickly map an lwIP error (err_t) to a socket error * by using -err as an index */ -#ifdef LWIP_ESP8266 -//TO_DO -//static const int err_to_errno_table[] ICACHE_RODATA_ATTR STORE_ATTR = { static const int err_to_errno_table[] = { -#else -static const int err_to_errno_table[] = { -#endif 0, /* ERR_OK 0 No error, everything OK. */ ENOMEM, /* ERR_MEM -1 Out of memory error. */ ENOBUFS, /* ERR_BUF -2 Buffer error. */ @@ -444,7 +431,7 @@ static const int err_to_errno_table[] = { EWOULDBLOCK, /* ERR_WOULDBLOCK -7 Operation would block. */ EADDRINUSE, /* ERR_USE -8 Address in use. */ -#ifdef LWIP_ESP8266 +#if ESP_LWIP EALREADY, /* ERR_ALREADY -9 Already connected. */ EISCONN, /* ERR_ISCONN -10 Conn already established */ ECONNABORTED, /* ERR_ABRT -11 Connection aborted. 
*/ @@ -585,7 +572,7 @@ alloc_socket(struct netconn *newconn, int accepted) int i; SYS_ARCH_DECL_PROTECT(lev); -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE bool found = false; int oldest = -1; @@ -641,16 +628,16 @@ alloc_socket(struct netconn *newconn, int accepted) if (!sockets[oldest].lock){ /* one time init and never free */ if (sys_mutex_new(&sockets[oldest].lock) != ERR_OK){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("new sock lock fail\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("new sock lock fail\n")); return -1; } } - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("alloc_socket: alloc %d ok\n", oldest)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("alloc_socket: alloc %d ok\n", oldest)); return oldest + LWIP_SOCKET_OFFSET; } - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("alloc_socket: failed\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("alloc_socket: failed\n")); #else @@ -695,12 +682,12 @@ free_socket(struct lwip_sock *sock, int is_tcp) void *lastdata; SYS_ARCH_DECL_PROTECT(lev); - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("free_sockset:free socket s=%p is_tcp=%d\n", sock, is_tcp)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("free_sockset:free socket s=%p is_tcp=%d\n", sock, is_tcp)); lastdata = sock->lastdata; sock->lastdata = NULL; sock->lastoffset = 0; sock->err = 0; -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE if (sock->conn){ netconn_free(sock->conn); } @@ -718,10 +705,10 @@ free_socket(struct lwip_sock *sock, int is_tcp) if (lastdata != NULL) { if (is_tcp) { - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("free_sockset:free lastdata pbuf=%p\n", lastdata)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("free_sockset:free lastdata pbuf=%p\n", lastdata)); pbuf_free((struct pbuf *)lastdata); } else { - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("free_sockset:free lastdata, netbuf=%p\n", lastdata)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("free_sockset:free lastdata, netbuf=%p\n", lastdata)); netbuf_delete((struct netbuf *)lastdata); } } @@ -874,19 +861,19 @@ lwip_close(int s) int is_tcp = 0; err_t err; - LWIP_DEBUGF(SOCKETS_DEBUG|THREAD_SAFE_DEBUG, ("lwip_close: (%d)\n", s)); + LWIP_DEBUGF(SOCKETS_DEBUG|ESP_THREAD_SAFE_DEBUG, ("lwip_close: (%d)\n", s)); sock = get_socket(s); if (!sock) { - LWIP_DEBUGF(SOCKETS_DEBUG|THREAD_SAFE_DEBUG, ("lwip_close: sock is null, return -1\n")); + LWIP_DEBUGF(SOCKETS_DEBUG|ESP_THREAD_SAFE_DEBUG, ("lwip_close: sock is null, return -1\n")); return -1; } if (sock->conn != NULL) { is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP; - LWIP_DEBUGF(SOCKETS_DEBUG|THREAD_SAFE_DEBUG, ("lwip_close: is_tcp=%d\n", is_tcp)); + LWIP_DEBUGF(SOCKETS_DEBUG|ESP_THREAD_SAFE_DEBUG, ("lwip_close: is_tcp=%d\n", is_tcp)); } else { - LWIP_DEBUGF(SOCKETS_DEBUG|THREAD_SAFE_DEBUG, ("conn is null\n")); + LWIP_DEBUGF(SOCKETS_DEBUG|ESP_THREAD_SAFE_DEBUG, ("conn is null\n")); LWIP_ASSERT("lwip_close: sock->lastdata == NULL", sock->lastdata == NULL); } @@ -897,12 +884,12 @@ lwip_close(int s) err = netconn_delete(sock->conn); if (err != ERR_OK) { - LWIP_DEBUGF(SOCKETS_DEBUG|THREAD_SAFE_DEBUG, ("netconn_delete fail, ret=%d\n", err)); + LWIP_DEBUGF(SOCKETS_DEBUG|ESP_THREAD_SAFE_DEBUG, ("netconn_delete fail, ret=%d\n", err)); sock_set_errno(sock, err_to_errno(err)); return -1; } -#if !LWIP_THREAD_SAFE +#if !ESP_THREAD_SAFE free_socket(sock, is_tcp); #endif @@ -1132,22 +1119,13 @@ lwip_recvfrom(int s, void *mem, size_t len, int flags, ip_addr_debug_print(SOCKETS_DEBUG, fromaddr); LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, off)); -#ifdef LWIP_ESP8266 if (from && fromlen) -#else - -#if SOCKETS_DEBUG - if (from && fromlen) -#endif /* SOCKETS_DEBUG 
*/ - -#endif { if (*fromlen > saddr.sa.sa_len) { *fromlen = saddr.sa.sa_len; } MEMCPY(from, &saddr, *fromlen); - -#ifdef LWIP_ESP8266 +#if ESP_LWIP } else { /*fix the code for setting the UDP PROTO's remote infomation by liuh at 2014.8.27*/ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_UDP){ @@ -1439,7 +1417,7 @@ lwip_sendto(int s, const void *data, size_t size, int flags, SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port); } else { -#ifdef LWIP_ESP8266 +#if ESP_LWIP /*fix the code for getting the UDP proto's remote information by liuh at 2014.8.27*/ if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_UDP){ if(NETCONNTYPE_ISIPV6(netconn_type(sock->conn))) { @@ -1455,7 +1433,7 @@ lwip_sendto(int s, const void *data, size_t size, int flags, #endif remote_port = 0; ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr); -#ifdef LWIP_ESP8266 +#if ESP_LWIP } #endif @@ -1988,7 +1966,7 @@ again: int lwip_shutdown(int s, int how) { -#ifndef LWIP_ESP8266 +#if ! ESP_LWIP struct lwip_sock *sock; err_t err; @@ -2395,6 +2373,16 @@ lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *opt s, *(int *)optval)); break; #endif /* LWIP_TCP_KEEPALIVE */ + +#if ESP_PER_SOC_TCP_WND + case TCP_WINDOW: + *(int*)optval = (int)sock->conn->pcb.tcp->per_soc_tcp_wnd; + break; + case TCP_SNDBUF: + *(int*)optval = (int)sock->conn->pcb.tcp->per_soc_tcp_snd_buf; + break; +#endif + default: LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", s, optname)); @@ -2792,6 +2780,16 @@ lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_ s, sock->conn->pcb.tcp->keep_cnt)); break; #endif /* LWIP_TCP_KEEPALIVE */ + +#if ESP_PER_SOC_TCP_WND + case TCP_WINDOW: + sock->conn->pcb.tcp->per_soc_tcp_wnd = ((u32_t)(*(const int*)optval)) * TCP_MSS; + break; + case TCP_SNDBUF: + sock->conn->pcb.tcp->per_soc_tcp_snd_buf = ((u32_t)(*(const int*)optval)) * TCP_MSS; + break; +#endif + default: LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n", s, optname)); @@ -3108,7 +3106,7 @@ static void lwip_socket_drop_registered_memberships(int s) } #endif /* LWIP_IGMP */ -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE int lwip_sendto_r(int s, const void *data, size_t size, int flags, diff --git a/components/lwip/api/tcpip.c b/components/lwip/api/tcpip.c index 9df3c38a1d..0ad60721e4 100755 --- a/components/lwip/api/tcpip.c +++ b/components/lwip/api/tcpip.c @@ -50,18 +50,13 @@ #include "lwip/pbuf.h" #include "netif/etharp.h" -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - #define TCPIP_MSG_VAR_REF(name) API_VAR_REF(name) #define TCPIP_MSG_VAR_DECLARE(name) API_VAR_DECLARE(struct tcpip_msg, name) #define TCPIP_MSG_VAR_ALLOC(name) API_VAR_ALLOC(struct tcpip_msg, MEMP_TCPIP_MSG_API, name) #define TCPIP_MSG_VAR_FREE(name) API_VAR_FREE(MEMP_TCPIP_MSG_API, name) /* global variables */ -#ifdef PERF +#if ESP_PERF uint32_t g_rx_post_mbox_fail_cnt = 0; #endif static tcpip_init_done_fn tcpip_init_done; @@ -144,13 +139,11 @@ tcpip_thread(void *arg) case TCPIP_MSG_INPKT: LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg)); -#ifdef LWIP_ESP8266 -//#if 0 +#if ESP_LWIP if(msg->msg.inp.p != NULL && msg->msg.inp.netif != NULL) { #endif msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif); -#ifdef LWIP_ESP8266 -//#if 0 +#if ESP_LWIP } #endif @@ -230,7 +223,7 @@ tcpip_inpkt(struct pbuf *p, struct netif *inp, netif_input_fn 
input_fn) msg->msg.inp.netif = inp; msg->msg.inp.input_fn = input_fn; if (sys_mbox_trypost(&mbox, msg) != ERR_OK) { -#ifdef PERF +#if ESP_PERF g_rx_post_mbox_fail_cnt ++; #endif memp_free(MEMP_TCPIP_MSG_INPKT, msg); @@ -503,7 +496,7 @@ tcpip_init(tcpip_init_done_fn initfunc, void *arg) #endif /* LWIP_TCPIP_CORE_LOCKING */ -#ifdef LWIP_ESP8266 +#if ESP_LWIP sys_thread_t xLwipTaskHandle = sys_thread_new(TCPIP_THREAD_NAME , tcpip_thread, NULL, TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO); @@ -548,8 +541,7 @@ pbuf_free_callback(struct pbuf *p) * @return ERR_OK if callback could be enqueued, an err_t if not */ -#ifdef LWIP_ESP8266 -//#if 0 +#if ESP_LWIP static void mem_free_local(void *arg) { mem_free(arg); diff --git a/components/lwip/apps/dhcpserver.c b/components/lwip/apps/dhcpserver.c index 4cdef4123d..22443e8cde 100644 --- a/components/lwip/apps/dhcpserver.c +++ b/components/lwip/apps/dhcpserver.c @@ -24,7 +24,7 @@ #include "apps/dhcpserver.h" -#ifdef LWIP_ESP8266 +#if ESP_DHCP #define BOOTP_BROADCAST 0x8000 @@ -71,10 +71,6 @@ #define DHCPS_STATE_IDLE 5 #define DHCPS_STATE_RELEASE 6 -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - //////////////////////////////////////////////////////////////////////////////////// static const u32_t magic_cookie = 0x63538263; diff --git a/components/lwip/core/dns.c b/components/lwip/core/dns.c index da8ac95b8d..8f0ac5cc81 100755 --- a/components/lwip/core/dns.c +++ b/components/lwip/core/dns.c @@ -85,10 +85,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - /** Random generator function to create random TXIDs and source ports for queries */ #ifndef DNS_RAND_TXID #if ((LWIP_DNS_SECURE & LWIP_DNS_SECURE_RAND_XID) != 0) @@ -1091,7 +1087,7 @@ dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, u8_t dns_err; /* This entry is now completed. */ -#ifndef LWIP_ESP8266 +#if ! ESP_DNS entry->state = DNS_STATE_DONE; #endif dns_err = hdr.flags2 & DNS_FLAG2_ERR_MASK; @@ -1105,7 +1101,7 @@ dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, if (((hdr.flags1 & DNS_FLAG1_RESPONSE) == 0) || (dns_err != 0) || (nquestions != 1)) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", entry->name)); /* call callback to indicate error, clean up memory and return */ -#ifndef LWIP_ESP8266 +#if ! ESP_DNS goto responseerr; } #else diff --git a/components/lwip/core/init.c b/components/lwip/core/init.c index 2a410d0e46..774e9a2beb 100755 --- a/components/lwip/core/init.c +++ b/components/lwip/core/init.c @@ -61,7 +61,7 @@ #include "lwip/api.h" #include "netif/ppp/ppp_impl.h" -#ifndef PERF +#if ! ESP_PERF /* Compile-time sanity checks for configuration errors. * These can be done independently of LWIP_DEBUG, without penalty. 
*/ @@ -135,13 +135,15 @@ //#endif #else /* LWIP_WND_SCALE */ -#ifndef LWIP_ESP8266 +#if (ESP_PER_SOC_TCP_WND == 0) #if (LWIP_TCP && (TCP_WND > 0xffff)) #error "If you want to use TCP, TCP_WND must fit in an u16_t, so, you have to reduce it in your lwipopts.h (or enable window scaling)" #endif #endif #endif /* LWIP_WND_SCALE */ + +#if (ESP_PER_SOC_TCP_WND == 0) #if (LWIP_TCP && (TCP_SND_QUEUELEN > 0xffff)) #error "If you want to use TCP, TCP_SND_QUEUELEN must fit in an u16_t, so, you have to reduce it in your lwipopts.h" #endif @@ -149,7 +151,6 @@ #error "TCP_SND_QUEUELEN must be at least 2 for no-copy TCP writes to work" #endif -#ifndef LWIP_ESP8266 #if (LWIP_TCP && ((TCP_MAXRTX > 12) || (TCP_SYNMAXRTX > 12))) #error "If you want to use TCP, TCP_MAXRTX and TCP_SYNMAXRTX must less or equal to 12 (due to tcp_backoff table), so, you have to reduce them in your lwipopts.h" #endif @@ -289,6 +290,8 @@ #if !MEMP_MEM_MALLOC && (MEMP_NUM_TCP_SEG < TCP_SND_QUEUELEN) #error "lwip_sanity_check: WARNING: MEMP_NUM_TCP_SEG should be at least as big as TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif + +#if (ESP_PER_SOC_TCP_WND == 0) #if TCP_SND_BUF < (2 * TCP_MSS) #error "lwip_sanity_check: WARNING: TCP_SND_BUF must be at least as much as (2 * TCP_MSS) for things to work smoothly. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif @@ -304,11 +307,13 @@ #if TCP_SNDQUEUELOWAT >= TCP_SND_QUEUELEN #error "lwip_sanity_check: WARNING: TCP_SNDQUEUELOWAT must be less than TCP_SND_QUEUELEN. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif +#endif + #if !MEMP_MEM_MALLOC && (PBUF_POOL_BUFSIZE <= (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)) #error "lwip_sanity_check: WARNING: PBUF_POOL_BUFSIZE does not provide enough space for protocol headers. If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." #endif -#ifndef LWIP_ESP8266 +#if ! ESP_LWIP #if !MEMP_MEM_MALLOC && (TCP_WND > (PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - (PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN)))) #error "lwip_sanity_check: WARNING: TCP_WND is larger than space provided by PBUF_POOL_SIZE * (PBUF_POOL_BUFSIZE - protocol headers). If you know what you are doing, define LWIP_DISABLE_TCP_SANITY_CHECKS to 1 to disable this error." 
#endif @@ -328,13 +333,6 @@ void lwip_init(void) { -#ifdef LWIP_ESP8266 -// MEMP_NUM_TCP_PCB = 5; -// TCP_WND = (4 * TCP_MSS); -// TCP_MAXRTX = 12; -// TCP_SYNMAXRTX = 6; -#endif - /* Modules initialization */ stats_init(); #if !NO_SYS diff --git a/components/lwip/core/ipv4/autoip.c b/components/lwip/core/ipv4/autoip.c index 391e8eeaed..19b1928368 100755 --- a/components/lwip/core/ipv4/autoip.c +++ b/components/lwip/core/ipv4/autoip.c @@ -76,11 +76,6 @@ #include #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /* 169.254.0.0 */ #define AUTOIP_NET 0xA9FE0000 /* 169.254.1.0 */ diff --git a/components/lwip/core/ipv4/dhcp.c b/components/lwip/core/ipv4/dhcp.c index 1f3758fa91..33d13fb326 100755 --- a/components/lwip/core/ipv4/dhcp.c +++ b/components/lwip/core/ipv4/dhcp.c @@ -82,10 +82,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - /** DHCP_CREATE_RAND_XID: if this is set to 1, the xid is created using * LWIP_RAND() (this overrides DHCP_GLOBAL_XID) */ @@ -146,7 +142,7 @@ static u8_t dhcp_discover_select_options[] = { DHCP_OPTION_BROADCAST, DHCP_OPTION_DNS_SERVER -#ifdef LWIP_ESP8266 +#if ESP_DHCP /**add options for support more router by liuHan**/ , DHCP_OPTION_DOMAIN_NAME, DHCP_OPTION_NB_TINS, @@ -454,7 +450,7 @@ dhcp_fine_tmr(void) /* only act on DHCP configured interfaces */ if (netif->dhcp != NULL) { -//#ifdef LWIP_ESP8266 +//#if ESP_DHCP /*add DHCP retries processing by LiuHan*/ #if 0 if (DHCP_MAXRTX != 0) { @@ -997,7 +993,7 @@ dhcp_discover(struct netif *netif) dhcp_option(dhcp, DHCP_OPTION_MAX_MSG_SIZE, DHCP_OPTION_MAX_MSG_SIZE_LEN); dhcp_option_short(dhcp, DHCP_MAX_MSG_LEN(netif)); -#ifdef LWIP_ESP8266 +#if ESP_DHCP #if LWIP_NETIF_HOSTNAME dhcp_option_hostname(dhcp, netif); #endif /* LWIP_NETIF_HOSTNAME */ diff --git a/components/lwip/core/ipv4/icmp.c b/components/lwip/core/ipv4/icmp.c index c492ed75fe..9202bb650c 100755 --- a/components/lwip/core/ipv4/icmp.c +++ b/components/lwip/core/ipv4/icmp.c @@ -51,11 +51,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /** Small optimization: set to 0 if incoming PBUF_POOL pbuf always can be * used to modify and send a response packet (and to 1 if this is not the case, * e.g. when link header is stripped of when receiving) */ diff --git a/components/lwip/core/ipv4/igmp.c b/components/lwip/core/ipv4/igmp.c index d75fe15fd0..03f3ae384b 100755 --- a/components/lwip/core/ipv4/igmp.c +++ b/components/lwip/core/ipv4/igmp.c @@ -92,11 +92,6 @@ Steve Reynolds #include "string.h" -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /* * IGMP constants */ diff --git a/components/lwip/core/ipv4/ip4.c b/components/lwip/core/ipv4/ip4.c index 5f1e77a5e7..1d581d4d85 100755 --- a/components/lwip/core/ipv4/ip4.c +++ b/components/lwip/core/ipv4/ip4.c @@ -59,11 +59,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /** Set this to 0 in the rare case of wanting to call an extra function to * generate the IP checksum (in contrast to calculating it on-the-fly). 
*/ #ifndef LWIP_INLINE_IP_CHKSUM @@ -150,7 +145,7 @@ ip4_route_src(const ip4_addr_t *dest, const ip4_addr_t *src) struct netif * ip4_route(const ip4_addr_t *dest) { -#ifdef LWIP_ESP8266 +#if ESP_LWIP struct netif *non_default_netif = NULL; #endif struct netif *netif; @@ -183,7 +178,7 @@ ip4_route(const ip4_addr_t *dest) } } -#ifdef LWIP_ESP8266 +#if ESP_LWIP if (non_default_netif && !ip4_addr_isbroadcast(dest, non_default_netif)){ return non_default_netif; } diff --git a/components/lwip/core/ipv4/ip4_addr.c b/components/lwip/core/ipv4/ip4_addr.c index 3053cf087e..0501b84e5f 100755 --- a/components/lwip/core/ipv4/ip4_addr.c +++ b/components/lwip/core/ipv4/ip4_addr.c @@ -45,17 +45,8 @@ /* used by IP_ADDR_ANY and IP_ADDR_BROADCAST in ip_addr.h */ -#ifdef LWIP_ESP8266 -//TO_DO -//const ip_addr_t ip_addr_any ICACHE_RODATA_ATTR STORE_ATTR = IPADDR4_INIT(IPADDR_ANY); -//const ip_addr_t ip_addr_broadcast ICACHE_RODATA_ATTR STORE_ATTR = IPADDR4_INIT(IPADDR_BROADCAST); const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); -#else -const ip_addr_t ip_addr_any = IPADDR4_INIT(IPADDR_ANY); -const ip_addr_t ip_addr_broadcast = IPADDR4_INIT(IPADDR_BROADCAST); -#endif - /** * Determine if an address is a broadcast address on a network interface @@ -170,7 +161,7 @@ ip4addr_aton(const char *cp, ip4_addr_t *addr) u32_t parts[4]; u32_t *pp = parts; -#ifdef LWIP_ESP8266 +#if ESP_LWIP //#if 0 char ch; unsigned long cutoff; @@ -199,8 +190,7 @@ ip4addr_aton(const char *cp, ip4_addr_t *addr) } } -#ifdef LWIP_ESP8266 -//#if 0 +#if ESP_IP4_ATON cutoff =(unsigned long)0xffffffff / (unsigned long)base; cutlim =(unsigned long)0xffffffff % (unsigned long)base; for (;;) { diff --git a/components/lwip/core/ipv4/ip_frag.c b/components/lwip/core/ipv4/ip_frag.c index 1e6b053e6f..a647433506 100755 --- a/components/lwip/core/ipv4/ip_frag.c +++ b/components/lwip/core/ipv4/ip_frag.c @@ -51,11 +51,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - #if IP_REASSEMBLY /** * The IP reassembly code currently has the following limitations: diff --git a/components/lwip/core/ipv6/icmp6.c b/components/lwip/core/ipv6/icmp6.c index 0a17da33e1..013983bde1 100755 --- a/components/lwip/core/ipv6/icmp6.c +++ b/components/lwip/core/ipv6/icmp6.c @@ -56,10 +56,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - #ifndef LWIP_ICMP6_DATASIZE #define LWIP_ICMP6_DATASIZE 8 #endif diff --git a/components/lwip/core/ipv6/ip6.c b/components/lwip/core/ipv6/ip6.c index 056d33355f..380bc290cd 100755 --- a/components/lwip/core/ipv6/ip6.c +++ b/components/lwip/core/ipv6/ip6.c @@ -59,10 +59,6 @@ #include "lwip/debug.h" #include "lwip/stats.h" -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - /** * Finds the appropriate network interface for a given IPv6 address. 
It tries to select * a netif following a sequence of heuristics: diff --git a/components/lwip/core/ipv6/ip6_frag.c b/components/lwip/core/ipv6/ip6_frag.c index 0792c2e1be..c9e13cd208 100755 --- a/components/lwip/core/ipv6/ip6_frag.c +++ b/components/lwip/core/ipv6/ip6_frag.c @@ -52,11 +52,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - #if LWIP_IPV6 && LWIP_IPV6_REASS /* don't build if not configured for use in lwipopts.h */ diff --git a/components/lwip/core/ipv6/mld6.c b/components/lwip/core/ipv6/mld6.c index 6a2d55c549..489c5063a7 100755 --- a/components/lwip/core/ipv6/mld6.c +++ b/components/lwip/core/ipv6/mld6.c @@ -59,11 +59,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /* * MLD constants */ diff --git a/components/lwip/core/ipv6/nd6.c b/components/lwip/core/ipv6/nd6.c index 39e7bfed03..36f8f78c35 100755 --- a/components/lwip/core/ipv6/nd6.c +++ b/components/lwip/core/ipv6/nd6.c @@ -60,11 +60,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /* Router tables. */ struct nd6_neighbor_cache_entry neighbor_cache[LWIP_ND6_NUM_NEIGHBORS]; struct nd6_destination_cache_entry destination_cache[LWIP_ND6_NUM_DESTINATIONS]; diff --git a/components/lwip/core/mem.c b/components/lwip/core/mem.c index 42df6daeba..9ca9e3c4fc 100755 --- a/components/lwip/core/mem.c +++ b/components/lwip/core/mem.c @@ -65,10 +65,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - #if MEM_USE_POOLS #if MEMP_MEM_MALLOC diff --git a/components/lwip/core/memp.c b/components/lwip/core/memp.c index a5169abc81..7895533652 100755 --- a/components/lwip/core/memp.c +++ b/components/lwip/core/memp.c @@ -70,10 +70,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - #define LWIP_MEMPOOL(name,num,size,desc) LWIP_MEMPOOL_DECLARE(name,num,size,desc) #include "lwip/priv/memp_std.h" diff --git a/components/lwip/core/netif.c b/components/lwip/core/netif.c index 33e030412a..5c308a957c 100755 --- a/components/lwip/core/netif.c +++ b/components/lwip/core/netif.c @@ -81,10 +81,6 @@ #define NETIF_LINK_CALLBACK(n) #endif /* LWIP_NETIF_LINK_CALLBACK */ -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - struct netif *netif_list; struct netif *netif_default; @@ -220,7 +216,7 @@ netif_add(struct netif *netif, /* netif not under DHCP control by default */ netif->dhcp = NULL; -#ifdef LWIP_ESP8266 +#if ESP_DHCP netif->dhcps_pcb = NULL; #endif @@ -233,8 +229,7 @@ netif_add(struct netif *netif, #endif /* LWIP_AUTOIP */ #if LWIP_IPV6_AUTOCONFIG -#ifdef LWIP_ESP8266 -//#if 0 +#if ESP_IPV6_AUTOCONFIG netif->ip6_autoconfig_enabled = 1; #else /* IPv6 address autoconfiguration not enabled by default */ @@ -973,7 +968,7 @@ netif_create_ip6_linklocal_address(struct netif *netif, u8_t from_mac_48bit) } } -#ifdef LWIP_ESP8266 +#if ESP_LWIP ip6_addr_set( ip_2_ip6(&netif->link_local_addr), ip_2_ip6(&netif->ip6_addr[0]) ); #endif @@ -1028,7 +1023,7 @@ netif_add_ip6_address(struct netif *netif, const ip6_addr_t *ip6addr, s8_t *chos } -#ifdef LWIP_ESP8266 +#if ESP_LWIP void netif_create_ip4_linklocal_address(struct netif * netif) { diff --git a/components/lwip/core/pbuf.c b/components/lwip/core/pbuf.c index e35f8a6b7f..29e24ef2b4 100755 
--- a/components/lwip/core/pbuf.c +++ b/components/lwip/core/pbuf.c @@ -78,12 +78,8 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - -#ifdef LWIP_ESP8266 -#define EP_OFFSET 0 +#if ESP_LWIP +#include "esp_wifi_internal.h" #endif #define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf)) @@ -207,12 +203,7 @@ struct pbuf * pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) { struct pbuf *p, *q, *r; - -#ifdef LWIP_ESP8266 - u16_t offset = 0; -#else - u16_t offset; -#endif + u16_t offset = 0; s32_t rem_len; /* remaining length */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length)); @@ -223,48 +214,16 @@ pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) /* add room for transport (often TCP) layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN; -#ifdef LWIP_ESP8266 //TO_DO - offset += EP_OFFSET; -#endif - break; case PBUF_IP: /* add room for IP layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN; -#ifdef LWIP_ESP8266 //TO_DO - offset += EP_OFFSET; -#endif - break; case PBUF_LINK: /* add room for link layer header */ offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN; -#ifdef LWIP_ESP8266 //TO_DO - /* - * 1. LINK_HLEN 14Byte will be remove in WLAN layer - * 2. IEEE80211_HDR_MAX_LEN needs 40 bytes. - * 3. encryption needs exra 4 bytes ahead of actual data payload, and require - * DAddr and SAddr to be 4-byte aligned. - * 4. TRANSPORT and IP are all 20, 4 bytes aligned, nice... - * 5. LCC add 6 bytes more, We don't consider WAPI yet... - * 6. define LWIP_MEM_ALIGN to be 4 Byte aligned, pbuf struct is 16B, Only thing may be - * matter is ether_hdr is not 4B aligned. - * - * So, we need extra (40 + 4 - 14) = 30 and it's happen to be 4-Byte aligned - * - * 1. lwip - * | empty 30B | eth_hdr (14B) | payload ...| - * total: 44B ahead payload - * 2. net80211 - * | max 80211 hdr, 32B | ccmp/tkip iv (8B) | sec rsv(4B) | payload ...| - * total: 40B ahead sec_rsv and 44B ahead payload - * - */ - offset += EP_OFFSET; //remove LINK hdr in wlan -#endif - break; case PBUF_RAW_TX: /* add room for encapsulating link layer headers (e.g. 802.11) */ @@ -273,10 +232,6 @@ pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) case PBUF_RAW: offset = 0; -#ifdef LWIP_ESP8266 //TO_DO - offset += EP_OFFSET; -#endif - break; default: LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0); @@ -395,9 +350,10 @@ pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type) /* set flags */ p->flags = 0; -#ifdef LWIP_ESP8266 +#if ESP_LWIP p->eb = NULL; #endif + LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p)); return p; } @@ -763,9 +719,8 @@ pbuf_free(struct pbuf *p) /* is this a ROM or RAM referencing pbuf? 
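For context on the `eb` handling in the pbuf_free() hunk just below: under ESP_LWIP an RX pbuf handed up by the WiFi driver keeps the driver's buffer handle in the pbuf's new `eb` field (added to struct pbuf later in this patch), and pbuf_free() now returns that handle through esp_wifi_internal_free_rx_buffer() instead of the old system_pp_recycle_rx_pkt(). A minimal sketch of how such a zero-copy RX pbuf could be set up; this is illustrative glue only, not the actual wlanif_input() implementation, which is only partially visible in this diff:

```c
#include "lwip/pbuf.h"

/* Illustrative glue only -- NOT the actual wlanif_input() body.  It shows
 * how an RX buffer handed up by the WiFi driver could be wrapped zero-copy
 * in a PBUF_REF pbuf, with the driver's buffer handle parked in the
 * ESP_LWIP `eb` field so that pbuf_free() can return it via
 * esp_wifi_internal_free_rx_buffer(). */
static struct pbuf *wrap_wifi_rx_buffer(void *buffer, u16_t len, void *eb)
{
    struct pbuf *p = pbuf_alloc(PBUF_RAW, len, PBUF_REF);
    if (p == NULL) {
        /* nothing was attached: the caller still owns `eb` and must
         * recycle it itself */
        return NULL;
    }
    p->payload = buffer;   /* PBUF_REF: point at the driver buffer, no copy */
    p->eb = eb;            /* recycled by the pbuf_free() hunk below */
    return p;
}
```

The failure path matters: if pbuf_alloc() returns NULL the driver buffer is never attached to a pbuf, so responsibility for releasing it stays with the caller.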
*/ } else if (type == PBUF_ROM || type == PBUF_REF) { -#ifdef LWIP_ESP8266 - extern void system_pp_recycle_rx_pkt(void*); - if (type == PBUF_REF && p->eb != NULL ) system_pp_recycle_rx_pkt(p->eb); +#if ESP_LWIP + if (type == PBUF_REF && p->eb != NULL ) esp_wifi_internal_free_rx_buffer(p->eb); #endif memp_free(MEMP_PBUF, p); diff --git a/components/lwip/core/raw.c b/components/lwip/core/raw.c index 72a58d381d..82ce4e3a73 100755 --- a/components/lwip/core/raw.c +++ b/components/lwip/core/raw.c @@ -54,10 +54,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - /** The list of RAW PCBs */ static struct raw_pcb *raw_pcbs; diff --git a/components/lwip/core/stats.c b/components/lwip/core/stats.c index 77ac3c675e..b47ab0b7fa 100755 --- a/components/lwip/core/stats.c +++ b/components/lwip/core/stats.c @@ -47,10 +47,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - struct stats_ lwip_stats; #if defined(LWIP_DEBUG) || LWIP_STATS_DISPLAY diff --git a/components/lwip/core/tcp.c b/components/lwip/core/tcp.c index e8fda52c8d..87ddf5f1a7 100755 --- a/components/lwip/core/tcp.c +++ b/components/lwip/core/tcp.c @@ -57,10 +57,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - #ifndef TCP_LOCAL_PORT_RANGE_START /* From http://www.iana.org/assignments/port-numbers: "The Dynamic and/or Private Ports are those from 49152 through 65535" */ @@ -77,14 +73,7 @@ static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; #define TCP_KEEP_INTVL(pcb) TCP_KEEPINTVL_DEFAULT #endif /* LWIP_TCP_KEEPALIVE */ -#ifdef LWIP_ESP8266 -//TO_DO -//char tcp_state_str[12]; -//const char tcp_state_str_rodata[][12] ICACHE_RODATA_ATTR STORE_ATTR = { const char * const tcp_state_str[] = { -#else -const char * const tcp_state_str[] = { -#endif "CLOSED", "LISTEN", "SYN_SENT", @@ -100,27 +89,14 @@ const char * const tcp_state_str[] = { /* last local TCP port */ -#ifdef LWIP_ESP8266 static s16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; -#else -static u16_t tcp_port = TCP_LOCAL_PORT_RANGE_START; -#endif /* Incremented every coarse grained timer shot (typically every 500 ms). */ u32_t tcp_ticks; -#ifdef LWIP_ESP8266 -//TO_DO -//const u8_t tcp_backoff[13] ICACHE_RODATA_ATTR STORE_ATTR ={ 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; -//const u8_t tcp_persist_backoff[7] ICACHE_RODATA_ATTR STORE_ATTR = { 3, 6, 12, 24, 48, 96, 120 }; - -const u8_t tcp_backoff[13] = { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; -const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; -#else const u8_t tcp_backoff[13] = { 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7}; /* Times per slowtmr hits */ const u8_t tcp_persist_backoff[7] = { 3, 6, 12, 24, 48, 96, 120 }; -#endif /* The TCP PCB lists. 
*/ @@ -136,19 +112,9 @@ struct tcp_pcb *tcp_active_pcbs; struct tcp_pcb *tcp_tw_pcbs; /** An array with all (non-temporary) PCB lists, mainly used for smaller code size */ -#ifdef LWIP_ESP8266 -//TO_DO -//struct tcp_pcb ** const tcp_pcb_lists[] ICACHE_RODATA_ATTR STORE_ATTR = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, - // &tcp_active_pcbs, &tcp_tw_pcbs}; struct tcp_pcb ** const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, &tcp_active_pcbs, &tcp_tw_pcbs}; -#else -struct tcp_pcb ** const tcp_pcb_lists[] = {&tcp_listen_pcbs.pcbs, &tcp_bound_pcbs, - &tcp_active_pcbs, &tcp_tw_pcbs}; -#endif - - u8_t tcp_active_pcbs_changed; /** Timer counter to handle calling slow-timer from tcp_tmr() */ @@ -638,7 +604,7 @@ u32_t tcp_update_rcv_ann_wnd(struct tcp_pcb *pcb) { u32_t new_right_edge = pcb->rcv_nxt + pcb->rcv_wnd; - if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND / 2), pcb->mss))) { + if (TCP_SEQ_GEQ(new_right_edge, pcb->rcv_ann_right_edge + LWIP_MIN((TCP_WND(pcb) / 2), pcb->mss))) { /* we can advertise more window */ pcb->rcv_ann_wnd = pcb->rcv_wnd; return new_right_edge - pcb->rcv_ann_right_edge; @@ -694,10 +660,10 @@ tcp_recved(struct tcp_pcb *pcb, u16_t len) wnd_inflation = tcp_update_rcv_ann_wnd(pcb); /* If the change in the right edge of window is significant (default - * watermark is TCP_WND/4), then send an explicit update now. + * watermark is TCP_WND(pcb)/4), then send an explicit update now. * Otherwise wait for a packet to be sent in the normal course of * events (or more window to be available later) */ - if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD) { + if (wnd_inflation >= TCP_WND_UPDATE_THRESHOLD(pcb)) { tcp_ack_now(pcb); tcp_output(pcb); } @@ -720,7 +686,7 @@ tcp_new_port(void) again: -#ifdef LWIP_ESP8266 +#if ESP_RANDOM_TCP_PORT tcp_port = system_get_time(); if (tcp_port < 0) tcp_port = LWIP_RAND() - tcp_port; @@ -827,9 +793,9 @@ tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, pcb->snd_lbb = iss - 1; /* Start with a window that does not need scaling. When window scaling is enabled and used, the window is enlarged when both sides agree on scaling. */ - pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND(pcb)); pcb->rcv_ann_right_edge = pcb->rcv_nxt; - pcb->snd_wnd = TCP_WND; + pcb->snd_wnd = TCP_WND(pcb); /* As initial send MSS, we use TCP_MSS but limit it to 536. The send MSS is updated when an MSS option is received. */ pcb->mss = (TCP_MSS > 536) ? 536 : TCP_MSS; @@ -837,7 +803,7 @@ tcp_connect(struct tcp_pcb *pcb, const ip_addr_t *ipaddr, u16_t port, pcb->mss = tcp_eff_send_mss(pcb->mss, &pcb->local_ip, &pcb->remote_ip); #endif /* TCP_CALCULATE_EFF_SEND_MSS */ pcb->cwnd = 1; - pcb->ssthresh = TCP_WND; + pcb->ssthresh = TCP_WND(pcb); #if LWIP_CALLBACK_API pcb->connected = connected; #else /* LWIP_CALLBACK_API */ @@ -915,13 +881,7 @@ tcp_slowtmr_start: /* If snd_wnd is zero, use persist timer to send 1 byte probes * instead of using the standard retransmission mechanism. */ -#ifdef LWIP_ESP8266 -//NEED TO DO - //u8_t backoff_cnt = system_get_data_of_array_8(tcp_persist_backoff, pcb->persist_backoff-1); u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff-1]; -#else - u8_t backoff_cnt = tcp_persist_backoff[pcb->persist_backoff-1]; -#endif if (pcb->persist_cnt < backoff_cnt) { pcb->persist_cnt++; @@ -949,15 +909,7 @@ tcp_slowtmr_start: /* Double retransmission time-out unless we are trying to * connect to somebody (i.e., we are in SYN_SENT). 
*/ if (pcb->state != SYN_SENT) { - -#ifdef LWIP_ESP8266 -//TO_DO -// pcb->rto = ((pcb->sa >> 3) + pcb->sv) << system_get_data_of_array_8(tcp_backoff, pcb->nrtx); pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[pcb->nrtx]; -#else - pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[pcb->nrtx]; -#endif - } /* Reset the retransmission timer. */ @@ -1436,7 +1388,7 @@ tcp_kill_timewait(void) } } -#ifdef LWIP_ESP8266 +#if ESP_LWIP /** * Kills the oldest connection that is in FIN_WAIT_2 state. * Called from tcp_alloc() if no more connections are available. @@ -1502,7 +1454,7 @@ tcp_alloc(u8_t prio) struct tcp_pcb *pcb; u32_t iss; -#ifdef LWIP_ESP8266 +#if ESP_LWIP /*Kills the oldest connection that is in TIME_WAIT state.*/ u8_t time_wait_num = 0; for(pcb = tcp_tw_pcbs; pcb != NULL; pcb = pcb->next) { @@ -1581,11 +1533,11 @@ tcp_alloc(u8_t prio) if (pcb != NULL) { memset(pcb, 0, sizeof(struct tcp_pcb)); pcb->prio = prio; - pcb->snd_buf = TCP_SND_BUF; + pcb->snd_buf = TCP_SND_BUF_DEFAULT; pcb->snd_queuelen = 0; /* Start with a window that does not need scaling. When window scaling is enabled and used, the window is enlarged when both sides agree on scaling. */ - pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCPWND_MIN16(TCP_WND(pcb)); #if LWIP_WND_SCALE /* snd_scale and rcv_scale are zero unless both sides agree to use scaling */ pcb->snd_scale = 0; @@ -1608,7 +1560,6 @@ tcp_alloc(u8_t prio) pcb->snd_lbb = iss; pcb->tmr = tcp_ticks; pcb->last_timer = tcp_timer_ctr; - pcb->polltmr = 0; #if LWIP_CALLBACK_API @@ -1624,7 +1575,13 @@ tcp_alloc(u8_t prio) #endif /* LWIP_TCP_KEEPALIVE */ pcb->keep_cnt_sent = 0; + +#if ESP_PER_SOC_TCP_WND + pcb->per_soc_tcp_wnd = TCP_WND_DEFAULT; + pcb->per_soc_tcp_snd_buf = TCP_SND_BUF_DEFAULT; +#endif } + return pcb; } @@ -2010,14 +1967,7 @@ void tcp_netif_ipv4_addr_changed(const ip4_addr_t* old_addr, const ip4_addr_t* n const char* tcp_debug_state_str(enum tcp_state s) { -#ifdef LWIP_ESP8266 -//TO_DO - //system_get_string_from_flash(tcp_state_str_rodata[s], tcp_state_str, 12); - //return tcp_state_str; return tcp_state_str[s]; -#else - return tcp_state_str[s]; -#endif } #if TCP_DEBUG || TCP_INPUT_DEBUG || TCP_OUTPUT_DEBUG diff --git a/components/lwip/core/tcp_in.c b/components/lwip/core/tcp_in.c index 25d7403851..f3284233e7 100755 --- a/components/lwip/core/tcp_in.c +++ b/components/lwip/core/tcp_in.c @@ -60,11 +60,6 @@ #include "lwip/nd6.h" #endif /* LWIP_ND6_TCP_REACHABILITY_HINTS */ -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /** Initial CWND calculation as defined RFC 2581 */ #define LWIP_TCP_CALC_INITIAL_CWND(mss) LWIP_MIN((4U * (mss)), LWIP_MAX((2U * (mss)), 4380U)); /** Initial slow start threshold value: we use the full window */ @@ -329,20 +324,6 @@ tcp_input(struct pbuf *p, struct netif *inp) if (pcb != NULL) { - -#ifdef LWIP_ESP8266 -//No Need Any more -/* - extern char RxNodeNum(void); - if(RxNodeNum() <= 2) - { -extern void pbuf_free_ooseq(void); - pbuf_free_ooseq(); - } -*/ -#endif - - /* The incoming segment belongs to a connection. 
*/ #if TCP_INPUT_DEBUG tcp_debug_print_state(pcb->state); @@ -1745,9 +1726,9 @@ tcp_parseopt(struct tcp_pcb *pcb) pcb->rcv_scale = TCP_RCV_SCALE; pcb->flags |= TF_WND_SCALE; /* window scaling is enabled, we can use the full receive window */ - LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND)); - LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND)); - pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND; + LWIP_ASSERT("window not at default value", pcb->rcv_wnd == TCPWND_MIN16(TCP_WND(pcb))); + LWIP_ASSERT("window not at default value", pcb->rcv_ann_wnd == TCPWND_MIN16(TCP_WND(pcb))); + pcb->rcv_wnd = pcb->rcv_ann_wnd = TCP_WND(pcb); } break; #endif diff --git a/components/lwip/core/tcp_out.c b/components/lwip/core/tcp_out.c index aac02e4ebe..f189623f5c 100755 --- a/components/lwip/core/tcp_out.c +++ b/components/lwip/core/tcp_out.c @@ -59,10 +59,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - /* Define some copy-macros for checksum-on-copy so that the code looks nicer by preventing too many ifdef's. */ #if TCP_CHECKSUM_ON_COPY @@ -336,9 +332,9 @@ tcp_write_checks(struct tcp_pcb *pcb, u16_t len) /* If total number of pbufs on the unsent/unacked queues exceeds the * configured maximum, return an error */ /* check for configured max queuelen and possible overflow */ - if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { + if ((pcb->snd_queuelen >= TCP_SND_QUEUELEN(pcb)) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n", - pcb->snd_queuelen, TCP_SND_QUEUELEN)); + pcb->snd_queuelen, TCP_SND_QUEUELEN(pcb))); TCP_STATS_INC(tcp.memerr); pcb->flags |= TF_NAGLEMEMERR; return ERR_MEM; @@ -606,9 +602,9 @@ tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags) /* Now that there are more segments queued, we check again if the * length of the queue exceeds the configured maximum or * overflows. */ - if ((queuelen > TCP_SND_QUEUELEN) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { + if ((queuelen > TCP_SND_QUEUELEN(pcb)) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n", - queuelen, (int)TCP_SND_QUEUELEN)); + queuelen, (int)TCP_SND_QUEUELEN(pcb))); pbuf_free(p); goto memerr; } @@ -766,10 +762,10 @@ tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags) (flags & (TCP_SYN | TCP_FIN)) != 0); /* check for configured max queuelen and possible overflow (FIN flag should always come through!) 
*/ - if (((pcb->snd_queuelen >= TCP_SND_QUEUELEN) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) && + if (((pcb->snd_queuelen >= TCP_SND_QUEUELEN(pcb)) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) && ((flags & TCP_FIN) == 0)) { LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n", - pcb->snd_queuelen, TCP_SND_QUEUELEN)); + pcb->snd_queuelen, TCP_SND_QUEUELEN(pcb))); TCP_STATS_INC(tcp.memerr); pcb->flags |= TF_NAGLEMEMERR; return ERR_MEM; @@ -1301,6 +1297,7 @@ tcp_rst(u32_t seqno, u32_t ackno, struct pbuf *p; struct tcp_hdr *tcphdr; struct netif *netif; + p = pbuf_alloc(PBUF_IP, TCP_HLEN, PBUF_RAM); if (p == NULL) { LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n")); @@ -1315,10 +1312,18 @@ tcp_rst(u32_t seqno, u32_t ackno, tcphdr->seqno = htonl(seqno); tcphdr->ackno = htonl(ackno); TCPH_HDRLEN_FLAGS_SET(tcphdr, TCP_HLEN/4, TCP_RST | TCP_ACK); +#if ESP_PER_SOC_TCP_WND +#if LWIP_WND_SCALE + tcphdr->wnd = PP_HTONS(((TCP_WND_DEFAULT >> TCP_RCV_SCALE) & 0xFFFF)); +#else + tcphdr->wnd = PP_HTONS(TCP_WND_DEFAULT); +#endif +#else #if LWIP_WND_SCALE tcphdr->wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF)); #else tcphdr->wnd = PP_HTONS(TCP_WND); +#endif #endif tcphdr->chksum = 0; tcphdr->urgp = 0; diff --git a/components/lwip/core/timers.c b/components/lwip/core/timers.c index 0a361474eb..ef47b2e187 100755 --- a/components/lwip/core/timers.c +++ b/components/lwip/core/timers.c @@ -62,11 +62,6 @@ #include "lwip/sys.h" #include "lwip/pbuf.h" -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - /** The one and only timeout list */ static struct sys_timeo *next_timeout; #if NO_SYS @@ -162,7 +157,7 @@ dhcp_timer_coarse(void *arg) LWIP_DEBUGF(TIMERS_DEBUG, ("tcpip: dhcp_coarse_tmr()\n")); dhcp_coarse_tmr(); -#ifdef LWIP_ESP8266 +#if ESP_DHCP extern void dhcps_coarse_tmr(void); dhcps_coarse_tmr(); #endif @@ -294,12 +289,6 @@ void sys_timeouts_init(void) #endif /* LWIP_ARP */ #if LWIP_DHCP -#ifdef LWIP_ESP8266 - // DHCP_MAXRTX = 0; -#endif - - - sys_timeout(DHCP_COARSE_TIMER_MSECS, dhcp_timer_coarse, NULL); sys_timeout(DHCP_FINE_TIMER_MSECS, dhcp_timer_fine, NULL); #endif /* LWIP_DHCP */ @@ -346,7 +335,7 @@ void sys_timeout_debug(u32_t msecs, sys_timeout_handler handler, void *arg, const char* handler_name) #else /* LWIP_DEBUG_TIMERNAMES */ -#ifdef LWIP_ESP8266 +#if ESP_LIGHT_SLEEP u32_t LwipTimOutLim = 0; // For light sleep. time out. 
limit is 3000ms #endif @@ -379,7 +368,7 @@ sys_timeout(u32_t msecs, sys_timeout_handler handler, void *arg) timeout->h = handler; timeout->arg = arg; -#ifdef LWIP_ESP8266 +#if ESP_LIGHT_SLEEP if(msecs < LwipTimOutLim) msecs = LwipTimOutLim; #endif diff --git a/components/lwip/core/udp.c b/components/lwip/core/udp.c index e44ab7e73d..37ae2c1796 100755 --- a/components/lwip/core/udp.c +++ b/components/lwip/core/udp.c @@ -67,11 +67,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - - #ifndef UDP_LOCAL_PORT_RANGE_START /* From http://www.iana.org/assignments/port-numbers: "The Dynamic and/or Private Ports are those from 49152 through 65535" */ diff --git a/components/lwip/include/lwip/lwip/api.h b/components/lwip/include/lwip/lwip/api.h index 985eb76d4a..5b6a21ecf3 100755 --- a/components/lwip/include/lwip/lwip/api.h +++ b/components/lwip/include/lwip/lwip/api.h @@ -185,10 +185,6 @@ struct netconn { /** sem that is used to synchronously execute functions in the core context */ sys_sem_t op_completed; -#ifdef LWIP_ESP8266 - sys_sem_t snd_op_completed; //only for snd semphore -#endif - #endif /** mbox where received packets are stored until they are fetched diff --git a/components/lwip/include/lwip/lwip/dhcp.h b/components/lwip/include/lwip/lwip/dhcp.h index 2d8926eca6..76ce1543ff 100755 --- a/components/lwip/include/lwip/lwip/dhcp.h +++ b/components/lwip/include/lwip/lwip/dhcp.h @@ -249,7 +249,7 @@ void dhcp_fine_tmr(void); #define DHCP_OPTION_NTP 42 #define DHCP_OPTION_END 255 -#ifdef LWIP_ESP8266 +#if ESP_LWIP /**add options for support more router by liuHan**/ #define DHCP_OPTION_DOMAIN_NAME 15 #define DHCP_OPTION_PRD 31 diff --git a/components/lwip/include/lwip/lwip/dns.h b/components/lwip/include/lwip/lwip/dns.h index 1ceed0d883..5ef12e56c2 100755 --- a/components/lwip/include/lwip/lwip/dns.h +++ b/components/lwip/include/lwip/lwip/dns.h @@ -36,7 +36,7 @@ #include "lwip/opt.h" -#ifdef LWIP_ESP8266 +#if ESP_DNS #include "lwip/err.h" #endif diff --git a/components/lwip/include/lwip/lwip/err.h b/components/lwip/include/lwip/lwip/err.h index 26fb91db9b..a766ee186d 100755 --- a/components/lwip/include/lwip/lwip/err.h +++ b/components/lwip/include/lwip/lwip/err.h @@ -60,7 +60,7 @@ typedef s8_t err_t; #define ERR_USE -8 /* Address in use. */ -#ifdef LWIP_ESP8266 +#if ESP_LWIP #define ERR_ALREADY -9 /* Already connected. */ #define ERR_ISCONN -10 /* Conn already established.*/ #define ERR_IS_FATAL(e) ((e) < ERR_ISCONN) diff --git a/components/lwip/include/lwip/lwip/mem.h b/components/lwip/include/lwip/lwip/mem.h index ca76f66322..a90d07256b 100755 --- a/components/lwip/include/lwip/lwip/mem.h +++ b/components/lwip/include/lwip/lwip/mem.h @@ -51,8 +51,6 @@ typedef size_t mem_size_t; * allow these defines to be overridden. 
*/ -#ifndef MEMLEAK_DEBUG - #ifndef mem_free #define mem_free free #endif @@ -63,41 +61,6 @@ typedef size_t mem_size_t; #define mem_calloc calloc #endif -/* DYC_NEED_TO_DO_LATER -#ifndef mem_realloc -#define mem_realloc -#endif -#ifndef mem_zalloc -#define mem_zalloc -#endif -*/ - -#else -/* -#ifndef mem_free -#define mem_free(s) \ - do{\ - const char *file = mem_debug_file;\ - vPortFree(s, file, __LINE__);\ - }while(0) -#endif -#ifndef mem_malloc -#define mem_malloc(s) ({const char *file = mem_debug_file; pvPortMalloc(s, file, __LINE__);}) -#endif -#ifndef mem_calloc -#define mem_calloc(s) ({const char *file = mem_debug_file; pvPortCalloc(s, file, __LINE__);}) -#endif -#ifndef mem_realloc -#define mem_realloc(p, s) ({const char *file = mem_debug_file; pvPortRealloc(p, s, file, __LINE__);}) -#endif -#ifndef mem_zalloc -#define mem_zalloc(s) ({const char *file = mem_debug_file; pvPortZalloc(s, file, __LINE__);}) -#endif -*/ -#endif - - - /* Since there is no C library allocation function to shrink memory without moving it, define this to nothing. */ #ifndef mem_trim diff --git a/components/lwip/include/lwip/lwip/netif.h b/components/lwip/include/lwip/lwip/netif.h index 99066a5a1f..666f77eb96 100755 --- a/components/lwip/include/lwip/lwip/netif.h +++ b/components/lwip/include/lwip/lwip/netif.h @@ -177,7 +177,7 @@ typedef err_t (*netif_mld_mac_filter_fn)(struct netif *netif, #endif /* LWIP_IPV6 && LWIP_IPV6_MLD */ -#ifdef LWIP_ESP8266 +#if ESP_DHCP /*add DHCP event processing by LiuHan*/ typedef void (*dhcp_event_fn)(void); #endif @@ -190,7 +190,7 @@ struct netif { /** pointer to next in linked list */ struct netif *next; -#ifdef LWIP_ESP8266 +#if ESP_LWIP //ip_addr_t is changed by marco IPV4, IPV6 ip_addr_t link_local_addr; #endif @@ -248,7 +248,7 @@ struct netif { /** the DHCP client state information for this netif */ struct dhcp *dhcp; -#ifdef LWIP_ESP8266 +#if ESP_LWIP struct udp_pcb *dhcps_pcb; dhcp_event_fn dhcp_event; #endif diff --git a/components/lwip/include/lwip/lwip/opt.h b/components/lwip/include/lwip/lwip/opt.h index 76fff88052..51d340e00b 100755 --- a/components/lwip/include/lwip/lwip/opt.h +++ b/components/lwip/include/lwip/lwip/opt.h @@ -986,7 +986,7 @@ * (2 * TCP_MSS) for things to work well */ #ifndef TCP_WND -#define TCP_WND (4 * TCP_MSS) +#define TCP_WND(pcb) (4 * TCP_MSS) #endif /** @@ -1040,7 +1040,7 @@ * To achieve good performance, this should be at least 2 * TCP_MSS. */ #ifndef TCP_SND_BUF -#define TCP_SND_BUF (2 * TCP_MSS) +#define TCP_SND_BUF(pcb) (2 * TCP_MSS) #endif /** @@ -1048,7 +1048,7 @@ * as much as (2 * TCP_SND_BUF/TCP_MSS) for things to work. */ #ifndef TCP_SND_QUEUELEN -#define TCP_SND_QUEUELEN ((4 * (TCP_SND_BUF) + (TCP_MSS - 1))/(TCP_MSS)) +#define TCP_SND_QUEUELEN(pcb) ((4 * (TCP_SND_BUF((pcb))) + (TCP_MSS - 1))/(TCP_MSS)) #endif /** @@ -1057,7 +1057,7 @@ * TCP snd_buf for select to return writable (combined with TCP_SNDQUEUELOWAT). */ #ifndef TCP_SNDLOWAT -#define TCP_SNDLOWAT LWIP_MIN(LWIP_MAX(((TCP_SND_BUF)/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF) - 1) +#define TCP_SNDLOWAT(pcb) LWIP_MIN(LWIP_MAX(((TCP_SND_BUF((pcb)))/2), (2 * TCP_MSS) + 1), (TCP_SND_BUF((pcb))) - 1) #endif /** @@ -1066,7 +1066,7 @@ * this number, select returns writable (combined with TCP_SNDLOWAT). 
*/ #ifndef TCP_SNDQUEUELOWAT -#define TCP_SNDQUEUELOWAT LWIP_MAX(((TCP_SND_QUEUELEN)/2), 5) +#define TCP_SNDQUEUELOWAT(pcb) LWIP_MAX(((TCP_SND_QUEUELEN((pcb)))/2), 5) #endif /** @@ -1134,7 +1134,7 @@ * explicit window update */ #ifndef TCP_WND_UPDATE_THRESHOLD -#define TCP_WND_UPDATE_THRESHOLD LWIP_MIN((TCP_WND / 4), (TCP_MSS * 4)) +#define TCP_WND_UPDATE_THRESHOLD(pcb) LWIP_MIN((TCP_WND((pcb)) / 4), (TCP_MSS * 4)) #endif /** @@ -3008,8 +3008,8 @@ #define LWIP_PERF 0 #endif -#ifndef THREAD_SAFE_DEBUG -#define THREAD_SAFE_DEBUG 0 +#ifndef ESP_THREAD_SAFE_DEBUG +#define ESP_THREAD_SAFE_DEBUG 0 #endif #endif /* LWIP_HDR_OPT_H */ diff --git a/components/lwip/include/lwip/lwip/pbuf.h b/components/lwip/include/lwip/lwip/pbuf.h index aaf5e294af..1834c4e04c 100755 --- a/components/lwip/include/lwip/lwip/pbuf.h +++ b/components/lwip/include/lwip/lwip/pbuf.h @@ -137,7 +137,7 @@ struct pbuf { */ u16_t ref; -#ifdef LWIP_ESP8266 +#if ESP_LWIP void *eb; #endif }; diff --git a/components/lwip/include/lwip/lwip/priv/api_msg.h b/components/lwip/include/lwip/lwip/priv/api_msg.h index 329fa0de30..02d191a53c 100755 --- a/components/lwip/include/lwip/lwip/priv/api_msg.h +++ b/components/lwip/include/lwip/lwip/priv/api_msg.h @@ -187,7 +187,7 @@ struct dns_api_msg { #endif /* LWIP_DNS */ #if LWIP_NETCONN_SEM_PER_THREAD -#ifdef LWIP_ESP8266 +#if ESP_THREAD_SAFE #define LWIP_NETCONN_THREAD_SEM_GET() sys_thread_sem_get() #define LWIP_NETCONN_THREAD_SEM_ALLOC() sys_thread_sem_init() #define LWIP_NETCONN_THREAD_SEM_FREE() sys_thread_sem_deinit() @@ -222,10 +222,6 @@ struct dns_api_msg { #define TCPIP_APIMSG(m,f,e) do { (m)->function = f; (e) = tcpip_apimsg(m); } while(0) #define TCPIP_APIMSG_ACK(m) do { NETCONN_SET_SAFE_ERR((m)->conn, (m)->err); sys_sem_signal(LWIP_API_MSG_SEM(m)); } while(0) -#ifdef LWIP_ESP8266 -#define TCPIP_APIMSG_ACK_SND(m) do { NETCONN_SET_SAFE_ERR((m)->conn, (m)->err); sys_sem_signal(LWIP_API_MSG_SND_SEM(m)); } while(0) -#endif - #endif /* LWIP_TCPIP_CORE_LOCKING */ void lwip_netconn_do_newconn (void *m); diff --git a/components/lwip/include/lwip/lwip/priv/tcp_priv.h b/components/lwip/include/lwip/lwip/priv/tcp_priv.h index b5261b445c..0c498944b3 100755 --- a/components/lwip/include/lwip/lwip/priv/tcp_priv.h +++ b/components/lwip/include/lwip/lwip/priv/tcp_priv.h @@ -92,7 +92,7 @@ err_t tcp_process_refused_data(struct tcp_pcb *pcb); ((tpcb)->flags & (TF_NODELAY | TF_INFR)) || \ (((tpcb)->unsent != NULL) && (((tpcb)->unsent->next != NULL) || \ ((tpcb)->unsent->len >= (tpcb)->mss))) || \ - ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN)) \ + ((tcp_sndbuf(tpcb) == 0) || (tcp_sndqueuelen(tpcb) >= TCP_SND_QUEUELEN(tpcb))) \ ) ? 1 : 0) #define tcp_output_nagle(tpcb) (tcp_do_output_nagle(tpcb) ? tcp_output(tpcb) : ERR_OK) diff --git a/components/lwip/include/lwip/lwip/sockets.h b/components/lwip/include/lwip/lwip/sockets.h index aafb3d5cf3..d9622ea03d 100755 --- a/components/lwip/include/lwip/lwip/sockets.h +++ b/components/lwip/include/lwip/lwip/sockets.h @@ -190,7 +190,6 @@ struct msghdr { #define SO_CONTIMEO 0x1009 /* Unimplemented: connect timeout */ #define SO_NO_CHECK 0x100a /* don't create UDP checksum */ - /* * Structure used for manipulating linger option. 
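The sockets.h hunk that follows adds two Espressif-only IPPROTO_TCP options, TCP_WINDOW and TCP_SNDBUF, backed by the new per_soc_tcp_wnd / per_soc_tcp_snd_buf pcb fields. A minimal usage sketch, assuming ESP_PER_SOC_TCP_WND is enabled; note the unit asymmetry implied by the lwip_setsockopt_impl / lwip_getsockopt_impl hunks earlier in this diff (values are multiplied by TCP_MSS on set, but read back in bytes):

```c
#include "lwip/sockets.h"

/* Sketch only, assuming ESP_PER_SOC_TCP_WND is enabled so that TCP_WINDOW
 * and TCP_SNDBUF exist.  The value passed on set is a multiple of TCP_MSS;
 * getsockopt reads the stored per-pcb value back in bytes. */
static void tune_tcp_socket(int s)
{
    int wnd_in_mss    = 4;   /* receive window: 4 * TCP_MSS */
    int sndbuf_in_mss = 2;   /* send buffer:    2 * TCP_MSS */
    int wnd_bytes     = 0;
    socklen_t optlen  = sizeof(wnd_bytes);

    setsockopt(s, IPPROTO_TCP, TCP_WINDOW, &wnd_in_mss, sizeof(wnd_in_mss));
    setsockopt(s, IPPROTO_TCP, TCP_SNDBUF, &sndbuf_in_mss, sizeof(sndbuf_in_mss));

    /* reads back per_soc_tcp_wnd, i.e. the window size in bytes */
    getsockopt(s, IPPROTO_TCP, TCP_WINDOW, &wnd_bytes, &optlen);
}
```

Since the pcb is created with TCP_WND_DEFAULT / TCP_SND_BUF_DEFAULT in tcp_alloc(), setting these options right after the socket is created, before the connection is established, is the least surprising usage.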
*/ @@ -250,6 +249,11 @@ struct linger { #define TCP_KEEPIDLE 0x03 /* set pcb->keep_idle - Same as TCP_KEEPALIVE, but use seconds for get/setsockopt */ #define TCP_KEEPINTVL 0x04 /* set pcb->keep_intvl - Use seconds for get/setsockopt */ #define TCP_KEEPCNT 0x05 /* set pcb->keep_cnt - Use number of probes sent for get/setsockopt */ +#if ESP_PER_SOC_TCP_WND +#define TCP_WINDOW 0x06 /* set pcb->per_soc_tcp_wnd */ +#define TCP_SNDBUF 0x07 /* set pcb->per_soc_tcp_snd_buf */ +#endif + #endif /* LWIP_TCP */ #if LWIP_IPV6 @@ -505,7 +509,7 @@ int lwip_fcntl(int s, int cmd, int val); #if LWIP_COMPAT_SOCKETS #if LWIP_COMPAT_SOCKETS != 2 -#if LWIP_THREAD_SAFE +#if ESP_THREAD_SAFE int lwip_accept_r(int s, struct sockaddr *addr, socklen_t *addrlen); int lwip_bind_r(int s, const struct sockaddr *name, socklen_t namelen); @@ -590,7 +594,7 @@ int lwip_fcntl_r(int s, int cmd, int val); #define fcntl(s,cmd,val) lwip_fcntl(s,cmd,val) #define ioctl(s,cmd,argp) lwip_ioctl(s,cmd,argp) #endif /* LWIP_POSIX_SOCKETS_IO_NAMES */ -#endif /* LWIP_THREAD_SAFE */ +#endif /* ESP_THREAD_SAFE */ #endif /* LWIP_COMPAT_SOCKETS != 2 */ diff --git a/components/lwip/include/lwip/lwip/tcp.h b/components/lwip/include/lwip/lwip/tcp.h index d52040f99c..6b8c4b6c48 100755 --- a/components/lwip/include/lwip/lwip/tcp.h +++ b/components/lwip/include/lwip/lwip/tcp.h @@ -129,14 +129,14 @@ typedef err_t (*tcp_connected_fn)(void *arg, struct tcp_pcb *tpcb, err_t err); #define RCV_WND_SCALE(pcb, wnd) (((wnd) >> (pcb)->rcv_scale)) #define SND_WND_SCALE(pcb, wnd) (((wnd) << (pcb)->snd_scale)) #define TCPWND16(x) ((u16_t)LWIP_MIN((x), 0xFFFF)) -#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND : TCPWND16(TCP_WND))) +#define TCP_WND_MAX(pcb) ((tcpwnd_size_t)(((pcb)->flags & TF_WND_SCALE) ? TCP_WND(pcb) : TCPWND16(TCP_WND(pcb)))) typedef u32_t tcpwnd_size_t; typedef u16_t tcpflags_t; #else #define RCV_WND_SCALE(pcb, wnd) (wnd) #define SND_WND_SCALE(pcb, wnd) (wnd) #define TCPWND16(x) (x) -#define TCP_WND_MAX(pcb) TCP_WND +#define TCP_WND_MAX(pcb) TCP_WND(pcb) typedef u16_t tcpwnd_size_t; typedef u8_t tcpflags_t; #endif @@ -236,6 +236,11 @@ struct tcp_pcb { u8_t dupacks; u32_t lastack; /* Highest acknowledged seqno. */ +#if ESP_PER_SOC_TCP_WND + tcpwnd_size_t per_soc_tcp_wnd; /* per tcp socket tcp window size */ + tcpwnd_size_t per_soc_tcp_snd_buf; /* per tcp socket tcp send buffer size */ +#endif + /* congestion avoidance/control variables */ tcpwnd_size_t cwnd; tcpwnd_size_t ssthresh; @@ -402,6 +407,10 @@ const char* tcp_debug_state_str(enum tcp_state s); /* for compatibility with older implementation */ #define tcp_new_ip6() tcp_new_ip_type(IPADDR_TYPE_V6) +#if ESP_PER_SOC_TCP_WND +#define PER_SOC_WND(pcb) (pcb->per_soc_wnd) +#endif + #ifdef __cplusplus } #endif diff --git a/components/lwip/include/lwip/port/lwipopts.h b/components/lwip/include/lwip/port/lwipopts.h index 2c24b2be92..75f349280a 100755 --- a/components/lwip/include/lwip/port/lwipopts.h +++ b/components/lwip/include/lwip/port/lwipopts.h @@ -37,7 +37,6 @@ #include "sdkconfig.h" /* Enable all Espressif-only options */ -#define LWIP_ESP8266 /* ----------------------------------------------- @@ -225,18 +224,21 @@ extern unsigned long os_random(void); * TCP_WND: The size of a TCP window. 
This must be at least * (2 * TCP_MSS) for things to work well */ -#define PERF 1 + +#define ESP_PER_SOC_TCP_WND 1 +#if ESP_PER_SOC_TCP_WND +#define TCP_WND_DEFAULT (4*TCP_MSS) +#define TCP_SND_BUF_DEFAULT (2*TCP_MSS) + +#define TCP_WND(pcb) (pcb->per_soc_tcp_wnd) +#define TCP_SND_BUF(pcb) (pcb->per_soc_tcp_snd_buf) +#else #ifdef PERF extern unsigned char misc_prof_get_tcpw(void); extern unsigned char misc_prof_get_tcp_snd_buf(void); -#define TCP_WND (misc_prof_get_tcpw()*TCP_MSS) -#define TCP_SND_BUF (misc_prof_get_tcp_snd_buf()*TCP_MSS) - -#else - -#define TCP_WND (4 * TCP_MSS) -#define TCP_SND_BUF (2 * TCP_MSS) - +#define TCP_WND(pcb) (misc_prof_get_tcpw()*TCP_MSS) +#define TCP_SND_BUF(pcb) (misc_prof_get_tcp_snd_buf()*TCP_MSS) +#endif #endif @@ -507,14 +509,42 @@ extern unsigned char misc_prof_get_tcp_snd_buf(void); */ #define TCPIP_DEBUG LWIP_DBG_OFF +/* Enable all Espressif-only options */ + +#define ESP_LWIP 1 +#define ESP_PER_SOC_TCP_WND 1 +#define ESP_THREAD_SAFE 1 +#define ESP_THREAD_SAFE_DEBUG LWIP_DBG_OFF +#define ESP_DHCP 1 +#define ESP_DNS 1 +#define ESP_IPV6_AUTOCONFIG 1 +#define ESP_PERF 0 +#define ESP_RANDOM_TCP_PORT 1 +#define ESP_IP4_ATON 1 +#define ESP_LIGHT_SLEEP 1 + + +#if ESP_PER_SOC_TCP_WND +#define TCP_WND_DEFAULT (4*TCP_MSS) +#define TCP_SND_BUF_DEFAULT (2*TCP_MSS) +#define TCP_WND(pcb) (pcb->per_soc_tcp_wnd) +#define TCP_SND_BUF(pcb) (pcb->per_soc_tcp_snd_buf) +#else +#if ESP_PERF +extern unsigned char misc_prof_get_tcpw(void); +extern unsigned char misc_prof_get_tcp_snd_buf(void); +#define TCP_WND(pcb) (misc_prof_get_tcpw()*TCP_MSS) +#define TCP_SND_BUF(pcb) (misc_prof_get_tcp_snd_buf()*TCP_MSS) +#endif +#endif + /** * DHCP_DEBUG: Enable debugging in dhcp.c. */ #define DHCP_DEBUG LWIP_DBG_OFF #define LWIP_DEBUG 0 #define TCP_DEBUG LWIP_DBG_OFF -#define THREAD_SAFE_DEBUG LWIP_DBG_OFF -#define LWIP_THREAD_SAFE 1 +#define ESP_THREAD_SAFE_DEBUG LWIP_DBG_OFF #define CHECKSUM_CHECK_UDP 0 #define CHECKSUM_CHECK_IP 0 diff --git a/components/lwip/include/lwip/port/netif/wlanif.h b/components/lwip/include/lwip/port/netif/wlanif.h index 7eb303eab4..c6f7831b3d 100755 --- a/components/lwip/include/lwip/port/netif/wlanif.h +++ b/components/lwip/include/lwip/port/netif/wlanif.h @@ -8,6 +8,8 @@ #include "esp_wifi.h" +#include "esp_wifi_internal.h" + #include "lwip/err.h" #ifdef __cplusplus @@ -18,8 +20,6 @@ err_t wlanif_init(struct netif *netif); void wlanif_input(struct netif *netif, void *buffer, u16_t len, void* eb); -bool ieee80211_output(wifi_interface_t wifi_if, void *buffer, u16_t len); - wifi_interface_t wifi_get_interface(void *dev); void netif_reg_addr_change_cb(void* cb); diff --git a/components/lwip/netif/etharp.c b/components/lwip/netif/etharp.c index 5891c5cfd6..776e949f75 100755 --- a/components/lwip/netif/etharp.c +++ b/components/lwip/netif/etharp.c @@ -55,10 +55,6 @@ #include -#ifdef MEMLEAK_DEBUG -static const char mem_debug_file[] ICACHE_RODATA_ATTR STORE_ATTR = __FILE__; -#endif - #if LWIP_IPV4 && LWIP_ARP /* don't build if not configured for use in lwipopts.h */ /** Re-request a used ARP entry 1 minute before it would expire to prevent diff --git a/components/lwip/port/freertos/sys_arch.c b/components/lwip/port/freertos/sys_arch.c index 74a4a996a8..15ba3011d9 100755 --- a/components/lwip/port/freertos/sys_arch.c +++ b/components/lwip/port/freertos/sys_arch.c @@ -56,7 +56,7 @@ sys_mutex_new(sys_mutex_t *pxMutex) xReturn = ERR_OK; } - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mutex_new: m=%p\n", *pxMutex)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mutex_new: 
m=%p\n", *pxMutex)); return xReturn; } @@ -89,7 +89,7 @@ sys_mutex_unlock(sys_mutex_t *pxMutex) void sys_mutex_free(sys_mutex_t *pxMutex) { - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mutex_free: m=%p\n", *pxMutex)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mutex_free: m=%p\n", *pxMutex)); vQueueDelete(*pxMutex); } #endif @@ -192,20 +192,20 @@ sys_mbox_new(sys_mbox_t *mbox, int size) { *mbox = malloc(sizeof(struct sys_mbox_s)); if (*mbox == NULL){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("fail to new *mbox\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("fail to new *mbox\n")); return ERR_MEM; } (*mbox)->os_mbox = xQueueCreate(size, sizeof(void *)); if ((*mbox)->os_mbox == NULL) { - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("fail to new *mbox->os_mbox\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("fail to new *mbox->os_mbox\n")); free(*mbox); return ERR_MEM; } if (sys_mutex_new(&((*mbox)->lock)) != ERR_OK){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("fail to new *mbox->lock\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("fail to new *mbox->lock\n")); vQueueDelete((*mbox)->os_mbox); free(*mbox); return ERR_MEM; @@ -213,7 +213,7 @@ sys_mbox_new(sys_mbox_t *mbox, int size) (*mbox)->alive = true; - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("new *mbox ok mbox=%p os_mbox=%p mbox_lock=%p\n", *mbox, (*mbox)->os_mbox, (*mbox)->lock)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("new *mbox ok mbox=%p os_mbox=%p mbox_lock=%p\n", *mbox, (*mbox)->os_mbox, (*mbox)->lock)); return ERR_OK; } @@ -234,7 +234,7 @@ sys_mbox_trypost(sys_mbox_t *mbox, void *msg) if (xQueueSend((*mbox)->os_mbox, &msg, (portTickType)0) == pdPASS) { xReturn = ERR_OK; } else { - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("trypost mbox=%p fail\n", (*mbox)->os_mbox)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("trypost mbox=%p fail\n", (*mbox)->os_mbox)); xReturn = ERR_MEM; } @@ -271,7 +271,7 @@ sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout) if (*mbox == NULL){ *msg = NULL; - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch: null mbox\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch: null mbox\n")); return -1; } @@ -294,14 +294,14 @@ sys_arch_mbox_fetch(sys_mbox_t *mbox, void **msg, u32_t timeout) } else { // block forever for a message. 
while (1){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch: fetch mbox=%p os_mbox=%p lock=%p\n", mbox, (*mbox)->os_mbox, (*mbox)->lock)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch: fetch mbox=%p os_mbox=%p lock=%p\n", mbox, (*mbox)->os_mbox, (*mbox)->lock)); if (pdTRUE == xQueueReceive((*mbox)->os_mbox, &(*msg), portMAX_DELAY)){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch:mbox rx msg=%p\n", (*msg))); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch:mbox rx msg=%p\n", (*msg))); break; } if ((*mbox)->alive == false){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch:mbox not alive\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_arch_mbox_fetch:mbox not alive\n")); *msg = NULL; break; } @@ -356,24 +356,24 @@ sys_mbox_free(sys_mbox_t *mbox) uint16_t count = 0; bool post_null = true; - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free: set alive false\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free: set alive false\n")); (*mbox)->alive = false; while ( count++ < MAX_POLL_CNT ){ //ESP32_WORKAROUND - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free:try lock=%d\n", count)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free:try lock=%d\n", count)); if (!sys_mutex_trylock( &(*mbox)->lock )){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free:get lock ok %d\n", count)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free:get lock ok %d\n", count)); sys_mutex_unlock( &(*mbox)->lock ); break; } if (post_null){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free: post null to mbox\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free: post null to mbox\n")); if (sys_mbox_trypost( mbox, NULL) != ERR_OK){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free: post null mbox fail\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free: post null mbox fail\n")); } else { post_null = false; - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free: post null mbox ok\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free: post null mbox ok\n")); } } @@ -383,7 +383,7 @@ sys_mbox_free(sys_mbox_t *mbox) sys_delay_ms(PER_POLL_DELAY); } - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sys_mbox_free:free mbox\n")); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sys_mbox_free:free mbox\n")); if (uxQueueMessagesWaiting((*mbox)->os_mbox)) { xQueueReset((*mbox)->os_mbox); @@ -491,7 +491,7 @@ sys_sem_t* sys_thread_sem_get(void) if (!sem){ sem = sys_thread_sem_init(); } - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sem_get s=%p\n", sem)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sem_get s=%p\n", sem)); return sem; } @@ -500,12 +500,12 @@ static void sys_thread_tls_free(int index, void* data) sys_sem_t *sem = (sys_sem_t*)(data); if (sem && *sem){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sem del, i=%d sem=%p\n", index, *sem)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sem del, i=%d sem=%p\n", index, *sem)); vSemaphoreDelete(*sem); } if (sem){ - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sem pointer del, i=%d sem_p=%p\n", index, sem)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sem pointer del, i=%d sem_p=%p\n", index, sem)); free(sem); } } @@ -526,7 +526,7 @@ sys_sem_t* sys_thread_sem_init(void) return 0; } - LWIP_DEBUGF(THREAD_SAFE_DEBUG, ("sem init sem_p=%p sem=%p cb=%p\n", sem, *sem, sys_thread_tls_free)); + LWIP_DEBUGF(ESP_THREAD_SAFE_DEBUG, ("sem init sem_p=%p sem=%p cb=%p\n", sem, *sem, sys_thread_tls_free)); vTaskSetThreadLocalStoragePointerAndDelCallback(xTaskGetCurrentTaskHandle(), SYS_TLS_INDEX, sem, (TlsDeleteCallbackFunction_t)sys_thread_tls_free); return sem; diff --git a/components/lwip/port/netif/wlanif.c 
b/components/lwip/port/netif/wlanif.c index 9832c41aff..548bb7f970 100755 --- a/components/lwip/port/netif/wlanif.c +++ b/components/lwip/port/netif/wlanif.c @@ -56,16 +56,8 @@ #define IFNAME0 'e' #define IFNAME1 'n' -#ifdef LWIP_ESP8266 -//TO_DO -//char *hostname; -//bool default_hostname = 1; - static char hostname[16]; -#else -static char hostname[16]; -#endif -#ifdef PERF +#if ESP_PERF uint32_t g_rx_alloc_pbuf_fail_cnt = 0; #endif @@ -95,7 +87,7 @@ low_level_init(struct netif *netif) /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP; -#ifdef LWIP_ESP8266 +#if ESP_LWIP #if LWIP_IGMP @@ -133,7 +125,7 @@ low_level_output(struct netif *netif, struct pbuf *p) return ERR_IF; } -#ifdef LWIP_ESP8266 +#if ESP_LWIP q = p; u16_t pbuf_x_len = 0; pbuf_x_len = q->len; @@ -150,12 +142,12 @@ low_level_output(struct netif *netif, struct pbuf *p) } } - ieee80211_output(wifi_if, q->payload, pbuf_x_len); + esp_wifi_internal_tx(wifi_if, q->payload, pbuf_x_len); return ERR_OK; #else for(q = p; q != NULL; q = q->next) { - ieee80211_output(wifi_if, q->payload, q->len); + esp_wifi_internal_tx(wifi_if, q->payload, q->len); } #endif @@ -172,7 +164,7 @@ low_level_output(struct netif *netif, struct pbuf *p) * @param netif the lwip network interface structure for this ethernetif */ void -#ifdef LWIP_ESP8266 +#if ESP_LWIP wlanif_input(struct netif *netif, void *buffer, u16_t len, void* eb) #else wlanif_input(struct netif *netif, void *buffer, uint16 len) @@ -180,17 +172,17 @@ wlanif_input(struct netif *netif, void *buffer, uint16 len) { struct pbuf *p; -#ifdef LWIP_ESP8266 +#if ESP_LWIP if(buffer== NULL) goto _exit; if(netif == NULL) goto _exit; #endif -#ifdef LWIP_ESP8266 +#if ESP_LWIP p = pbuf_alloc(PBUF_RAW, len, PBUF_REF); if (p == NULL){ -#ifdef PERF +#if ESP_PERF g_rx_alloc_pbuf_fail_cnt++; #endif return; @@ -236,7 +228,7 @@ wlanif_init(struct netif *netif) #if LWIP_NETIF_HOSTNAME /* Initialize interface hostname */ -#ifdef LWIP_ESP8266 +#if ESP_LWIP //TO_DO /* if ((struct netif *)wifi_get_netif(STATION_IF) == netif) { diff --git a/components/tcpip_adapter/include/tcpip_adapter.h b/components/tcpip_adapter/include/tcpip_adapter.h index 5b0fc4c627..218325320e 100644 --- a/components/tcpip_adapter/include/tcpip_adapter.h +++ b/components/tcpip_adapter/include/tcpip_adapter.h @@ -67,11 +67,15 @@ typedef struct { typedef dhcps_lease_t tcpip_adapter_dhcps_lease_t; #if CONFIG_DHCP_STA_LIST -struct station_list { - STAILQ_ENTRY(station_list) next; +typedef struct { uint8_t mac[6]; ip4_addr_t ip; -}; +}tcpip_adapter_sta_info_t; + +typedef struct { + tcpip_adapter_sta_info_t sta[ESP_WIFI_MAX_CONN_NUM+2]; + uint8_t num; +}tcpip_adapter_sta_list_t; #endif #endif @@ -359,26 +363,14 @@ wifi_interface_t tcpip_adapter_get_wifi_if(void *dev); /** * @brief Get the station information list * - * @note This function should be called in AP mode and dhcp server started, and the list should - * be by using tcpip_adapter_free_sta_list. 
- * - * @param[in] sta_info: station information - * @param[out] sta_list: station information list + * @param[in] wifi_sta_list_t *wifi_sta_list: station list info + * @param[out] tcpip_adapter_sta_list_t *tcpip_sta_list: station list info * * @return ESP_OK * ESP_ERR_TCPIP_ADAPTER_NO_MEM * ESP_ERR_TCPIP_ADAPTER_INVALID_PARAMS */ -esp_err_t tcpip_adapter_get_sta_list(struct station_info *sta_info, struct station_list **sta_list); - -/** - * @brief Free the station information list's memory - * - * @param sta_list: station information list - * - * @return ESP_OK - */ -esp_err_t tcpip_adapter_free_sta_list(struct station_list *sta_list); +esp_err_t tcpip_adapter_get_sta_list(wifi_sta_list_t *wifi_sta_list, tcpip_adapter_sta_list_t *tcpip_sta_list); #ifdef __cplusplus } diff --git a/components/tcpip_adapter/tcpip_adapter_lwip.c b/components/tcpip_adapter/tcpip_adapter_lwip.c index 12cf05f95f..677368008d 100644 --- a/components/tcpip_adapter/tcpip_adapter_lwip.c +++ b/components/tcpip_adapter/tcpip_adapter_lwip.c @@ -590,45 +590,17 @@ wifi_interface_t tcpip_adapter_get_wifi_if(void *dev) return WIFI_IF_MAX; } -esp_err_t tcpip_adapter_get_sta_list(struct station_info *sta_info, struct station_list **sta_list) +esp_err_t tcpip_adapter_get_sta_list(wifi_sta_list_t *wifi_sta_list, tcpip_adapter_sta_list_t *tcpip_sta_list) { - struct station_info *info = sta_info; - struct station_list *list; - STAILQ_HEAD(, station_list) list_head; + int i; - if (sta_list == NULL) + if ((wifi_sta_list == NULL) || (tcpip_sta_list == NULL)) return ESP_ERR_TCPIP_ADAPTER_INVALID_PARAMS; - STAILQ_INIT(&list_head); - - while (info != NULL) { - list = (struct station_list *)malloc(sizeof(struct station_list)); - memset(list, 0, sizeof (struct station_list)); - - if (list == NULL) { - return ESP_ERR_TCPIP_ADAPTER_NO_MEM; - } - - memcpy(list->mac, info->bssid, 6); - dhcp_search_ip_on_mac(list->mac, &list->ip); - STAILQ_INSERT_TAIL(&list_head, list, next); - - info = STAILQ_NEXT(info, next); - } - - *sta_list = STAILQ_FIRST(&list_head); - - return ESP_OK; -} - -esp_err_t tcpip_adapter_free_sta_list(struct station_list *sta_list) -{ - struct station_list *list = sta_list; - - while (sta_list != NULL) { - list = sta_list; - sta_list = STAILQ_NEXT(sta_list, next); - free(list); + memset(tcpip_sta_list, 0, sizeof(tcpip_adapter_sta_list_t)); + for (i=0; i<wifi_sta_list->num; i++){ + memcpy(tcpip_sta_list->sta[i].mac, wifi_sta_list->sta[i].mac, 6); + dhcp_search_ip_on_mac(tcpip_sta_list->sta[i].mac, &tcpip_sta_list->sta[i].ip); } return ESP_OK;
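
A minimal usage sketch of the reworked station-list flow, for an AP-mode application built with CONFIG_DHCP_STA_LIST enabled: the driver-side list is obtained via the renamed esp_wifi_ap_get_sta_list() from this change, and tcpip_adapter_get_sta_list() then resolves each MAC to its DHCP lease. The helper name, the log format, and the use of lwip's ip4addr_ntoa() are illustrative only, not part of this change.

```c
#include <stdio.h>
#include "esp_err.h"
#include "esp_wifi.h"
#include "tcpip_adapter.h"
#include "lwip/ip_addr.h"

/* Illustrative helper (not part of this change): list the stations
 * associated with the ESP32 soft-AP together with their leased IPs. */
static void print_connected_stations(void)
{
    wifi_sta_list_t wifi_sta_list;
    tcpip_adapter_sta_list_t tcpip_sta_list;

    /* Ask the Wi-Fi driver which stations are currently associated ... */
    if (esp_wifi_ap_get_sta_list(&wifi_sta_list) != ESP_OK) {
        return;
    }

    /* ... then let the TCP/IP adapter look up each MAC's DHCP lease. */
    if (tcpip_adapter_get_sta_list(&wifi_sta_list, &tcpip_sta_list) != ESP_OK) {
        return;
    }

    for (int i = 0; i < wifi_sta_list.num; i++) {
        tcpip_adapter_sta_info_t *sta = &tcpip_sta_list.sta[i];
        printf("sta %d: mac=%02x:%02x:%02x:%02x:%02x:%02x ip=%s\n",
               i, sta->mac[0], sta->mac[1], sta->mac[2],
               sta->mac[3], sta->mac[4], sta->mac[5],
               ip4addr_ntoa(&sta->ip));
    }
}
```

Compared with the old station_info/station_list pair, both lists are caller-owned fixed-size structs, so there is no longer anything to free after the call.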
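
For context on the lwipopts.h change earlier in this diff: with ESP_PER_SOC_TCP_WND enabled, the receive window and send buffer stop being global constants and become per-pcb values read through TCP_WND(pcb) and TCP_SND_BUF(pcb). Below is a minimal sketch of how a pcb could be seeded with the new defaults; it assumes struct tcp_pcb gains the per_soc_tcp_wnd / per_soc_tcp_snd_buf members that these macros dereference (added elsewhere in this change, not shown here), and the helper name and its call site are illustrative only.

```c
#include "lwip/opt.h"
#include "lwip/tcp.h"

#if ESP_PER_SOC_TCP_WND
/* Illustrative only: seed a freshly allocated pcb with the compile-time
 * defaults so that TCP_WND(pcb) / TCP_SND_BUF(pcb) return sensible values
 * until something (e.g. an application hook) overrides them per socket. */
static void tcp_pcb_seed_per_soc_window(struct tcp_pcb *pcb)
{
    pcb->per_soc_tcp_wnd     = TCP_WND_DEFAULT;      /* 4 * TCP_MSS */
    pcb->per_soc_tcp_snd_buf = TCP_SND_BUF_DEFAULT;  /* 2 * TCP_MSS */
}
#endif
```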