esp-idf/components/openthread/port/esp_openthread_udp.c
Jiacheng Guo 68ce4f1404 openthread: add platform UDP and border router example
This MR adds the OpenThread border router example and supports the
Thread 1.1 border agent feature.

* Adds the OPENTHREAD_BORDER_ROUTER Kconfig option.
* Adds platform UDP and task queue port for the border agent feature.
* Adds the `esp_openthread_border_router_*` API.
* Adds the `esp_otbr` example.
2021-06-17 17:29:16 +08:00

361 lines
11 KiB
C

// Copyright 2021 Espressif Systems (Shanghai) CO LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
#include <string.h>
#include "esp_check.h"
#include "esp_err.h"
#include "esp_netif.h"
#include "esp_openthread.h"
#include "esp_openthread_border_router.h"
#include "esp_openthread_common_macro.h"
#include "esp_openthread_lock.h"
#include "esp_openthread_netif_glue.h"
#include "esp_openthread_task_queue.h"
#include "common/code_utils.hpp"
#include "common/logging.hpp"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "lwip/ip6.h"
#include "lwip/ip6_addr.h"
#include "lwip/ip_addr.h"
#include "lwip/pbuf.h"
#include "lwip/prot/ip4.h"
#include "lwip/tcpip.h"
#include "lwip/udp.h"
#include "openthread/error.h"
#include "openthread/platform/udp.h"
// Context for one received UDP datagram. Allocated in the lwIP tcpip thread
// (handle_udp_recv) and consumed + freed in the OpenThread task (udp_recv_task).
typedef struct {
    otUdpSocket *socket;    // destination OpenThread UDP socket (handler + context)
    struct pbuf *recv_buf;  // received datagram; ownership transfers to udp_recv_task, which frees it
    ip_addr_t addr;         // peer address as seen by lwIP (may be IPv4 or IPv6)
    uint16_t port;          // peer UDP port
    uint8_t hop_limit;      // IPv6 hop limit or IPv4 TTL of the received packet
    bool is_host_interface; // true when the packet arrived on the backbone (host-side) netif
} udp_recv_task_t;
// Context for creating a UDP PCB inside the lwIP tcpip thread.
// The caller blocks in wait_for_task_notification() until source_task is notified.
typedef struct {
    TaskHandle_t source_task; // task to notify once the PCB has been created
    otUdpSocket *socket;      // OpenThread socket passed as the lwIP recv callback context
    struct udp_pcb *pcb_ret;  // out: newly created PCB, or NULL on allocation failure
} udp_new_task_t;
// Shared context for udp_bind_task and udp_connect_task, both executed in the
// lwIP tcpip thread while the caller blocks on a task notification.
typedef struct {
    TaskHandle_t source_task; // task to notify when the lwIP call has completed
    struct udp_pcb *pcb;      // PCB to bind/connect
    ip_addr_t addr;           // local (bind) or peer (connect) address
    uint16_t port;            // local (bind) or peer (connect) port
    err_t ret;                // out: lwIP result code of udp_bind()/udp_connect()
} udp_bind_connect_task_t;
// Context for binding a UDP PCB to a specific network interface in the
// lwIP tcpip thread.
typedef struct {
    TaskHandle_t source_task; // task to notify when done
    struct udp_pcb *pcb;      // PCB to restrict to one netif
    uint8_t netif_index;      // lwIP netif index, or NETIF_NO_INDEX for no restriction
} udp_bind_netif_task_t;
// Context for one outbound UDP datagram. Allocated in otPlatUdpSend (OpenThread
// task) and consumed + freed in udp_send_task (lwIP tcpip thread); the
// otMessage is freed there as well, under the OpenThread lock.
typedef struct {
    struct udp_pcb *pcb; // PCB to send through
    otMessage *message;  // payload; owned by this task once queued
    ip_addr_t addr;      // destination address (IPv4 or IPv6)
    uint16_t port;       // destination port
    bool multicast_loop; // loop multicast packets back to the sender host
    uint8_t hop_limit;   // IPv6 hop limit / IPv4 TTL to apply
    uint8_t netif_index; // outgoing netif index, or NETIF_NO_INDEX for routing-table choice
} udp_send_task_t;
// Block the calling (OpenThread) task until a tcpip_callback job signals
// completion via xTaskNotifyGive. The OpenThread API lock is released while
// waiting so the lwIP-thread job (or other tasks) can take it, and re-acquired
// before returning to the caller.
static void wait_for_task_notification(void)
{
    esp_openthread_lock_release();
    ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
    esp_openthread_lock_acquire(portMAX_DELAY);
}
// Convert an OpenThread IPv6 address to an lwIP ip_addr_t.
// IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) are unmapped to native IPv4;
// everything else is kept as IPv6 with no scope zone.
static ip_addr_t map_openthread_addr_to_lwip_addr(const otIp6Address *address)
{
    ip_addr_t lwip_addr;

    memcpy(ip_2_ip6(&lwip_addr)->addr, address->mFields.m8, sizeof(ip_2_ip6(&lwip_addr)->addr));
    if (ip6_addr_isipv4mappedipv6(ip_2_ip6(&lwip_addr))) {
        // ::ffff:a.b.c.d — extract the embedded IPv4 address.
        unmap_ipv4_mapped_ipv6(ip_2_ip4(&lwip_addr), ip_2_ip6(&lwip_addr));
        lwip_addr.type = IPADDR_TYPE_V4;
        return lwip_addr;
    }
    lwip_addr.type = IPADDR_TYPE_V6;
#if LWIP_IPV6_SCOPES
    lwip_addr.u_addr.ip6.zone = IP6_NO_ZONE;
#endif
    return lwip_addr;
}
// Deliver a received UDP datagram to the owning OpenThread socket.
// Runs in the OpenThread task (posted by handle_udp_recv); frees the task
// context and its pbuf on every path.
static void udp_recv_task(void *ctx)
{
    udp_recv_task_t *task = (udp_recv_task_t *)ctx;
    otMessageInfo message_info;
    otMessage *message = NULL;
    otMessageSettings msg_settings = {.mLinkSecurityEnabled = false, .mPriority = OT_MESSAGE_PRIORITY_NORMAL};
    struct pbuf *recv_buf = task->recv_buf;
    // For a single-segment pbuf we read the payload in place; for a chain we
    // flatten it into a heap copy below.
    uint8_t *data_buf = (uint8_t *)recv_buf->payload;
    uint8_t *data_buf_to_free = NULL;

    // Local socket address/port are not reconstructed here; only peer info is filled in.
    message_info.mSockPort = 0;
    memset(&message_info.mSockAddr, 0, sizeof(message_info.mSockAddr));
    message_info.mHopLimit = task->hop_limit;
    message_info.mPeerPort = task->port;
    if (task->addr.type == IPADDR_TYPE_V4) {
        // OpenThread only understands IPv6, so present IPv4 peers as ::ffff:a.b.c.d.
        ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&task->addr), ip_2_ip4(&task->addr));
    }
    memcpy(&message_info.mPeerAddr, ip_2_ip6(&task->addr)->addr, sizeof(message_info.mPeerAddr));

    if (recv_buf->next != NULL) {
        // Chained pbuf: flatten into one contiguous buffer for otMessageAppend.
        data_buf = (uint8_t *)malloc(recv_buf->tot_len);
        if (data_buf != NULL) {
            data_buf_to_free = data_buf;
            pbuf_copy_partial(recv_buf, data_buf, recv_buf->tot_len, 0);
        }
    }
    VerifyOrExit(data_buf != NULL,
                 ESP_LOGE(OT_PLAT_LOG_TAG, "Failed to allocate data buf when receiving OpenThread plat UDP"));
    message = otUdpNewMessage(esp_openthread_get_instance(), &msg_settings);
    VerifyOrExit(message != NULL,
                 ESP_LOGE(OT_PLAT_LOG_TAG, "Failed to allocate OpenThread message when receiving OpenThread plat UDP"));
    VerifyOrExit(otMessageAppend(message, data_buf, recv_buf->tot_len) == OT_ERROR_NONE,
                 ESP_LOGE(OT_PLAT_LOG_TAG, "Failed to copy OpenThread message when receiving OpenThread plat UDP"));
    // Hand the message to the socket's receive handler, then release it.
    task->socket->mHandler(task->socket->mContext, message, &message_info);
    otMessageFree(message);

exit:
    free(task);
    if (data_buf_to_free) {
        free(data_buf_to_free);
    }
    pbuf_free(recv_buf);
    return;
}
// lwIP UDP receive callback (runs in the tcpip thread). Captures the datagram
// and its metadata into a udp_recv_task_t and posts it to the OpenThread task.
// The pbuf is owned by this callback; ownership passes to udp_recv_task only
// when the post succeeds.
static void handle_udp_recv(void *ctx, struct udp_pcb *pcb, struct pbuf *p, const ip_addr_t *addr, uint16_t port)
{
    udp_recv_task_t *task = (udp_recv_task_t *)malloc(sizeof(udp_recv_task_t));
    const struct ip6_hdr *ip6_hdr = ip6_current_header();
    const struct ip_hdr *ip4_hdr = ip4_current_header();
    struct netif *source_netif = ip_current_netif();

    if (task == NULL) {
        otLogCritPlat("Failed to allocate recv task when receiving OpenThread plat UDP");
        // Original code fell through and dereferenced the NULL task; drop the
        // datagram instead. We must free the pbuf ourselves here.
        pbuf_free(p);
        return;
    }
    task->socket = (otUdpSocket *)ctx;
    task->recv_buf = p;
    task->addr = *addr;
    task->port = port;
    // Hop limit comes from whichever IP header this packet actually carried.
    task->hop_limit = (addr->type == IPADDR_TYPE_V6) ? IP6H_HOPLIM(ip6_hdr) : IPH_TTL(ip4_hdr);
    task->is_host_interface =
        (netif_get_index(source_netif) == esp_netif_get_netif_impl_index(esp_openthread_get_backbone_netif()));
    if (esp_openthread_task_queue_post(udp_recv_task, task) != ESP_OK) {
        // udp_recv_task will never run, so release both the context and the
        // pbuf it would have freed (the pbuf leaked here before this fix).
        pbuf_free(p);
        free(task);
    }
}
static void udp_new_task(void *ctx)
{
udp_new_task_t *task = (udp_new_task_t *)ctx;
task->pcb_ret = udp_new();
udp_recv(task->pcb_ret, handle_udp_recv, task->socket);
xTaskNotifyGive(task->source_task);
}
// Create the lwIP UDP PCB backing an OpenThread platform UDP socket.
// The PCB is created in the tcpip thread; this task blocks until it is done.
otError otPlatUdpSocket(otUdpSocket *udp_socket)
{
    udp_new_task_t task = {.source_task = xTaskGetCurrentTaskHandle(), .socket = udp_socket};

    tcpip_callback(udp_new_task, &task);
    wait_for_task_notification();
    if (task.pcb_ret == NULL) {
        return OT_ERROR_FAILED;
    }
    udp_socket->mHandle = task.pcb_ret;
    return OT_ERROR_NONE;
}
// tcpip-thread job: tear down a UDP PCB. No completion notification is
// needed; otPlatUdpClose does not wait for this job.
static void udp_close_task(void *ctx)
{
    udp_remove((struct udp_pcb *)ctx);
}
// Close an OpenThread platform UDP socket by scheduling removal of its lwIP
// PCB in the tcpip thread. A socket that was never opened (NULL handle) is a
// no-op. Always reports success.
otError otPlatUdpClose(otUdpSocket *udp_socket)
{
    if (udp_socket->mHandle != NULL) {
        tcpip_callback(udp_close_task, udp_socket->mHandle);
    }
    return OT_ERROR_NONE;
}
// tcpip-thread job: bind the PCB to the requested local address/port and
// report the lwIP result back through the shared context.
static void udp_bind_task(void *ctx)
{
    udp_bind_connect_task_t *req = (udp_bind_connect_task_t *)ctx;

    req->ret = udp_bind(req->pcb, &req->addr, req->port);
    xTaskNotifyGive(req->source_task);
}
// Bind an OpenThread platform UDP socket to its configured local address and
// port. The actual udp_bind() runs in the tcpip thread; this task blocks
// until it completes.
otError otPlatUdpBind(otUdpSocket *udp_socket)
{
    udp_bind_connect_task_t bind_req = {
        .source_task = xTaskGetCurrentTaskHandle(),
        .pcb = (struct udp_pcb *)udp_socket->mHandle,
        .port = udp_socket->mSockName.mPort,
    };

    ESP_LOGI(OT_PLAT_LOG_TAG, "Platform UDP bound to port %d", udp_socket->mSockName.mPort);
    // ANY lets lwIP accept both IPv4 and IPv6 on this PCB; the raw IPv6 bytes
    // of the socket's local address are still carried along.
    bind_req.addr.type = IPADDR_TYPE_ANY;
    memcpy(ip_2_ip6(&bind_req.addr)->addr, udp_socket->mSockName.mAddress.mFields.m8,
           sizeof(ip_2_ip6(&bind_req.addr)->addr));
    tcpip_callback(udp_bind_task, &bind_req);
    wait_for_task_notification();
    if (bind_req.ret != ERR_OK) {
        return OT_ERROR_FAILED;
    }
    return OT_ERROR_NONE;
}
static void udp_bind_netif_task(void *ctx)
{
udp_bind_netif_task_t *task = (udp_bind_netif_task_t *)ctx;
task->netif_index = task->netif_index;
xTaskNotifyGive(task->source_task);
}
// Map an OpenThread netif identifier to the lwIP netif index of the
// corresponding esp_netif. Unknown or unspecified identifiers map to
// NETIF_NO_INDEX (no interface restriction).
static uint8_t get_netif_index(otNetifIdentifier netif_identifier)
{
    if (netif_identifier == OT_NETIF_THREAD) {
        return esp_netif_get_netif_impl_index(esp_openthread_get_netif());
    }
    if (netif_identifier == OT_NETIF_BACKBONE) {
        return esp_netif_get_netif_impl_index(esp_openthread_get_backbone_netif());
    }
    return NETIF_NO_INDEX;
}
// Bind an OpenThread platform UDP socket to a specific network interface
// (Thread, backbone, or none). Executes in the tcpip thread and blocks the
// caller until done. Always reports success.
otError otPlatUdpBindToNetif(otUdpSocket *udp_socket, otNetifIdentifier netif_identifier)
{
    udp_bind_netif_task_t bind_req = {
        .source_task = xTaskGetCurrentTaskHandle(),
        .pcb = (struct udp_pcb *)udp_socket->mHandle,
        .netif_index = get_netif_index(netif_identifier),
    };

    tcpip_callback(udp_bind_netif_task, &bind_req);
    wait_for_task_notification();
    return OT_ERROR_NONE;
}
// tcpip-thread job: connect the PCB to the requested peer address/port and
// report the lwIP result back through the shared context.
static void udp_connect_task(void *ctx)
{
    udp_bind_connect_task_t *req = (udp_bind_connect_task_t *)ctx;

    req->ret = udp_connect(req->pcb, &req->addr, req->port);
    xTaskNotifyGive(req->source_task);
}
// Connect an OpenThread platform UDP socket to its configured peer.
// IPv4-mapped peers are converted to native IPv4 for lwIP. The udp_connect()
// runs in the tcpip thread; this task blocks until it completes.
otError otPlatUdpConnect(otUdpSocket *udp_socket)
{
    udp_bind_connect_task_t connect_req = {
        .source_task = xTaskGetCurrentTaskHandle(),
        .pcb = (struct udp_pcb *)udp_socket->mHandle,
        .port = udp_socket->mPeerName.mPort,
    };

    connect_req.addr = map_openthread_addr_to_lwip_addr(&udp_socket->mPeerName.mAddress);
    tcpip_callback(udp_connect_task, &connect_req);
    wait_for_task_notification();
    if (connect_req.ret != ERR_OK) {
        return OT_ERROR_FAILED;
    }
    return OT_ERROR_NONE;
}
// Return true when the address is an IPv6 link-local unicast address.
// RFC 4291 defines link-local as fe80::/10; the original check required the
// second byte to equal 0x80 exactly (fe80::/16), which rejects valid
// link-local prefixes such as fe81...febf. Matching the /10 prefix accepts a
// superset of the old inputs, so existing callers are unaffected.
static bool is_link_local(const otIp6Address *address)
{
    return address->mFields.m8[0] == 0xfe && (address->mFields.m8[1] & 0xc0) == 0x80;
}
// Return true when the address is an IPv6 multicast address (ff00::/8).
static bool is_multicast(const otIp6Address *address)
{
    const uint8_t first_byte = address->mFields.m8[0];

    return first_byte == 0xff;
}
// tcpip-thread job: transmit one queued UDP datagram. Consumes and frees the
// task context and its otMessage on every path; the OpenThread lock is taken
// only around otMessageFree.
static void udp_send_task(void *ctx)
{
    udp_send_task_t *task = (udp_send_task_t *)ctx;
    struct pbuf *send_buf = NULL;
    uint16_t len = otMessageGetLength(task->message);

    task->pcb->ttl = task->hop_limit;
    task->pcb->netif_idx = task->netif_index;
#if LWIP_IPV6_SCOPES
    if (task->addr.type == IPADDR_TYPE_V6) {
        // With scopes enabled the zone must match the outgoing interface.
        ip_2_ip6(&task->addr)->zone = task->netif_index;
    }
#endif
    task->pcb->flags = (task->pcb->flags & (~UDP_FLAGS_MULTICAST_LOOP));
    if (task->multicast_loop) {
        task->pcb->flags |= UDP_FLAGS_MULTICAST_LOOP;
    }
    send_buf = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
    // Check the allocation BEFORE touching send_buf->payload: the original
    // called otMessageRead() first, dereferencing NULL when pbuf_alloc failed.
    VerifyOrExit(send_buf != NULL);
    // NOTE(review): otMessageRead is called without the OpenThread lock here,
    // as in the original — confirm this is safe for a queued-but-unowned message.
    otMessageRead(task->message, 0, send_buf->payload, len);
    udp_sendto(task->pcb, send_buf, &task->addr, task->port);

exit:
    if (send_buf) {
        pbuf_free(send_buf);
    }
    esp_openthread_lock_acquire(portMAX_DELAY);
    otMessageFree(task->message);
    esp_openthread_lock_release();
    free(task);
}
// Queue a UDP datagram for transmission through lwIP. On success the
// otMessage is owned (and later freed) by udp_send_task in the tcpip thread;
// on allocation failure OT_ERROR_NO_BUFS is returned and the message is left
// with the caller, exactly as before.
otError otPlatUdpSend(otUdpSocket *udp_socket, otMessage *message, const otMessageInfo *message_info)
{
    const otIp6Address *peer = &message_info->mPeerAddr;
    udp_send_task_t *send_req = (udp_send_task_t *)malloc(sizeof(udp_send_task_t));

    if (send_req == NULL) {
        return OT_ERROR_NO_BUFS;
    }
    send_req->pcb = (struct udp_pcb *)udp_socket->mHandle;
    send_req->message = message;
    send_req->port = message_info->mPeerPort;
    send_req->multicast_loop = message_info->mMulticastLoop;
    send_req->hop_limit = message_info->mHopLimit;
    send_req->addr = map_openthread_addr_to_lwip_addr(peer);
    // Link-local and multicast destinations need an explicit outgoing
    // interface; everything else is left to the routing table.
    if (is_link_local(peer) || is_multicast(peer)) {
        send_req->netif_index = get_netif_index(message_info->mIsHostInterface ? OT_NETIF_BACKBONE : OT_NETIF_THREAD);
    } else {
        send_req->netif_index = NETIF_NO_INDEX;
    }
    tcpip_callback(udp_send_task, send_req);
    return OT_ERROR_NONE;
}