Mirror of https://github.com/espressif/esp-idf.git (synced 2024-10-05 20:47:46 -04:00)
esp_hw_support: Fix formatting of intr_alloc.h and test_panic.c
commit 50a58b4a83
parent 7c6a39ed2e
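The change below is purely cosmetic: it adds spaces around assignments and binary operators, wraps single-statement `if` bodies in braces, reflows one long `ESP_EARLY_LOGV()` call, and renames the local `bestLevel` to `bestPriority` to match the "priority" wording used in the log messages. As a hedged illustration of the style rules being applied (the function and variable names below are invented for this example and are not part of the commit):

```c
#include <stdbool.h>

// Hypothetical example only -- not code from the commit.
// "Before" style: no spaces around binary operators, brace-less single-statement if.
static bool is_valid_old_style(int intno)
{
    if (intno>31) return false;
    return (intno%2)==0;
}

// "After" style, as applied throughout the diff:
// spaces around operators and braces around every conditional body.
static bool is_valid_new_style(int intno)
{
    if (intno > 31) {
        return false;
    }
    return (intno % 2) == 0;
}
```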
@@ -106,31 +106,33 @@ static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
 //with an incrementing cpu.intno value.
 static void insert_vector_desc(vector_desc_t *to_insert)
 {
-    vector_desc_t *vd=vector_desc_head;
-    vector_desc_t *prev=NULL;
-    while(vd!=NULL) {
+    vector_desc_t *vd = vector_desc_head;
+    vector_desc_t *prev = NULL;
+    while(vd != NULL) {
         if (vd->cpu > to_insert->cpu) break;
         if (vd->cpu == to_insert->cpu && vd->intno >= to_insert->intno) break;
-        prev=vd;
-        vd=vd->next;
+        prev = vd;
+        vd = vd->next;
     }
-    if ((vector_desc_head==NULL) || (prev==NULL)) {
+    if ((vector_desc_head == NULL) || (prev == NULL)) {
         //First item
         to_insert->next = vd;
-        vector_desc_head=to_insert;
+        vector_desc_head = to_insert;
     } else {
-        prev->next=to_insert;
-        to_insert->next=vd;
+        prev->next = to_insert;
+        to_insert->next = vd;
     }
 }
 
 //Returns a vector_desc entry for an intno/cpu, or NULL if none exists.
 static vector_desc_t *find_desc_for_int(int intno, int cpu)
 {
-    vector_desc_t *vd=vector_desc_head;
-    while(vd!=NULL) {
-        if (vd->cpu==cpu && vd->intno==intno) break;
-        vd=vd->next;
+    vector_desc_t *vd = vector_desc_head;
+    while(vd != NULL) {
+        if (vd->cpu == cpu && vd->intno == intno) {
+            break;
+        }
+        vd = vd->next;
     }
     return vd;
 }
@@ -140,13 +142,15 @@ static vector_desc_t *find_desc_for_int(int intno, int cpu)
 //it into the list. Returns NULL on malloc fail.
 static vector_desc_t *get_desc_for_int(int intno, int cpu)
 {
-    vector_desc_t *vd=find_desc_for_int(intno, cpu);
-    if (vd==NULL) {
-        vector_desc_t *newvd=heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
-        if (newvd==NULL) return NULL;
+    vector_desc_t *vd = find_desc_for_int(intno, cpu);
+    if (vd == NULL) {
+        vector_desc_t *newvd = heap_caps_malloc(sizeof(vector_desc_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+        if (newvd == NULL) {
+            return NULL;
+        }
         memset(newvd, 0, sizeof(vector_desc_t));
-        newvd->intno=intno;
-        newvd->cpu=cpu;
+        newvd->intno = intno;
+        newvd->cpu = cpu;
         insert_vector_desc(newvd);
         return newvd;
     } else {
@@ -157,42 +161,52 @@ static vector_desc_t *get_desc_for_int(int intno, int cpu)
 //Returns a vector_desc entry for an source, the cpu parameter is used to tell GPIO_INT and GPIO_NMI from different CPUs
 static vector_desc_t * find_desc_for_source(int source, int cpu)
 {
-    vector_desc_t *vd=vector_desc_head;
-    while(vd!=NULL) {
-        if ( !(vd->flags & VECDESC_FL_SHARED) ) {
-            if ( vd->source == source && cpu == vd->cpu ) break;
-        } else if ( vd->cpu == cpu ) {
+    vector_desc_t *vd = vector_desc_head;
+    while(vd != NULL) {
+        if (!(vd->flags & VECDESC_FL_SHARED)) {
+            if (vd->source == source && cpu == vd->cpu) {
+                break;
+            }
+        } else if (vd->cpu == cpu) {
             // check only shared vds for the correct cpu, otherwise skip
             bool found = false;
             shared_vector_desc_t *svd = vd->shared_vec_info;
-            assert(svd != NULL );
+            assert(svd != NULL);
             while(svd) {
-                if ( svd->source == source ) {
+                if (svd->source == source) {
                     found = true;
                     break;
                 }
                 svd = svd->next;
             }
-            if ( found ) break;
+            if (found) {
+                break;
+            }
         }
-        vd=vd->next;
+        vd = vd->next;
     }
     return vd;
 }
 
 esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
 {
-    if (intno>31) return ESP_ERR_INVALID_ARG;
-    if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;
+    if (intno>31) {
+        return ESP_ERR_INVALID_ARG;
+    }
+    if (cpu >= SOC_CPU_CORES_NUM) {
+        return ESP_ERR_INVALID_ARG;
+    }
 
     portENTER_CRITICAL(&spinlock);
-    vector_desc_t *vd=get_desc_for_int(intno, cpu);
-    if (vd==NULL) {
+    vector_desc_t *vd = get_desc_for_int(intno, cpu);
+    if (vd == NULL) {
         portEXIT_CRITICAL(&spinlock);
         return ESP_ERR_NO_MEM;
     }
-    vd->flags=VECDESC_FL_SHARED;
-    if (is_int_ram) vd->flags|=VECDESC_FL_INIRAM;
+    vd->flags = VECDESC_FL_SHARED;
+    if (is_int_ram) {
+        vd->flags |= VECDESC_FL_INIRAM;
+    }
     portEXIT_CRITICAL(&spinlock);
 
     return ESP_OK;
@@ -200,16 +214,20 @@ esp_err_t esp_intr_mark_shared(int intno, int cpu, bool is_int_ram)
 
 esp_err_t esp_intr_reserve(int intno, int cpu)
 {
-    if (intno>31) return ESP_ERR_INVALID_ARG;
-    if (cpu>=SOC_CPU_CORES_NUM) return ESP_ERR_INVALID_ARG;
+    if (intno > 31) {
+        return ESP_ERR_INVALID_ARG;
+    }
+    if (cpu >= SOC_CPU_CORES_NUM) {
+        return ESP_ERR_INVALID_ARG;
+    }
 
     portENTER_CRITICAL(&spinlock);
-    vector_desc_t *vd=get_desc_for_int(intno, cpu);
-    if (vd==NULL) {
+    vector_desc_t *vd = get_desc_for_int(intno, cpu);
+    if (vd == NULL) {
         portEXIT_CRITICAL(&spinlock);
         return ESP_ERR_NO_MEM;
     }
-    vd->flags=VECDESC_FL_RESERVED;
+    vd->flags = VECDESC_FL_RESERVED;
     portEXIT_CRITICAL(&spinlock);
 
     return ESP_OK;
@@ -226,45 +244,45 @@ static bool is_vect_desc_usable(vector_desc_t *vd, int flags, int cpu, int force
         ALCHLOG("....Unusable: reserved");
         return false;
     }
-    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force==-1) {
+    if (intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_SPECIAL && force == -1) {
         ALCHLOG("....Unusable: special-purpose int");
         return false;
     }
 
 #ifndef SOC_CPU_HAS_FLEXIBLE_INTC
-    //Check if the interrupt level is acceptable
-    if (!(flags&(1<<intr_desc.priority))) {
-        ALCHLOG("....Unusable: incompatible level");
+    //Check if the interrupt priority is acceptable
+    if (!(flags & (1 << intr_desc.priority))) {
+        ALCHLOG("....Unusable: incompatible priority");
         return false;
     }
     //check if edge/level type matches what we want
-    if (((flags&ESP_INTR_FLAG_EDGE) && (intr_desc.type==ESP_CPU_INTR_TYPE_LEVEL)) ||
-        (((!(flags&ESP_INTR_FLAG_EDGE)) && (intr_desc.type==ESP_CPU_INTR_TYPE_EDGE)))) {
+    if (((flags & ESP_INTR_FLAG_EDGE) && (intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL)) ||
+        (((!(flags & ESP_INTR_FLAG_EDGE)) && (intr_desc.type == ESP_CPU_INTR_TYPE_EDGE)))) {
         ALCHLOG("....Unusable: incompatible trigger type");
         return false;
     }
 #endif
 
     //check if interrupt is reserved at runtime
-    if (vd->flags&VECDESC_FL_RESERVED) {
+    if (vd->flags & VECDESC_FL_RESERVED) {
         ALCHLOG("....Unusable: reserved at runtime.");
         return false;
     }
 
     //Ints can't be both shared and non-shared.
-    assert(!((vd->flags&VECDESC_FL_SHARED)&&(vd->flags&VECDESC_FL_NONSHARED)));
+    assert(!((vd->flags & VECDESC_FL_SHARED) && (vd->flags & VECDESC_FL_NONSHARED)));
     //check if interrupt already is in use by a non-shared interrupt
-    if (vd->flags&VECDESC_FL_NONSHARED) {
+    if (vd->flags & VECDESC_FL_NONSHARED) {
         ALCHLOG("....Unusable: already in (non-shared) use.");
         return false;
     }
     // check shared interrupt flags
-    if (vd->flags&VECDESC_FL_SHARED ) {
-        if (flags&ESP_INTR_FLAG_SHARED) {
-            bool in_iram_flag=((flags&ESP_INTR_FLAG_IRAM)!=0);
-            bool desc_in_iram_flag=((vd->flags&VECDESC_FL_INIRAM)!=0);
+    if (vd->flags & VECDESC_FL_SHARED) {
+        if (flags & ESP_INTR_FLAG_SHARED) {
+            bool in_iram_flag = ((flags & ESP_INTR_FLAG_IRAM) != 0);
+            bool desc_in_iram_flag = ((vd->flags & VECDESC_FL_INIRAM) != 0);
             //Bail out if int is shared, but iram property doesn't match what we want.
-            if ((vd->flags&VECDESC_FL_SHARED) && (desc_in_iram_flag!=in_iram_flag)) {
+            if ((vd->flags & VECDESC_FL_SHARED) && (desc_in_iram_flag != in_iram_flag)) {
                 ALCHLOG("....Unusable: shared but iram prop doesn't match");
                 return false;
             }
@@ -289,7 +307,7 @@ static int get_available_int(int flags, int cpu, int force, int source)
 {
     int x;
     int best=-1;
-    int bestLevel=9;
+    int bestPriority=9;
     int bestSharedCt=INT_MAX;
 
     //Default vector desc, for vectors not in the linked list
@@ -297,32 +315,34 @@ static int get_available_int(int flags, int cpu, int force, int source)
     memset(&empty_vect_desc, 0, sizeof(vector_desc_t));
 
     //Level defaults to any low/med interrupt
-    if (!(flags&ESP_INTR_FLAG_LEVELMASK)) flags|=ESP_INTR_FLAG_LOWMED;
+    if (!(flags & ESP_INTR_FLAG_LEVELMASK)) {
+        flags |= ESP_INTR_FLAG_LOWMED;
+    }
 
     ALCHLOG("get_available_int: try to find existing. Cpu: %d, Source: %d", cpu, source);
     vector_desc_t *vd = find_desc_for_source(source, cpu);
-    if ( vd ) {
+    if (vd) {
         // if existing vd found, don't need to search any more.
         ALCHLOG("get_avalible_int: existing vd found. intno: %d", vd->intno);
         if ( force != -1 && force != vd->intno ) {
            ALCHLOG("get_avalible_int: intr forced but not matach existing. existing intno: %d, force: %d", vd->intno, force);
-        } else if ( !is_vect_desc_usable(vd, flags, cpu, force) ) {
+        } else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
            ALCHLOG("get_avalible_int: existing vd invalid.");
         } else {
            best = vd->intno;
         }
         return best;
     }
-    if (force!=-1) {
+    if (force != -1) {
         ALCHLOG("get_available_int: try to find force. Cpu: %d, Source: %d, Force: %d", cpu, source, force);
         //if force assigned, don't need to search any more.
         vd = find_desc_for_int(force, cpu);
-        if (vd == NULL ) {
+        if (vd == NULL) {
            //if existing vd not found, just check the default state for the intr.
            empty_vect_desc.intno = force;
            vd = &empty_vect_desc;
        }
-        if ( is_vect_desc_usable(vd, flags, cpu, force) ) {
+        if (is_vect_desc_usable(vd, flags, cpu, force)) {
            best = vd->intno;
        } else {
            ALCHLOG("get_avalible_int: forced vd invalid.");
@@ -332,53 +352,55 @@ static int get_available_int(int flags, int cpu, int force, int source)
 
     ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu);
     //No allocated handlers as well as forced intr, iterate over the 32 possible interrupts
-    for (x=0; x<32; x++) {
+    for (x = 0; x < 32; x++) {
         //Grab the vector_desc for this vector.
-        vd=find_desc_for_int(x, cpu);
-        if (vd==NULL) {
+        vd = find_desc_for_int(x, cpu);
+        if (vd == NULL) {
             empty_vect_desc.intno = x;
-            vd=&empty_vect_desc;
+            vd = &empty_vect_desc;
         }
 
         esp_cpu_intr_desc_t intr_desc;
         esp_cpu_intr_get_desc(cpu, x, &intr_desc);
 
-        ALCHLOG("Int %d reserved %d level %d %s hasIsr %d",
+        ALCHLOG("Int %d reserved %d priority %d %s hasIsr %d",
                 x, intr_desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD, intr_desc.priority,
-                intr_desc.type==ESP_CPU_INTR_TYPE_LEVEL?"LEVEL":"EDGE", esp_cpu_intr_has_handler(x));
+                intr_desc.type == ESP_CPU_INTR_TYPE_LEVEL? "LEVEL" : "EDGE", esp_cpu_intr_has_handler(x));
 
-        if ( !is_vect_desc_usable(vd, flags, cpu, force) ) continue;
+        if (!is_vect_desc_usable(vd, flags, cpu, force)) {
+            continue;
+        }
 
-        if (flags&ESP_INTR_FLAG_SHARED) {
+        if (flags & ESP_INTR_FLAG_SHARED) {
             //We're allocating a shared int.
 
             //See if int already is used as a shared interrupt.
-            if (vd->flags&VECDESC_FL_SHARED) {
+            if (vd->flags & VECDESC_FL_SHARED) {
                 //We can use this already-marked-as-shared interrupt. Count the already attached isrs in order to see
                 //how useful it is.
-                int no=0;
-                shared_vector_desc_t *svdesc=vd->shared_vec_info;
-                while (svdesc!=NULL) {
+                int no = 0;
+                shared_vector_desc_t *svdesc = vd->shared_vec_info;
+                while (svdesc != NULL) {
                     no++;
-                    svdesc=svdesc->next;
+                    svdesc = svdesc->next;
                 }
-                if (no<bestSharedCt || bestLevel>intr_desc.priority) {
+                if (no<bestSharedCt || bestPriority > intr_desc.priority) {
                     //Seems like this shared vector is both okay and has the least amount of ISRs already attached to it.
-                    best=x;
-                    bestSharedCt=no;
-                    bestLevel=intr_desc.priority;
+                    best = x;
+                    bestSharedCt = no;
+                    bestPriority = intr_desc.priority;
                     ALCHLOG("...int %d more usable as a shared int: has %d existing vectors", x, no);
                 } else {
                     ALCHLOG("...worse than int %d", best);
                 }
             } else {
-                if (best==-1) {
+                if (best == -1) {
                     //We haven't found a feasible shared interrupt yet. This one is still free and usable, even if
                     //not marked as shared.
                     //Remember it in case we don't find any other shared interrupt that qualifies.
-                    if (bestLevel>intr_desc.priority) {
-                        best=x;
-                        bestLevel=intr_desc.priority;
+                    if (bestPriority > intr_desc.priority) {
+                        best = x;
+                        bestPriority = intr_desc.priority;
                         ALCHLOG("...int %d usable as a new shared int", x);
                     }
                 } else {
@@ -387,9 +409,9 @@ static int get_available_int(int flags, int cpu, int force, int source)
             }
         } else {
             //Seems this interrupt is feasible. Select it and break out of the loop; no need to search further.
-            if (bestLevel>intr_desc.priority) {
-                best=x;
-                bestLevel=intr_desc.priority;
+            if (bestPriority > intr_desc.priority) {
+                best = x;
+                bestPriority = intr_desc.priority;
             } else {
                 ALCHLOG("...worse than int %d", best);
             }
@@ -404,13 +426,13 @@ static int get_available_int(int flags, int cpu, int force, int source)
 //Common shared isr handler. Chain-call all ISRs.
 static void IRAM_ATTR shared_intr_isr(void *arg)
 {
-    vector_desc_t *vd=(vector_desc_t*)arg;
-    shared_vector_desc_t *sh_vec=vd->shared_vec_info;
+    vector_desc_t *vd = (vector_desc_t*)arg;
+    shared_vector_desc_t *sh_vec = vd->shared_vec_info;
     portENTER_CRITICAL_ISR(&spinlock);
     while(sh_vec) {
         if (!sh_vec->disabled) {
             if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
-                traceISR_ENTER(sh_vec->source+ETS_INTERNAL_INTR_SOURCE_OFF);
+                traceISR_ENTER(sh_vec->source + ETS_INTERNAL_INTR_SOURCE_OFF);
                 sh_vec->isr(sh_vec->arg);
                 // check if we will return to scheduler or to interrupted task after ISR
                 if (!os_task_switch_is_pended(esp_cpu_get_core_id())) {
@@ -418,7 +440,7 @@ static void IRAM_ATTR shared_intr_isr(void *arg)
                 }
             }
         }
-        sh_vec=sh_vec->next;
+        sh_vec = sh_vec->next;
     }
     portEXIT_CRITICAL_ISR(&spinlock);
 }
@@ -427,9 +449,9 @@ static void IRAM_ATTR shared_intr_isr(void *arg)
 //Common non-shared isr handler wrapper.
 static void IRAM_ATTR non_shared_intr_isr(void *arg)
 {
-    non_shared_isr_arg_t *ns_isr_arg=(non_shared_isr_arg_t*)arg;
+    non_shared_isr_arg_t *ns_isr_arg = (non_shared_isr_arg_t*)arg;
     portENTER_CRITICAL_ISR(&spinlock);
-    traceISR_ENTER(ns_isr_arg->source+ETS_INTERNAL_INTR_SOURCE_OFF);
+    traceISR_ENTER(ns_isr_arg->source + ETS_INTERNAL_INTR_SOURCE_OFF);
     // FIXME: can we call ISR and check os_task_switch_is_pended() after releasing spinlock?
     // when CONFIG_APPTRACE_SV_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
     ns_isr_arg->isr(ns_isr_arg->isr_arg);
@@ -446,16 +468,24 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
                                     void *arg, intr_handle_t *ret_handle)
 {
     intr_handle_data_t *ret=NULL;
-    int force=-1;
+    int force = -1;
     ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): checking args", esp_cpu_get_core_id());
     //Shared interrupts should be level-triggered.
-    if ((flags&ESP_INTR_FLAG_SHARED) && (flags&ESP_INTR_FLAG_EDGE)) return ESP_ERR_INVALID_ARG;
+    if ((flags & ESP_INTR_FLAG_SHARED) && (flags & ESP_INTR_FLAG_EDGE)) {
+        return ESP_ERR_INVALID_ARG;
+    }
     //You can't set an handler / arg for a non-C-callable interrupt.
-    if ((flags&ESP_INTR_FLAG_HIGH) && (handler)) return ESP_ERR_INVALID_ARG;
+    if ((flags & ESP_INTR_FLAG_HIGH) && (handler)) {
+        return ESP_ERR_INVALID_ARG;
+    }
     //Shared ints should have handler and non-processor-local source
-    if ((flags&ESP_INTR_FLAG_SHARED) && (!handler || source<0)) return ESP_ERR_INVALID_ARG;
+    if ((flags & ESP_INTR_FLAG_SHARED) && (!handler || source<0)) {
+        return ESP_ERR_INVALID_ARG;
+    }
     //Statusreg should have a mask
-    if (intrstatusreg && !intrstatusmask) return ESP_ERR_INVALID_ARG;
+    if (intrstatusreg && !intrstatusmask) {
+        return ESP_ERR_INVALID_ARG;
+    }
     //If the ISR is marked to be IRAM-resident, the handler must not be in the cached region
     //ToDo: if we are to allow placing interrupt handlers into the 0x400c0000—0x400c2000 region,
     //we need to make sure the interrupt is connected to the CPU0.
@@ -472,70 +502,84 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
     }
 
     //Default to prio 1 for shared interrupts. Default to prio 1, 2 or 3 for non-shared interrupts.
-    if ((flags&ESP_INTR_FLAG_LEVELMASK)==0) {
-        if (flags&ESP_INTR_FLAG_SHARED) {
-            flags|=ESP_INTR_FLAG_LEVEL1;
+    if ((flags & ESP_INTR_FLAG_LEVELMASK) == 0) {
+        if (flags & ESP_INTR_FLAG_SHARED) {
+            flags |= ESP_INTR_FLAG_LEVEL1;
         } else {
-            flags|=ESP_INTR_FLAG_LOWMED;
+            flags |= ESP_INTR_FLAG_LOWMED;
         }
     }
     ESP_EARLY_LOGV(TAG, "esp_intr_alloc_intrstatus (cpu %u): Args okay. Resulting flags 0x%X", esp_cpu_get_core_id(), flags);
 
     //Check 'special' interrupt sources. These are tied to one specific interrupt, so we
     //have to force get_free_int to only look at that.
-    if (source==ETS_INTERNAL_TIMER0_INTR_SOURCE) force=ETS_INTERNAL_TIMER0_INTR_NO;
-    if (source==ETS_INTERNAL_TIMER1_INTR_SOURCE) force=ETS_INTERNAL_TIMER1_INTR_NO;
-    if (source==ETS_INTERNAL_TIMER2_INTR_SOURCE) force=ETS_INTERNAL_TIMER2_INTR_NO;
-    if (source==ETS_INTERNAL_SW0_INTR_SOURCE) force=ETS_INTERNAL_SW0_INTR_NO;
-    if (source==ETS_INTERNAL_SW1_INTR_SOURCE) force=ETS_INTERNAL_SW1_INTR_NO;
-    if (source==ETS_INTERNAL_PROFILING_INTR_SOURCE) force=ETS_INTERNAL_PROFILING_INTR_NO;
+    if (source == ETS_INTERNAL_TIMER0_INTR_SOURCE) {
+        force = ETS_INTERNAL_TIMER0_INTR_NO;
+    }
+    if (source == ETS_INTERNAL_TIMER1_INTR_SOURCE) {
+        force = ETS_INTERNAL_TIMER1_INTR_NO;
+    }
+    if (source == ETS_INTERNAL_TIMER2_INTR_SOURCE) {
+        force = ETS_INTERNAL_TIMER2_INTR_NO;
+    }
+    if (source == ETS_INTERNAL_SW0_INTR_SOURCE) {
+        force = ETS_INTERNAL_SW0_INTR_NO;
+    }
+    if (source == ETS_INTERNAL_SW1_INTR_SOURCE) {
+        force = ETS_INTERNAL_SW1_INTR_NO;
+    }
+    if (source == ETS_INTERNAL_PROFILING_INTR_SOURCE) {
+        force = ETS_INTERNAL_PROFILING_INTR_NO;
+    }
 
     //Allocate a return handle. If we end up not needing it, we'll free it later on.
-    ret=heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
-    if (ret==NULL) return ESP_ERR_NO_MEM;
+    ret = heap_caps_malloc(sizeof(intr_handle_data_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
+    if (ret == NULL) {
+        return ESP_ERR_NO_MEM;
+    }
 
     portENTER_CRITICAL(&spinlock);
     uint32_t cpu = esp_cpu_get_core_id();
     //See if we can find an interrupt that matches the flags.
-    int intr=get_available_int(flags, cpu, force, source);
-    if (intr==-1) {
+    int intr = get_available_int(flags, cpu, force, source);
+    if (intr == -1) {
         //None found. Bail out.
         portEXIT_CRITICAL(&spinlock);
         free(ret);
         return ESP_ERR_NOT_FOUND;
     }
     //Get an int vector desc for int.
-    vector_desc_t *vd=get_desc_for_int(intr, cpu);
-    if (vd==NULL) {
+    vector_desc_t *vd = get_desc_for_int(intr, cpu);
+    if (vd == NULL) {
         portEXIT_CRITICAL(&spinlock);
         free(ret);
         return ESP_ERR_NO_MEM;
     }
 
     //Allocate that int!
-    if (flags&ESP_INTR_FLAG_SHARED) {
+    if (flags & ESP_INTR_FLAG_SHARED) {
         //Populate vector entry and add to linked list.
         shared_vector_desc_t *sh_vec=malloc(sizeof(shared_vector_desc_t));
-        if (sh_vec==NULL) {
+        if (sh_vec == NULL) {
             portEXIT_CRITICAL(&spinlock);
             free(ret);
             return ESP_ERR_NO_MEM;
         }
         memset(sh_vec, 0, sizeof(shared_vector_desc_t));
-        sh_vec->statusreg=(uint32_t*)intrstatusreg;
-        sh_vec->statusmask=intrstatusmask;
-        sh_vec->isr=handler;
-        sh_vec->arg=arg;
-        sh_vec->next=vd->shared_vec_info;
-        sh_vec->source=source;
-        sh_vec->disabled=0;
-        vd->shared_vec_info=sh_vec;
-        vd->flags|=VECDESC_FL_SHARED;
+        sh_vec->statusreg = (uint32_t*)intrstatusreg;
+        sh_vec->statusmask = intrstatusmask;
+        sh_vec->isr = handler;
+        sh_vec->arg = arg;
+        sh_vec->next = vd->shared_vec_info;
+        sh_vec->source = source;
+        sh_vec->disabled = 0;
+        vd->shared_vec_info = sh_vec;
+        vd->flags |= VECDESC_FL_SHARED;
         //(Re-)set shared isr handler to new value.
         esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)shared_intr_isr, vd);
     } else {
         //Mark as unusable for other interrupt sources. This is ours now!
-        vd->flags=VECDESC_FL_NONSHARED;
+        vd->flags = VECDESC_FL_NONSHARED;
         if (handler) {
 #if CONFIG_APPTRACE_SV_ENABLE
             non_shared_isr_arg_t *ns_isr_arg=malloc(sizeof(non_shared_isr_arg_t));
@@ -544,9 +588,9 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
                 free(ret);
                 return ESP_ERR_NO_MEM;
             }
-            ns_isr_arg->isr=handler;
-            ns_isr_arg->isr_arg=arg;
-            ns_isr_arg->source=source;
+            ns_isr_arg->isr = handler;
+            ns_isr_arg->isr_arg = arg;
+            ns_isr_arg->source = source;
             esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)non_shared_intr_isr, ns_isr_arg);
 #else
             esp_cpu_intr_set_handler(intr, (esp_cpu_intr_handler_t)handler, arg);
@@ -557,29 +601,29 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
             esp_cpu_intr_edge_ack(intr);
         }
 
-        vd->source=source;
+        vd->source = source;
     }
-    if (flags&ESP_INTR_FLAG_IRAM) {
-        vd->flags|=VECDESC_FL_INIRAM;
-        non_iram_int_mask[cpu]&=~(1<<intr);
+    if (flags & ESP_INTR_FLAG_IRAM) {
+        vd->flags |= VECDESC_FL_INIRAM;
+        non_iram_int_mask[cpu] &= ~(1<<intr);
     } else {
-        vd->flags&=~VECDESC_FL_INIRAM;
-        non_iram_int_mask[cpu]|=(1<<intr);
+        vd->flags &= ~VECDESC_FL_INIRAM;
+        non_iram_int_mask[cpu] |= (1<<intr);
     }
     if (source>=0) {
         esp_rom_route_intr_matrix(cpu, source, intr);
     }
 
     //Fill return handle data.
-    ret->vector_desc=vd;
-    ret->shared_vector_desc=vd->shared_vec_info;
+    ret->vector_desc = vd;
+    ret->shared_vector_desc = vd->shared_vec_info;
 
     //Enable int at CPU-level;
     ESP_INTR_ENABLE(intr);
 
     //If interrupt has to be started disabled, do that now; ints won't be enabled for real until the end
     //of the critical section.
-    if (flags&ESP_INTR_FLAG_INTRDISABLED) {
+    if (flags & ESP_INTR_FLAG_INTRDISABLED) {
         esp_intr_disable(ret);
     }
 
@@ -598,8 +642,8 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
     portEXIT_CRITICAL(&spinlock);
 
     //Fill return handle if needed, otherwise free handle.
-    if (ret_handle!=NULL) {
-        *ret_handle=ret;
+    if (ret_handle != NULL) {
+        *ret_handle = ret;
     } else {
         free(ret);
     }
@@ -620,7 +664,9 @@ esp_err_t esp_intr_alloc(int source, int flags, intr_handler_t handler, void *ar
 
 esp_err_t IRAM_ATTR esp_intr_set_in_iram(intr_handle_t handle, bool is_in_iram)
 {
-    if (!handle) return ESP_ERR_INVALID_ARG;
+    if (!handle) {
+        return ESP_ERR_INVALID_ARG;
+    }
     vector_desc_t *vd = handle->vector_desc;
     if (vd->flags & VECDESC_FL_SHARED) {
         return ESP_ERR_INVALID_ARG;
@@ -648,11 +694,13 @@ static void esp_intr_free_cb(void *arg)
 esp_err_t esp_intr_free(intr_handle_t handle)
 {
     bool free_shared_vector=false;
-    if (!handle) return ESP_ERR_INVALID_ARG;
+    if (!handle) {
+        return ESP_ERR_INVALID_ARG;
+    }
 
 #if !CONFIG_FREERTOS_UNICORE
     //Assign this routine to the core where this interrupt is allocated on.
-    if (handle->vector_desc->cpu!=esp_cpu_get_core_id()) {
+    if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
         esp_err_t ret = esp_ipc_call_blocking(handle->vector_desc->cpu, &esp_intr_free_cb, (void *)handle);
         return ret == ESP_OK ? ESP_OK : ESP_FAIL;
     }
@@ -660,31 +708,36 @@ esp_err_t esp_intr_free(intr_handle_t handle)
 
     portENTER_CRITICAL(&spinlock);
     esp_intr_disable(handle);
-    if (handle->vector_desc->flags&VECDESC_FL_SHARED) {
+    if (handle->vector_desc->flags & VECDESC_FL_SHARED) {
         //Find and kill the shared int
-        shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
-        shared_vector_desc_t *prevsvd=NULL;
+        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
+        shared_vector_desc_t *prevsvd = NULL;
         assert(svd); //should be something in there for a shared int
-        while (svd!=NULL) {
-            if (svd==handle->shared_vector_desc) {
+        while (svd != NULL) {
+            if (svd == handle->shared_vector_desc) {
                 //Found it. Now kill it.
                 if (prevsvd) {
-                    prevsvd->next=svd->next;
+                    prevsvd->next = svd->next;
                 } else {
-                    handle->vector_desc->shared_vec_info=svd->next;
+                    handle->vector_desc->shared_vec_info = svd->next;
                 }
                 free(svd);
                 break;
             }
-            prevsvd=svd;
-            svd=svd->next;
+            prevsvd = svd;
+            svd = svd->next;
         }
         //If nothing left, disable interrupt.
-        if (handle->vector_desc->shared_vec_info==NULL) free_shared_vector=true;
-        ESP_EARLY_LOGV(TAG, "esp_intr_free: Deleting shared int: %s. Shared int is %s", svd?"not found or last one":"deleted", free_shared_vector?"empty now.":"still in use");
+        if (handle->vector_desc->shared_vec_info == NULL) {
+            free_shared_vector = true;
+        }
+        ESP_EARLY_LOGV(TAG,
+                       "esp_intr_free: Deleting shared int: %s. Shared int is %s",
+                       svd ? "not found or last one" : "deleted",
+                       free_shared_vector ? "empty now." : "still in use");
     }
 
-    if ((handle->vector_desc->flags&VECDESC_FL_NONSHARED) || free_shared_vector) {
+    if ((handle->vector_desc->flags & VECDESC_FL_NONSHARED) || free_shared_vector) {
         ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
 #if CONFIG_APPTRACE_SV_ENABLE
         if (!free_shared_vector) {
@@ -699,9 +752,9 @@ esp_err_t esp_intr_free(intr_handle_t handle)
         //Theoretically, we could free the vector_desc... not sure if that's worth the few bytes of memory
         //we save.(We can also not use the same exit path for empty shared ints anymore if we delete
         //the desc.) For now, just mark it as free.
-        handle->vector_desc->flags&=~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED|VECDESC_FL_SHARED);
+        handle->vector_desc->flags &= ~(VECDESC_FL_NONSHARED|VECDESC_FL_RESERVED|VECDESC_FL_SHARED);
         //Also kill non_iram mask bit.
-        non_iram_int_mask[handle->vector_desc->cpu]&=~(1<<(handle->vector_desc->intno));
+        non_iram_int_mask[handle->vector_desc->cpu] &= ~(1<<(handle->vector_desc->intno));
     }
     portEXIT_CRITICAL(&spinlock);
     free(handle);
@@ -731,11 +784,13 @@ int esp_intr_get_cpu(intr_handle_t handle)
 
 esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
 {
-    if (!handle) return ESP_ERR_INVALID_ARG;
+    if (!handle) {
+        return ESP_ERR_INVALID_ARG;
+    }
     portENTER_CRITICAL_SAFE(&spinlock);
     int source;
     if (handle->shared_vector_desc) {
-        handle->shared_vector_desc->disabled=0;
+        handle->shared_vector_desc->disabled = 0;
         source=handle->shared_vector_desc->source;
     } else {
         source=handle->vector_desc->source;
@@ -745,7 +800,9 @@ esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
         esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, handle->vector_desc->intno);
     } else {
         //Re-enable using cpu int ena reg
-        if (handle->vector_desc->cpu!=esp_cpu_get_core_id()) return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
+        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
+            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
+        }
         ESP_INTR_ENABLE(handle->vector_desc->intno);
     }
     portEXIT_CRITICAL_SAFE(&spinlock);
@@ -754,18 +811,20 @@ esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
 
 esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
 {
-    if (!handle) return ESP_ERR_INVALID_ARG;
+    if (!handle) {
+        return ESP_ERR_INVALID_ARG;
+    }
     portENTER_CRITICAL_SAFE(&spinlock);
     int source;
    bool disabled = 1;
    if (handle->shared_vector_desc) {
-        handle->shared_vector_desc->disabled=1;
+        handle->shared_vector_desc->disabled = 1;
        source=handle->shared_vector_desc->source;
 
-        shared_vector_desc_t *svd=handle->vector_desc->shared_vec_info;
-        assert( svd != NULL );
-        while( svd ) {
-            if ( svd->source == source && svd->disabled == 0 ) {
+        shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
+        assert(svd != NULL);
+        while(svd) {
+            if (svd->source == source && svd->disabled == 0) {
                 disabled = 0;
                 break;
             }
@@ -776,13 +835,13 @@ esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
     }
 
     if (source >= 0) {
-        if ( disabled ) {
+        if (disabled) {
            //Disable using int matrix
            esp_rom_route_intr_matrix(handle->vector_desc->cpu, source, INT_MUX_DISABLED_INTNO);
        }
     } else {
        //Disable using per-cpu regs
-        if (handle->vector_desc->cpu!=esp_cpu_get_core_id()) {
+        if (handle->vector_desc->cpu != esp_cpu_get_core_id()) {
            portEXIT_CRITICAL_SAFE(&spinlock);
            return ESP_ERR_INVALID_ARG; //Can only enable these ints on this cpu
        }
@@ -27,7 +27,7 @@ void __real_esp_cpu_stall(int core_id);
 void __wrap_esp_panic_handler(panic_info_t *info)
 {
     XtExcFrame *frm = (XtExcFrame *)info->frame;
-    if ( frm->exccause == EXCCAUSE_ILLEGAL && g_override_illegal_instruction == true ) {
+    if (frm->exccause == EXCCAUSE_ILLEGAL && g_override_illegal_instruction == true) {
         frm->pc = frm->a0;
         return;
     } else {
@@ -37,7 +37,7 @@ void __wrap_esp_panic_handler(panic_info_t *info)
 
 void __wrap_esp_cpu_stall(int core_id)
 {
-    if ( g_override_illegal_instruction == true ) {
+    if (g_override_illegal_instruction == true) {
         return;
     } else {
         __real_esp_cpu_stall(core_id);
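For context on the code being reformatted above: `esp_intr_alloc()` is the public entry point of this allocator and `esp_intr_free()` releases the interrupt again. A minimal usage sketch follows; the chosen source (`ETS_GPIO_INTR_SOURCE`), the `ESP_INTR_FLAG_IRAM` flag and the empty handler body are illustrative assumptions, not something this commit prescribes.

```c
#include "esp_attr.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
// ETS_GPIO_INTR_SOURCE comes from the target's SoC headers (e.g. soc/periph_defs.h).

// Illustrative handler: a real ISR would clear the peripheral's interrupt
// status before returning.
static void IRAM_ATTR example_isr(void *arg)
{
    (void)arg;
}

void example_alloc_and_free(void)
{
    intr_handle_t handle;
    // Example source/flags choice; adjust for the peripheral you actually use.
    esp_err_t err = esp_intr_alloc(ETS_GPIO_INTR_SOURCE, ESP_INTR_FLAG_IRAM,
                                   example_isr, NULL, &handle);
    if (err != ESP_OK) {
        ESP_LOGE("example", "esp_intr_alloc failed: %d", err);
        return;
    }
    // ... peripheral is now routed to a free CPU interrupt ...
    esp_intr_free(handle);
}
```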