/*
 * Copyright (c) 2025 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <mvdma.h>
#include <zephyr/cache.h>
#include <zephyr/irq.h>
#include <zephyr/pm/policy.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#include <hal/nrf_cache.h>
#include <hal/nrf_mvdma.h>

/* To be removed once nrfx provides these symbols. */
#define NRF_MVDMA_INT_COMPLETED0_MASK MVDMA_INTENSET_COMPLETED0_Msk
#define NRF_MVDMA_EVENT_COMPLETED0 offsetof(NRF_MVDMA_Type, EVENTS_COMPLETED[0])

/* Counts enabled instances; the driver supports exactly one MVDMA node. */
#define MVDMA_DO_COUNT(node) 1 +

BUILD_ASSERT((DT_FOREACH_STATUS_OKAY(nordic_nrf_mvdma, MVDMA_DO_COUNT) 0) == 1,
	     "Exactly one enabled nordic,nrf-mvdma node is supported");

#define MVDMA_NODE() DT_COMPAT_GET_ANY_STATUS_OKAY(nordic_nrf_mvdma)

/* Queue of pending transfer requests. */
static sys_slist_t list;
/* Most recent bus error reported by the peripheral. */
static atomic_t hw_err;
/* Request currently owned by the peripheral. */
static struct mvdma_ctrl *curr_ctrl;
static NRF_MVDMA_Type *reg = (NRF_MVDMA_Type *)DT_REG_ADDR(MVDMA_NODE());

/* Terminate-only job lists used for the dummy transfer in mvdma_resume(). */
static uint32_t dummy_jobs[] __aligned(CONFIG_DCACHE_LINE_SIZE) = {
	NRF_MVDMA_JOB_TERMINATE,
	NRF_MVDMA_JOB_TERMINATE,
};

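/* Program the source and sink job list pointers and trigger the transfer. */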
static void xfer_start(uint32_t src, uint32_t sink)
{
	nrf_mvdma_event_clear(reg, NRF_MVDMA_EVENT_COMPLETED0);
	nrf_mvdma_source_list_ptr_set(reg, (void *)src);
	nrf_mvdma_sink_list_ptr_set(reg, (void *)sink);
	nrf_mvdma_task_trigger(reg, NRF_MVDMA_TASK_START0);
}

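/*
 * Common transfer entry point. If the peripheral is busy, or an
 * interrupt-driven transfer is still owned by its handler, the request is
 * queued when @p queue is true, otherwise -EBUSY is returned. Returns 0 when
 * the transfer was started immediately and 1 when it was queued.
 */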
static int xfer(struct mvdma_ctrl *ctrl, uint32_t src, uint32_t sink, bool queue)
{
	unsigned int key;
	bool int_en = true;
	int rv;

	key = irq_lock();
	if (nrf_mvdma_activity_check(reg) || (curr_ctrl && curr_ctrl->handler)) {
		if (queue) {
			ctrl->source = src;
			ctrl->sink = sink;
			sys_slist_append(&list, &ctrl->node);
			rv = 1;
		} else {
			irq_unlock(key);
			return -EBUSY;
		}
	} else {
		/* There might be pending requests that need to be marked as finished. */
		if (curr_ctrl != NULL) {
			sys_snode_t *node;
			struct mvdma_ctrl *prev;

			curr_ctrl->handler = (mvdma_handler_t)1;
			while ((node = sys_slist_get(&list)) != NULL) {
				prev = CONTAINER_OF(node, struct mvdma_ctrl, node);
				prev->handler = (mvdma_handler_t)1;
			}
		}

		curr_ctrl = ctrl;
		xfer_start(src, sink);
		if (ctrl->handler == NULL) {
			int_en = false;
		}
		rv = 0;
	}
	irq_unlock(key);

	if (int_en) {
		nrf_mvdma_int_enable(reg, NRF_MVDMA_INT_COMPLETED0_MASK);
	}

	pm_policy_all_lock_get();

	return rv;
}

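/*
 * Start (or queue) a transfer described by source and sink job lists. Both
 * descriptor lists are flushed from the data cache before being handed to
 * the peripheral.
 */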
int mvdma_xfer(struct mvdma_ctrl *ctrl, struct mvdma_jobs_desc *desc, bool queue)
{
	sys_cache_data_flush_range(desc->source, desc->source_desc_size);
	sys_cache_data_flush_range(desc->sink, desc->sink_desc_size);
	return xfer(ctrl, (uint32_t)desc->source, (uint32_t)desc->sink, queue);
}

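/*
 * Start (or queue) a transfer using the basic, single-job descriptor. The
 * descriptor is flushed from the data cache before being handed to the
 * peripheral.
 */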
int mvdma_basic_xfer(struct mvdma_ctrl *ctrl, struct mvdma_basic_desc *desc, bool queue)
{
	sys_cache_data_flush_range(desc, sizeof(*desc));
	return xfer(ctrl, (uint32_t)&desc->source, (uint32_t)&desc->sink, queue);
}

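/*
 * Poll for completion of a transfer. Returns -EIO if a bus error has been
 * reported, -EBUSY if a polled transfer is still in progress and 0 once the
 * transfer has finished (releasing the PM lock taken when it was started).
 */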
int mvdma_xfer_check(const struct mvdma_ctrl *ctrl)
{
	if (hw_err != NRF_MVDMA_ERR_NO_ERROR) {
		return -EIO;
	}

	if (nrf_mvdma_event_check(reg, NRF_MVDMA_EVENT_COMPLETED0)) {
		curr_ctrl = NULL;
	} else if (ctrl->handler == NULL) {
		return -EBUSY;
	}

	pm_policy_all_lock_put();

	return 0;
}

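/* Return the most recent bus error and clear it. */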
enum mvdma_err mvdma_error_check(void)
{
	return atomic_set(&hw_err, 0);
}

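/* Latch source and sink bus errors reported by the peripheral. */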
static void error_handler(void)
{
	if (nrf_mvdma_event_check(reg, NRF_MVDMA_EVENT_SOURCEBUSERROR)) {
		nrf_mvdma_event_clear(reg, NRF_MVDMA_EVENT_SOURCEBUSERROR);
		hw_err = NRF_MVDMA_ERR_SOURCE;
	}

	if (nrf_mvdma_event_check(reg, NRF_MVDMA_EVENT_SINKBUSERROR)) {
		nrf_mvdma_event_clear(reg, NRF_MVDMA_EVENT_SINKBUSERROR);
		hw_err = NRF_MVDMA_ERR_SINK;
	}
}

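/*
 * Completion handling: start the next queued transfer (if any), keep the
 * COMPLETED0 interrupt enabled only while it is still needed, and either call
 * the user handler or mark the finished request so that mvdma_xfer_check()
 * reports it as done.
 */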
static void ch_handler(int status)
{
	unsigned int key;
	struct mvdma_ctrl *ctrl = curr_ctrl;
	sys_snode_t *node;
	bool int_dis = true;

	ARG_UNUSED(status);

	key = irq_lock();
	node = sys_slist_get(&list);
	if (node) {
		struct mvdma_ctrl *next = CONTAINER_OF(node, struct mvdma_ctrl, node);

		curr_ctrl = next;
		xfer_start((uint32_t)next->source, (uint32_t)next->sink);
		if (next->handler || !sys_slist_is_empty(&list)) {
			int_dis = false;
		}
	} else {
		curr_ctrl = NULL;
	}
	if (int_dis) {
		nrf_mvdma_int_disable(reg, NRF_MVDMA_INT_COMPLETED0_MASK);
	}
	irq_unlock(key);

	if (ctrl->handler) {
		pm_policy_all_lock_put();
		ctrl->handler(ctrl->user_data, hw_err == NRF_MVDMA_ERR_NO_ERROR ? 0 : -EIO);
	} else {
		/* Set handler to non-NULL to indicate that the transfer has finished. */
		ctrl->handler = (mvdma_handler_t)1;
	}
}

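/* Dispatch transfer completion and bus error events. */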
static void mvdma_isr(const void *arg)
{
	uint32_t ints = nrf_mvdma_int_pending_get(reg);

	ARG_UNUSED(arg);

	if (ints & NRF_MVDMA_INT_COMPLETED0_MASK) {
		ch_handler(0);
	} else {
		error_handler();
	}
}

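/*
 * Re-enable bus error interrupts and run a dummy, terminate-only transfer so
 * that the COMPLETED0 event is set before the first real transfer is started.
 */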
void mvdma_resume(void)
{
	/* Enable interrupts for source and sink bus errors. */
	nrf_mvdma_int_enable(reg, NRF_MVDMA_INT_SOURCEBUSERROR_MASK |
				  NRF_MVDMA_INT_SINKBUSERROR_MASK);

	/* Dummy transfer to get the COMPLETED0 event set. */
	nrf_mvdma_source_list_ptr_set(reg, (void *)&dummy_jobs[0]);
	nrf_mvdma_sink_list_ptr_set(reg, (void *)&dummy_jobs[1]);
	nrf_mvdma_task_trigger(reg, NRF_MVDMA_TASK_START0);
}

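/*
 * One-time driver initialization: flush the dummy job lists from the data
 * cache, connect and enable the MVDMA interrupt, and resume the peripheral.
 */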
int mvdma_init(void)
{
	sys_cache_data_flush_range(dummy_jobs, sizeof(dummy_jobs));

	sys_slist_init(&list);

	IRQ_CONNECT(DT_IRQN(MVDMA_NODE()), DT_IRQ(MVDMA_NODE(), priority), mvdma_isr, 0, 0);
	irq_enable(DT_IRQN(MVDMA_NODE()));

	mvdma_resume();

	return 0;
}