
Commit 544225a

hal/arm: pmap_switch full MPU reconfiguration
This commit introduces full MPU region reconfiguration on context switch, allowing for more flexible configuration of memory maps on MPU targets. Performed tests show no memory coherence problems and minor improvements in pmap_switch performance. According to ARM documentation, cache maintenance is not required as long as memory maps do not overlap, and that assumption is already present in Phoenix-RTOS.

Changes include:
* an additional hal_syspage_prog_t structure, initialized in the loader, containing the program's MPU region configuration in the form of ready-to-copy register values
* the pmap_t structure now contains a pointer to the above structure instead of a regions bitmask
* pmap_switch disables the MPU and performs a full reconfiguration, optimized with LDMIA/STMIA assembly operations
* handling of a process's kernel-code access is moved to the loader
* hal/pmap.c functions are updated to the new pmap_t structure

JIRA: RTOS-1149
1 parent 76e72e4 commit 544225a
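
Note: the hal_syspage_prog_t layout below is only a minimal sketch inferred from the fields referenced in this diff (mpu.allocCnt, mpu.table[].rbar/.rasr, mpu.map[]); the exact definition, field types and array sizes live in the loader/halsyspage headers and may differ.

#include "hal/types.h"

/* Sketch of the per-program MPU configuration prepared by the loader.
 * Register values are stored ready to copy, so pmap_switch can write them
 * straight into MPU_RBAR/MPU_RASR without any per-region computation. */
typedef struct {
	struct {
		unsigned int allocCnt;  /* number of configured MPU regions */
		struct {
			u32 rbar;           /* pre-built MPU_RBAR value (base address, VALID bit, region number) */
			u32 rasr;           /* pre-built MPU_RASR value (size, attributes, enable bit) */
		} table[16];            /* capacity assumed for illustration */
		unsigned int map[16];   /* syspage map id backing each region (element type assumed), used by pmap_isAllowed */
	} mpu;
} hal_syspage_prog_t;

A process pmap then only stores a pointer to this structure (pmap->hal = &prog->hal, as in pmap_create in the diff below), so switching address spaces reduces to copying the prepared table into the MPU.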

32 files changed: +260 -340 lines


hal/aarch64/pmap.c

Lines changed: 1 addition & 1 deletion
@@ -265,7 +265,7 @@ static void _pmap_cacheOpAfterChange(descr_t newEntry, ptr_t vaddr, unsigned int
 
 
 /* Function creates empty page table */
-int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr)
+int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr)
 {
 	pmap->ttl1 = vaddr;
 	pmap->addr = p->addr;

hal/armv7a/pmap.c

Lines changed: 1 addition & 1 deletion
@@ -175,7 +175,7 @@ static void _pmap_asidDealloc(pmap_t *pmap)
 
 
 /* Function creates empty page table */
-int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr)
+int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr)
 {
 	pmap->pdir = vaddr;
 	pmap->addr = p->addr;

hal/armv7m/arch/pmap.h

Lines changed: 2 additions & 1 deletion
@@ -17,6 +17,7 @@
 #define _PH_HAL_PMAP_ARMV7M_H_
 
 #include "hal/types.h"
+#include "syspage.h"
 
 /* Architecture dependent page attributes - used for mapping */
 #define PGHD_PRESENT 0x01
@@ -55,7 +56,7 @@ typedef struct _page_t {
 typedef struct _pmap_t {
 	void *start;
 	void *end;
-	u32 regions;
+	const hal_syspage_prog_t *hal;
 } pmap_t;
 
 #endif

hal/armv7m/pmap.c

Lines changed: 61 additions & 92 deletions
@@ -17,9 +17,14 @@
 #include "config.h"
 #include "syspage.h"
 #include "halsyspage.h"
+#include "lib/lib.h"
 #include <arch/cpu.h>
 #include <arch/spinlock.h>
 
+
+#define MPU_BASE ((volatile u32 *)0xe000ed90)
+
+
 /* clang-format off */
 enum { mpu_type, mpu_ctrl, mpu_rnr, mpu_rbar, mpu_rasr, mpu_rbar_a1, mpu_rasr_a1, mpu_rbar_a2, mpu_rasr_a2,
 	mpu_rbar_a3, mpu_rasr_a3 };
@@ -35,13 +40,19 @@ static struct {
 	volatile u32 *mpu;
 	unsigned int kernelCodeRegion;
 	spinlock_t lock;
+	int last_mpu_count;
 } pmap_common;
 
 
 /* Function creates empty page table */
-int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, void *vaddr)
+int pmap_create(pmap_t *pmap, pmap_t *kpmap, page_t *p, const syspage_prog_t *prog, void *vaddr)
 {
-	pmap->regions = pmap_common.kernelCodeRegion;
+	if (prog != NULL) {
+		pmap->hal = &prog->hal;
+	}
+	else {
+		pmap->hal = NULL;
+	}
 	return 0;
 }
 
@@ -52,55 +63,40 @@ addr_t pmap_destroy(pmap_t *pmap, int *i)
 }
 
 
-static unsigned int pmap_map2region(unsigned int map)
+void pmap_switch(pmap_t *pmap)
 {
-	int i;
-	unsigned int mask = 0;
-
-	for (i = 0; i < sizeof(syspage->hs.mpu.map) / sizeof(*syspage->hs.mpu.map); ++i) {
-		if (map == syspage->hs.mpu.map[i]) {
-			mask |= (1 << i);
-		}
-	}
-
-	return mask;
-}
-
+	static const volatile u32 *RBAR_ADDR = MPU_BASE + mpu_rbar;
+	unsigned int allocCnt;
+	spinlock_ctx_t sc;
+	unsigned int i;
+	const u32 *tableCurrent;
 
-int pmap_addMap(pmap_t *pmap, unsigned int map)
-{
-	unsigned int rmask = pmap_map2region(map);
-	if (rmask == 0) {
-		return -1;
-	}
+	if (pmap != NULL && pmap->hal != NULL) {
+		hal_spinlockSet(&pmap_common.lock, &sc);
 
-	pmap->regions |= rmask;
+		allocCnt = pmap->hal->mpu.allocCnt;
+		tableCurrent = &pmap->hal->mpu.table[0].rbar;
 
-	return 0;
-}
+		/* Disable MPU */
+		hal_cpuDataMemoryBarrier();
+		*(pmap_common.mpu + mpu_ctrl) &= ~1;
+
+		for (i = 0; i < max(allocCnt, pmap_common.last_mpu_count); i += 4) {
+			/* RNR update is done by writes to RBAR */
+			__asm__ volatile(
+				"ldmia %[tableCurrent]!, {r3-r8, r10, r11} \n\t" /* Load 4 regions (rbar/rasr pairs) from table, update table pointer */
+				"stmia %[mpu_rbar], {r3-r8, r10, r11} \n\t"      /* Write 4 regions via RBAR/RASR and aliases */
+				: [tableCurrent] "+&r"(tableCurrent)
+				: [mpu_rbar] "r"(RBAR_ADDR)
+				: "r3", "r4", "r5", "r6", "r7", "r8", "r10", "r11");
+		}
 
+		/* Enable MPU */
+		*(pmap_common.mpu + mpu_ctrl) |= 1;
+		hal_cpuDataSyncBarrier();
 
-void pmap_switch(pmap_t *pmap)
-{
-	unsigned int i, cnt = syspage->hs.mpu.allocCnt;
-	spinlock_ctx_t sc;
+		pmap_common.last_mpu_count = allocCnt;
 
-	if (pmap != NULL) {
-		hal_spinlockSet(&pmap_common.lock, &sc);
-		for (i = 0; i < cnt; ++i) {
-			/* Select region */
-			*(pmap_common.mpu + mpu_rnr) = i;
-			hal_cpuDataMemoryBarrier();
-
-			/* Enable/disable region according to the mask */
-			if ((pmap->regions & (1 << i)) != 0) {
-				*(pmap_common.mpu + mpu_rasr) |= 1;
-			}
-			else {
-				*(pmap_common.mpu + mpu_rasr) &= ~1;
-			}
-			hal_cpuDataMemoryBarrier();
-		}
 		hal_spinlockClear(&pmap_common.lock, &sc);
 	}
 }
@@ -127,13 +123,21 @@ addr_t pmap_resolve(pmap_t *pmap, void *vaddr)
 int pmap_isAllowed(pmap_t *pmap, const void *vaddr, size_t size)
 {
 	const syspage_map_t *map = syspage_mapAddrResolve((addr_t)vaddr);
-	unsigned int rmask;
 	if (map == NULL) {
 		return 0;
 	}
-	rmask = pmap_map2region(map->id);
 
-	return ((pmap->regions & rmask) == 0) ? 0 : 1;
+	if (pmap->hal == NULL) {
+		/* Kernel pmap has access to everything */
+		return 1;
+	}
+
+	for (int i = 0; i < pmap->hal->mpu.allocCnt; ++i) {
+		if (pmap->hal->mpu.map[i] == map->id) {
+			return 1;
+		}
+	}
+	return 0;
 }
 
 
@@ -170,11 +174,8 @@ int pmap_segment(unsigned int i, void **vaddr, size_t *size, vm_prot_t *prot, vo
 
 void _pmap_init(pmap_t *pmap, void **vstart, void **vend)
 {
-	const syspage_map_t *ikmap;
-	unsigned int ikregion;
-	u32 t;
-	addr_t pc;
-	unsigned int i, cnt = syspage->hs.mpu.allocCnt;
+	unsigned int cnt = (syspage->hs.mpuType >> 8U) & 0xffU;
+	unsigned int i;
 
 	(*vstart) = (void *)(((ptr_t)_init_vectors + 7) & ~7);
 	(*vend) = (*((char **)vstart)) + SIZE_PAGE;
@@ -184,8 +185,11 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend)
 	/* Initial size of kernel map */
 	pmap->end = (void *)((addr_t)&__bss_start + 32 * 1024);
 
+	pmap->hal = NULL;
+	pmap_common.last_mpu_count = cnt;
+
 	/* Configure MPU */
-	pmap_common.mpu = (void *)0xe000ed90;
+	pmap_common.mpu = MPU_BASE;
 
 	/* Disable MPU just in case */
 	*(pmap_common.mpu + mpu_ctrl) &= ~1;
@@ -196,52 +200,17 @@ void _pmap_init(pmap_t *pmap, void **vstart, void **vend)
 	hal_cpuDataMemoryBarrier();
 
 	for (i = 0; i < cnt; ++i) {
-		t = syspage->hs.mpu.table[i].rbar;
-		if ((t & (1 << 4)) == 0) {
-			continue;
-		}
-
-		*(pmap_common.mpu + mpu_rbar) = t;
-		hal_cpuDataMemoryBarrier();
+		/* Select region */
+		*(pmap_common.mpu + mpu_rnr) = i;
 
-		/* Disable regions for now */
-		t = syspage->hs.mpu.table[i].rasr & ~1;
-		*(pmap_common.mpu + mpu_rasr) = t;
+		/* Disable all regions for now */
+		*(pmap_common.mpu + mpu_rasr) = 0;
 		hal_cpuDataMemoryBarrier();
 	}
 
 	/* Enable MPU */
 	*(pmap_common.mpu + mpu_ctrl) |= 1;
 	hal_cpuDataMemoryBarrier();
 
-	/* FIXME HACK
-	 * allow all programs to execute (and read) kernel code map.
-	 * Needed because of hal_jmp, syscalls handler and signals handler.
-	 * In these functions we need to switch to the user mode when still
-	 * executing kernel code. This will cause memory management fault
-	 * if the application does not have access to the kernel instruction
-	 * map. Possible fix - place return to the user code in the separate
-	 * region and allow this region instead. */
-
-	/* Find kernel code region */
-	__asm__ volatile("\tmov %0, pc;" : "=r"(pc));
-	ikmap = syspage_mapAddrResolve(pc);
-	if (ikmap == NULL) {
-		hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map not found. Bad system config\n");
-		for (;;) {
-			hal_cpuHalt();
-		}
-	}
-
-	ikregion = pmap_map2region(ikmap->id);
-	if (ikregion == 0) {
-		hal_consolePrint(ATTR_BOLD, "pmap: Kernel code map has no assigned region. Bad system config\n");
-		for (;;) {
-			hal_cpuHalt();
-		}
-	}
-
-	pmap_common.kernelCodeRegion = ikregion;
-
 	hal_spinlockCreate(&pmap_common.lock, "pmap");
 }
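
For reference, the batched copy in the new pmap_switch relies on two ARMv7-M MPU properties visible in the diff: MPU_RBAR, MPU_RASR and their _A1.._A3 aliases occupy consecutive word addresses (see the register enum at the top of the file), and each pre-built RBAR value carries the VALID bit plus region number, so writing RBAR also selects the region and no separate RNR write is needed. A hypothetical plain-C equivalent of the LDMIA/STMIA loop (not part of the commit; function name and bounds handling are illustrative only) could look like:

#include "hal/types.h"

/* Hypothetical C equivalent of the assembly in pmap_switch: copy 'cnt' regions
 * (rounded up to a multiple of 4) from the prepared table into the MPU,
 * four rbar/rasr pairs per iteration via MPU_RBAR/MPU_RASR and their aliases. */
static void mpu_copyTable(volatile u32 *mpuRbar, const u32 *table, unsigned int cnt)
{
	unsigned int i, j;

	for (i = 0; i < cnt; i += 4) {
		for (j = 0; j < 8; ++j) {
			/* mpuRbar[0..7] = RBAR, RASR, RBAR_A1, RASR_A1, ..., RBAR_A3, RASR_A3 */
			mpuRbar[j] = *table++;
		}
	}
}

The assembly version performs the same copy with one LDMIA/STMIA pair per iteration, keeping the window with the MPU disabled short. Iterating up to max(allocCnt, pmap_common.last_mpu_count) ensures that every region configured for the previously running program is rewritten, presumably with disabled entries past the new program's allocCnt.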

hal/armv7r/arch/pmap.h

Lines changed: 2 additions & 1 deletion
@@ -17,6 +17,7 @@
 #define _PH_HAL_PMAP_ARMV7R_H_
 
 #include "hal/types.h"
+#include "syspage.h"
 
 #define PGHD_PRESENT 0x01U
 #define PGHD_USER 0x04U
@@ -54,7 +55,7 @@ typedef struct _page_t {
 typedef struct _pmap_t {
 	void *start;
 	void *end;
-	u32 regions;
+	const hal_syspage_prog_t *hal;
 } pmap_t;
 
 #endif
