Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 0 additions & 6 deletions so3/arch/arm64/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,6 @@ if ARCH_ARM64

menu "Platform"

config ARM64VT
depends on AVZ
bool "Virtualization support (ARM64 VT)"
help
Enabling support of CPU virtualization extensions.

choice
prompt "Target"

Expand Down
6 changes: 2 additions & 4 deletions so3/arch/arm64/Makefile
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@

ifneq ($(CONFIG_ARM64VT),y)
ifneq ($(CONFIG_AVZ),y)
obj-y += hyp-stub.o
endif

Expand All @@ -10,16 +10,14 @@ obj-y += backtrace.o backtrace_asm.o
obj-y += cache_v8.o cache.o context.o
obj-y += semihosting.o semicall.o

obj-$(CONFIG_AVZ) += domain.o mmio.o
obj-$(CONFIG_AVZ) += domain.o mmio.o #smmu.o

obj-y += smccc-call.o

obj-y += thread.o

obj-$(CONFIG_MMU) += mmu.o

obj-$(CONFIG_ARM64VT) += #smmu.o

obj-y += lib/

obj-y += $(TARGET)/
Expand Down
2 changes: 1 addition & 1 deletion so3/arch/arm64/cache_v8.c
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ void mmu_setup(void *pgtable)
invalidate_dcache_all();
__asm_invalidate_tlb_all();

#endif /* !CONFIG_ARM64VT */
#endif /* !CONFIG_AVZ */
}

/*
Expand Down
4 changes: 2 additions & 2 deletions so3/arch/arm64/domain.c
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
#include <asm/mmu.h>
#include <asm/processor.h>

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
#include <mach/ipamap.h>
#endif

Expand Down Expand Up @@ -123,7 +123,7 @@ void __setup_dom_pgtable(struct domain *d, addr_t paddr_start, unsigned long map
/* Map the shared page in the IPA space; the shared page is located right after the domain area
* in the IPA space, and if any, the RT shared page follows the shared page (in IPA space).
*/
__create_mapping(new_pt, memslot[slotID].ipa_addr + map_size, __pa(d->avz_shared), PAGE_SIZE, true, S2);
__create_mapping(new_pt, memslot[slotID].ipa_addr + map_size, __pa(d->avz_shared), PAGE_SIZE, false, S2);

#ifdef CONFIG_SOO
/* Initialize the grant pfn (ipa address) area */
Expand Down
8 changes: 4 additions & 4 deletions so3/arch/arm64/include/asm/arm_timer.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
#include <asm/processor.h>

/**
* In AVZ and ARM64VT we are using the ARM physical timer. The guest domains will
* In AVZ we are using the ARM physical timer. The guest domains will
* rely on virtual timer where an offset can be added.
*/

Expand Down Expand Up @@ -53,7 +53,7 @@
* nicely work out which register we want, and chuck away the rest of
* the code. At least it does so with a recent GCC (4.6.3).
*/
#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ

static inline void arch_timer_reg_write_el2(enum arch_timer_reg reg, u32 val)
{
Expand Down Expand Up @@ -161,7 +161,7 @@ static inline u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
return 0;
}

#endif /* CONFIG_ARM64VT */
#endif /* CONFIG_AVZ */

/**
* Get the timer frequency
Expand All @@ -183,7 +183,7 @@ static inline u64 arch_counter_get_cntvct(void)
u64 cnt;

isb();
#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
cnt = read_sysreg(cntpct_el0);
#else
cnt = read_sysreg(cntvct_el0);
Expand Down
12 changes: 6 additions & 6 deletions so3/arch/arm64/include/asm/mmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -523,7 +523,7 @@ typedef enum { S1, S2 } mmu_stage_t;
#define VA2PA_WR "w"
#define va2pa_at(stage, el, rw, va) asm volatile("at " stage el rw ", %0" : : "r"(va) : "memory", "cc");

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ

typedef struct {
addr_t ipa_addr;
Expand Down Expand Up @@ -556,7 +556,7 @@ static inline void set_pte_page_S2(u64 *pte, enum dcache_option option)
*pte |= S2_PTE_FLAG_NORMAL;
}

#endif /* CONFIG_ARM64_VT */
#endif /* CONFIG_AVZ */

static inline void set_pte_table(u64 *pte, enum dcache_option option)
{
Expand Down Expand Up @@ -620,7 +620,7 @@ static inline bool user_space_vaddr(addr_t addr)
return true;
}

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ

static inline unsigned int get_sctlr(void)
{
Expand All @@ -637,7 +637,7 @@ static inline void set_sctlr(unsigned int val)
asm volatile("isb");
}

#else
#else /* CONFIG_AVZ */

static inline unsigned int get_sctlr(void)
{
Expand All @@ -654,7 +654,7 @@ static inline void set_sctlr(unsigned int val)
asm volatile("isb");
}

#endif
#endif /* !CONFIG_AVZ */

extern addr_t __sys_root_pgtable[], __sys_idmap_l1pgtable[], __sys_linearmap_l1pgtable[], __sys_linearmap_l2pgtable[];

Expand All @@ -678,7 +678,7 @@ extern void __mmu_switch_vttbr(void *root_pgtable_phys);

void __mmu_setup(void *pgtable);

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
void do_ipamap(void *pgtable, ipamap_t ipamap[], int nbelement);
#endif

Expand Down
2 changes: 1 addition & 1 deletion so3/arch/arm64/include/asm/processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -648,7 +648,7 @@
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
#define SCTLR_EL2_RES1 (SCTLR_UCI_BIT | SCTLR_nTWE | SCTLR_nTWI \
| SCTLR_UCT_BIT | SCTLR_DZE_BIT)
#else
Expand Down
18 changes: 7 additions & 11 deletions so3/arch/arm64/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ static void alloc_init_l3(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
/* Attach the L2 PTE to this L3 page table */
*l2pte = __pa((addr_t) l3pgtable) & TTB_L2_TABLE_ADDR_MASK;

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
if (stage == S1)
set_pte_table(l2pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
else
Expand All @@ -105,7 +105,7 @@ static void alloc_init_l3(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,

*l3pte = phys & TTB_L3_PAGE_ADDR_MASK;

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
if (stage == S1)
set_pte_page(l3pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
else
Expand Down Expand Up @@ -161,7 +161,7 @@ static void alloc_init_l2(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
/* Attach the L1 PTE to this L2 page table */
*l1pte = __pa((addr_t) l2pgtable) & TTB_L1_TABLE_ADDR_MASK;

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
if (stage == S1)
set_pte_table(l1pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
else
Expand All @@ -183,7 +183,7 @@ static void alloc_init_l2(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,
if (((addr | next | phys) & ~BLOCK_2M_MASK) == 0) {
*l2pte = phys & TTB_L2_BLOCK_ADDR_MASK;

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
if (stage == S1)
set_pte_block(l2pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
else
Expand Down Expand Up @@ -239,7 +239,7 @@ static void alloc_init_l1(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,

/* Attach the L0 PTE to this L1 page table */
*l0pte = __pa((addr_t) l1pgtable) & TTB_L0_TABLE_ADDR_MASK;
#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
if (stage == S1)
set_pte_table(l0pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
else
Expand All @@ -260,7 +260,7 @@ static void alloc_init_l1(u64 *l0pgtable, addr_t addr, addr_t end, addr_t phys,

if (((addr | next | phys) & ~BLOCK_1G_MASK) == 0) {
*l1pte = phys & TTB_L1_BLOCK_ADDR_MASK;
#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ
if (stage == S1)
set_pte_block(l1pte, (nocache ? DCACHE_OFF : DCACHE_WRITEALLOC));
else
Expand Down Expand Up @@ -712,9 +712,6 @@ void __mmu_switch_kernel(void *pgtable_paddr, bool vttbr)
if (vttbr)
__mmu_switch_vttbr(pgtable_paddr);
else
#endif

#ifdef CONFIG_ARM64VT
__mmu_switch_ttbr0(pgtable_paddr);
#else
__mmu_switch_ttbr1(pgtable_paddr);
Expand Down Expand Up @@ -968,6 +965,7 @@ addr_t virt_to_phys_pt(addr_t vaddr)

l1pte = l1pte_offset(l0pte, vaddr);
BUG_ON(!*l1pte);

#elif CONFIG_VA_BITS_39
if (user_space_vaddr(vaddr))
l1pte = l1pte_offset((u64 *) current_pgtable(), vaddr);
Expand Down Expand Up @@ -1000,7 +998,6 @@ addr_t virt_to_phys_pt(addr_t vaddr)

#ifdef CONFIG_AVZ

#ifdef CONFIG_ARM64VT
/**
* Perform a mapping of IPA regions to physical regions
*
Expand All @@ -1017,4 +1014,3 @@ void do_ipamap(void *pgtable, ipamap_t ipamap[], int nbelement)

#endif /* CONFIG_AVZ */

#endif
15 changes: 8 additions & 7 deletions so3/arch/arm64/rpi4_64/include/mach/ipamap.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,9 @@ ipamap_t agency_ipamap[] = {

/* I/O Memory space */
{
.ipa_addr = 0xf0000000,
.phys_addr = 0xf0000000,
.size = 0x10000000,
.ipa_addr = 0xfc000000,
.phys_addr = 0xfc000000,
.size = 0x04000000,
},

/* Null pointer exception */
Expand All @@ -46,12 +46,13 @@ ipamap_t agency_ipamap[] = {
ipamap_t capsule_ipamap[] = {

{
/* Only mapping the CPU interface to the vGIC CPU interface.
/* Only mapping the CPU interface to the vGIC CPU interface (GICV).
* Access to the distributor must lead to a trap and be handled by the hypervisor.
* BCM2711 GIC-400: GICV (virtual CPU interface) at 0xFF846000.
*/
.ipa_addr = 0x08010000,
.phys_addr = 0x08040000,
.size = 0x10000,
.ipa_addr = 0xff842000,
.phys_addr = 0xff846000,
.size = 0x2000,
},
};

Expand Down
13 changes: 11 additions & 2 deletions so3/arch/arm64/traps.c
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,19 @@ void trap_handle_error(addr_t lr)
{
#ifdef CONFIG_AVZ
unsigned long esr = read_sysreg(esr_el2);
unsigned long far = read_sysreg(far_el2);
unsigned long elr = read_sysreg(elr_el2);
unsigned long hpfar = read_sysreg(hpfar_el2);
#else
unsigned long esr = read_sysreg(esr_el1);
unsigned long far = read_sysreg(far_el1);
unsigned long elr = 0;
unsigned long hpfar = 0;
#endif

printk(" FAR: %lx\n", far);
printk(" ELR: %lx\n", elr);
printk(" HPFAR: %lx (IPA: %lx)\n", hpfar, (hpfar >> 4) << 12);
show_invalid_entry_message(ESR_ELx_EC(esr), esr, lr);
}

Expand Down Expand Up @@ -123,7 +132,7 @@ void trap_handle(cpu_regs_t *regs)
syscall_args_t sys_args;
#endif

#ifdef CONFIG_ARM64VT
#ifdef CONFIG_AVZ

unsigned long esr = read_sysreg(esr_el2);
unsigned long hvc_code;
Expand All @@ -137,7 +146,7 @@ void trap_handle(cpu_regs_t *regs)

#else
unsigned long esr = read_sysreg(esr_el1);
#endif /* CONFIG_ARM64VT */
#endif /* CONFIG_AVZ */

switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_LOW:
Expand Down
1 change: 0 additions & 1 deletion so3/avz/kernel/domain_utils.c
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,6 @@ void loadAgency(void)

/**
* The ITB image will be parsed and the components placed in their target memory location.
* This work only with ARM64VT support.
*
* @param slotID
* @param itb ITB image
Expand Down
9 changes: 8 additions & 1 deletion so3/avz/kernel/gnttab.c
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,14 @@ addr_t map_vbstore_pfn(int target_domid, int pfn)
else
grant_paddr = pfn_to_phys(pfn);

__create_mapping((addr_t *) d->pagetable_vaddr, grant_paddr, pfn_to_phys(cur->pfn), PAGE_SIZE, true,
/*
* The vbstore ring buffer is shared Normal memory between agency
* and ME. Map it Normal cacheable (nocache=false) so that both
* sides see coherent writes via the inner-shareable cache domain.
* Using Device attributes here would break ring-buffer coherency
* with the agency which maps the same page as Normal cacheable.
*/
__create_mapping((addr_t *) d->pagetable_vaddr, grant_paddr, pfn_to_phys(cur->pfn), PAGE_SIZE, false,
S2);

return phys_to_pfn(grant_paddr);
Expand Down
3 changes: 2 additions & 1 deletion so3/avz/kernel/hypercalls.c
Original file line number Diff line number Diff line change
Expand Up @@ -209,5 +209,6 @@ void do_avz_hypercall(void *__args)
break;
}

flush_dcache_all();
dsb(ish); /* inner shareable */
isb();
}
2 changes: 1 addition & 1 deletion so3/avz/kernel/setup.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014-2018 Daniel Rossier <daniel.rossier@heig-vd.ch>
* Copyright (C) 2014-2026 Daniel Rossier <daniel.rossier@heig-vd.ch>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
Expand Down
Loading
Loading