ref: 5608be398ea04d67594b558b30c457b328fe05d6
parent: 913be4e74affe04a2ceb3ab75cb8056dd920a5c8
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Wed Oct 31 15:48:16 EDT 2018
bcm: fix /dev/reboot text/data corruption (thanks richard miller)

- clean dcache before turning off caches and mmu (rebootcode.s)
- use WFE and inter-core mailboxes for cpu startup (rebootcode.s); see the sketch below
- disable SMP during dcache invalidation before enabling caches and mmu (in armv7.s)
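Rough sketch of the new parked-core protocol, for review context only (not part of the patch): a secondary core now sleeps in WFE and watches its own BCM2836 inter-core mailbox until cpu0 posts a non-zero entry address, then branches there with the MMU off. The names waitstartup, wfe and jumpto below are invented for illustration; the mailbox address and per-core stride are the ones used by the dowfi/bootcpu loop in rebootcode.s, and KSEGM/PHYSDRAM are the kernel's usual constants from mem.h.

/*
 * Sketch only: not in the patch.  C rendering of the dowfi/bootcpu
 * loop in rebootcode.s.  Each waiting core sleeps in WFE and polls
 * its own inter-core mailbox 3 read & clear register
 * (0x400000CC + 0x10*core) until cpu0 posts a non-zero startup
 * address, then calls it at its physical address.  wfe() and
 * jumpto() are hypothetical stand-ins for the WFE instruction and
 * the BL (R8) indirect call.
 */
enum {
	Mbox3rdclr	= 0x400000CC,	/* core 0 mailbox 3 read & clear */
	Mboxstep	= 0x10,		/* spacing between per-core banks */
};

static void
waitstartup(int core)
{
	ulong *mbox, entry;

	mbox = (ulong*)(Mbox3rdclr + core*Mboxstep);
	for(;;){
		wfe();			/* sleep until some core executes SEV */
		entry = *mbox;		/* startup address posted by cpu0 */
		if(entry != 0)
			jumpto((entry & ~KSEGM) | PHYSDRAM);	/* enter new kernel at its physical address */
	}
}

Compared to the old WFI loop, the waiting cores no longer re-enable the icache or branch to the fixed 0x8000 entry; cpu0 hands them the actual entry address through the mailbox, and the dcache is cleaned before caches and MMU are switched off so the copied kernel is visible in memory.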
--- a/sys/src/9/bcm/archbcm2.c
+++ b/sys/src/9/bcm/archbcm2.c
@@ -145,7 +145,6 @@
{
int n, max;
char *p;
-
n = 4;
if(n > MAXMACH)
n = MAXMACH;
--- a/sys/src/9/bcm/armv7.s
+++ b/sys/src/9/bcm/armv7.s
@@ -46,15 +46,12 @@
BARRIERS
/*
- * turn SMP on
- * invalidate tlb
+ * turn SMP off
*/
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
- ORR $CpACsmp, R1 /* turn SMP on */
+ BIC $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS
- MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
- BARRIERS
/*
* clear mach and page tables
@@ -61,11 +58,12 @@
*/
MOVW $PADDR(MACHADDR), R1
MOVW $PADDR(KTZERO), R2
+ MOVW $0, R0
_ramZ:
MOVW R0, (R1)
ADD $4, R1
CMP R1, R2
- BNE _ramZ
+ BNE _ramZ
/*
* start stack at top of mach (physical addr)
@@ -72,8 +70,10 @@
* set up page tables for kernel
*/
MOVW $PADDR(MACHADDR+MACHSIZE-4), R13
+
MOVW $PADDR(L1), R0
BL mmuinit(SB)
+ BL mmuinvalidate(SB)
/*
* set up domain access control and page table base
@@ -94,6 +94,14 @@
BARRIERS
/*
+ * turn SMP on
+ */
+ MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
+ ORR $CpACsmp, R1
+ MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
+ BARRIERS
+
+ /*
* enable caches, mmu, and high vectors
*/
MRC CpSC, 0, R0, C(CpCONTROL), C(0), CpMainctl
@@ -133,12 +141,10 @@
reset:
/*
* load physical base for SB addressing while mmu is off
- * keep a handy zero in R0 until first function call
*/
MOVW $setR12(SB), R12
SUB $KZERO, R12
ADD $PHYSDRAM, R12
- MOVW $0, R0
/*
* SVC mode, interrupts disabled
@@ -156,15 +162,12 @@
BARRIERS
/*
- * turn SMP on
- * invalidate tlb
+ * turn SMP off
*/
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
- ORR $CpACsmp, R1 /* turn SMP on */
+ BIC $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS
- MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
- BARRIERS
/*
* find Mach for this cpu
@@ -173,6 +176,8 @@
AND $(MAXMACH-1), R2 /* mask out non-cpu-id bits */
SLL $2, R2 /* convert to word index */
MOVW $machaddr(SB), R0
+ BIC $KSEGM, R0
+ ORR $PHYSDRAM, R0
ADD R2, R0 /* R0 = &machaddr[cpuid] */
MOVW (R0), R0 /* R0 = machaddr[cpuid] */
CMP $0, R0
@@ -184,6 +189,8 @@
*/
ADD $(MACHSIZE-4), R(MACH), R13
+ BL mmuinvalidate(SB)
+
/*
* set up domain access control and page table base
*/
@@ -200,6 +207,14 @@
*/
BL cachedinv(SB)
BL cacheiinv(SB)
+ BARRIERS
+
+ /*
+ * turn SMP on
+ */
+ MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
+ ORR $CpACsmp, R1
+ MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS
/*
--- a/sys/src/9/bcm/main.c
+++ b/sys/src/9/bcm/main.c
@@ -242,7 +242,7 @@
}
cachedwbse(machaddr, sizeof machaddr);
if((mach = startcpus(conf.nmach)) < conf.nmach)
- print("only %d cpu%s started\n", mach, mach == 1? "" : "s");
+ print("only %d cpu%s started\n", mach, mach == 1? "" : "s");
}
static void
@@ -551,10 +551,9 @@
}
static void
-rebootjump(ulong entry, ulong code, ulong size)
+rebootjump(void *entry, void *code, ulong size)
{
- static void (*f)(ulong, ulong, ulong);
- static Lock lk;
+ void (*f)(void*, void*, ulong);
intrsoff();
intrcpushutdown();
@@ -562,17 +561,10 @@
/* redo identity map */
mmuinit1(1);
- lock(&lk);
- if(f == nil){
- /* setup reboot trampoline function */
- f = (void*)REBOOTADDR;
- memmove(f, rebootcode, sizeof(rebootcode));
- cachedwbse(f, sizeof(rebootcode));
- }
- unlock(&lk);
-
+ /* setup reboot trampoline function */
+ f = (void*)REBOOTADDR;
+ memmove(f, rebootcode, sizeof(rebootcode));
cacheuwbinv();
- l2cacheuwbinv();
(*f)(entry, code, size);
@@ -587,9 +579,9 @@
{
cpushutdown();
splfhi();
- if(m->machno != 0)
- rebootjump(0, 0, 0);
- archreboot();
+ if(m->machno == 0)
+ archreboot();
+ rebootjump(0, 0, 0);
}
/*
@@ -609,13 +601,13 @@
reboot(void *entry, void *code, ulong size)
{
writeconf();
- if (m->machno != 0) {
+ while(m->machno != 0){
procwired(up, 0);
sched();
}
cpushutdown();
- delay(1000);
+ delay(2000);
splfhi();
@@ -630,7 +622,7 @@
wdogoff();
/* off we go - never to return */
- rebootjump(PADDR(entry), PADDR(code), size);
+ rebootjump(entry, code, size);
}
void
--- a/sys/src/9/bcm/mkfile
+++ b/sys/src/9/bcm/mkfile
@@ -70,8 +70,6 @@
/$objtype/lib/libmp.a\
/$objtype/lib/libc.a\
-9:V: $p$CONF s$p$CONF
-
$p$CONF:DQ: $CONF.c $OBJ $LIB mkfile
$CC $CFLAGS '-DKERNDATE='`{date -n} $CONF.c
echo '# linking raw kernel' # H6: no headers, data segment aligned
@@ -123,8 +121,8 @@
reboot.h:D: rebootcode.s arm.s arm.h mem.h
$AS rebootcode.s
- # -lc is only for memmove. -T arg is REBOOTADDR
- $LD -l -s -T0x1c00 -R4 -o reboot.out rebootcode.$O -lc
+ # -T arg is REBOOTADDR
+ $LD -l -s -T0x1c00 -R4 -o reboot.out rebootcode.$O
{echo 'uchar rebootcode[]={'
xd -1x reboot.out |
sed -e '1,2d' -e 's/^[0-9a-f]+ //' -e 's/ ([0-9a-f][0-9a-f])/0x\1,/g'
--- a/sys/src/9/bcm/rebootcode.s
+++ b/sys/src/9/bcm/rebootcode.s
@@ -6,25 +6,46 @@
#define WFI WORD $0xe320f003 /* wait for interrupt */
#define WFE WORD $0xe320f002 /* wait for event */
-/*
- * Turn off MMU, then copy the new kernel to its correct location
- * in physical memory. Then jump to the start of the kernel.
- */
-
-/* main(PADDR(entry), PADDR(code), size); */
TEXT main(SB), 1, $-4
MOVW $setR12(SB), R12
- /* copy in arguments before stack gets unmapped */
- MOVW R0, R8 /* entry point */
- MOVW p2+4(FP), R9 /* source */
- MOVW n+8(FP), R6 /* byte count */
+ MOVW R0, entry+0(FP)
+ CMP $0, R0
+ BEQ shutdown
- /* SVC mode, interrupts disabled */
- MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
- MOVW R1, CPSR
+ MOVW entry+0(FP), R8
+ MOVW code+4(FP), R9
+ MOVW size+8(FP), R6
- /* turn caches off */
+ /* round to words */
+ BIC $3, R8
+ BIC $3, R9
+ ADD $3, R6
+ BIC $3, R6
+
+memloop:
+ MOVM.IA.W (R9), [R1]
+ MOVM.IA.W [R1], (R8)
+ SUB.S $4, R6
+ BNE memloop
+
+shutdown:
+ /* clean dcache using appropriate code for armv6 or armv7 */
+ MRC CpSC, 0, R1, C(CpID), C(CpIDfeat), 7 /* Memory Model Feature Register 3 */
+ TST $0xF, R1 /* hierarchical cache maintenance? */
+ BNE l2wb
+ DSB
+ MOVW $0, R0
+ MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEall
+ B l2wbx
+l2wb:
+ BL cachedwb(SB)
+ BL l2cacheuwb(SB)
+l2wbx:
+ /* load entry before turning off mmu */
+ MOVW entry+0(FP), R8
+
+ /* disable caches */
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
BIC $(CpCdcache|CpCicache|CpCpredict), R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
@@ -39,42 +60,34 @@
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
BIC $CpCmmu, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
+ BARRIERS
- /* continue with reboot only on cpu0 */
- CPUID(R2)
- BEQ bootcpu
+ /* turn SMP off */
+ MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
+ BIC $CpACsmp, R1
+ MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
+ ISB
+ DSB
- /* other cpus wait for inter processor interrupt from cpu0 */
- /* turn icache back on */
- MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
- ORR $(CpCicache), R1
- MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
- BARRIERS
+ /* have entry? */
+ CMP $0, R8
+ BNE bootcpu
+
+ /* other cpus wait for inter processor interrupt */
+ CPUID(R2)
dowfi:
- WFI
- MOVW $0x40000060, R1
- ADD R2<<2, R1
- MOVW 0(R1), R0
- AND $0x10, R0
- BEQ dowfi
- MOVW $0x8000, R1
- BL (R1)
- B dowfi
+ WFE /* wait for event signal */
+ MOVW $0x400000CC, R1 /* inter-core .startcpu mailboxes */
+ ADD R2<<4, R1 /* mailbox for this core */
+ MOVW 0(R1), R8 /* content of mailbox */
+ CMP $0, R8
+ BEQ dowfi /* if zero, wait again */
bootcpu:
- /* set up a tiny stack for local vars and memmove args */
- MOVW R8, SP /* stack top just before kernel dest */
- SUB $20, SP /* allocate stack frame */
+ BIC $KSEGM, R8 /* entry to physical */
+ ORR $PHYSDRAM, R8
+ BL (R8)
+ B dowfi
- /* copy the kernel to final destination */
- MOVW R8, 16(SP) /* save dest (entry point) */
- MOVW R8, R0 /* first arg is dest */
- MOVW R9, 8(SP) /* push src */
- MOVW R6, 12(SP) /* push size */
- BL memmove(SB)
- MOVW 16(SP), R8 /* restore entry point */
-
- /* jump to kernel physical entry point */
- ORR R8,R8
- B (R8)
- B 0(PC)
+#define ICACHELINESZ 32
+#include "cache.v7.s"