ref: 575015d2b023e5707293830186581f03c33aa90a
parent: e6220b39c97171fe9372e2b14a94dd555e0dc0f5
author: cinap_lenrek <cinap_lenrek@felloff.net>
date: Fri Jun 19 06:27:26 EDT 2015
kernel: do not pull in atom.s from libc for arm kernels - provide our own copy of cas() in l.s - replace use of libc ainc()/adec() with portable incref()/decref()
--- a/sys/src/9/kw/arch.c
+++ b/sys/src/9/kw/arch.c
@@ -172,32 +172,6 @@
return (ureg->psr & PsrMask) == PsrMusr;
}
-/*
- * atomic ops
- * make sure that we don't drag in the C library versions
- */
-int
-ainc(int *p)
-{
- int s, v;
-
- s = splhi();
- v = ++*p;
- splx(s);
- return v;
-}
-
-int
-adec(int *p)
-{
- int s, v;
-
- s = splhi();
- v = --*p;
- splx(s);
- return v;
-}
-
int
cas32(void* addr, u32int old, u32int new)
{
--- a/sys/src/9/omap/arch.c
+++ b/sys/src/9/omap/arch.c
@@ -174,32 +174,6 @@
return (ureg->psr & PsrMask) == PsrMusr;
}
-/*
- * atomic ops
- * make sure that we don't drag in the C library versions
- */
-int
-ainc(int *p)
-{
- int s, v;
-
- s = splhi();
- v = ++*p;
- splx(s);
- return v;
-}
-
-int
-adec(int *p)
-{
- int s, v;
-
- s = splhi();
- v = --*p;
- splx(s);
- return v;
-}
-
int
cas32(void* addr, u32int old, u32int new)
{
--- a/sys/src/9/omap/mmu.c
+++ b/sys/src/9/omap/mmu.c
@@ -246,8 +246,8 @@
panic("mmurelease: page->ref %d", page->ref);
pagechainhead(page);
}
- if(proc->mmul2cache && palloc.r.p)
- wakeup(&palloc.r);
+ if(proc->mmul2cache != nil)
+ pagechaindone();
proc->mmul2cache = nil;
mmul1empty();
--- a/sys/src/9/omap4/l.s
+++ b/sys/src/9/omap4/l.s
@@ -145,26 +145,6 @@
MOVW $0, R0
RET
-TEXT ainc(SB), $-4
-spinainc:
- LDREX(0,1)
- ADD $1, R1
- STREX(0,1,2)
- CMP.S $0, R2
- B.NE spinainc
- MOVW R1, R0
- RET
-
-TEXT adec(SB), $-4
-spinadec:
- LDREX(0,1)
- SUB $1, R1
- STREX(0,1,2)
- CMP.S $0, R2
- B.NE spinadec
- MOVW R1, R0
- RET
-
TEXT setlabel(SB), 1, $-4
MOVW R13, 0(R0)
MOVW R14, 4(R0)
--- a/sys/src/9/teg2/archtegra.c
+++ b/sys/src/9/teg2/archtegra.c
@@ -198,8 +198,8 @@
struct Diag {
Cacheline c0;
Lock;
- long cnt;
- long sync;
+ Ref cnt;
+ Ref sync;
Cacheline c1;
};
@@ -466,10 +466,10 @@
}
static void
-synccpus(volatile long *cntp, int n)
+synccpus(Ref *cntp, int n)
{
- ainc(cntp);
- while (*cntp < n)
+ incref(cntp);
+ while (cntp->ref < n)
;
/* all cpus should now be here */
}
@@ -482,8 +482,8 @@
if(m->machno == 0)
iprint(" %d", pass);
for (i = 1000*1000; --i > 0; ) {
- ainc(&dp->cnt);
- adec(&dp->cnt);
+ incref(&dp->cnt);
+	decref(&dp->cnt);
}
synccpus(&dp->sync, navailcpus);
@@ -490,14 +490,14 @@
/* all cpus are now here */
ilock(dp);
- if(dp->cnt != 0)
- panic("cpu%d: diag: failed w count %ld", m->machno, dp->cnt);
+ if(dp->cnt.ref != 0)
+ panic("cpu%d: diag: failed w count %ld", m->machno, dp->cnt.ref);
iunlock(dp);
synccpus(&dp->sync, 2 * navailcpus);
/* all cpus are now here */
- adec(&dp->sync);
- adec(&dp->sync);
+ decref(&dp->sync);
+ decref(&dp->sync);
}
/*
@@ -532,8 +532,8 @@
iunlock(dp);
synccpus(&dp->sync, 2 * navailcpus);
- adec(&dp->sync);
- adec(&dp->sync);
+ decref(&dp->sync);
+ decref(&dp->sync);
/*
* cpus contend
@@ -546,12 +546,12 @@
*/
synccpus(&dp->sync, navailcpus);
- if(dp->sync < navailcpus || dp->sync >= 2 * navailcpus)
+ if(dp->sync.ref < navailcpus || dp->sync.ref >= 2 * navailcpus)
panic("cpu%d: diag: failed w dp->sync %ld", m->machno,
- dp->sync);
- if(dp->cnt != 0)
+ dp->sync.ref);
+ if(dp->cnt.ref != 0)
panic("cpu%d: diag: failed w dp->cnt %ld", m->machno,
- dp->cnt);
+ dp->cnt.ref);
ilock(dp);
iprint(" cpu%d ok", m->machno);
@@ -558,8 +558,8 @@
iunlock(dp);
synccpus(&dp->sync, 2 * navailcpus);
- adec(&dp->sync);
- adec(&dp->sync);
+ decref(&dp->sync);
+ decref(&dp->sync);
l1cache->wb();
/*
--- a/sys/src/9/teg2/fns.h
+++ b/sys/src/9/teg2/fns.h
@@ -14,8 +14,6 @@
#pragma varargck argpos _uartprint 1
-extern long ainc(long *);
-extern long adec(long *);
extern void allcacheinfo(Memcache *);
extern void allcacheson(void);
extern int archether(unsigned, Ether *);
--- a/sys/src/9/teg2/l.s
+++ b/sys/src/9/teg2/l.s
@@ -843,6 +843,25 @@
#include "cache.v7.s"
+TEXT cas+0(SB),0,$12 /* r0 holds p */
+ MOVW ov+4(FP), R1
+ MOVW nv+8(FP), R2
+spin:
+/* LDREX 0(R0),R3 */
+ LDREX(0,3)
+ CMP.S R3, R1
+ BNE fail
+/* STREX 0(R0),R2,R4 */
+ STREX(0,2,4)
+ CMP.S $0, R4
+ BNE spin
+ MOVW $1, R0
+ DMB
+ RET
+fail:
+ MOVW $0, R0
+ RET
+
TEXT tas(SB), $-4 /* _tas(ulong *) */
/* returns old (R0) after modifying (R0) */
MOVW R0,R5