shithub: riscv


ref: c6b757504d33feb7ed968208b4a2215d9bf2b202
parent: 0d6153841d81c5a7b7c850f1ee63677ec493b227
author: Jacob Moody <moody@posixcafe.org>
date: Thu May 16 19:32:00 EDT 2024

/sys/src: riscv64 target

--- /dev/null
+++ b/riscv64/include/ape/_apetypes.h
@@ -1,0 +1,3 @@
+#ifndef _BITS64
+#define _BITS64
+#endif
--- /dev/null
+++ b/riscv64/include/ape/float.h
@@ -1,0 +1,76 @@
+#ifndef __FLOAT
+#define __FLOAT
+/* IEEE, default rounding */
+
+#define FLT_ROUNDS	1
+#define FLT_RADIX	2
+
+#define FLT_DIG		6
+#define FLT_EPSILON	1.19209290e-07
+#define FLT_MANT_DIG	24
+#define FLT_MAX		3.40282347e+38
+#define FLT_MAX_10_EXP	38
+#define FLT_MAX_EXP	128
+#define FLT_MIN		1.17549435e-38
+#define FLT_MIN_10_EXP	-37
+#define FLT_MIN_EXP	-125
+
+#define DBL_DIG		15
+#define DBL_EPSILON	2.2204460492503131e-16
+#define DBL_MANT_DIG	53
+#define DBL_MAX		1.797693134862315708145e+308
+#define DBL_MAX_10_EXP	308
+#define DBL_MAX_EXP	1024
+#define DBL_MIN		2.225073858507201383090233e-308
+#define DBL_MIN_10_EXP	-307
+#define DBL_MIN_EXP	-1021
+#define LDBL_MANT_DIG	DBL_MANT_DIG
+#define LDBL_EPSILON	DBL_EPSILON
+#define LDBL_DIG	DBL_DIG
+#define LDBL_MIN_EXP	DBL_MIN_EXP
+#define LDBL_MIN	DBL_MIN
+#define LDBL_MIN_10_EXP	DBL_MIN_10_EXP
+#define LDBL_MAX_EXP	DBL_MAX_EXP
+#define LDBL_MAX	DBL_MAX
+#define LDBL_MAX_10_EXP	DBL_MAX_10_EXP
+
+typedef 	union FPdbleword FPdbleword;
+union FPdbleword
+{
+	double	x;
+	struct {	/* risc-v is little endian */
+		long lo;
+		long hi;
+	};
+};
+
+#ifdef _RESEARCH_SOURCE
+/* define stuff needed for floating conversion */
+#define IEEE_8087	1	/* little-endian IEEE FP */
+#define Sudden_Underflow 1
+#endif
+
+#ifdef _PLAN9_SOURCE
+/* FCR */
+#define	FPINEX	0		/* trap enables: none on risc-v */
+#define	FPUNFL	0
+#define	FPOVFL	0
+#define	FPZDIV	0
+#define	FPINVAL	0
+#define	FPRNR	(0<<5)		/* rounding modes */
+#define	FPRZ	(1<<5)
+#define	FPRPINF	(3<<5)
+#define	FPRNINF	(2<<5)
+#define	FPRMASK	(7<<5)
+#define	FPPEXT	0		/* precision */
+#define	FPPSGL	0
+#define	FPPDBL	0
+#define	FPPMASK	0
+/* FSR */
+#define	FPAINEX	(1<<0)		/* accrued exceptions */
+#define	FPAOVFL	(1<<2)
+#define	FPAUNFL	(1<<1)
+#define	FPAZDIV	(1<<3)
+#define	FPAINVAL (1<<4)
+#endif
+#endif /* __FLOAT */
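
The FPdbleword union above gives access to the two 32-bit halves of an IEEE double, low word first on little-endian risc-v. A minimal sketch of its use (illustrative only, not part of the patch), assuming an APE program compiled against this header:

	#include <stdio.h>
	#include <float.h>

	int
	main(void)
	{
		FPdbleword d;

		d.x = 1.0;	/* IEEE-754 bit pattern 0x3ff0000000000000 */
		printf("lo %#lx hi %#lx\n",
			(unsigned long)d.lo, (unsigned long)d.hi);	/* lo 0, hi 0x3ff00000 */
		return 0;
	}
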
--- /dev/null
+++ b/riscv64/include/ape/math.h
@@ -1,0 +1,78 @@
+#ifndef __MATH
+#define __MATH
+#pragma lib "/$M/lib/ape/libap.a"
+
+/* a HUGE_VAL appropriate for IEEE double-precision */
+/* the correct value, 1.797693134862316e+308, causes a ken overflow */
+#define HUGE_VAL 1.79769313486231e+308
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern double acos(double);
+extern double asin(double);
+extern double atan(double);
+extern double atan2(double, double);
+extern double cos(double);
+extern double hypot(double, double);
+extern double sin(double);
+extern double tan(double);
+extern double cosh(double);
+extern double sinh(double);
+extern double tanh(double);
+extern double exp(double);
+extern double frexp(double, int *);
+extern double ldexp(double, int);
+extern double log(double);
+extern double log10(double);
+extern double modf(double, double *);
+extern double pow(double, double);
+extern double sqrt(double);
+extern double ceil(double);
+extern double fabs(double);
+extern double floor(double);
+extern double fmod(double, double);
+extern double NaN(void);
+extern int isNaN(double);
+extern double Inf(int);
+extern int isInf(double, int);
+
+#ifdef _RESEARCH_SOURCE
+/* does >> treat left operand as unsigned ? */
+#define Unsigned_Shifts 1
+#define	M_E		2.7182818284590452354	/* e */
+#define	M_LOG2E		1.4426950408889634074	/* log 2e */
+#define	M_LOG10E	0.43429448190325182765	/* log 10e */
+#define	M_LN2		0.69314718055994530942	/* log e2 */
+#define	M_LN10		2.30258509299404568402	/* log e10 */
+#define	M_PI		3.14159265358979323846	/* pi */
+#define	M_PI_2		1.57079632679489661923	/* pi/2 */
+#define	M_PI_4		0.78539816339744830962	/* pi/4 */
+#define	M_1_PI		0.31830988618379067154	/* 1/pi */
+#define	M_2_PI		0.63661977236758134308	/* 2/pi */
+#define	M_2_SQRTPI	1.12837916709551257390	/* 2/sqrt(pi) */
+#define	M_SQRT2		1.41421356237309504880	/* sqrt(2) */
+#define	M_SQRT1_2	0.70710678118654752440	/* 1/sqrt(2) */
+
+extern double hypot(double, double);
+extern double erf(double);
+extern double erfc(double);
+extern double j0(double);
+extern double y0(double);
+extern double j1(double);
+extern double y1(double);
+extern double jn(int, double);
+extern double yn(int, double);
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#define isnan(x) isNaN(x)
+#define isinf(x) isInf(x, 0)
+
+#endif /* __MATH */
--- /dev/null
+++ b/riscv64/include/ape/stdarg.h
@@ -1,0 +1,26 @@
+#ifndef __STDARG
+#define __STDARG
+
+typedef	unsigned long long va_list;
+
+/* stdarg - little-endian 64-bit */
+/*
+ * types narrower than long are widened to long then pushed.
+ * types wider than long are pushed on vlong alignment boundaries.
+ */
+#define va_start(list, start) list =\
+	(sizeof(start) < 4?\
+		(va_list)((long*)&(start)+1):\
+		(va_list)(&(start)+1))
+#define va_end(list)\
+	USED(list)
+#define va_arg(list, mode)\
+	((sizeof(mode) == 1)?\
+		((list += 4), (mode*)list)[-4]:\
+	(sizeof(mode) == 2)?\
+		((list += 4), (mode*)list)[-2]:\
+	(sizeof(mode) == 4)?\
+		((list += 4), (mode*)list)[-1]:\
+		((mode*)(list = (list + sizeof(mode)+7) & ~7))[-1])
+
+#endif /* __STDARG */
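
A usage sketch for the stdarg macros above (illustrative only, not part of the patch): a variadic sum that exercises the 4-byte branch of va_arg, relying on the widening convention described in the header comment:

	#include <stdarg.h>

	long
	sum(int n, ...)
	{
		va_list ap;
		long total;

		total = 0;
		va_start(ap, n);
		while(n-- > 0)
			total += va_arg(ap, int);
		va_end(ap);
		return total;
	}

	/* sum(3, 1, 2, 3) == 6 */
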
--- /dev/null
+++ b/riscv64/include/ape/ureg.h
@@ -1,0 +1,68 @@
+#ifndef __UREG_H
+#define __UREG_H
+#if !defined(_PLAN9_SOURCE)
+    This header file is an extension to ANSI/POSIX
+#endif
+
+#define uvlong unsigned long long
+
+struct Ureg
+{
+	union {
+		uvlong	pc;
+		uvlong regs[1];
+	};
+	uvlong	r1;		/* link */
+	union{
+		uvlong	r2;
+		uvlong	sp;
+		uvlong	usp;
+	};
+	uvlong	r3;		/* sb */
+	uvlong	r4;
+	uvlong	r5;
+	uvlong	r6;		/* up in kernel */
+	uvlong	r7;		/* m in kernel */
+	union{
+		uvlong	r8;
+		uvlong arg;
+		uvlong ret;
+	};
+	uvlong	r9;
+	uvlong	r10;
+	uvlong	r11;
+	uvlong	r12;
+	uvlong	r13;
+	uvlong	r14;
+	uvlong	r15;
+	uvlong	r16;
+	uvlong	r17;
+	uvlong	r18;
+	uvlong	r19;
+	uvlong	r20;
+	uvlong	r21;
+	uvlong	r22;
+	uvlong	r23;
+	uvlong	r24;
+	uvlong	r25;
+	uvlong	r26;
+	uvlong	r27;
+	uvlong	r28;
+	uvlong	r29;
+	uvlong	r30;
+	uvlong	r31;
+
+	/* csrs: generally supervisor ones */
+	uvlong	status;
+	uvlong	ie;
+	union {
+		uvlong	cause;
+		uvlong	type;
+	};
+	uvlong	tval;			/* faulting address */
+
+	uvlong	curmode;
+};
+#undef uvlong
+
+#endif
--- /dev/null
+++ b/riscv64/include/atom.h
@@ -1,0 +1,63 @@
+/*
+ * atomic memory operations and fences for rv64a
+ *
+ *	assumes the standard A extension
+ *	LR/SC only work on cached regions
+ */
+#define	Amoadd	0
+#define	Amoswap	1
+#define	Amolr	2
+#define	Amosc	3
+#define	Amoxor	4
+#define	Amoor	010
+#define	Amoand	014
+#define	Amomin	020
+#define	Amomax	024
+#define	Amominu	030
+#define	Amomaxu	034
+
+/* AMO operand widths */
+#define	Amow	2		/* long */
+#define	Amod	3		/* vlong */
+
+/* sifive e51 erratum rock-3 requires AQ and RL for correct operation */
+#define AQ	(1<<26)		/* acquire */
+#define RL	(1<<25)		/* release */
+
+/* instructions unknown to the assembler */
+/*
+ * atomically (rd = (rs1); (rs1) = rd func rs2).
+ * setting AQ and RL produces sequential consistency by acting as fences,
+ * *but only for this AMO operand*, not in general.
+ */
+#define AMOW(func, opts, rs2, rs1, rd) \
+	WORD $(((func)<<27)|((rs2)<<20)|((rs1)<<15)|(Amow<<12)|((rd)<<7)|057|(opts))
+#define LRW(rs1, rd)		AMOW(Amolr, AQ, 0, rs1, rd)
+#define SCW(rs2, rs1, rd)	AMOW(Amosc, AQ|RL, rs2, rs1, rd)
+
+#define FNC_I	(1<<3)		/* mmio */
+#define FNC_O	(1<<2)
+#define FNC_R	(1<<1)		/* memory */
+#define FNC_W	(1<<0)
+#define FNC_RW	(FNC_R | FNC_W)
+#define FNC_ALL	0xf		/* 0xf = all i/o, memory r & w ops */
+
+#define FENCE	WORD $(0xf | FNC_ALL<<24 | FNC_ALL<<20)
+#define FENCE_RW WORD $(0xf | FNC_RW<<24 | FNC_RW<<20)	/* memory, not mmio */
+#define PAUSE	WORD $(0xf | FNC_W<<24)	/* FENCE pred=W, Zihintpause ext'n */
+#define FENCE_I	WORD $(0xf | 1<<12)
+/* as and vaddr are register numbers */
+#define SFENCE_VMA(as, vaddr) WORD $(011<<25|(as)<<20|(vaddr)<<15|0<<7|SYSTEM)
+
+#define SEXT_W(r) ADDW R0, r
+
+/* rv64 instructions */
+#define AMOD(func, opts, rs2, rs1, rd) \
+	WORD $(((func)<<27)|((rs2)<<20)|((rs1)<<15)|(Amod<<12)|((rd)<<7)|057|(opts))
+#define LRD(rs1, rd)		AMOD(Amolr, AQ, 0, rs1, rd)
+#define SCD(rs2, rs1, rd)	AMOD(Amosc, AQ|RL, rs2, rs1, rd)
+
+#define	SLLI64(rs2, rs1, rd) \
+	WORD $((rs2)<<20 | (rs1)<<15 | 1<<12 | (rd)<<7 | 0x13)
+#define	SRLI64(rs2, rs1, rd) \
+	WORD $((rs2)<<20 | (rs1)<<15 | 5<<12 | (rd)<<7 | 0x13)
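
The AMOW/AMOD macros above hand-assemble RV64A instructions the assembler does not yet know, packing funct5 into bits 31-27, the AQ/RL ordering bits into 26-25, then rs2, rs1, the operand width, rd, and the 057 (0x2f) AMO opcode. A standalone check (illustrative only, not part of the patch) that reproduces the word the tas routines emit:

	#include <stdio.h>

	enum { Amoswap = 1, Amow = 2, AQ = 1<<26, RL = 1<<25 };
	#define AMOW(func, opts, rs2, rs1, rd) \
		(((func)<<27)|((rs2)<<20)|((rs1)<<15)|(Amow<<12)|((rd)<<7)|0x2f|(opts))

	int
	main(void)
	{
		/* amoswap.w with AQ and RL set, rs2=R10, rs1=rd=R8, as in tas.s */
		printf("%#x\n", AMOW(Amoswap, AQ|RL, 10, 8, 8));	/* prints 0xea4242f */
		return 0;
	}
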
--- /dev/null
+++ b/riscv64/include/u.h
@@ -1,0 +1,88 @@
+#define nil		((void*)0)
+typedef	unsigned short	ushort;
+typedef	unsigned char	uchar;
+typedef	unsigned long	ulong;
+typedef	unsigned int	uint;
+typedef	signed char	schar;
+typedef	long long	vlong;
+typedef	unsigned long long uvlong;
+typedef vlong		intptr;
+typedef unsigned long long uintptr;
+/*
+ * not terribly useful, really should be uintptr, but
+ * changing usize width from ulong changes system call binary interface due to
+ * dubious use of va_arg(list, usize) in 9k/port.
+ */
+typedef unsigned long	usize;
+typedef	uint		Rune;
+typedef union FPdbleword FPdbleword;
+
+typedef uintptr	jmp_buf[2];
+#define	JMPBUFSP	0
+#define	JMPBUFPC	1
+#define	JMPBUFDPC	0
+typedef unsigned int	mpdigit;	/* for /sys/include/mp.h */
+
+typedef unsigned char u8int;
+typedef unsigned short u16int;
+typedef unsigned int	u32int;
+typedef unsigned long long u64int;
+typedef signed char s8int;
+typedef signed short s16int;
+typedef signed int s32int;
+typedef signed long long s64int;
+
+/* FCR */
+#define	FPINEX	0		/* trap enables: none on risc-v */
+#define	FPUNFL	0
+#define	FPOVFL	0
+#define	FPZDIV	0
+#define	FPINVAL	0
+#define	FPRNR	(0<<5)		/* rounding modes */
+#define	FPRZ	(1<<5)
+#define	FPRPINF	(3<<5)
+#define	FPRNINF	(2<<5)
+#define	FPRMASK	(7<<5)
+#define	FPPEXT	0		/* precision */
+#define	FPPSGL	0
+#define	FPPDBL	0
+#define	FPPMASK	0
+/* FSR */
+#define	FPAINEX	(1<<0)		/* accrued exceptions */
+#define	FPAOVFL	(1<<2)
+#define	FPAUNFL	(1<<1)
+#define	FPAZDIV	(1<<3)
+#define	FPAINVAL (1<<4)
+
+union FPdbleword
+{
+	double	x;
+	struct {	/* little endian */
+		ulong lo;
+		ulong hi;
+	};
+};
+
+/* stdarg - little-endian 64-bit */
+/*
+ * types narrower than long are widened to long then pushed.
+ * types wider than long are pushed on vlong alignment boundaries.
+ */
+typedef	uintptr	va_list;
+#define va_start(list, start) list =\
+	(sizeof(start) < 4?\
+		(va_list)((long*)&(start)+1):\
+		(va_list)(&(start)+1))
+#define va_end(list)\
+	USED(list)
+#define va_arg(list, mode)\
+	((sizeof(mode) == 1)?\
+		((list += 4), (mode*)list)[-4]:\
+	(sizeof(mode) == 2)?\
+		((list += 4), (mode*)list)[-2]:\
+	(sizeof(mode) == 4)?\
+		((list += 4), (mode*)list)[-1]:\
+		((list = (list + sizeof(mode)+7) & ~7), (mode*)list)[-1])
+
+#define _BITS64		/* for pool allocator & ape */
+#define _RISCV64	/* for cpucap in Tos */
--- /dev/null
+++ b/riscv64/include/ureg.h
@@ -1,0 +1,62 @@
+struct Ureg
+{
+	union {
+		uintptr	regs[32];		/* regs[n] is R(n), n!=0 */
+		struct {
+			uintptr	pc;		/* instead of r0 */
+			/* the names r1 - r31 are just for libmach */
+			union {
+				uintptr	r1;
+				uintptr	link;	/* r1 */
+			};
+			union {
+				uintptr	r2;
+				uintptr	sp;	/* r2 */
+			};
+			uintptr	r3;
+			uintptr	r4;
+			uintptr	r5;
+			uintptr	r6;		/* up in kernel */
+			uintptr	r7;		/* m in kernel */
+			union{
+				uintptr	r8;
+				uintptr	arg;	/* r8 */
+				uintptr	ret;
+			};
+			uintptr	r9;
+			uintptr	r10;
+			uintptr	r11;
+			uintptr	r12;
+			uintptr	r13;
+			uintptr	r14;
+			uintptr	r15;
+			uintptr	r16;
+			uintptr	r17;
+			uintptr	r18;
+			uintptr	r19;
+			uintptr	r20;
+			uintptr	r21;
+			uintptr	r22;
+			uintptr	r23;
+			uintptr	r24;
+			uintptr	r25;
+			uintptr	r26;
+			uintptr	r27;
+			uintptr	r28;
+			uintptr	r29;
+			uintptr	r30;
+			uintptr	r31;
+		};
+	};
+
+	/* csrs: generally supervisor ones */
+	uintptr	status;
+	uintptr	ie;
+	union {
+		uintptr	cause;
+		uintptr	type;
+	};
+	uintptr	tval;				/* faulting address */
+
+	uintptr	curmode;
+};
--- /dev/null
+++ b/riscv64/mkfile
@@ -1,0 +1,6 @@
+</sys/src/mkfile.proto
+
+O=j
+CC=jc
+AS=ja
+LD=jl
--- a/sys/include/mach.h
+++ b/sys/include/mach.h
@@ -18,6 +18,7 @@
  *		powerpc64
  *		alpha
  *		arm64
+ *		riscv64
  */
 enum
 {
@@ -38,6 +39,7 @@
 	MAMD64,
 	MPOWER64,
 	MARM64,
+	MRISCV64,
 				/* types of executables */
 	FNONE = 0,		/* unidentified */
 	FMIPS,			/* v.out */
@@ -69,6 +71,8 @@
 	FPOWER64B,		/* 9.out bootable */
 	FARM64,			/* arm64 */
 	FARM64B,		/* arm64 bootable */
+	FRISCV64,		/* riscv64 */
+	FRISCV64B,		/* riscv64 bootable */
 
 	ANONE = 0,		/* dissembler types */
 	AMIPS,
@@ -87,6 +91,7 @@
 	AAMD64,
 	APOWER64,
 	AARM64,
+	ARISCV64,
 				/* object file types */
 	Obj68020 = 0,		/* .2 */
 	ObjSparc,		/* .k */
@@ -105,6 +110,7 @@
 	ObjSpim,		/* .0 */
 	ObjPower64,		/* .9 */
 	ObjArm64,		/* .4? */
+	ObjRiscv64,		/* .j */
 	Maxobjtype,
 
 	CNONE  = 0,		/* symbol table classes */
--- /dev/null
+++ b/sys/lib/acid/riscv64
@@ -1,0 +1,240 @@
+// riscv64 support
+
+defn acidinit()			// Called after all the init modules are loaded
+{
+	bplist = {};
+	bpfmt = 'Y';
+
+	srcpath = {
+		"./",
+		"/sys/src/libc/port/",
+		"/sys/src/libc/9sys/",
+		"/sys/src/libc/riscv64/"
+	};
+
+	srcfiles = {};		// list of loaded files
+	srctext = {};		// the text of the files
+}
+
+defn stk()			// trace
+{
+	_stk(*PC, *SP, linkreg(0), 0);
+}
+
+defn lstk()			// trace with locals
+{
+	_stk(*PC, *SP, linkreg(0), 1);
+}
+
+defn gpr()			// print general purpose registers
+{
+	print("R1\t", *R1, " R2\t", *R2, " R3\t", *R3, "\n");
+	print("R4\t", *R4, " R5\t", *R5, " R6\t", *R6, "\n");
+	print("R7\t", *R7, " R8\t", *R8, " R9\t", *R9, "\n");
+	print("R10\t", *R10, " R11\t", *R11, " R12\t", *R12, "\n");
+	print("R13\t", *R13, " R14\t", *R14, " R15\t", *R15, "\n");
+	print("R16\t", *R16, " R17\t", *R17, " R18\t", *R18, "\n");
+	print("R19\t", *R19, " R20\t", *R20, " R21\t", *R21, "\n");
+	print("R22\t", *R22, " R23\t", *R23, " R24\t", *R24, "\n");
+	print("R25\t", *R25, " R26\t", *R26, " R27\t", *R27, "\n");
+	print("R28\t", *R28, " R29\t", *SP, " R30\t", *R30, "\n");
+	print("R31\t", *R31, "\n");
+}
+
+defn Fpr()
+{
+	print("F0\t",  *fmt(F0, 'G'),  "\tF1\t",  *fmt(F1, 'G'), "\n");
+	print("F2\t",  *fmt(F2, 'G'),  "\tF3\t",  *fmt(F3, 'G'), "\n");
+	print("F4\t",  *fmt(F4, 'G'),  "\tF5\t",  *fmt(F5, 'G'), "\n");
+	print("F6\t",  *fmt(F6, 'G'),  "\tF7\t",  *fmt(F7, 'G'), "\n");
+	print("F8\t",  *fmt(F8, 'G'),  "\tF9\t",  *fmt(F9, 'G'), "\n");
+	print("F10\t", *fmt(F10, 'G'), "\tF11\t", *fmt(F11, 'G'), "\n");
+	print("F12\t", *fmt(F12, 'G'), "\tF13\t", *fmt(F13, 'G'), "\n");
+	print("F14\t", *fmt(F14, 'G'), "\tF15\t", *fmt(F15, 'G'), "\n");
+	print("F16\t", *fmt(F16, 'G'), "\tF17\t", *fmt(F17, 'G'), "\n");
+	print("F18\t", *fmt(F18, 'G'), "\tF19\t", *fmt(F19, 'G'), "\n");
+	print("F20\t", *fmt(F20, 'G'), "\tF21\t", *fmt(F21, 'G'), "\n");
+	print("F22\t", *fmt(F22, 'G'), "\tF23\t", *fmt(F23, 'G'), "\n");
+	print("F24\t", *fmt(F24, 'G'), "\tF25\t", *fmt(F25, 'G'), "\n");
+	print("F26\t", *fmt(F26, 'G'), "\tF27\t", *fmt(F27, 'G'), "\n");
+	print("F28\t", *fmt(F28, 'G'), "\tF29\t", *fmt(F29, 'G'), "\n");
+	print("F30\t", *fmt(F30, 'G'), "\tF31\t", *fmt(F31, 'G'), "\n");
+}
+
+defn fpr()
+{
+	print("F0\t",  *fmt(F0, 'g'),  "\tF1\t",  *fmt(F1, 'g'), "\n");
+	print("F2\t",  *fmt(F2, 'g'),  "\tF3\t",  *fmt(F3, 'g'), "\n");
+	print("F4\t",  *fmt(F4, 'g'),  "\tF5\t",  *fmt(F5, 'g'), "\n");
+	print("F6\t",  *fmt(F6, 'g'),  "\tF7\t",  *fmt(F7, 'g'), "\n");
+	print("F8\t",  *fmt(F8, 'g'),  "\tF9\t",  *fmt(F9, 'g'), "\n");
+	print("F10\t", *fmt(F10, 'g'), "\tF11\t", *fmt(F11, 'g'), "\n");
+	print("F12\t", *fmt(F12, 'g'), "\tF13\t", *fmt(F13, 'g'), "\n");
+	print("F14\t", *fmt(F14, 'g'), "\tF15\t", *fmt(F15, 'g'), "\n");
+	print("F16\t", *fmt(F16, 'g'), "\tF17\t", *fmt(F17, 'g'), "\n");
+	print("F18\t", *fmt(F18, 'g'), "\tF19\t", *fmt(F19, 'g'), "\n");
+	print("F20\t", *fmt(F20, 'g'), "\tF21\t", *fmt(F21, 'g'), "\n");
+	print("F22\t", *fmt(F22, 'g'), "\tF23\t", *fmt(F23, 'g'), "\n");
+	print("F24\t", *fmt(F24, 'g'), "\tF25\t", *fmt(F25, 'g'), "\n");
+	print("F26\t", *fmt(F26, 'g'), "\tF27\t", *fmt(F27, 'g'), "\n");
+	print("F28\t", *fmt(F28, 'g'), "\tF29\t", *fmt(F29, 'g'), "\n");
+	print("F30\t", *fmt(F30, 'g'), "\tF31\t", *fmt(F31, 'g'), "\n");
+}
+
+defn spr()				// print special processor registers
+{
+	local pc, link, cause;
+
+	pc = *PC;
+	print("PC\t", pc, " ", fmt(pc, 'a'), "  ");
+	pfl(pc);
+
+	link = *R1;
+	print("SP\t", *SP, "\tLINK\t", link, " ", fmt(link, 'a'), " ");
+	pfl(link);
+
+	cause = *CAUSE;
+	print("STATUS\t", *STATUS, "\tCAUSE\t", cause, " ", reason(cause), "\n");
+}
+
+defn regs()				// print all registers
+{
+	spr();
+	gpr();
+	Fpr();
+}
+
+defn pstop(pid)
+{
+	local l, pc;
+
+	pc = *PC;
+
+	print(pid,": ", reason(*CAUSE), "\t");
+	print(fmt(pc, 'a'), "\t", fmt(pc, 'i'), "\n");
+
+	if notes then {
+		if notes[0] != "sys: breakpoint" then {
+			print("Notes pending:\n");
+			l = notes;
+			while l do {
+				print("\t", head l, "\n");
+				l = tail l;
+			}
+		}
+	}
+}
+
+sizeofUreg = 296;
+aggr Ureg
+{
+	{
+	'W' 0 pc;
+	'a' 0 regs;
+	};
+	'W' 8 r1;
+	{
+	'W' 16 r2;
+	'W' 16 sp;
+	'W' 16 usp;
+	};
+	'W' 24 r3;
+	'W' 32 r4;
+	'W' 40 r5;
+	'W' 48 r6;
+	'W' 56 r7;
+	{
+	'W' 64 r8;
+	'W' 64 arg;
+	'W' 64 ret;
+	};
+	'W' 72 r9;
+	'W' 80 r10;
+	'W' 88 r11;
+	'W' 96 r12;
+	'W' 104 r13;
+	'W' 112 r14;
+	'W' 120 r15;
+	'W' 128 r16;
+	'W' 136 r17;
+	'W' 144 r18;
+	'W' 152 r19;
+	'W' 160 r20;
+	'W' 168 r21;
+	'W' 176 r22;
+	'W' 184 r23;
+	'W' 192 r24;
+	'W' 200 r25;
+	'W' 208 r26;
+	'W' 216 r27;
+	'W' 224 r28;
+	'W' 232 r29;
+	'W' 240 r30;
+	'W' 248 r31;
+	'W' 256 status;
+	'W' 264 ie;
+	{
+	'W' 272 cause;
+	'W' 272 type;
+	};
+	'W' 280 tval;
+	'W' 288 curmode;
+};
+
+defn
+Ureg(addr) {
+	complex Ureg addr;
+	print("_32_ {\n");
+		_32_(addr+0);
+	print("}\n");
+	print("	r1	", addr.r1, "\n");
+	print("_33_ {\n");
+		_33_(addr+16);
+	print("}\n");
+	print("	r3	", addr.r3, "\n");
+	print("	r4	", addr.r4, "\n");
+	print("	r5	", addr.r5, "\n");
+	print("	r6	", addr.r6, "\n");
+	print("	r7	", addr.r7, "\n");
+	print("_34_ {\n");
+		_34_(addr+64);
+	print("}\n");
+	print("	r9	", addr.r9, "\n");
+	print("	r10	", addr.r10, "\n");
+	print("	r11	", addr.r11, "\n");
+	print("	r12	", addr.r12, "\n");
+	print("	r13	", addr.r13, "\n");
+	print("	r14	", addr.r14, "\n");
+	print("	r15	", addr.r15, "\n");
+	print("	r16	", addr.r16, "\n");
+	print("	r17	", addr.r17, "\n");
+	print("	r18	", addr.r18, "\n");
+	print("	r19	", addr.r19, "\n");
+	print("	r20	", addr.r20, "\n");
+	print("	r21	", addr.r21, "\n");
+	print("	r22	", addr.r22, "\n");
+	print("	r23	", addr.r23, "\n");
+	print("	r24	", addr.r24, "\n");
+	print("	r25	", addr.r25, "\n");
+	print("	r26	", addr.r26, "\n");
+	print("	r27	", addr.r27, "\n");
+	print("	r28	", addr.r28, "\n");
+	print("	r29	", addr.r29, "\n");
+	print("	r30	", addr.r30, "\n");
+	print("	r31	", addr.r31, "\n");
+	print("	status	", addr.status, "\n");
+	print("	ie	", addr.ie, "\n");
+	print("_35_ {\n");
+		_35_(addr+272);
+	print("}\n");
+	print("	tval	", addr.tval, "\n");
+	print("	curmode	", addr.curmode, "\n");
+};
+
+defn linkreg(addr)
+{
+	complex Ureg addr;
+	return addr.r1\Y;
+}
+
+print("/sys/lib/acid/riscv64");
--- a/sys/lib/rootstub
+++ b/sys/lib/rootstub
@@ -57,6 +57,7 @@
 mkdir -p acme/bin/power64
 mkdir -p acme/bin/sparc
 mkdir -p acme/bin/sparc64
+mkdir -p acme/bin/riscv64
 mkdir -p amd64/bin/ape
 mkdir -p amd64/bin/audio
 mkdir -p amd64/bin/auth
@@ -187,6 +188,22 @@
 mkdir -p rc/bin/aux
 mkdir -p rc/bin/postscript
 mkdir -p rc/lib/ape
+mkdir -p riscv64/bin/ape
+mkdir -p riscv64/bin/audio
+mkdir -p riscv64/bin/auth
+mkdir -p riscv64/bin/aux
+mkdir -p riscv64/bin/bitsy
+mkdir -p riscv64/bin/dial
+mkdir -p riscv64/bin/disk
+mkdir -p riscv64/bin/fs
+mkdir -p riscv64/bin/games
+mkdir -p riscv64/bin/ip/httpd
+mkdir -p riscv64/bin/ndb
+mkdir -p riscv64/bin/nusb
+mkdir -p riscv64/bin/replica
+mkdir -p riscv64/bin/upas
+mkdir -p riscv64/bin/venti
+mkdir -p riscv64/lib/ape
 mkdir -p sparc/bin/ape
 mkdir -p sparc/bin/audio
 mkdir -p sparc/bin/auth
--- /dev/null
+++ b/sys/src/ape/lib/9/riscv64/getcallerpc.s
@@ -1,0 +1,6 @@
+#define RARG	R8
+
+TEXT	getcallerpc(SB), 1, $0
+	MOV	0(SP), RARG
+	RET
+
--- /dev/null
+++ b/sys/src/ape/lib/9/riscv64/getfcr.s
@@ -1,0 +1,21 @@
+#define ARG	8
+
+#define FFLAGS		1
+#define FRM		2
+#define FCSR		3
+
+TEXT	getfsr(SB), $0
+	MOV	CSR(FCSR), R(ARG)
+	RET
+
+TEXT	setfsr(SB), $0
+	MOV	R(ARG), CSR(FCSR)
+	RET
+
+TEXT	getfcr(SB), $0
+	MOV	CSR(FCSR), R(ARG)
+	RET
+
+TEXT	setfcr(SB), $0
+	MOV	R(ARG), CSR(FCSR)
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/_seek.c
@@ -1,0 +1,11 @@
+extern long __SEEK(long long*, int, long long, int);
+
+long long
+_SEEK(int fd, long long o, int p)
+{
+	long long l;
+
+	if(__SEEK(&l, fd, o, p) < 0)
+		l = -1;
+	return l;
+}
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/argv0.s
@@ -1,0 +1,4 @@
+GLOBL	argv0(SB), $XLEN
+GLOBL	_tos(SB), $XLEN
+GLOBL	_privates(SB), $XLEN
+GLOBL	_nprivates(SB), $4
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/cycles.s
@@ -1,0 +1,8 @@
+#define RARG R8
+
+/* user-accessible CSRs */
+#define CYCLO		0xc00
+
+TEXT cycles(SB), 1, $0				/* cycles since power up */
+	MOV	CSR(CYCLO), RARG
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/getcallerpc.s
@@ -1,0 +1,6 @@
+#define RARG	R8
+
+TEXT	getcallerpc(SB), 1, $0
+	MOV	0(SP), RARG
+	RET
+
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/getfcr.s
@@ -1,0 +1,21 @@
+#define ARG	8
+
+#define FFLAGS		1
+#define FRM		2
+#define FCSR		3
+
+TEXT	getfsr(SB), $0
+	MOV	CSR(FCSR), R(ARG)
+	RET
+
+TEXT	setfsr(SB), $0
+	MOV	R(ARG), CSR(FCSR)
+	RET
+
+TEXT	getfcr(SB), $0
+	MOV	CSR(FCSR), R(ARG)
+	RET
+
+TEXT	setfcr(SB), $0
+	MOV	R(ARG), CSR(FCSR)
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/lock.c
@@ -1,0 +1,26 @@
+#define _LOCK_EXTENSION
+#include "../plan9/sys9.h"
+#include <lock.h>
+
+int	tas(int*);
+
+void
+lock(Lock *lk)
+{
+	while(tas(&lk->val))
+		_SLEEP(0);
+}
+
+int
+canlock(Lock *lk)
+{
+	if(tas(&lk->val))
+		return 0;
+	return 1;
+}
+
+void
+unlock(Lock *lk)
+{
+	lk->val = 0;
+}
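
A usage sketch for the APE spinlock above (illustrative only, not part of the patch): tas() supplies the atomic exchange, lock() spins until it wins, unlock() clears the word:

	#define _LOCK_EXTENSION
	#include <lock.h>

	static Lock cntlck;
	static long counter;

	long
	bump(void)
	{
		long v;

		lock(&cntlck);		/* spins on tas(&cntlck.val) */
		v = ++counter;
		unlock(&cntlck);
		return v;
	}
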
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/main9.s
@@ -1,0 +1,42 @@
+/* APE startup following exec.  assume vlong alignment of SP */
+#define NPRIVATES	16
+#define FCSR		3
+
+GLOBL	_tos(SB), $XLEN
+GLOBL	_privates(SB), $XLEN
+GLOBL	_nprivates(SB), $4
+GLOBL	_savedargc(SB), $4
+GLOBL	_savedargv(SB), $XLEN
+
+TEXT	_main(SB), 1, $(4*XLEN + NPRIVATES*XLEN)
+	MOV	$setSB(SB), R3
+	MOV	R0, CSR(FCSR)
+
+	/* _tos = arg */
+	MOV	R8, _tos(SB)
+
+	MOV	$p-(NPRIVATES*XLEN)(SP), R9
+	MOV	R9, _privates(SB)
+	MOV	$NPRIVATES, R9
+	MOVW	R9, _nprivates(SB)
+
+	/* save argc & argv before envsetup */
+	MOVW	inargc-XLEN(FP), R9
+	MOVW	R9, _savedargc(SB)
+	MOV	$inargv+0(FP), R9
+	MOV	R9, _savedargv(SB)
+	MOV	R0, environ(SB)		/* TODO debug */
+	JAL	R1, _envsetup(SB)	/* may trash any non-stable register */
+
+	/* exit(main(argc, argv, environ)); */
+//#define R2 SP
+	MOVW	_savedargc(SB), R8	/* argc */
+	MOVW	R8, XLEN(R2)
+	MOV	_savedargv(SB), R9
+	MOV	R9, (2*XLEN)(R2)	/* argv */
+	MOV	environ(SB), R9
+	MOV	R9, (3*XLEN)(R2)	/* environ */
+	JAL	R1, main(SB)
+
+	JAL	R1, exit(SB)
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/main9p.s
@@ -1,0 +1,55 @@
+/* _mainp - APE startup with profiling _main */
+
+#define NPRIVATES	16
+#define FCSR		3
+
+GLOBL	_tos(SB), $XLEN
+GLOBL	_privates(SB), $XLEN
+GLOBL	_nprivates(SB), $4
+
+TEXT	_mainp(SB), 1, $(4*XLEN + NPRIVATES*XLEN)
+	MOV	$setSB(SB), R3
+	MOV	R0, CSR(FCSR)
+
+	/* _tos = arg */
+	MOV	R8, _tos(SB)
+
+	MOV	$p-(NPRIVATES*XLEN)(SP), R9
+	MOV	R9, _privates(SB)
+	MOV	$NPRIVATES, R9
+	MOVW	R9, _nprivates(SB)
+
+	/* _profmain(); */
+	JAL	R1, _profmain(SB)
+
+	/* _tos->prof.pp = _tos->prof.next; */
+	MOV	_tos+0(SB), R10
+	MOV	XLEN(R10), R9
+	MOV	R9, (R10)
+
+	JAL	R1, _envsetup(SB)	/* may trash any non-stable register */
+
+	/* main(argc, argv, environ); */
+	MOVW	inargc-XLEN(FP), R8	/* argc */
+	MOV	$inargv+0(FP), R9
+//	AND	$~(XLEN-1), R2		/* vlong alignment */
+	MOV	R9, (2*XLEN)(R2)	/* argv */
+	MOV	environ(SB), R9
+	MOV	R9, (3*XLEN)(R2)	/* environ */
+	JAL	R1, main(SB)
+loop:
+	/* exit(main(argc, argv, environ)) */
+	JAL	R1, exit(SB)
+
+	MOV	$_profin(SB), R0	/* force loading of profile */
+	MOV	R0, R8
+	JMP	loop
+
+TEXT	_savearg(SB), 1, $0
+TEXT	_saveret(SB), 1, $0
+	RET
+
+TEXT	_callpc(SB), 1, $0
+	MOV	argp+0(FP), R8
+	MOV	XLEN(R8), R8
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/malloc.c
@@ -1,0 +1,253 @@
+/*
+ * V7 unix malloc, adapted to the modern world
+ */
+
+/*
+ *	C storage allocator
+ *	circular first-fit strategy
+ *	works with noncontiguous, but monotonically linked, arena
+ *	each block is preceded by a ptr to the (pointer of)
+ *	the next following block
+ *	blocks are exact number of words long
+ *	aligned to the data type requirements of ALIGN
+ *	pointers to blocks must have BUSY bit 0
+ *	bit in ptr is 1 for busy, 0 for idle
+ *	gaps in arena are merely noted as busy blocks
+ *	last block of arena (pointed to by alloct) is empty and
+ *	has a pointer to first
+ *	idle blocks are coalesced during space search
+ *
+ *	a different implementation may need to redefine
+ *	ALIGN, NALIGN, BLOCK, BUSY, INT
+ *	where INT is integer type to which a pointer can be cast
+ */
+
+#include <u.h>
+#include <libc.h>
+
+#ifdef debug
+#define ASSERT(p) if(!(p))botch("p");else
+
+void
+botch(char *s)
+{
+	unlock(&malloclck);
+	print("assertion botched: %s\n", s);
+	abort();
+}
+#else
+#define ASSERT(p)
+#endif
+
+#define INT	uintptr
+#define ALIGN	vlong
+#define NALIGN	1
+#define WORD	sizeof(union store)
+#define BLOCK	1024			/* a multiple of WORD */
+
+#define BUSY 1
+#define NULL 0
+
+#define testbusy(p)	((INT)(p)&BUSY)
+#define setbusy(p)	(union store *)((INT)(p)|BUSY)
+#define clearbusy(p)	(union store *)((INT)(p)&~BUSY)
+
+typedef union store Store;
+union store {
+	Store	*ptr;
+	ALIGN	dummy[NALIGN];
+	int	calloc;		/*calloc clears an array of integers*/
+};
+
+static Store allocs[2];	/*initial arena*/
+static Store *allocp;	/*search ptr*/
+static Store *alloct;	/*arena top*/
+static Store *allocx;	/*for benefit of realloc*/
+static Lock malloclck;
+
+void *
+malloc(uintptr nbytes)
+{
+	uintptr nw, temp;
+	Store *p, *q;
+
+	lock(&malloclck);
+	if (allocs[0].ptr == 0) {	/* first time */
+		allocs[0].ptr = setbusy(&allocs[1]);
+		allocs[1].ptr = setbusy(&allocs[0]);
+		alloct = &allocs[1];
+		allocp = &allocs[0];
+	}
+	nw = (nbytes + WORD + WORD - 1) / WORD;
+	ASSERT(allocp >= allocs && allocp <= alloct);
+	ASSERT(allock());
+	for (p = allocp; ; ) {
+		for (temp = 0; ; ) {
+			if (!testbusy(p->ptr)) {
+				while (!testbusy((q = p->ptr)->ptr)) {
+					ASSERT(q > p && q < alloct);
+					p->ptr = q->ptr;
+				}
+				if (q >= p + nw && p + nw >= p)
+					goto found;
+			}
+			q = p;
+			p = clearbusy(p->ptr);
+			if (p > q) {
+				ASSERT(p <= alloct);
+			} else if (q != alloct || p != allocs) {
+				ASSERT(q == alloct && p == allocs);
+				unlock(&malloclck);
+				return NULL;
+			} else if (++temp > 1)
+				break;
+		}
+		temp = ((nw + BLOCK/WORD) / (BLOCK/WORD)) * (BLOCK/WORD);
+		q = (Store *)sbrk(0);
+		if (q + temp < q) {
+			unlock(&malloclck);
+			return NULL;
+		}
+		q = (Store *)sbrk(temp * WORD);
+		if ((INT)q == -1) {
+			unlock(&malloclck);
+			return NULL;
+		}
+		ASSERT(q > alloct);
+		alloct->ptr = q;
+		if (q != alloct + 1)
+			alloct->ptr = setbusy(alloct->ptr);
+		alloct = q->ptr = q + temp - 1;
+		alloct->ptr = setbusy(allocs);
+	}
+found:
+	allocp = p + nw;
+	ASSERT(allocp <= alloct);
+	if (q > allocp) {
+		allocx = allocp->ptr;
+		allocp->ptr = p->ptr;
+	}
+	p->ptr = setbusy(allocp);
+	unlock(&malloclck);
+	return p + 1;
+}
+
+void *
+mallocz(uintptr size, int clr)
+{
+	void *p;
+
+	p = malloc(size);
+	if (p && clr)
+		memset(p, 0, size);
+	return p;
+}
+
+/*
+ *	freeing strategy tuned for LIFO allocation
+ */
+void
+free(void *ap)
+{
+	Store *p = (Store *)ap;
+
+	if (p == 0)
+		return;
+	lock(&malloclck);
+	ASSERT(p > clearbusy(allocs[1].ptr) && p <= alloct);
+	ASSERT(allock());
+	allocp = --p;
+	ASSERT(testbusy(p->ptr));
+	p->ptr = clearbusy(p->ptr);
+	ASSERT(p->ptr > allocp && p->ptr <= alloct);
+	unlock(&malloclck);
+}
+
+/*	realloc(p, nbytes) reallocates a block obtained from malloc()
+ *	and freed since last call of malloc()
+ *	to have new size nbytes, and old content
+ *	returns new location, or 0 on failure
+ */
+
+void *
+realloc(void *ap, uintptr nbytes)
+{
+	Store *p, *q;
+	uintptr nw, onw;
+
+	p = ap;
+	if (nbytes == 0) {
+		free(p);
+		return nil;
+	}
+	if (p == nil)
+		return malloc(nbytes);
+
+	if (testbusy(p[-1].ptr))
+		free(p);
+
+	onw = p[-1].ptr - p;
+	q = (Store *)malloc(nbytes);
+	if (q == NULL || q == p)
+		return q;
+
+	lock(&malloclck);
+	memmove(q, p, nbytes);
+	nw = (nbytes + WORD - 1) / WORD;
+	if (q < p && q + nw >= p)
+		(q + (q + nw - p))->ptr = allocx;
+	unlock(&malloclck);
+	return q;
+}
+
+#ifdef debug
+int
+allock(void)
+{
+#ifdef longdebug
+	Store *p;
+	int	x;
+
+	x = 0;
+	for (p = &allocs[0]; clearbusy(p->ptr) > p; p = clearbusy(p->ptr))
+		if (p == allocp)
+			x++;
+	ASSERT(p == alloct);
+	return x == 1 | p == allocp;
+#else
+	return 1;
+#endif
+}
+#endif
+
+void*
+calloc(uintptr n, uintptr szelem)
+{
+	void *v;
+
+	if (v = mallocz(n * szelem, 1))
+		setmalloctag(v, getcallerpc(&n));
+	return v;
+}
+
+void
+setmalloctag(void *, uintptr)
+{
+}
+
+void
+setrealloctag(void *, uintptr)
+{
+}
+
+uintptr
+getmalloctag(void *)
+{
+	return ~0;
+}
+
+uintptr
+getrealloctag(void *)
+{
+	return ~0;
+}
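
The comment at the top of malloc.c describes the arena layout: each block header holds a pointer to the next block, with bit 0 doubling as the busy flag, which is what testbusy, setbusy and clearbusy manipulate. A small sketch of that tagging (illustrative only, not part of the patch; the address is made up):

	#include <u.h>
	#include <libc.h>

	void
	main(void)
	{
		uintptr next;

		next = 0x10000;		/* a word-aligned block address */
		next |= 1;		/* setbusy: mark the block in use */
		print("busy=%d next=%#llux\n",
			(int)(next & 1), (uvlong)(next & ~(uintptr)1));
		exits(nil);
	}
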
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/memset.s
@@ -1,0 +1,83 @@
+/* memset(char *p, int c, size_t n) - ulong version */
+	TEXT	memset(SB),$(3*XLEN)
+	MOV	R8, p+0(FP)
+	MOV	n+(2*XLEN)(FP), R10	/* R10 is count */
+	MOV	p+0(FP), R11		/* R11 is pointer */
+	MOVW	c+XLEN(FP), R12		/* R12 is char */
+	ADD	R10,R11, R13		/* R13 is end pointer */
+
+/*
+ * if not at least 4 chars,
+ * dont even mess around.
+ * 3 chars to guarantee any
+ * rounding up to a word
+ * boundary and 4 characters
+ * to get at least maybe one
+ * full word store.
+ */
+	SLT	$4,R10, R8
+	BNE	R8, out
+
+/*
+ * turn R12 into a word of characters
+ */
+	AND	$0xff, R12
+	SLL	$8,R12, R8
+	OR	R8, R12
+	SLL	$16,R12, R8
+	OR	R8, R12
+
+/*
+ * store one byte at a time until pointer
+ * is aligned on a word boundary
+ */
+l1:
+	AND	$3,R11, R8
+	BEQ	R8, l2
+	MOVB	R12, 0(R11)
+	ADD	$1, R11
+	JMP	l1
+
+/*
+ * turn R10 into end pointer-15
+ * store 16 at a time while theres room
+ */
+l2:
+	ADD	$-15,R13, R10
+l3:
+	SLTU	R10,R11, R8
+	BEQ	R8, l4
+	MOVW	R12, 0(R11)
+	MOVW	R12, 4(R11)
+	ADD	$16, R11
+	MOVW	R12, -8(R11)
+	MOVW	R12, -4(R11)
+	JMP	l3
+
+/*
+ * turn R10 into end pointer-3
+ * store 4 at a time while theres room
+ */
+l4:
+	ADD	$-3,R13, R10
+l5:
+	SLTU	R10,R11, R8
+	BEQ	R8, out
+	MOVW	R12, 0(R11)
+	ADD	$4, R11
+	JMP	l5
+
+/*
+ * last loop, store byte at a time
+ */
+out:
+	SLTU	R13,R11 ,R8
+	BEQ	R8, ret
+	MOVB	R12, 0(R11)
+	ADD	$1, R11
+	JMP	out
+
+ret:
+	MOV	s1+0(FP), R8
+	RET
+	END
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/mkfile
@@ -1,0 +1,31 @@
+APE=/sys/src/ape
+objtype=riscv64
+<$APE/config
+LIB=/$objtype/lib/ape/libap.a
+
+OFILES=\
+	argv0.$O\
+	cycles.$O\
+	getcallerpc.$O\
+	getfcr.$O\
+	lock.$O\
+	main9.$O\
+	main9p.$O\
+#	malloc.$O\
+#	memccpy.$O\
+#	memchr.$O\
+#	memcmp.$O\
+#	memmove.$O\
+	memset.$O\
+	notetramp.$O\
+	_seek.$O\
+	setjmp.$O\
+#	sqrt.$O\
+#	strchr.$O\
+#	strcmp.$O\
+#	strcpy.$O\
+	tas.$O\
+
+</sys/src/cmd/mksyslib
+
+CFLAGS=$CFLAGS -c -D_POSIX_SOURCE -D_PLAN9_SOURCE
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/notejmp.c
@@ -1,0 +1,16 @@
+#include <u.h>
+#include <libc.h>
+#include <ureg.h>
+
+void
+notejmp(void *vr, jmp_buf j, int ret)
+{
+	struct Ureg *r = vr;
+
+	r->ret = ret;
+	if(ret == 0)
+		r->ret = 1;
+	r->pc = j[JMPBUFPC];
+	r->sp = j[JMPBUFSP];
+	noted(NCONT);
+}
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/notetramp.c
@@ -1,0 +1,83 @@
+/* must match setjmp.s */
+#include "../plan9/lib.h"
+#include "../plan9/sys9.h"
+#include <signal.h>
+#include <setjmp.h>
+#include <assert.h>
+
+/* A stack to hold pcs when signals nest */
+#define MAXSIGSTACK 20
+
+typedef struct Pcstack Pcstack;
+static struct Pcstack {
+	int sig;
+	void (*hdlr)(int, char*, Ureg*);
+	unsigned long long restorepc;
+	Ureg *u;
+} pcstack[MAXSIGSTACK];
+static int nstack = 0;
+
+static void notecont(Ureg*, char*);
+
+void
+_notetramp(int sig, void (*hdlr)(int, char*, Ureg*), Ureg *u)
+{
+	Pcstack *p;
+
+	if(nstack >= MAXSIGSTACK)
+		_NOTED(1);	/* nesting too deep; just do system default */
+	p = &pcstack[nstack];
+	p->restorepc = u->pc;
+	p->sig = sig;
+	p->hdlr = hdlr;
+	p->u = u;
+	nstack++;
+	u->pc = (unsigned long long) notecont;
+	_NOTED(2);	/* NSAVE: clear note but hold state */
+}
+
+static void
+notecont(Ureg *u, char *s)
+{
+	Pcstack *p;
+	void(*f)(int, char*, Ureg*);
+
+	assert(nstack >= 1);
+	p = &pcstack[nstack-1];
+	f = p->hdlr;
+	u->pc = p->restorepc;
+	nstack--;
+	(*f)(p->sig, s, u);
+	_NOTED(3);	/* NRSTR */
+}
+
+#define JMPBUFPC 1
+#define JMPBUFSP 0
+
+extern sigset_t	_psigblocked;
+
+typedef struct {
+	sigset_t set;
+	sigset_t blocked;
+	jmp_buf jmpbuf;			/* APE version: 4 uintptrs */
+} sigjmp_buf_riscv64;
+
+void
+siglongjmp(sigjmp_buf j, int ret)
+{
+	struct Ureg *u;
+	sigjmp_buf_riscv64 *jb;
+
+	jb = (sigjmp_buf_riscv64 *)j;
+	if(jb->set)
+		_psigblocked = jb->blocked;
+	if(nstack == 0 || pcstack[nstack-1].u->sp > jb->jmpbuf[JMPBUFSP])
+		longjmp((void*)jb->jmpbuf, ret);
+	assert(nstack >= 1);
+	u = pcstack[nstack-1].u;
+	nstack--;
+	u->ret = ret == 0? 1: ret;
+	u->pc = jb->jmpbuf[JMPBUFPC];
+	u->sp = jb->jmpbuf[JMPBUFSP];		/* amd64 adds 8; why? */
+	_NOTED(3);	/* NRSTR */
+}
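
A usage sketch of the machinery above (illustrative only, not part of the patch): sigsetjmp records the stack, return pc and signal mask, and siglongjmp unwinds back out of a handler, which is the case the pcstack bookkeeping exists for. Assumes the ordinary POSIX signal and unistd facilities provided by APE:

	#include <signal.h>
	#include <setjmp.h>
	#include <stdio.h>
	#include <unistd.h>

	static sigjmp_buf env;

	static void
	onint(int sig)
	{
		siglongjmp(env, 1);	/* unwind out of the handler */
	}

	int
	main(void)
	{
		signal(SIGINT, onint);
		if(sigsetjmp(env, 1) == 0)
			for(;;)
				sleep(1);	/* wait to be interrupted */
		printf("interrupted\n");
		return 0;
	}
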
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/setjmp.s
@@ -1,0 +1,29 @@
+/* riscv64 APE version; must match notetramp.c */
+arg=8
+link=1
+sp=2
+
+TEXT	setjmp(SB), 1, $-4		/* int setjmp(jmp_buf env) */
+	MOV	R(sp), (R(arg))
+	MOV	R(link), XLEN(R(arg))
+	MOV	R0, R(arg)
+	RET
+
+TEXT	sigsetjmp(SB), 1, $-4		/* sigsetjmp(sigjmp_buf, int mask) */
+	MOVW	savemask+8(FP), R(arg+2)	/* save signal stuff */
+	MOVW	R(arg+2), 0(R(arg))
+	MOVW	_psigblocked(SB), R(arg+2)
+	MOVW	R(arg+2), 4(R(arg))		/* save _psigblocked */
+	MOV	R(sp), 8(R(arg))		/* save sp */
+	MOV	R(link), 16(R(arg))		/* save return pc */
+	MOV	R0, R(arg)
+	RET
+
+TEXT	longjmp(SB), 1, $-4		/* void longjmp(jmp_buf env, int val) */
+	MOVW	r+XLEN(FP), R(arg+2)
+	BNE	R(arg+2), ok		/* ansi: "longjmp(0) => longjmp(1)" */
+	MOV	$1, R(arg+2)		/* bless their pointed heads */
+ok:	MOV	(R(arg)), R(sp)
+	MOV	XLEN(R(arg)), R(link)
+	MOV	R(arg+2), R(arg)
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/ap/riscv64/tas.s
@@ -1,0 +1,18 @@
+/*
+ *	risc-v test-and-set
+ *	assumes the standard A extension
+ */
+
+#include "/riscv64/include/atom.h"
+
+#define ARG	8
+
+#define MASK(w)	((1<<(w))-1)
+
+/* atomically set *keyp non-zero and return previous contents */
+TEXT tas(SB), $-4			/* int tas(int *keyp) */
+	MOV	$1, R10
+	FENCE_RW
+	AMOW(Amoswap, AQ|RL, 10, ARG, ARG) /* R10->(R(ARG)), old (R(ARG))->ARG */
+	FENCE_RW
+	RET
--- /dev/null
+++ b/sys/src/ape/lib/mp/riscv64/mkfile
@@ -1,0 +1,26 @@
+APE=/sys/src/ape
+<$APE/config
+
+LIB=/$objtype/lib/ape/libmp.a
+
+SFILES=\
+#	mpvecadd.s\
+#	mpvecdigmuladd.s\
+#	mpvecdigmulsub.s\
+#	mpvecsub.s\
+#	mpdigdiv.s\
+
+HFILES=\
+	/sys/include/ape/mp.h\
+	../../../../libmp/port/dat.h
+
+OFILES=${SFILES:%.s=%.$O}
+
+UPDATE=mkfile\
+	$HFILES\
+	$SFILES\
+
+</sys/src/cmd/mksyslib
+
+%.$O:	    ../../../../libmp/$objtype/%.s
+	$AS ../../../../libmp/$objtype/$stem.s
--- /dev/null
+++ b/sys/src/ape/lib/sec/riscv64/mkfile
@@ -1,0 +1,12 @@
+APE=/sys/src/ape
+<$APE/config
+
+LIB=/$objtype/lib/ape/libsec.a
+
+OFILES=	\
+
+HFILES=/sys/include/ape/libsec.h
+
+UPDATE=mkfile
+
+</sys/src/cmd/mksyslib
--- a/sys/src/cmd/gs/arch.h
+++ b/sys/src/cmd/gs/arch.h
@@ -16,6 +16,8 @@
 #include "arm64.h"
 #elif Tamd64
 #include "amd64.h"
+#elif Triscv64
+#include "riscv64.h"
 #else
 	I do not know about your architecture.
 	Update switch in arch.h with new architecture.
--- /dev/null
+++ b/sys/src/cmd/gs/default.riscv64.h
@@ -1,0 +1,44 @@
+/* Parameters derived from machine and compiler architecture */
+
+	 /* ---------------- Scalar alignments ---------------- */
+
+#define ARCH_ALIGN_SHORT_MOD 2
+#define ARCH_ALIGN_INT_MOD 4
+#define ARCH_ALIGN_LONG_MOD 4
+#define ARCH_ALIGN_PTR_MOD 8
+#define ARCH_ALIGN_FLOAT_MOD 4
+#define ARCH_ALIGN_DOUBLE_MOD 8
+#define ARCH_ALIGN_STRUCT_MOD 8
+
+	 /* ---------------- Scalar sizes ---------------- */
+
+#define ARCH_LOG2_SIZEOF_SHORT 1
+#define ARCH_LOG2_SIZEOF_INT 2
+#define ARCH_LOG2_SIZEOF_LONG 2
+#define ARCH_LOG2_SIZEOF_LONG_LONG 3
+#define ARCH_SIZEOF_PTR 8
+#define ARCH_SIZEOF_FLOAT 4
+#define ARCH_SIZEOF_DOUBLE 8
+#define ARCH_FLOAT_MANTISSA_BITS 24
+#define ARCH_DOUBLE_MANTISSA_BITS 53
+
+	 /* ---------------- Unsigned max values ---------------- */
+
+#define ARCH_MAX_UCHAR ((unsigned char)0xff + (unsigned char)0)
+#define ARCH_MAX_USHORT ((unsigned short)0xffff + (unsigned short)0)
+#define ARCH_MAX_UINT ((unsigned int)~0 + (unsigned int)0)
+#define ARCH_MAX_ULONG ((unsigned long)~0L + (unsigned long)0)
+
+	 /* ---------------- Cache sizes ---------------- */
+
+#define ARCH_CACHE1_SIZE 1048576
+#define ARCH_CACHE2_SIZE 1048576
+
+	 /* ---------------- Miscellaneous ---------------- */
+
+#define ARCH_IS_BIG_ENDIAN 0
+#define ARCH_PTRS_ARE_SIGNED 0
+#define ARCH_FLOATS_ARE_IEEE 1
+#define ARCH_ARITH_RSHIFT 2
+#define ARCH_CAN_SHIFT_FULL_LONG 1
+#define ARCH_DIV_NEG_POS_TRUNCATES 1
--- a/sys/src/cmd/pcc.c
+++ b/sys/src/cmd/pcc.c
@@ -22,6 +22,7 @@
 	{"sparc",	"kc", "kl", "k", "k.out"},
 	{"power",	"qc", "ql", "q", "q.out"},
 	{"mips",	"vc", "vl", "v", "v.out"},
+	{"riscv64",	"jc", "jl", "j", "j.out"},
 };
 
 enum {
--- a/sys/src/libc/9syscall/mkfile
+++ b/sys/src/libc/9syscall/mkfile
@@ -145,6 +145,36 @@
 			echo MOVQ '$'$n, R0
 			echo CALL_PAL '$'0x83
 			echo RET
+		case riscv64
+			if(~ $i seek)
+				# sed s/SYSCALL/$n/ ../$objtype/$i.s
+				echo TEXT _seek'(SB)', 1, '$0'
+			if not
+				echo TEXT $i'(SB)', 1, '$0'
+			#
+			# For architectures which pass the first
+			# argument in a register, if the system call
+			# takes no arguments there will be no
+			# 'a0+0(FP)' reserved on the stack.
+			#
+			# jc doesn't widen longs to vlongs when pushing them.
+			# so we have to be careful here to only push longs for
+			# long first arguments.
+			#
+			switch ($i) {
+			case nsec
+				;
+			case bind chdir exec _exits segbrk open  pipe create \
+			    brk_ remove notify segdetach segfree segflush \
+			    rendezvous unmount semacquire semrelease seek \
+			    errstr stat wstat await tsemacquire
+				echo MOV R8, '0(FP)'	# vlong or ptr
+			case *
+				echo MOVW R8, '0(FP)'	# long
+			}
+			echo MOV '$'$n, R8
+			echo ECALL
+			echo RET
 		}} > $i.s
 		$AS $i.s
 	}
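
For illustration (not part of the patch), the riscv64 case above generates stubs like the following; open's first argument is a pointer, so it is spilled to 0(FP) before the syscall number overwrites R8 (14 is shown as open's number here only for illustration; the real value comes from the syscall table this mkfile reads):

	TEXT open(SB), 1, $0
	MOV R8, 0(FP)
	MOV $14, R8
	ECALL
	RET
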
--- /dev/null
+++ b/sys/src/libc/riscv64/_seek.c
@@ -1,0 +1,14 @@
+#include <u.h>
+#include <libc.h>
+
+extern int _seek(vlong*, int, vlong, int);
+
+vlong
+seek(int fd, vlong o, int p)
+{
+	vlong l;
+
+	if(_seek(&l, fd, o, p) < 0)
+		l = -1LL;
+	return l;
+}
--- /dev/null
+++ b/sys/src/libc/riscv64/argv0.s
@@ -1,0 +1,4 @@
+GLOBL	argv0(SB), $XLEN
+GLOBL	_tos(SB), $XLEN
+GLOBL	_privates(SB), $XLEN
+GLOBL	_nprivates(SB), $XLEN
--- /dev/null
+++ b/sys/src/libc/riscv64/atom.s
@@ -1,0 +1,82 @@
+/*
+ *	RV64A atomic operations
+ *	LR/SC only work on cached regions
+ *
+ * see atom(2) for the properties they must implement.
+ *
+ * inserted fences before and after AMOs, to ensure immediate visibility of
+ * protected data on other cpus.  setting AQ and RL produces sequential
+ * consistency by acting as fences, *but only for this AMO*, not in general.
+ */
+#include <atom.h>
+
+#define ARG	8
+
+TEXT adec(SB), 1, $-4			/* long adec(long*); */
+	MOV	$-1, R9
+	JMP	ainctop
+TEXT ainc(SB), 1, $-4			/* long ainc(long *); */
+	MOV	$1, R9
+ainctop:
+	FENCE_RW	/* flush changes to ram in case releasing a lock */
+	/* after: value before add in R10, value after add in memory */
+	AMOW(Amoadd, AQ|RL, 9, ARG, 10)
+	FENCE_RW
+	ADDW	R9, R10, R(ARG)		/* old value ±1 for ainc/adec */
+	RET
+
+/*
+ * int cas(uint* p, int ov, int nv);
+ *
+ * compare-and-swap: atomically set *addr to nv only if it contains ov,
+ * and returns the old value.  this version returns 0 on failure, 1 on success
+ * instead.
+ */
+TEXT cas(SB), 1, $-4
+	MOVWU	ov+XLEN(FP), R12
+	MOVWU	nv+(XLEN+4)(FP), R13
+	MOV	R0, R11		/* default to failure */
+	FENCE_RW
+spincas:
+	LRW(ARG, 14)		/* (R(ARG)) -> R14 */
+	SLL	$32, R14
+	SRL	$32, R14	/* don't sign extend */
+	BNE	R12, R14, fail
+	FENCE_RW
+	SCW(13, ARG, 14)	/* R13 -> (R(ARG)) maybe, R14=0 if ok */
+	BNE	R14, spincas	/* R14 != 0 means store failed */
+ok:
+	MOV	$1, R11
+fail:
+	FENCE_RW
+	MOV	R11, R(ARG)
+	RET
+
+/*
+ * int	casp(void **p, void *ov, void *nv);
+ * int	casv(uvlong *p, uvlong ov, uvlong nv);
+ */
+TEXT casp(SB), 1, $-4
+TEXT casv(SB), 1, $-4
+	MOV	ov+XLEN(FP), R12
+	MOV	nv+(2*XLEN)(FP), R13
+	MOV	R0, R11		/* default to failure */
+	FENCE_RW
+spincasp:
+	LRD(ARG, 14)		/* (R(ARG)) -> R14 */
+	BNE	R12, R14, fail
+	FENCE_RW
+	SCD(13, ARG, 14)	/* R13 -> (R(ARG)) maybe, R14=0 if ok */
+	BNE	R14, spincasp	/* R14 != 0 means store failed */
+	JMP	ok
+
+TEXT loadlinked(SB), $-4	/* long loadlinked(long *); */
+	LRW(ARG, ARG)
+	RET
+
+TEXT storecond(SB), $-4		/* int storecond(long *, long); */
+	MOVW	ov+XLEN(FP), R12
+	FENCE_RW		/* make protected data current before release */
+	SCW(12, ARG, ARG)
+	BNE	R(ARG), fail	/* R(ARG) != 0 means store failed */
+	JMP	ok
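
A usage sketch for cas above (illustrative only, not part of the patch): the usual retry loop for a lock-free increment, assuming cas is declared as in the comment above (returning 1 on success, 0 if the word changed underneath):

	#include <u.h>
	#include <libc.h>

	void
	atomicinc(uint *p)
	{
		uint old;

		do
			old = *p;
		while(!cas(p, old, old+1));
	}
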
--- /dev/null
+++ b/sys/src/libc/riscv64/cycles.s
@@ -1,0 +1,8 @@
+#define RARG R8
+
+/* user-accessible CSRs */
+#define CYCLO		0xc00
+
+TEXT cycles(SB), 1, $-4				/* cycles since power up */
+	MOV	CSR(CYCLO), RARG
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/getcallerpc.s
@@ -1,0 +1,5 @@
+#define RARG	R8
+
+TEXT	getcallerpc(SB), 1, $0
+	MOV	0(SP), RARG
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/getfcr.s
@@ -1,0 +1,15 @@
+#define ARG	8
+
+#define FFLAGS		1
+#define FRM		2
+#define FCSR		3
+
+TEXT	getfcr(SB), $-4
+TEXT	getfsr(SB), $-4
+	MOV	CSR(FCSR), R(ARG)
+	RET
+
+TEXT	setfcr(SB), $-4
+TEXT	setfsr(SB), $-4
+	MOV	R(ARG), CSR(FCSR)
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/main9.s
@@ -1,0 +1,30 @@
+/* normal startup following exec.  assume vlong alignment of SP */
+#define NPRIVATES	16
+
+GLOBL	_tos(SB), $XLEN
+GLOBL	_privates(SB), $XLEN
+GLOBL	_nprivates(SB), $4
+
+TEXT	_main(SB), 1, $(4*XLEN + NPRIVATES*XLEN)
+	MOV	$setSB(SB), R3
+	/* _tos = arg */
+	MOV	R8, _tos(SB)
+
+	MOV	$p-(NPRIVATES*XLEN)(SP), R9
+	MOV	R9, _privates(SB)
+	MOV	$NPRIVATES, R9
+	MOVW	R9, _nprivates(SB)
+
+	MOV	inargc-XLEN(FP), R8
+	MOV	$inargv+0(FP), R10
+	MOV	R8, XLEN(R2)		/* R2 -> SP? */
+	MOV	R10, (2*XLEN)(R2)
+	JAL	R1, main(SB)
+loop:
+	MOV	$_exitstr<>(SB), R8
+	MOV	R8, XLEN(SP)
+	JAL	R1, exits(SB)
+	JMP	loop
+
+DATA	_exitstr<>+0(SB)/4, $"main"
+GLOBL	_exitstr<>+0(SB), $5
--- /dev/null
+++ b/sys/src/libc/riscv64/main9p.s
@@ -1,0 +1,46 @@
+/* _mainp - profiling _main */
+/* assume vlong alignment of SP */
+
+#define NPRIVATES	16
+
+TEXT	_mainp(SB), 1, $(4*XLEN + NPRIVATES*XLEN)
+	MOV	$setSB(SB), R3
+	/* _tos = arg */
+	MOV	R8, _tos(SB)
+
+	MOV	$p-(NPRIVATES*XLEN)(SP), R9
+	MOV	R9, _privates(SB)
+	MOV	$NPRIVATES, R9
+	MOV	R9, _nprivates(SB)
+
+	/* _profmain(); */
+	JAL	R1, _profmain(SB)
+	/* _tos->prof.pp = _tos->prof.next; */
+	MOV	_tos(SB), R9
+	MOV	XLEN(R9), R10
+	MOV	R10, 0(R9)
+
+	/* main(argc, argv); */
+	MOV	inargc-XLEN(FP), R8
+	MOV	$inargv+0(FP), R10
+	MOV	R8, XLEN(R2)
+	MOV	R10, (2*XLEN)(R2)
+	JAL	R1, main(SB)
+loop:
+	/* exits("main"); */
+	MOV	$_exitstr<>(SB), R8
+	MOV	R8, XLEN(R2)
+	JAL	R1, exits(SB)
+	MOV	$_profin(SB), R0	/* force loading of profile */
+	JMP	loop
+
+TEXT	_savearg(SB), 1, $0
+TEXT	_saveret(SB), 1, $0
+	RET
+
+TEXT	_callpc(SB), 1, $0
+	MOV	argp-XLEN(FP), R8
+	RET
+
+DATA	_exitstr<>+0(SB)/4, $"main"
+GLOBL	_exitstr<>+0(SB), $5
--- /dev/null
+++ b/sys/src/libc/riscv64/memccpy.s
@@ -1,0 +1,21 @@
+/* void* memccpy(void *s1, void *s2, int c, ulong n) */
+	TEXT	memccpy(SB), $0
+	MOV	R8, 0(FP)
+	MOVW	n+(2*XLEN+4)(FP), R8
+	BEQ	R8, ret
+	MOV	s1+0(FP), R10
+	MOV	s2+XLEN(FP), R9
+	MOVBU	c+(2*XLEN)(FP), R11
+	ADD	R8, R9, R12
+
+l1:	MOVBU	(R9), R13
+	ADD	$1, R9
+	MOVB	R13, (R10)
+	ADD	$1, R10
+	BEQ	R11, R13, eq
+	BNE	R9, R12, l1
+	MOV	R0, R8
+	RET
+
+eq:	MOV	R10, R8
+ret:	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/memchr.s
@@ -1,0 +1,39 @@
+/* void* memchr(void *s, int c, ulong n) */
+	TEXT	memchr(SB), $0
+	MOV	R8, 0(FP)
+	MOVW	n+(XLEN+4)(FP), R8
+	MOV	s+0(FP), R9
+	MOVBU	c+XLEN(FP), R10
+	ADD	R8, R9, R13
+
+	AND	$(~1), R8, R12
+	ADD	R9, R12
+	BEQ	R9, R12, lt2
+
+l1:
+	MOVBU	0(R9), R11
+	MOVBU	1(R9), R14
+	BEQ	R10, R11, eq0
+	ADD	$2, R9
+	BEQ	R10, R14, eq
+	BNE	R9, R12, l1
+
+lt2:
+	BEQ	R9, R13, zret
+
+l2:
+	MOVBU	(R9), R11
+	ADD	$1, R9
+	BEQ	R10, R11, eq
+	BNE	R9, R13, l2
+zret:
+	MOV	R0, R8
+	RET
+
+eq0:
+	MOV	R9, R8
+	RET
+
+eq:
+	SUB	$1, R9, R8
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/memcmp.s
@@ -1,0 +1,113 @@
+	TEXT	memcmp(SB), $0
+	MOV	R8, s1+0(FP)
+	MOVW	n+(2*XLEN)(FP), R15	/* R15 is count */
+	MOV	s1+0(FP), R9		/* R9 is pointer1 */
+	MOV	s2+XLEN(FP), R10	/* R10 is pointer2 */
+	ADD	R15, R9, R11		/* R11 is end pointer1 */
+
+/*
+ * if not at least 4 chars,
+ * dont even mess around.
+ * 3 chars to guarantee any
+ * rounding up to a word
+ * boundary and 4 characters
+ * to get at least maybe one
+ * full word cmp.
+ */
+	SLT	$4,R15, R8
+	BNE	R8, out
+
+/*
+ * test if both pointers
+ * are similarly word aligned
+ */
+	XOR	R9,R10, R8
+	AND	$3, R8
+	BNE	R8, out
+
+/*
+ * byte at a time to word align
+ */
+l1:
+	AND	$3,R9, R8
+	BEQ	R8, l2
+	MOVBU	0(R9), R14
+	MOVBU	0(R10), R15
+	ADD	$1, R9
+	BNE	R14,R15, ne
+	ADD	$1, R10
+	JMP	l1
+
+/*
+ * turn R15 into end pointer1-15
+ * cmp 16 at a time while theres room
+ */
+l2:
+	ADD	$-15,R11, R15
+l3:
+	SLTU	R15,R9, R8
+	BEQ	R8, l4
+	MOVW	0(R9), R12
+	MOVW	0(R10), R13
+	BNE	R12,R13, ne1
+	MOVW	4(R9), R12
+	MOVW	4(R10), R13
+	BNE	R12,R13, ne1
+	MOVW	8(R9), R12
+	MOVW	8(R10), R13
+	BNE	R12,R13, ne1
+	MOVW	12(R9), R12
+	MOVW	12(R10), R13
+	BNE	R12,R13, ne1
+	ADD	$16, R9
+	ADD	$16, R10
+	JMP	l3
+
+/*
+ * turn R15 into end pointer1-3
+ * cmp 4 at a time while theres room
+ */
+l4:
+	ADD	$-3,R11, R15
+l5:
+	SLTU	R15,R9, R8
+	BEQ	R8, out
+	MOVW	0(R9), R12
+	MOVW	0(R10), R13
+	ADD	$4, R9
+	BNE	R12,R13, ne1
+	ADD	$4, R10
+	JMP	l5
+
+/*
+ * last loop, cmp byte at a time
+ */
+out:
+	SLTU	R11,R9, R8
+	BEQ	R8, ret
+	MOVBU	0(R9), R14
+	MOVBU	0(R10), R15
+	ADD	$1, R9
+	BNE	R14,R15, ne
+	ADD	$1, R10
+	JMP	out
+
+/*
+ * compare bytes in R12 and R13, lsb first
+ */
+ne1:
+	MOVW	$0xff, R8
+ne1x:
+	AND	R8, R12, R14
+	AND	R8, R13, R15
+	BNE	R14, R15, ne
+	SLL	$8, R8
+	BNE	R8, ne1x
+	RET
+
+ne:
+	SLTU	R14,R15, R8
+	BNE	R8, ret
+	MOV	$-1, R8
+ret:
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/memmove.s
@@ -1,0 +1,141 @@
+	TEXT	memcpy(SB), $-4
+	TEXT	memmove(SB), $-4
+	MOV	R8, s1+0(FP)
+
+	MOVW	n+(2*XLEN)(FP), R9	/* count */
+	BEQ	R9, return
+	BGT	R9, ok
+	MOV	R0, R9
+ok:
+	MOV	s1+0(FP), R11		/* dest pointer */
+	MOV	s2+XLEN(FP), R10	/* source pointer */
+
+	BLTU	R11, R10, back
+
+/*
+ * byte-at-a-time forward copy to
+ * get source (R10) aligned.
+ */
+f1:
+	AND	$(XLEN-1), R10, R8
+	BEQ	R8, f2
+	SUB	$1, R9
+	BLT	R9, return
+	MOVB	(R10), R8
+	MOVB	R8, (R11)
+	ADD	$1, R10
+	ADD	$1, R11
+	JMP	f1
+
+/*
+ * check that dest is aligned
+ * if not, just go byte-at-a-time
+ */
+f2:
+	AND	$(XLEN-1), R11, R8
+	BEQ	R8, f3
+	SUB	$1, R9
+	BLT	R9, return
+	JMP	f5
+/*
+ * quad-vlong-at-a-time forward copy
+ */
+f3:
+	SUB	$(4*XLEN), R9
+	BLT	R9, f4
+	MOV	0(R10), R12
+	MOV	XLEN(R10), R13
+	MOV	(2*XLEN)(R10), R14
+	MOV	(3*XLEN)(R10), R15
+	MOV	R12, 0(R11)
+	MOV	R13, XLEN(R11)
+	MOV	R14, (2*XLEN)(R11)
+	MOV	R15, (3*XLEN)(R11)
+	ADD	$(4*XLEN), R10
+	ADD	$(4*XLEN), R11
+	JMP	f3
+
+/*
+ * cleanup byte-at-a-time
+ */
+f4:
+	ADD	$(4*XLEN-1), R9
+	BLT	R9, return
+f5:
+	MOVB	(R10), R8
+	MOVB	R8, (R11)
+	ADD	$1, R10
+	ADD	$1, R11
+	SUB	$1, R9
+	BGE	R9, f5
+	JMP	return
+
+return:
+	MOV	s1+0(FP), R8
+	RET
+
+/*
+ * everything the same, but
+ * copy backwards
+ */
+back:
+	ADD	R9, R10
+	ADD	R9, R11
+
+/*
+ * byte-at-a-time backward copy to
+ * get source (R10) aligned.
+ */
+b1:
+	AND	$(XLEN-1), R10, R8
+	BEQ	R8, b2
+	SUB	$1, R9
+	BLT	R9, return
+	SUB	$1, R10
+	SUB	$1, R11
+	MOVB	(R10), R8
+	MOVB	R8, (R11)
+	JMP	b1
+
+/*
+ * check that dest is aligned
+ * if not, just go byte-at-a-time
+ */
+b2:
+	AND	$(XLEN-1), R11, R8
+	BEQ	R8, b3
+	SUB	$1, R9
+	BLT	R9, return
+	JMP	b5
+/*
+ * quad-vlong-at-a-time backward copy
+ */
+b3:
+	SUB	$(4*XLEN), R9
+	BLT	R9, b4
+	SUB	$(4*XLEN), R10
+	SUB	$(4*XLEN), R11
+	MOV	0(R10), R12
+	MOV	XLEN(R10), R13
+	MOV	(2*XLEN)(R10), R14
+	MOV	(3*XLEN)(R10), R15
+	MOV	R12, 0(R11)
+	MOV	R13, XLEN(R11)
+	MOV	R14, (2*XLEN)(R11)
+	MOV	R15, (3*XLEN)(R11)
+	JMP	b3
+
+/*
+ * cleanup byte-at-a-time backward
+ */
+b4:
+	ADD	$(4*XLEN-1), R9
+	BLT	R9, return
+b5:
+	SUB	$1, R10
+	SUB	$1, R11
+	MOVB	(R10), R8
+	MOVB	R8, (R11)
+	SUB	$1, R9
+	BGE	R9, b5
+	JMP	return
--- /dev/null
+++ b/sys/src/libc/riscv64/memset.s
@@ -1,0 +1,81 @@
+/* memset(void *p, int c, uintptr n) - clear vlongs */
+	TEXT	memset(SB),$(3*XLEN)
+	MOV	R8, p+0(FP)
+	MOV	R8, R11			/* R11 is pointer */
+	MOVWU	c+XLEN(FP), R12		/* R12 is char */
+	MOV	n+(2*XLEN)(FP), R10	/* R10 is count.  NB: uintptr */
+	ADD	R10,R11, R13		/* R13 is end pointer */
+
+/*
+ * if not at least XLEN chars,
+ * dont even mess around.
+ * XLEN-1 chars to guarantee any
+ * rounding up to a doubleword
+ * boundary and XLEN characters
+ * to get at least maybe one
+ * full doubleword store.
+ */
+	SLT	$XLEN,R10, R8
+	BNE	R8, out
+
+/*
+ * turn R12 into a doubleword of characters
+ */
+	AND	$0xff, R12
+	SLL	$8,R12, R8
+	OR	R8, R12
+	SLL	$16,R12, R8
+	OR	R8, R12
+	SLL	$32,R12, R8
+	OR	R8, R12
+
+/*
+ * store one byte at a time until pointer
+ * is aligned on a doubleword boundary
+ */
+l1:
+	AND	$(XLEN-1),R11, R8
+	BEQ	R8, l2
+	MOVB	R12, 0(R11)
+	ADD	$1, R11
+	JMP	l1
+
+/*
+ * turn R10 into end pointer-(4*XLEN-1)
+ * store 4*XLEN at a time while there's room
+ */
+l2:
+	ADD	$-(4*XLEN-1),R13, R10
+l3:
+	BGEU	R10,R11, l4
+	MOV	R12, 0(R11)
+	MOV	R12, XLEN(R11)
+	MOV	R12, (2*XLEN)(R11)
+	MOV	R12, (3*XLEN)(R11)
+	ADD	$(4*XLEN), R11
+	JMP	l3
+
+/*
+ * turn R10 into end pointer-(XLEN-1)
+ * store XLEN at a time while there's room
+ */
+l4:
+	ADD	$-(XLEN-1),R13, R10
+l5:
+	BGEU	R10,R11, out
+	MOV	R12, 0(R11)
+	ADD	$XLEN, R11
+	JMP	l5
+
+/*
+ * last loop, store byte at a time
+ */
+out:
+	BGEU	R13,R11, ret
+	MOVB	R12, 0(R11)
+	ADD	$1, R11
+	JMP	out
+
+ret:
+	MOV	p+0(FP), R8
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/mkfile
@@ -1,0 +1,39 @@
+objtype=riscv64
+</$objtype/mkfile
+
+LIB=/$objtype/lib/libc.a
+SFILES=\
+	argv0.s\
+	atom.s\
+	cycles.s\
+	getcallerpc.s\
+	getfcr.s\
+	main9.s\
+	main9p.s\
+	memccpy.s\
+	memchr.s\
+	memcmp.s\
+	memmove.s\
+	memset.s\
+	setjmp.s\
+	sqrt.s\
+	strchr.s\
+	strcmp.s\
+	strcpy.s\
+	tas.s\
+
+# seek.s was included by ../9syscall/mkfile
+CFILES=\
+	_seek.c\
+	notejmp.c\
+
+HFILES=/sys/include/libc.h
+
+OFILES=${CFILES:%.c=%.$O} ${SFILES:%.s=%.$O}
+
+UPDATE=mkfile\
+	$HFILES\
+	$CFILES\
+	$SFILES\
+
+</sys/src/cmd/mksyslib
--- /dev/null
+++ b/sys/src/libc/riscv64/notejmp.c
@@ -1,0 +1,16 @@
+#include <u.h>
+#include <libc.h>
+#include <ureg.h>
+
+void
+notejmp(void *vr, jmp_buf j, int ret)
+{
+	struct Ureg *r = vr;
+
+	r->ret = ret;
+	if(ret == 0)
+		r->ret = 1;
+	r->pc = j[JMPBUFPC];
+	r->sp = j[JMPBUFSP];
+	noted(NCONT);
+}
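
A usage sketch for notejmp above (illustrative only, not part of the patch): the conventional notify handler that jumps back to a recovery point when an expected note (here an alarm) arrives:

	#include <u.h>
	#include <libc.h>

	static jmp_buf errenv;

	static void
	handler(void *ureg, char *note)
	{
		if(strncmp(note, "alarm", 5) == 0)
			notejmp(ureg, errenv, 1);	/* resume at the setjmp below */
		noted(NDFLT);
	}

	void
	main(void)
	{
		notify(handler);
		if(setjmp(errenv) == 0){
			alarm(1000);
			sleep(60*1000);		/* cut short by the alarm note */
		}
		print("timed out\n");
		exits(nil);
	}
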
--- /dev/null
+++ b/sys/src/libc/riscv64/setjmp.s
@@ -1,0 +1,17 @@
+#define LINK	R1
+#define RARG	R8
+
+TEXT	setjmp(SB), 1, $-4
+	MOV	R2, (RARG)	/* store sp in jmp_buf */
+	MOV	LINK, XLEN(RARG) /* store return pc */
+	MOV	R0, RARG	/* return 0 */
+	RET
+
+TEXT	longjmp(SB), 1, $-4
+	MOVW	r+XLEN(FP), R13
+	BNE	R13, ok		/* ansi: "longjmp(0) => longjmp(1)" */
+	MOV	$1, R13		/* bless their pointed heads */
+ok:	MOV	(RARG), R2	/* restore sp */
+	MOV	XLEN(RARG), LINK /* restore return pc */
+	MOV	R13, RARG
+	RET			/* jump to saved pc */
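
A usage sketch for the libc setjmp/longjmp above (illustrative only, not part of the patch): the jmp_buf declared in u.h is just two uintptrs, sp at JMPBUFSP and the return pc at JMPBUFPC, which is exactly what these routines save and restore:

	#include <u.h>
	#include <libc.h>

	static jmp_buf env;

	static void
	fail(void)
	{
		longjmp(env, 2);	/* resumes the setjmp below, which returns 2 */
	}

	void
	main(void)
	{
		if(setjmp(env) == 0)
			fail();
		else
			print("recovered\n");
		exits(nil);
	}
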
--- /dev/null
+++ b/sys/src/libc/riscv64/sqrt.s
@@ -1,0 +1,10 @@
+#define D	1	/* double precision */
+#define DYN	7	/* rounding mode: from fcsr */
+
+#define FSQRT(src, dst) \
+	WORD $(013<<27 | D<<25 | (src)<<15 | DYN<<12 | (dst)<<7 | 0123)
+
+TEXT	sqrt(SB), $0
+	MOVD	arg+0(FP), F0
+	FSQRT(0, 0)
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/strchr.s
@@ -1,0 +1,62 @@
+	TEXT	strchr(SB), $0
+	MOV	R8, R10
+	MOVBU	c+XLEN(FP), R11
+	BEQ	R11, l2
+
+/*
+ * char is not null
+ */
+l1:
+	MOVBU	(R10), R8
+	ADD	$1, R10
+	BEQ	R8, ret
+	BNE	R8,R11, l1
+	JMP	rm1
+
+/*
+ * char is null
+ * align to word
+ */
+l2:
+	AND	$3,R10, R8
+	BEQ	R8, l3
+	MOVBU	(R10), R8
+	ADD	$1, R10
+	BNE	R8, l2
+	JMP	rm1
+
+l3:
+	MOVW	$0xff000000, R13
+	MOVW	$0x00ff0000, R14
+	MOVW	$0x0000ff00, R15
+
+l4:
+	MOVW	(R10), R12
+	ADD	$4, R10
+	AND	$0xff, R12, R8
+	BEQ	R8, b0
+	AND	R15, R12, R8
+	BEQ	R8, b1
+	AND	R14, R12, R8
+	BEQ	R8, b2
+	AND	R13, R12, R8
+	BNE	R8, l4
+
+rm1:
+	ADD	$-1,R10, R8
+	JMP	ret
+
+b2:
+	ADD	$-2,R10, R8
+	JMP	ret
+
+b1:
+	ADD	$-3,R10, R8
+	JMP	ret
+
+b0:
+	ADD	$-4,R10, R8
+	JMP	ret
+
+ret:
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/strcmp.s
@@ -1,0 +1,20 @@
+TEXT	strcmp(SB), $0
+	MOV	s2+XLEN(FP), R9
+
+l1:
+	MOVBU	(R9), R10
+	MOVBU	(R8), R11
+	ADD	$1, R8
+	BEQ	R10, end
+	ADD	$1, R9
+	BEQ	R10, R11, l1
+
+	SLTU	R11, R10, R8
+	BNE	R8, ret
+	MOV	$-1, R8
+	RET
+
+end:
+	SLTU	R11, R10, R8
+ret:
+	RET
--- /dev/null
+++ b/sys/src/libc/riscv64/strcpy.s
@@ -1,0 +1,91 @@
+TEXT	strcpy(SB), $0
+	MOV	s2+XLEN(FP), R9		/* R9 is from pointer */
+	MOV	R8, R10			/* R10 is to pointer */
+
+/*
+ * align 'from' pointer
+ */
+l1:
+	AND	$3, R9, R12
+	ADD	$1, R9
+	BEQ	R12, l2
+	MOVB	-1(R9), R12
+	ADD	$1, R10
+	MOVB	R12, -1(R10)
+	BNE	R12, l1
+	RET
+
+/*
+ * test if 'to' is also aligned
+ */
+l2:
+	AND	$3,R10, R12
+	BEQ	R12, l4
+
+/*
+ * copy 4 at a time, 'to' not aligned
+ */
+l3:
+	MOVW	-1(R9), R11
+	ADD	$4, R9
+	ADD	$4, R10
+	MOVB	R11, -4(R10)
+	AND	$0xff, R11, R12
+	BEQ	R12, out
+
+	SRL	$8, R11
+	MOVB	R11, -3(R10)
+	AND	$0xff, R11, R12
+	BEQ	R12, out
+
+	SRL	$8, R11
+	MOVB	R11, -2(R10)
+	AND	$0xff, R11, R12
+	BEQ	R12, out
+
+	SRL	$8, R11
+	MOVB	R11, -1(R10)
+	BNE	R11, l3
+
+out:
+	RET
+
+/*
+ * word at a time both aligned
+ */
+l4:
+	MOVW	$0xff000000, R14
+	MOVW	$0x00ff0000, R15
+	MOVW	$0x0000ff00, R13
+
+l5:
+	ADD	$4, R10
+	MOVW	-1(R9), R11	/* fetch */
+	ADD	$4, R9
+
+	AND	$0xff, R11, R12	/* is it byte 0 */
+	BEQ	R12, b0
+	AND	R13, R11, R12	/* is it byte 1 */
+	BEQ	R12, b1
+	AND	R15, R11, R12	/* is it byte 2 */
+	BEQ	R12, b2
+	MOVW	R11, -4(R10)	/* store */
+	AND	R14, R11, R12	/* is it byte 3 */
+	BNE	R12, l5
+	JMP	out
+
+b0:
+	MOVB	R0, -4(R10)
+	JMP	out
+
+b1:
+	MOVB	R11, -4(R10)
+	MOVB	R0, -3(R10)
+	JMP	out
+
+b2:
+	MOVB	R11, -4(R10)
+	SRL	$8, R11
+	MOVB	R11, -3(R10)
+	MOVB	R0, -2(R10)
+	JMP	out
--- /dev/null
+++ b/sys/src/libc/riscv64/tas.s
@@ -1,0 +1,18 @@
+/*
+ *	risc-v test-and-set
+ */
+#include <atom.h>
+
+#define ARG	8
+
+/*
+ * atomically set *keyp non-zero and return previous contents
+ * (zero means that we have the lock, other values mean someone else does).
+ * see atom(2) for the properties it must implement.
+ */
+TEXT _tas(SB), $-4			/* int _tas(ulong *keyp) */
+	MOV	$1, R10
+	FENCE_RW
+	AMOW(Amoswap, AQ|RL, 10, ARG, ARG) /* R10->(R(ARG)), old (R(ARG))->ARG */
+	FENCE_RW
+	RET
--- a/sys/src/libmach/executable.c
+++ b/sys/src/libmach/executable.c
@@ -70,6 +70,7 @@
 extern	Mach	marm64;
 extern	Mach	mpower;
 extern	Mach	mpower64;
+extern	Mach	mriscv64;
 
 ExecTable exectab[] =
 {
@@ -243,6 +244,15 @@
 		&marm64,
 		sizeof(Exec)+8,
 		nil,
+		commonllp64 },
+	{ B_MAGIC,
+		"riscv64 plan 9 executable",
+		"riscv64 plan 9 dlm",
+		FRISCV64,
+		1,
+		&mriscv64,
+		sizeof(Exec)+8,
+		leswal,
 		commonllp64 },
 	{ 0 },
 };
--- /dev/null
+++ b/sys/src/libmach/j.c
@@ -1,0 +1,114 @@
+/*
+ * RISC-V RV64 definition
+ */
+#include <u.h>
+#include <libc.h>
+#include <bio.h>
+#include "/riscv64/include/ureg.h"
+#include <mach.h>
+
+#define	REGOFF(x)	(u64int)(&((struct Ureg *) 0)->x)
+#define	REGSIZE		sizeof(struct Ureg)
+
+#define RCURMODE	REGOFF(curmode)
+#define FP_REG(x)	(RCURMODE+8+8*(x))
+#define	FPREGSIZE	(8*33)	
+
+Reglist riscv64reglist[] = {
+	{"STATUS",	REGOFF(status),	RINT|RRDONLY, 'Y'},
+	{"CAUSE",	REGOFF(cause),	RINT|RRDONLY, 'Y'},
+	{"IE",		REGOFF(ie),	RINT|RRDONLY, 'Y'},
+	{"TVAL",	REGOFF(tval),	RINT|RRDONLY, 'Y'},
+	{"CURMODE",	REGOFF(curmode), RINT|RRDONLY, 'Y'},
+	{"PC",		REGOFF(pc),	RINT, 'Y'},
+	{"SP",		REGOFF(r2),	RINT, 'Y'},
+	{"R31",		REGOFF(r31),	RINT, 'Y'},
+	{"R30",		REGOFF(r30),	RINT, 'Y'},
+	{"R28",		REGOFF(r28),	RINT, 'Y'},
+	{"R27",		REGOFF(r27),	RINT, 'Y'},
+	{"R26",		REGOFF(r26),	RINT, 'Y'},
+	{"R25",		REGOFF(r25),	RINT, 'Y'},
+	{"R24",		REGOFF(r24),	RINT, 'Y'},
+	{"R23",		REGOFF(r23),	RINT, 'Y'},
+	{"R22",		REGOFF(r22),	RINT, 'Y'},
+	{"R21",		REGOFF(r21),	RINT, 'Y'},
+	{"R20",		REGOFF(r20),	RINT, 'Y'},
+	{"R19",		REGOFF(r19),	RINT, 'Y'},
+	{"R18",		REGOFF(r18),	RINT, 'Y'},
+	{"R17",		REGOFF(r17),	RINT, 'Y'},
+	{"R16",		REGOFF(r16),	RINT, 'Y'},
+	{"R15",		REGOFF(r15),	RINT, 'Y'},
+	{"R14",		REGOFF(r14),	RINT, 'Y'},
+	{"R13",		REGOFF(r13),	RINT, 'Y'},
+	{"R12",		REGOFF(r12),	RINT, 'Y'},
+	{"R11",		REGOFF(r11),	RINT, 'Y'},
+	{"R10",		REGOFF(r10),	RINT, 'Y'},
+	{"R9",		REGOFF(r9),	RINT, 'Y'},
+	{"R8",		REGOFF(r8),	RINT, 'Y'},
+	{"R7",		REGOFF(r7),	RINT, 'Y'},
+	{"R6",		REGOFF(r6),	RINT, 'Y'},
+	{"R5",		REGOFF(r5),	RINT, 'Y'},
+	{"R4",		REGOFF(r4),	RINT, 'Y'},
+	{"R3",		REGOFF(r3),	RINT, 'Y'},
+	{"R2",		REGOFF(r2),	RINT, 'Y'},
+	{"R1",		REGOFF(r1),	RINT, 'Y'},
+	{"F0",		FP_REG(0),	RFLT,		'F'},
+	{"F1",		FP_REG(1),	RFLT,		'F'},
+	{"F2",		FP_REG(2),	RFLT,		'F'},
+	{"F3",		FP_REG(3),	RFLT,		'F'},
+	{"F4",		FP_REG(4),	RFLT,		'F'},
+	{"F5",		FP_REG(5),	RFLT,		'F'},
+	{"F6",		FP_REG(6),	RFLT,		'F'},
+	{"F7",		FP_REG(7),	RFLT,		'F'},
+	{"F8",		FP_REG(8),	RFLT,		'F'},
+	{"F9",		FP_REG(9),	RFLT,		'F'},
+	{"F10",		FP_REG(10),	RFLT,		'F'},
+	{"F11",		FP_REG(11),	RFLT,		'F'},
+	{"F12",		FP_REG(12),	RFLT,		'F'},
+	{"F13",		FP_REG(13),	RFLT,		'F'},
+	{"F14",		FP_REG(14),	RFLT,		'F'},
+	{"F15",		FP_REG(15),	RFLT,		'F'},
+	{"F16",		FP_REG(16),	RFLT,		'F'},
+	{"F17",		FP_REG(17),	RFLT,		'F'},
+	{"F18",		FP_REG(18),	RFLT,		'F'},
+	{"F19",		FP_REG(19),	RFLT,		'F'},
+	{"F20",		FP_REG(20),	RFLT,		'F'},
+	{"F21",		FP_REG(21),	RFLT,		'F'},
+	{"F22",		FP_REG(22),	RFLT,		'F'},
+	{"F23",		FP_REG(23),	RFLT,		'F'},
+	{"F24",		FP_REG(24),	RFLT,		'F'},
+	{"F25",		FP_REG(25),	RFLT,		'F'},
+	{"F26",		FP_REG(26),	RFLT,		'F'},
+	{"F27",		FP_REG(27),	RFLT,		'F'},
+	{"F28",		FP_REG(28),	RFLT,		'F'},
+	{"F29",		FP_REG(29),	RFLT,		'F'},
+	{"F30",		FP_REG(30),	RFLT,		'F'},
+	{"F31",		FP_REG(31),	RFLT,		'F'},
+	{"FPCSR",	FP_REG(32)+4,	RFLT,		'X'},
+	{  0 }
+};
+
+	/* the machine description */
+Mach mriscv64 =
+{
+	"riscv64",
+	MRISCV64,		/* machine type */
+	riscv64reglist,	/* register set */
+	REGSIZE,	/* register set size */
+	FPREGSIZE,	/* FP register set size */
+	"PC",		/* name of PC */
+	"SP",		/* name of SP */
+	"R1",		/* name of link register */
+	"setSB",	/* static base register name */
+	0,		/* static base register value */
+	0x1000,		/* page size */
+	/* these are Sv39 values */
+	0xffffffc000000000ULL,	/* kernel base */
+	0xffffffc000000000ULL,	/* kernel text mask for Sv39 and above */
+	0x0000003fffffffffULL,	/* user stack top */
+	2,		/* quantization of pc */
+	8,		/* szaddr */
+	8,		/* szreg */
+	4,		/* szfloat */
+	8,		/* szdouble */
+};
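
The Reglist table above is how debuggers such as db(1) and acid(1) locate registers inside a Ureg image. A tiny hypothetical helper showing how the table is meant to be walked (field names as declared in /sys/include/mach.h; riscv64regoff is a made-up name):

	#include <u.h>
	#include <libc.h>
	#include <bio.h>
	#include <mach.h>

	extern Mach mriscv64;

	/* hedged sketch: map a register name to its offset via riscv64reglist */
	long
	riscv64regoff(char *name)
	{
		Reglist *rp;

		for(rp = mriscv64.reglist; rp->rname != nil; rp++)
			if(strcmp(rp->rname, name) == 0)
				return rp->roffs;
		return -1;			/* unknown register */
	}
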
--- /dev/null
+++ b/sys/src/libmach/jdb.c
@@ -1,0 +1,754 @@
+#include <u.h>
+#include <libc.h>
+#include <bio.h>
+#include <mach.h>
+#include "jc/j.out.h"
+
+static char *riscvexcep(Map*, Rgetter);
+
+/*
+ * RISC-V-specific debugger interface
+ */
+
+typedef struct	Instr	Instr;
+struct	Instr
+{
+	Map	*map;
+	ulong	w;
+	uvlong	addr;
+	char	*fmt;
+	int	n;
+	int	op;
+	int	aop;
+	int	func3;
+	int	func7;
+	char	rs1, rs2, rs3, rd;
+	char	rv64;
+	long	imm;
+
+	char*	curr;			/* fill point in buffer */
+	char*	end;			/* end of buffer */
+};
+
+typedef struct Optab	Optab;
+struct Optab {
+	int	func7;
+	int	op[8];
+};
+		
+typedef struct Opclass	Opclass;
+struct Opclass {
+	char	*fmt;
+	Optab	tab[4];
+};
+
+/* Major opcodes */
+enum {
+	OLOAD,	 OLOAD_FP,  Ocustom_0,	OMISC_MEM, OOP_IMM, OAUIPC, OOP_IMM_32,	O48b,
+	OSTORE,	 OSTORE_FP, Ocustom_1,	OAMO,	   OOP,	    OLUI,   OOP_32,	O64b,
+	OMADD,	 OMSUB,	    ONMSUB,	ONMADD,	   OOP_FP,  Ores_0, Ocustom_2,	O48b_2,
+	OBRANCH, OJALR,	    Ores_1,	OJAL,	   OSYSTEM, Ores_2, Ocustom_3,	O80b
+};
+
+/* copy anames from compiler */
+static
+#include "jc/enam.c"
+
+static Opclass opOLOAD = {
+	"a,d",
+	0,	AMOVB,	AMOVH,	AMOVW,	AMOV,	AMOVBU,	AMOVHU,	AMOVWU,	0,
+};
+static Opclass opOLOAD_FP = {
+	"a,fd",
+	0,	0,	0,	AMOVF,	AMOVD,	0,	0,	0,	0,
+};
+static Opclass opOMISC_MEM = {
+	"",
+	0,	AFENCE,	AFENCE_I,0,	0,	0,	0,	0,	0,
+};
+static Opclass opOOP_IMM = {
+	"$i,s,d",
+	0x20,	0,	0,	0,	0,	0,	ASRA,	0,	0,
+	0,	AADD,	ASLL,	ASLT,	ASLTU,	AXOR,	ASRL,	AOR,	AAND,
+};
+static Opclass opOAUIPC = {
+	"$i(PC),d",
+	0,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,
+};
+static Opclass opOOP_IMM_32 = {
+	"$i,s,d",
+	0x20,	0,	0,	0,	0,	0,	ASRAW,	0,	0,
+	0,	AADDW,	ASLLW,	0,	0,	0,	ASRLW,	0,	0,
+};
+static Opclass opOSTORE = {
+	"2,a",
+	0,	AMOVB,	AMOVH,	AMOVW,	AMOV,	0,	0,	0,	0,
+};
+static Opclass opOSTORE_FP = {
+	"f2,a",
+	0,	0,	0,	AMOVF,	AMOVD,	0,	0,	0,	0,
+};
+static Opclass opOAMO = {
+	"7,2,s,d",
+	0x04,	0,	0,	ASWAP_W,ASWAP_D,0,	0,	0,	0,
+	0x08,	0,	0,	ALR_W,	ALR_D,	0,	0,	0,	0,
+	0x0C,	0,	0,	ASC_W,	ASC_D,	0,	0,	0,	0,
+	0,	0,	0,	AAMO_W,	AAMO_D,	0,	0,	0,	0,
+};
+static Opclass opOOP = {
+	"2,s,d",
+	0x01,	AMUL,	AMULH,	AMULHSU,AMULHU,	ADIV,	ADIVU,	AREM,	AREMU,
+	0x20,	ASUB,	0,	0,	0,	0,	ASRA,	0,	0,
+	0,	AADD,	ASLL,	ASLT,	ASLTU,	AXOR,	ASRL,	AOR,	AAND,
+};
+static Opclass opOLUI = {
+	"$i,d",
+	0,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,	ALUI,
+};
+static Opclass opOOP_32 = {
+	"2,s,d",
+	0x01,	AMULW,	0,	0,	0,	ADIVW,	ADIVUW,	AREMW,	AREMUW,
+	0x20,	ASUBW,	0,	0,	0,	0,	ASRAW,	0,	0,
+	0,	AADDW,	ASLLW,	0,	0,	0,	ASRLW,	0,	0,
+};
+static Opclass opOBRANCH = {
+	"2,s,p",
+	0,	ABEQ,	ABNE,	0,	0,	ABLT,	ABGE,	ABLTU,	ABGEU,
+};
+static Opclass opOJALR = {
+	"d,a",
+	0,	AJALR,	AJALR,	AJALR,	AJALR,	AJALR,	AJALR,	AJALR,	AJALR,
+};
+static Opclass opOJAL = {
+	"d,p",
+	0,	AJAL,	AJAL,	AJAL,	AJAL,	AJAL,	AJAL,	AJAL,	AJAL,
+};
+static Opclass opOSYSTEM = {
+	"",
+	0,	ASYS,	ACSRRW,	ACSRRS,	ACSRRC,	0,	ACSRRWI,ACSRRSI,ACSRRCI,
+};
+static char fmtcsr[] = "c,s,d";
+static char fmtcsri[] = "c,js,d";
+static char *fmtOSYSTEM[8] = {
+	"$i", fmtcsr, fmtcsr, fmtcsr, "", fmtcsri, fmtcsri, fmtcsri,
+};
+static Opclass opOOP_FP = {
+	"fs,fd",
+	0x0,	AADDF,	ASUBF,	AMULF,	ADIVF,	AMOVF,	0,	0,	0,
+	0x1,	AMOVDF,	0,	0,	0,	0,	0,	0,	0,
+	0x2,	ACMPLEF,ACMPLTF,ACMPEQF,0,	0,	0,	0,	0,
+	0x3,	AMOVFW,	0,	AMOVFV,	0,	AMOVWF,	AMOVUF,	AMOVVF,	AMOVUVF,
+};
+static Opclass opOOP_DP = {
+	"f2,fs,fd",
+	0x0,	AADDD,	ASUBD,	AMULD,	ADIVD,	AMOVD,	0,	0,	0,
+	0x1,	AMOVFD,	0,	0,	0,	0,	0,	0,	0,
+	0x2,	ACMPLED,ACMPLTD,ACMPEQD,0,	0,	0,	0,	0,
+	0x3,	AMOVDW,	0,	AMOVDV,	0,	AMOVWD,	AMOVUD,	AMOVVD,	AMOVUVD,
+};
+
+typedef struct Compclass Compclass;
+struct Compclass {
+	char	*fmt;
+	uchar	immbits[18];
+};
+
+static Compclass rv32compressed[0x2E] = {
+/* 00-07 ([1:0] = 0) ([15:13] = 0-7) */
+	{"ADDI4SPN $i,d", 22, 6, 5, 11, 12, 7, 8, 9, 10},          /* 12:5 → 5:4|9:6|2|3 */
+	{"FLD a,fd",      24, 10, 11, 12, 5, 6},                   /* 12:10|6:5 → 5:3|7:6 */
+	{"LW a,d",        25, 6, 10, 11, 12, 5},                   /* 12:10|6:5 → 5:2|6 */
+	{"FLW a,fd",      25, 6, 10, 11, 12, 5},                   /* 12:10|6:5 → 5:2|6 rv32 */
+	{"? ",	0},
+	{"FSD f2,a",      24, 10, 11, 12, 5, 6},                   /* 12:10|6:5 → 5:3|7:6 */
+	{"SW 2,a",        25, 6, 10, 11, 12, 5},                   /* 12:10|6:5 → 5:2|6 */
+	{"FSW f2,a",      25, 6, 10, 11, 12, 5},                   /* 12:10|6:5 → 5:2|6 rv32 */
+
+/* 08-0F ([1:0] = 1) ([15:13] = 0-7 not 4) */
+	{"ADDI $i,d",    ~26, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → * 5:0 */
+	{"JAL p",        ~20, 3, 4, 5, 11, 2, 7, 6, 9, 10, 8, 12}, /* 12:2 → * 11|4|9:8|10|6|7|3:1|5 rv32 D*/
+	{"LI $i,d",      ~26, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → * 5:0 */
+	{"LUI $i,d",     ~14, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → * 17:12 */
+	{"? ",	0},
+	{"J p",          ~20, 3, 4, 5, 11, 2, 7, 6, 9, 10, 8, 12}, /* 12:2 → * 11|4|9:8|10|6|7|3:1|5 */
+	{"BEQZ s,p",     ~23, 3, 4, 10, 11, 2, 5, 6, 12},          /* 12:10|6:2 → * 8|4|3|7:6|2:1|5 */
+	{"BNEZ s,p",     ~23, 3, 4, 10, 11, 2, 5, 6, 12},          /* 12:10|6:2 → * 8|4|3|7:6|2:1|5 */
+
+/* 10-17  ([1:0] = 2) ([15:13] = 0-7 not 4) */
+	{"SLLI $i,d",     26, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → 5:0 */
+	{"FLDSP i,fd",    23, 5, 6, 12, 2, 3, 4},                  /* 12|6:2 → 5:3|8:6 */
+	{"LWSP i,d",      24, 4, 5, 6, 12, 2, 3},                  /* 12|6:2 → 5:2|7:6 */
+	{"FLWSP i,fd",    24, 4, 5, 6, 12, 2, 3},                  /* 12|6:2 → 5:2|7:6 rv32 */
+	{"? ",	0},
+	{"FSDSP f2,$i",   23, 10, 11, 12, 7, 8, 9},                /* 12:7 → 5:3|8:6 */
+	{"SWSP 2,$i",     24, 9, 10, 11, 12, 7, 8},                /* 12:7 → 5:2|7:6 */
+	{"FSWSP f2,$i",   24, 9, 10, 11, 12, 7, 8},                /* 12:7 → 5:2|7:6 rv32 */
+
+/* 18-1A  ([1:0] = 1) ([15:13] = 4) ([11:10] = 0-2) */
+	{"SRLI $i,d",     26, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → 5:0 */
+	{"SRAI $i,d",     26, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → 5:0 */
+	{"ANDI $i,d",    ~26, 2, 3, 4, 5, 6, 12},                  /* 12|6:2 → * 5:0 */
+
+/* 1B-22 ([1:0] = 1) ([15:13] = 4) ([11:10] = 3) ([12] = 0-1) ([6:5] = 0-3) */
+	{"SUB 2,d",	0},
+	{"XOR 2,d",	0},
+	{"OR 2,d",	0},
+	{"AND 2,d",	0},
+	{"SUBW 2,d",	0},		/* rv64 */
+	{"ADDW 2,d",	0},		/* rv64 */
+	{"? ",	0},
+	{"? ",	0},
+
+/* 23-26 ([1:0] = 2) ([15:13] = 4) ([12] = 0-1) ((rs2 != 0) = 0-1) */
+	{"JR s",	0},
+	{"MV 2,d",	0},
+	{"JALR s",	0},
+	{"ADD 2,d",	0},
+
+/* 27-27 ([1:0] = 1) ([15:13] = 3) ( rd = 2) */
+	{"ADDI16SP $i",  ~22, 6, 2, 5, 3, 4, 12},                  /* 12|6:2 → * 9|4|6|8:7|5 */
+
+/* 28-2C  rv64 alternates */
+	{"LD a,d",	24, 10, 11, 12, 5, 6},                         /* 12:10|6:5 → 5:3|7:6 */
+	{"SD 2,a",	24, 10, 11, 12, 5, 6},                         /* 12:10|6:5 → 5:3|7:6 */
+	{"ADDIW $i,d",	~26, 2, 3, 4, 5, 6, 12},                   /* 12|6:2 → * 5:0 */
+	{"LDSP i,d",	23, 5, 6, 12, 2, 3, 4},                    /* 12|6:2 → 5:3|8:6 */
+	{"SDSP 2,i",	23, 10, 11, 12, 7, 8, 9},	               /* 12:7 → 5:3|8:6 */
+
+/* 2D-2D  C.ADD with (rd = 0) */
+	{"EBREAK",	0 }
+};
+
+/* map major opcodes to opclass table */
+static Opclass *opclass[32] = {
+	[OLOAD]		&opOLOAD,
+	[OLOAD_FP]	&opOLOAD_FP,
+	[OMISC_MEM]	&opOMISC_MEM,
+	[OOP_IMM]	&opOOP_IMM,
+	[OAUIPC]	&opOAUIPC,
+	[OOP_IMM_32]	&opOOP_IMM_32,
+	[OSTORE]	&opOSTORE,
+	[OSTORE_FP]	&opOSTORE_FP,
+	[OAMO]		&opOAMO,
+	[OOP]		&opOOP,
+	[OLUI]		&opOLUI,
+	[OOP_FP]	&opOOP_FP,
+	[OOP_32]	&opOOP_32,
+	[OBRANCH]	&opOBRANCH,
+	[OJALR]		&opOJALR,
+	[OJAL]		&opOJAL,
+	[OSYSTEM]	&opOSYSTEM,
+};
+
+/*
+ * Print value v as name[+offset]
+ */
+static int
+gsymoff(char *buf, int n, uvlong v, int space)
+{
+	Symbol s;
+	int r;
+	long delta;
+
+	r = delta = 0;		/* to shut compiler up */
+	if (v) {
+		r = findsym(v, space, &s);
+		if (r)
+			delta = v-s.value;
+		if (delta < 0)
+			delta = -delta;
+	}
+	if (v == 0 || r == 0 || delta >= 4096)
+		return snprint(buf, n, "#%llux", v);
+	if (strcmp(s.name, ".string") == 0)
+		return snprint(buf, n, "#%llux", v);
+	if (!delta)
+		return snprint(buf, n, "%s", s.name);
+	return snprint(buf, n, "%s+%llux", s.name, v-s.value);
+}
+
+#pragma	varargck	argpos	bprint		2
+
+static void
+bprint(Instr *i, char *fmt, ...)
+{
+	va_list arg;
+
+	va_start(arg, fmt);
+	i->curr = vseprint(i->curr, i->end, fmt, arg);
+	va_end(arg);
+}
+
+static void
+format(Instr *i, char *opcode, char *f)
+{
+	int c;
+	long imm;
+	char reg;
+
+	reg = 'R';
+	if(opcode != nil){
+		bprint(i, "%s", opcode);
+		if(f == 0)
+			return;
+		bprint(i, "\t");
+	}else
+		bprint(i, "C.");
+	for(; (c = *f); f++){
+		switch(c){
+		default:
+			bprint(i, "%c", c);
+			break;
+		case ' ':
+			bprint(i, "\t");
+			break;
+		case 'f':
+			reg = 'F';
+			break;
+		case 'j':
+			reg = '$';
+			break;
+		case 's':
+			bprint(i, "%c%d", reg, i->rs1);
+			reg = 'R';
+			break;
+		case '2':
+			bprint(i, "%c%d", reg, i->rs2);
+			reg = 'R';
+			break;
+		case '3':
+			bprint(i, "%c%d", reg, i->rs3);
+			break;
+		case 'd':
+			bprint(i, "%c%d", reg, i->rd);
+			reg = 'R';
+			break;
+		case 'i':
+			imm = i->imm;
+			if(imm < 0)
+				bprint(i, "-%lux", -imm);
+			else
+				bprint(i, "%lux", imm);
+			break;
+		case 'p':
+			i->curr += gsymoff(i->curr, i->end-i->curr, i->addr + i->imm, CANY);
+			break;
+		case 'a':
+			if(i->rs1 == REGSB && mach->sb){
+				i->curr += gsymoff(i->curr, i->end-i->curr, i->imm+mach->sb, CANY);
+				bprint(i, "(SB)");
+				break;
+			}
+			bprint(i, "%lx(R%d)", i->imm, i->rs1);
+			break;
+		case '7':
+			bprint(i, "%ux", i->func7);
+			break;
+		case 'c':
+			bprint(i, "CSR(%lx)", i->imm&0xFFF);
+			break;
+		}
+	}
+}
+
+static int
+badinst(Instr *i)
+{
+	format(i, "???", 0);
+	return 4;
+}
+
+static long
+immshuffle(uint w, uchar *p)
+{
+	int shift, i;
+	ulong imm;
+
+	shift = *p++;
+	imm = 0;
+	while((i = *p++) != 0){
+		imm >>= 1;
+		if((w>>i) & 0x01)
+			imm |= (1<<31);
+	}
+	if(shift & 0x80)
+		imm = (long)imm >> (shift ^ 0xFF);
+	else
+		imm >>= shift;
+	return imm;
+}
+
+static int
+decompress(Instr *i)
+{
+	ushort w;
+	int op, aop;
+	Compclass *cop;
+
+	w = i->w;
+	i->n = 2;
+	i->func3 = (w>>13)&0x7;
+	op = w&0x3;
+	i->op = op;
+	switch(op){
+	case 0:
+		i->rd  = 8 + ((w>>2)&0x7);
+		i->rs1 = 8 + ((w>>7)&0x7);
+		i->rs2 = i->rd;
+		break;
+	case 1:
+		i->rd = (w>>7)&0x1F;
+		if((i->func3&0x4) != 0)
+			i->rd = 8 + (i->rd&0x7);
+		i->rs1 = i->rd;
+		i->rs2 = 8 + ((w>>2)&0x7);
+		break;
+	case 2:
+		i->rd = (w>>7)&0x1F;
+		i->rs1 = i->rd;
+		i->rs2 = (w>>2)&0x1F;
+	}
+	aop = (op << 3) + i->func3;
+	if((aop & 0x7) == 4){
+		switch(op){
+		case 1:
+			aop = 0x18 + ((w>>10) & 0x3);
+			if(aop == 0x1B)
+				aop += ((w>>10) & 0x4) + ((w>>5) & 0x3);
+			break;
+		case 2:
+			aop = 0x23 + ((w>>11) & 0x2) + (i->rs2 != 0);
+			if(aop == 0x26 && i->rd == 0)
+				aop = 0x2D;
+			break;
+		}
+	}
+	if(aop == 0x0B && i->rd == 2)
+		aop = 0x27;
+	if(i->rv64) switch(aop){
+	case 0x03:	aop = 0x28; break;
+	case 0x07:	aop = 0x29; break;
+	case 0x09:	aop = 0x2A; break;
+	case 0x13:	aop = 0x2B; break;
+	case 0x17:	aop = 0x2C; break;
+	}
+	i->aop = aop;
+	cop = &rv32compressed[aop];
+	i->fmt = cop->fmt;
+	i->imm = immshuffle(w, cop->immbits);
+	return 2;
+}
+
+static int
+decode(Map *map, uvlong pc, Instr *i)
+{
+	ulong w;
+	int op;
+
+	if(get4(map, pc, &w) < 0) {
+		werrstr("can't read instruction: %r");
+		return -1;
+	}
+	i->addr = pc;
+	i->map = map;
+	if((w&0x3) != 3){
+		i->w = w & 0xFFFF;
+		return decompress(i);
+	}
+	i->w = w;
+	i->n = 4;
+	op = (w&0x7F);
+	i->op = op;
+	i->func3 = (w>>12)&0x7;
+	i->func7 = (w>>25)&0x7F;
+	i->rs1 = (w>>15)&0x1F;
+	i->rs2 = (w>>20)&0x1F;
+	i->rs3 = (w>>27)&0x1F;
+	i->rd = (w>>7)&0x1F;
+#define FIELD(hi,lo,off)	(w>>(lo-off))&(((1<<(hi-lo+1))-1)<<off)
+#define LFIELD(hi,lo,off)	(w<<(off-lo))&(((1<<(hi-lo+1))-1)<<off)
+#define SFIELD(lo,off)		((long)(w&((~0)<<lo))>>(lo-off))
+	switch(op>>2) {
+	case OSTORE:	/* S-type */
+	case OSTORE_FP:
+		i->imm = SFIELD(25,5) | FIELD(11,7,0);
+		break;
+	case OBRANCH:	/* B-type */
+		i->imm = SFIELD(31,12) | LFIELD(7,7,11) | FIELD(30,25,5) | FIELD(11,8,1);
+		break;
+	case OOP_IMM:	/* I-type */
+	case OOP_IMM_32:
+		if(i->func3 == 1 || i->func3 == 5){		/* special case ASL/ASR */
+			i->imm = FIELD(25,20,0);
+			break;
+		}
+	/* fall through */
+	case OLOAD:
+	case OLOAD_FP:
+	case OMISC_MEM:
+	case OJALR:
+	case OSYSTEM:
+		i->imm = SFIELD(20,0);
+		break;
+	case OAUIPC:	/* U-type */
+	case OLUI:
+		i->imm = SFIELD(12,12);
+		break;
+	case OJAL:	/* J-type */
+		i->imm = SFIELD(31,20) | FIELD(19,12,12) | FIELD(20,20,11) | FIELD(30,21,1);
+		break;
+	}
+	return 4;
+}
+
+static int
+pseudo(Instr *i, int aop)
+{
+	char *op;
+
+	switch(aop){
+	case AJAL:
+		if(i->rd == 0){
+			format(i, "JMP",	"p");
+			return 1;
+		}
+		break;
+	case AJALR:
+		if(i->rd == 0){
+			format(i, "JMP", "a");
+			return 1;
+		}
+		break;
+	case AADD:
+		if((i->op>>2) == OOP_IMM){
+			op = i->rv64 ? "MOV" : "MOVW";
+			if(i->rs1 == 0)
+				format(i, op, "$i,d");
+			else if(i->rs1 == REGSB && mach->sb && i->rd != REGSB)
+				format(i, op, "$a,d");
+			else if(i->imm == 0)
+				format(i, op, "s,d");
+			else break;
+			return 1;
+		}
+		break;
+	case ASYS:
+		switch(i->imm){
+		case 0:
+			format(i, "ECALL", nil);
+			return 1;
+		case 1:
+			format(i, "EBREAK", nil);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int
+mkinstr(Instr *i)
+{
+	Opclass *oc;
+	Optab *o;
+	char *fmt;
+	int aop;
+
+	if((i->op&0x3) != 0x3){
+		format(i, nil, i->fmt);
+		return 2;
+	}
+	oc = opclass[i->op>>2];
+	if(oc == 0)
+		return badinst(i);
+	fmt = oc->fmt;
+	if(oc == &opOSYSTEM)
+		fmt = fmtOSYSTEM[i->func3];
+	if(oc == &opOOP_FP){
+		if(i->func7 & 1)
+			oc = &opOOP_DP;
+		o = &oc->tab[i->func7>>5];
+		switch(o->func7){
+		case 0:
+			fmt = "f2,fs,fd";
+		/* fall through */
+		default:
+			aop = o->op[(i->func7>>2)&0x7];
+			if((i->func7&~1) == 0x10){
+				if(i->func3 == 0 && i->rs1 == i->rs2)
+					fmt = "fs,fd";
+				else
+					aop = 0;
+			}
+			break;
+		case 2:
+			aop = o->op[i->func3];
+			break;
+		case 3:
+			if(i->func7 & 0x10)
+				return badinst(i);
+			aop = o->op[(i->func7>>1)&0x4 | (i->rs2&0x3)];
+			if(i->func7 & 0x8)
+				fmt = "s,fd";
+			else
+				fmt = "fs,d";
+			break;
+		}
+		if(aop == 0)
+			return badinst(i);
+		format(i, anames[aop], fmt);
+		return 4;
+	}
+	o = oc->tab;
+	while(o->func7 != 0 && (i->func7 != o->func7 || o->op[i->func3] == 0))
+		o++;
+	if((aop = o->op[i->func3]) == 0)
+		return badinst(i);
+	if(pseudo(i, aop))
+		return 4;
+	format(i, anames[aop], fmt);
+	return 4;
+}
+
+static int
+riscvdas(Map *map, uvlong pc, char modifier, char *buf, int n)
+{
+	Instr i;
+
+	USED(modifier);
+	i.rv64 = 0;
+	i.curr = buf;
+	i.end = buf+n;
+	if(decode(map, pc, &i) < 0)
+		return -1;
+	return mkinstr(&i);
+}
+
+static int
+riscv64das(Map *map, uvlong pc, char modifier, char *buf, int n)
+{
+	Instr i;
+
+	USED(modifier);
+	i.rv64 = 1;
+	i.curr = buf;
+	i.end = buf+n;
+	if(decode(map, pc, &i) < 0)
+		return -1;
+	return mkinstr(&i);
+}
+
+static int
+riscvhexinst(Map *map, uvlong pc, char *buf, int n)
+{
+	Instr i;
+
+	i.curr = buf;
+	i.end = buf+n;
+	if(decode(map, pc, &i) < 0)
+		return -1;
+	if(i.end-i.curr > 2*i.n)
+		i.curr = _hexify(buf, i.w, 2*i.n - 1);
+	*i.curr = 0;
+	return i.n;
+}
+
+static int
+riscvinstlen(Map *map, uvlong pc)
+{
+	Instr i;
+
+	return decode(map, pc, &i);
+}
+
+static char*
+riscvexcep(Map*, Rgetter)
+{
+	return "Trap";
+}
+
+static int
+riscvfoll(Map *map, uvlong pc, Rgetter rget, uvlong *foll)
+{
+	Instr i;
+	char buf[8];
+	int len;
+
+	len = decode(map, pc, &i);
+	if(len < 0)
+		return -1;
+	foll[0] = pc + len;
+	if(len == 2){
+		switch(i.aop){
+		case 0x0D: /* C.J */
+		case 0x0E: /* C.BEQZ */
+		case 0x0F: /* C.BNEZ */
+			foll[1] = pc + i.imm;
+			return 2;
+		case 0x09:	/* C.JAL */
+			foll[0] = pc + i.imm;
+			break;
+		case 0x23: /* C.JR */
+		case 0x25: /* C.JALR */
+			sprint(buf, "R%d", i.rs1);
+			foll[0] = (*rget)(map, buf);
+			break;
+		}
+		return 1;
+	}
+	switch(i.op>>2) {
+	case OBRANCH:
+		foll[1] = pc + i.imm;
+		return 2;
+	case OJAL:
+		foll[0] = pc + i.imm;
+		break;
+	case OJALR:
+		sprint(buf, "R%d", i.rd);
+		foll[0] = (*rget)(map, buf);
+		break;
+	}
+	return 1;
+}
+
+/*
+ *	Debugger interface
+ */
+Machdata riscvmach =
+{
+	{0x02, 0x90},		/* break point */
+	2,			/* break point size */
+
+	leswab,			/* short to local byte order */
+	leswal,			/* long to local byte order */
+	leswav,			/* vlong to local byte order */
+	risctrace,		/* C traceback */
+	riscframe,		/* Frame finder */
+	riscvexcep,		/* print exception */
+	0,				/* breakpoint fixup */
+	leieeesftos,		/* single precision float printer */
+	leieeedftos,		/* double precision float printer */
+	riscvfoll,		/* following addresses */
+	riscvdas,		/* symbolic disassembly */
+	riscvhexinst,	/* hex disassembly */
+	riscvinstlen,	/* instruction size */
+};
+
+Machdata riscv64mach =
+{
+	{0x02, 0x90},		/* break point */
+	2,			/* break point size */
+
+	leswab,			/* short to local byte order */
+	leswal,			/* long to local byte order */
+	leswav,			/* vlong to local byte order */
+	risctrace,		/* C traceback */
+	riscframe,		/* Frame finder */
+	riscvexcep,		/* print exception */
+	0,				/* breakpoint fixup */
+	leieeesftos,		/* single precision float printer */
+	leieeedftos,		/* double precision float printer */
+	riscvfoll,		/* following addresses */
+	riscv64das,		/* symbolic disassembly */
+	riscvhexinst,	/* hex disassembly */
+	riscvinstlen,	/* instruction size */
+};
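
The FIELD/LFIELD/SFIELD macros in decode() reassemble the scattered immediate fields of the 32-bit formats. Below is a worked example for one B-type instruction, as a standalone sketch (not part of the patch); the instruction word 0x00B50463 was hand-assembled here as beq x10,x11,.+8 and should be double-checked against the ISA manual:

	#include <u.h>
	#include <libc.h>

	#define FIELD(hi,lo,off)	(w>>(lo-off))&(((1<<(hi-lo+1))-1)<<off)
	#define LFIELD(hi,lo,off)	(w<<(off-lo))&(((1<<(hi-lo+1))-1)<<off)
	#define SFIELD(lo,off)		((long)(w&((~0)<<lo))>>(lo-off))

	/* hedged sketch: the B-type immediate reassembly done by decode() */
	void
	main(void)
	{
		ulong w;
		long imm;

		w = 0x00B50463;		/* beq x10, x11, .+8 (hand-assembled) */
		imm = SFIELD(31,12) | LFIELD(7,7,11) | FIELD(30,25,5) | FIELD(11,8,1);
		print("branch offset %ld\n", imm);	/* prints 8 */
		exits(nil);
	}
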
--- /dev/null
+++ b/sys/src/libmach/jobj.c
@@ -1,0 +1,135 @@
+/*
+ * jobj.c - identify and parse a riscv64 object file
+ */
+#include <u.h>
+#include <libc.h>
+#include <bio.h>
+#include <mach.h>
+#include "jc/j.out.h"
+#include "obj.h"
+
+typedef struct Addr	Addr;
+struct Addr
+{
+	char	type;
+	char	sym;
+	char	name;
+};
+static Addr addr(Biobuf*);
+static char type2char(int);
+static void skip(Biobuf*, int);
+
+int
+_isj(char *s)
+{
+	return  s[0] == ANAME				/* ANAME */
+		&& s[1] == D_FILE			/* type */
+		&& s[2] == 1				/* sym */
+		&& s[3] == '<';				/* name of file */
+}
+
+int
+_readj(Biobuf *bp, Prog *p)
+{
+	int as, n;
+	Addr a;
+
+	as = Bgetc(bp);			/* as */
+	if(as < 0)
+		return 0;
+	p->kind = aNone;
+	p->sig = 0;
+	if(as == ANAME || as == ASIGNAME){
+		if(as == ASIGNAME){
+			Bread(bp, &p->sig, 4);
+			p->sig = leswal(p->sig);
+		}
+		p->kind = aName;
+		p->type = type2char(Bgetc(bp));		/* type */
+		p->sym = Bgetc(bp);			/* sym */
+		n = 0;
+		for(;;) {
+			as = Bgetc(bp);
+			if(as < 0)
+				return 0;
+			n++;
+			if(as == 0)
+				break;
+		}
+		p->id = malloc(n);
+		if(p->id == 0)
+			return 0;
+		Bseek(bp, -n, 1);
+		if(Bread(bp, p->id, n) != n)
+			return 0;
+		return 1;
+	}
+	if(as == ATEXT)
+		p->kind = aText;
+	else if(as == AGLOBL)
+		p->kind = aData;
+	skip(bp, 5);		/* reg(1), lineno(4) */
+	a = addr(bp);
+	addr(bp);
+	if(a.type != D_OREG || a.name != D_STATIC && a.name != D_EXTERN)
+		p->kind = aNone;
+	p->sym = a.sym;
+	return 1;
+}
+
+static Addr
+addr(Biobuf *bp)
+{
+	Addr a;
+	long off;
+
+	a.type = Bgetc(bp);	/* a.type */
+	skip(bp,1);		/* reg */
+	a.sym = Bgetc(bp);	/* sym index */
+	a.name = Bgetc(bp);	/* sym type */
+	switch(a.type){
+	default:
+	case D_NONE: case D_REG: case D_FREG:
+		break;
+	case D_OREG:
+	case D_CONST:
+	case D_BRANCH:
+	case D_CTLREG:
+		off = Bgetc(bp);
+		off |= Bgetc(bp) << 8;
+		off |= Bgetc(bp) << 16;
+		off |= Bgetc(bp) << 24;
+		if(off < 0)
+			off = -off;
+		if(a.sym && (a.name==D_PARAM || a.name==D_AUTO))
+			_offset(a.sym, off);
+		break;
+	case D_VCONST:
+	case D_SCONST:
+		skip(bp, NSNAME);
+		break;
+	case D_FCONST:
+		skip(bp, 8);
+		break;
+	}
+	return a;
+}
+
+static char
+type2char(int t)
+{
+	switch(t){
+	case D_EXTERN:		return 'U';
+	case D_STATIC:		return 'b';
+	case D_AUTO:		return 'a';
+	case D_PARAM:		return 'p';
+	default:		return UNKNOWN;
+	}
+}
+
+static void
+skip(Biobuf *bp, int n)
+{
+	while (n-- > 0)
+		Bgetc(bp);
+}
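
_isj and _readj are only ever called through the generic object-file reader in obj.c. The sketch below shows roughly where they fit; it assumes the objtype/readobj/objtraverse entry points that nm(1) and friends use, so treat the exact names, headers, and signatures as assumptions rather than part of this patch:

	#include <u.h>
	#include <libc.h>
	#include <bio.h>
	#include <mach.h>

	static void
	printsym(Sym *s, void*)
	{
		print("%c %s\n", s->type, s->name);
	}

	/* hedged sketch: listing symbols from a .j object through obj.c */
	void
	listj(char *file)
	{
		Biobuf *bp;
		int obj;

		bp = Bopen(file, OREAD);
		if(bp == nil)
			sysfatal("open %s: %r", file);
		obj = objtype(bp, nil);			/* recognises .j via _isj() */
		if(obj >= 0 && readobj(bp, obj))	/* parses records via _readj() */
			objtraverse(printsym, nil);
		Bterm(bp);
	}
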
--- a/sys/src/libmach/mkfile
+++ b/sys/src/libmach/mkfile
@@ -10,6 +10,7 @@
 	access\
 	machdata\
 	setmach\
+	j\
 	t\
 	v\
 	k\
@@ -22,6 +23,7 @@
 	7\
 	8\
 	9\
+	jdb\
 	tdb\
 	vdb\
 	kdb\
@@ -31,6 +33,7 @@
 	5db\
 	7db\
 	8db\
+	jobj\
 	vobj\
 	kobj\
 	uobj\
@@ -66,6 +69,7 @@
 kobj.$O: /sys/src/cmd/kc/k.out.h
 qobj.$O: /sys/src/cmd/qc/q.out.h
 vobj.$O: /sys/src/cmd/vc/v.out.h
+jobj.$O: /sys/src/cmd/jc/j.out.h
 
 # 9obj.$O: /sys/src/cmd/9c/9.out.h
 # uobj.$O: uc/u.out.h
--- a/sys/src/libmach/obj.c
+++ b/sys/src/libmach/obj.c
@@ -30,6 +30,7 @@
 	_isq(char*),
 	_isv(char*),
 	_isu(char*),
+	_isj(char*),
 	_read2(Biobuf*, Prog*),
 	_read5(Biobuf*, Prog*),
 	_read6(Biobuf*, Prog*),
@@ -39,7 +40,8 @@
 	_readk(Biobuf*, Prog*),
 	_readq(Biobuf*, Prog*),
 	_readv(Biobuf*, Prog*),
-	_readu(Biobuf*, Prog*);
+	_readu(Biobuf*, Prog*),
+	_readj(Biobuf*, Prog*);
 
 typedef struct Obj	Obj;
 typedef struct Symtab	Symtab;
@@ -63,6 +65,7 @@
 	[ObjMips]	"mips .v",	_isv, _readv,
 	[ObjSparc64]	"sparc64 .u",	_isu, _readu,
 	[ObjPower64]	"power64 .9",	_is9, _read9,
+	[ObjRiscv64]	"riscv64 .j",	_isj, _readj,
 	[Maxobjtype]	0, 0
 };
 
--- a/sys/src/libmach/setmach.c
+++ b/sys/src/libmach/setmach.c
@@ -17,9 +17,11 @@
 };
 
 extern	Mach		mmips, msparc, m68020, mi386, mamd64,
-			marm, marm64, mmips2be, mmips2le, mpower, mpower64, msparc64;
+			marm, marm64, mmips2be, mmips2le, mpower, mpower64, msparc64,
+			mriscv64;
 extern	Machdata	mipsmach, mipsmachle, sparcmach, m68020mach, i386mach,
-			armmach, arm64mach, mipsmach2le, powermach, sparc64mach;
+			armmach, arm64mach, mipsmach2le, powermach, sparc64mach,
+			riscv64mach;
 
 /*
  *	machine selection table.  machines with native disassemblers should
@@ -130,6 +132,12 @@
 		ASPARC64,
 		&msparc64,
 		&sparc64mach,	},
+	{	"riscv64",
+		FRISCV64,
+		FRISCV64B,
+		ARISCV64,
+		&mriscv64,
+		&riscv64mach,	},
 	{	0		},		/*the terminator*/
 };
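
After this table entry, the riscv64 back end can be selected by name as well as by executable type. An illustrative (not shipped) use of the selection globals set by machbyname; the Map is assumed to come from the usual loadmap/attachproc path:

	#include <u.h>
	#include <libc.h>
	#include <bio.h>
	#include <mach.h>

	/* hedged sketch: disassemble one instruction via the new table entry */
	void
	disasone(Map *map, uvlong pc)
	{
		char buf[128];

		if(machbyname("riscv64") == 0)		/* sets mach and machdata */
			sysfatal("unknown machine riscv64");
		if(machdata->das(map, pc, 0, buf, sizeof buf) < 0)
			sysfatal("disassemble %#llux: %r", pc);
		print("%#llux\t%s\n", pc, buf);
	}
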
 
--- /dev/null
+++ b/sys/src/libmp/riscv64/mkfile
@@ -1,0 +1,17 @@
+objtype=riscv64
+</riscv64/mkfile
+
+LIB=/$objtype/lib/libmp.a
+OFILES=\
+#	mpvecadd.$O\
+#	mpvecdigmuladd.$O\
+#	mpvecdigmulsub.$O\
+#	mpvecsub.$O\
+
+HFILES=/$objtype/include/u.h /sys/include/mp.h ../port/dat.h
+
+UPDATE=\
+	mkfile\
+	$HFILES\
+
+</sys/src/cmd/mksyslib
--- /dev/null
+++ b/sys/src/libsec/riscv64/mkfile
@@ -1,0 +1,11 @@
+objtype=riscv64
+</$objtype/mkfile
+
+LIB=/$objtype/lib/libsec.a
+OFILES=	\
+
+HFILES=/sys/include/libsec.h
+
+UPDATE=mkfile
+
+</sys/src/cmd/mksyslib
--- /dev/null
+++ b/sys/src/libthread/riscv64.c
@@ -1,0 +1,26 @@
+#include <u.h>
+#include <libc.h>
+#include <thread.h>
+#include "threadimpl.h"
+
+/* first argument goes in a register; simplest just to ignore it */
+static void
+launcherriscv64(int, void (*f)(void *arg), void *arg)
+{
+	if (f == nil)
+		sysfatal("launcherriscv64: nil f passed: arg %#p", arg);
+	(*f)(arg);
+	threadexits(nil);
+}
+
+void
+_threadinitstack(Thread *t, void (*f)(void*), void *arg)
+{
+	uintptr *tos;
+
+	tos = (uintptr *)&t->stk[t->stksize&~7];
+	*--tos = (uintptr)arg;
+	*--tos = (uintptr)f;
+	t->sched[JMPBUFPC] = (uintptr)launcherriscv64+JMPBUFDPC;
+	t->sched[JMPBUFSP] = (uintptr)tos - 2*sizeof(uintptr); /* 1st arg, return PC */
+}
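
The stack laid out by _threadinitstack above is what launcherriscv64 finds when the scheduler first switches to the new thread; any ordinary threadcreate call exercises it. A tiny usage example (standard libthread API, nothing riscv64-specific):

	#include <u.h>
	#include <libc.h>
	#include <thread.h>

	static void
	hello(void *arg)
	{
		print("hello from thread: %s\n", (char*)arg);
	}

	void
	threadmain(int, char**)
	{
		threadcreate(hello, "riscv64", 8192);
		yield();		/* let the new thread run */
		threadexits(nil);
	}
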
--- a/sys/src/mkfile.proto
+++ b/sys/src/mkfile.proto
@@ -2,8 +2,8 @@
 # common mkfile parameters shared by all architectures
 #
 
-OS=056789qvt
-CPUS=spim arm arm64 amd64 386 power power64 mips
+OS=056789jqvt
+CPUS=spim arm arm64 amd64 386 power power64 mips riscv64
 CFLAGS=-FTVw
 LEX=lex
 YACC=yacc