From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado <emulatorman@parabola.nu>
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import

---
 arch/m32r/kernel/.gitignore      |   1 +
 arch/m32r/kernel/Makefile        |  11 +
 arch/m32r/kernel/align.c         | 584 +++++++++++++++++++++++++++
 arch/m32r/kernel/asm-offsets.c   |  14 +
 arch/m32r/kernel/entry.S         | 552 ++++++++++++++++++++++++++
 arch/m32r/kernel/head.S          | 283 +++++++++++++
 arch/m32r/kernel/irq.c           |  43 ++
 arch/m32r/kernel/m32r_ksyms.c    |  88 +++++
 arch/m32r/kernel/module.c        | 203 ++++++++++
 arch/m32r/kernel/process.c       | 167 ++++++++
 arch/m32r/kernel/ptrace.c        | 701 ++++++++++++++++++++++++++++++++
 arch/m32r/kernel/setup.c         | 420 ++++++++++++++++++++
 arch/m32r/kernel/signal.c        | 335 ++++++++++++++++
 arch/m32r/kernel/smp.c           | 835 +++++++++++++++++++++++++++++++++++++++
 arch/m32r/kernel/smpboot.c       | 626 +++++++++++++++++++++++++++++
 arch/m32r/kernel/sys_m32r.c      |  90 +++++
 arch/m32r/kernel/syscall_table.S | 327 +++++++++++++++
 arch/m32r/kernel/time.c          | 198 ++++++++++
 arch/m32r/kernel/traps.c         | 319 +++++++++++++++
 arch/m32r/kernel/vmlinux.lds.S   |  77 ++++
 20 files changed, 5874 insertions(+)
 create mode 100644 arch/m32r/kernel/.gitignore
 create mode 100644 arch/m32r/kernel/Makefile
 create mode 100644 arch/m32r/kernel/align.c
 create mode 100644 arch/m32r/kernel/asm-offsets.c
 create mode 100644 arch/m32r/kernel/entry.S
 create mode 100644 arch/m32r/kernel/head.S
 create mode 100644 arch/m32r/kernel/irq.c
 create mode 100644 arch/m32r/kernel/m32r_ksyms.c
 create mode 100644 arch/m32r/kernel/module.c
 create mode 100644 arch/m32r/kernel/process.c
 create mode 100644 arch/m32r/kernel/ptrace.c
 create mode 100644 arch/m32r/kernel/setup.c
 create mode 100644 arch/m32r/kernel/signal.c
 create mode 100644 arch/m32r/kernel/smp.c
 create mode 100644 arch/m32r/kernel/smpboot.c
 create mode 100644 arch/m32r/kernel/sys_m32r.c
 create mode 100644 arch/m32r/kernel/syscall_table.S
 create mode 100644 arch/m32r/kernel/time.c
 create mode 100644 arch/m32r/kernel/traps.c
 create mode 100644 arch/m32r/kernel/vmlinux.lds.S

diff --git a/arch/m32r/kernel/.gitignore b/arch/m32r/kernel/.gitignore
new file mode 100644
index 000000000..c5f676c3c
--- /dev/null
+++ b/arch/m32r/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
new file mode 100644
index 000000000..0c09dad8b
--- /dev/null
+++ b/arch/m32r/kernel/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Linux/M32R kernel.
+#
+
+extra-y	:= head.o vmlinux.lds
+
+obj-y	:= process.o entry.o traps.o align.o irq.o setup.o time.o \
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
+
+obj-$(CONFIG_SMP)		+= smp.o smpboot.o
+obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/m32r/kernel/align.c b/arch/m32r/kernel/align.c
new file mode 100644
index 000000000..ab871ccd3
--- /dev/null
+++ b/arch/m32r/kernel/align.c
@@ -0,0 +1,584 @@
+/*
+ * align.c - address exception handler for M32R
+ *
+ * Copyright (c) 2003 Hitoshi Yamamoto
+ */
+
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+static int get_reg(struct pt_regs *regs, int nr)
+{
+	int val;
+
+	if (nr < 4)
+		val = *(unsigned long *)(&regs->r0 + nr);
+	else if (nr < 7)
+		val = *(unsigned long *)(&regs->r4 + (nr - 4));
+	else if (nr < 13)
+		val = *(unsigned long *)(&regs->r7 + (nr - 7));
+	else
+		val = *(unsigned long *)(&regs->fp + (nr - 13));
+
+	return val;
+}
+
+static void set_reg(struct pt_regs *regs, int nr, int val)
+{
+	if (nr < 4)
+		*(unsigned long *)(&regs->r0 + nr) = val;
+	else if (nr < 7)
+		*(unsigned long *)(&regs->r4 + (nr - 4)) = val;
+	else if (nr < 13)
+		*(unsigned long *)(&regs->r7 + (nr - 7)) = val;
+	else
+		*(unsigned long *)(&regs->fp + (nr - 13)) = val;
+}
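+
+/*
+ * Note: get_reg()/set_reg() assume that r0-r3, r4-r6, r7-r12 and the
+ * registers from fp onward are each stored contiguously in struct
+ * pt_regs; that is why the register number is split into banks above.
+ */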
+
+#define REG1(insn)	(((insn) & 0x0f00) >> 8)
+#define REG2(insn)	((insn) & 0x000f)
+#define PSW_BC		0x100
+
+/* O- instruction */
+#define ISA_LD1		0x20c0	/* ld Rdest, @Rsrc */
+#define ISA_LD2		0x20e0	/* ld Rdest, @Rsrc+ */
+#define ISA_LDH		0x20a0	/* ldh Rdest, @Rsrc */
+#define ISA_LDUH	0x20b0	/* lduh Rdest, @Rsrc */
+#define ISA_ST1		0x2040	/* st Rsrc1, @Rsrc2 */
+#define ISA_ST2		0x2060	/* st Rsrc1, @+Rsrc2 */
+#define ISA_ST3		0x2070	/* st Rsrc1, @-Rsrc2 */
+#define ISA_STH1	0x2020	/* sth Rsrc1, @Rsrc2 */
+#define ISA_STH2	0x2030	/* sth Rsrc1, @Rsrc2+ */
+
+#ifdef CONFIG_ISA_DUAL_ISSUE
+
+/* OS instruction */
+#define ISA_ADD		0x00a0	/* add Rdest, Rsrc */
+#define ISA_ADDI	0x4000	/* addi Rdest, #imm8 */
+#define ISA_ADDX	0x0090	/* addx Rdest, Rsrc */
+#define ISA_AND		0x00c0	/* and Rdest, Rsrc */
+#define ISA_CMP		0x0040	/* cmp Rsrc1, Rsrc2 */
+#define ISA_CMPEQ	0x0060	/* cmpeq Rsrc1, Rsrc2 */
+#define ISA_CMPU	0x0050	/* cmpu Rsrc1, Rsrc2 */
+#define ISA_CMPZ	0x0070	/* cmpz Rsrc */
+#define ISA_LDI		0x6000	/* ldi Rdest, #imm8 */
+#define ISA_MV		0x1080	/* mv Rdest, Rsrc */
+#define ISA_NEG		0x0030	/* neg Rdest, Rsrc */
+#define ISA_NOP		0x7000	/* nop */
+#define ISA_NOT		0x00b0	/* not Rdest, Rsrc */
+#define ISA_OR		0x00e0	/* or Rdest, Rsrc */
+#define ISA_SUB		0x0020	/* sub Rdest, Rsrc */
+#define ISA_SUBX	0x0010	/* subx Rdest, Rsrc */
+#define ISA_XOR		0x00d0	/* xor Rdest, Rsrc */
+
+/* -S instruction */
+#define ISA_MUL		0x1060	/* mul Rdest, Rsrc */
+#define ISA_MULLO_A0	0x3010	/* mullo Rsrc1, Rsrc2, A0 */
+#define ISA_MULLO_A1	0x3090	/* mullo Rsrc1, Rsrc2, A1 */
+#define ISA_MVFACMI_A0	0x50f2	/* mvfacmi Rdest, A0 */
+#define ISA_MVFACMI_A1	0x50f6	/* mvfacmi Rdest, A1 */
+
+static int emu_addi(unsigned short insn, struct pt_regs *regs)
+{
+	char imm = (char)(insn & 0xff);
+	int dest = REG1(insn);
+	int val;
+
+	val = get_reg(regs, dest);
+	val += imm;
+	set_reg(regs, dest, val);
+
+	return 0;
+}
+
+static int emu_ldi(unsigned short insn, struct pt_regs *regs)
+{
+	char imm = (char)(insn & 0xff);
+
+	set_reg(regs, REG1(insn), (int)imm);
+
+	return 0;
+}
+
+static int emu_add(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	int src = REG2(insn);
+	int val;
+
+	val = get_reg(regs, dest);
+	val += get_reg(regs, src);
+	set_reg(regs, dest, val);
+
+	return 0;
+}
+
+static int emu_addx(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	unsigned int val, tmp;
+
+	val = regs->psw & PSW_BC ? 1 : 0;
+	tmp = get_reg(regs, dest);
+	val += tmp;
+	val += (unsigned int)get_reg(regs, REG2(insn));
+	set_reg(regs, dest, val);
+
+	/* update the C (carry) bit: set on unsigned overflow */
+	if (val < tmp)
+		regs->psw |= PSW_BC;
+	else
+		regs->psw &= ~(PSW_BC);
+
+	return 0;
+}
+
+static int emu_and(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	int val;
+
+	val = get_reg(regs, dest);
+	val &= get_reg(regs, REG2(insn));
+	set_reg(regs, dest, val);
+
+	return 0;
+}
+
+static int emu_cmp(unsigned short insn, struct pt_regs *regs)
+{
+	if (get_reg(regs, REG1(insn)) < get_reg(regs, REG2(insn)))
+		regs->psw |= PSW_BC;
+	else
+		regs->psw &= ~(PSW_BC);
+
+	return 0;
+}
+
+static int emu_cmpeq(unsigned short insn, struct pt_regs *regs)
+{
+	if (get_reg(regs, REG1(insn)) == get_reg(regs, REG2(insn)))
+		regs->psw |= PSW_BC;
+	else
+		regs->psw &= ~(PSW_BC);
+
+	return 0;
+}
+
+static int emu_cmpu(unsigned short insn, struct pt_regs *regs)
+{
+	if ((unsigned int)get_reg(regs, REG1(insn))
+		< (unsigned int)get_reg(regs, REG2(insn)))
+		regs->psw |= PSW_BC;
+	else
+		regs->psw &= ~(PSW_BC);
+
+	return 0;
+}
+
+static int emu_cmpz(unsigned short insn, struct pt_regs *regs)
+{
+	if (!get_reg(regs, REG2(insn)))
+		regs->psw |= PSW_BC;
+	else
+		regs->psw &= ~(PSW_BC);
+
+	return 0;
+}
+
+static int emu_mv(unsigned short insn, struct pt_regs *regs)
+{
+	int val;
+
+	val = get_reg(regs, REG2(insn));
+	set_reg(regs, REG1(insn), val);
+
+	return 0;
+}
+
+static int emu_neg(unsigned short insn, struct pt_regs *regs)
+{
+	int val;
+
+	val = get_reg(regs, REG2(insn));
+	set_reg(regs, REG1(insn), 0 - val);
+
+	return 0;
+}
+
+static int emu_not(unsigned short insn, struct pt_regs *regs)
+{
+	int val;
+
+	val = get_reg(regs, REG2(insn));
+	set_reg(regs, REG1(insn), ~val);
+
+	return 0;
+}
+
+static int emu_or(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	int val;
+
+	val = get_reg(regs, dest);
+	val |= get_reg(regs, REG2(insn));
+	set_reg(regs, dest, val);
+
+	return 0;
+}
+
+static int emu_sub(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	int val;
+
+	val = get_reg(regs, dest);
+	val -= get_reg(regs, REG2(insn));
+	set_reg(regs, dest, val);
+
+	return 0;
+}
+
+static int emu_subx(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	unsigned int val, tmp;
+
+	val = tmp = get_reg(regs, dest);
+	val -= (unsigned int)get_reg(regs, REG2(insn));
+	val -= regs->psw & PSW_BC ? 1 : 0;
+	set_reg(regs, dest, val);
+
+	/* update the C (borrow) bit: set on unsigned underflow */
+	if (val > tmp)
+		regs->psw |= PSW_BC;
+	else
+		regs->psw &= ~(PSW_BC);
+
+	return 0;
+}
+
+static int emu_xor(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	unsigned int val;
+
+	val = (unsigned int)get_reg(regs, dest);
+	val ^= (unsigned int)get_reg(regs, REG2(insn));
+	set_reg(regs, dest, val);
+
+	return 0;
+}
+
+static int emu_mul(unsigned short insn, struct pt_regs *regs)
+{
+	int dest = REG1(insn);
+	int reg1, reg2;
+
+	reg1 = get_reg(regs, dest);
+	reg2 = get_reg(regs, REG2(insn));
+
+	__asm__ __volatile__ (
+		"mul	%0, %1;		\n\t"
+		: "+r" (reg1) : "r" (reg2)
+	);
+
+	set_reg(regs, dest, reg1);
+
+	return 0;
+}
+
+static int emu_mullo_a0(unsigned short insn, struct pt_regs *regs)
+{
+	int reg1, reg2;
+
+	reg1 = get_reg(regs, REG1(insn));
+	reg2 = get_reg(regs, REG2(insn));
+
+	__asm__ __volatile__ (
+		"mullo		%0, %1, a0;	\n\t"
+		"mvfachi	%0, a0;		\n\t"
+		"mvfaclo	%1, a0;		\n\t"
+		: "+r" (reg1), "+r" (reg2)
+	);
+
+	regs->acc0h = reg1;
+	regs->acc0l = reg2;
+
+	return 0;
+}
+
+static int emu_mullo_a1(unsigned short insn, struct pt_regs *regs)
+{
+	int reg1, reg2;
+
+	reg1 = get_reg(regs, REG1(insn));
+	reg2 = get_reg(regs, REG2(insn));
+
+	__asm__ __volatile__ (
+		"mullo		%0, %1, a0;	\n\t"
+		"mvfachi	%0, a0;		\n\t"
+		"mvfaclo	%1, a0;		\n\t"
+		: "+r" (reg1), "+r" (reg2)
+	);
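+
+	/*
+	 * Note: a0 is used as a scratch accumulator even for the a1
+	 * case; the task's accumulator state lives in pt_regs
+	 * (acc1h/acc1l below) and is restored on return to user mode,
+	 * so clobbering the hardware a0 here is harmless.
+	 */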
+
+	regs->acc1h = reg1;
+	regs->acc1l = reg2;
+
+	return 0;
+}
+
+static int emu_mvfacmi_a0(unsigned short insn, struct pt_regs *regs)
+{
+	unsigned long val;
+
+	val = (regs->acc0h << 16) | (regs->acc0l >> 16);
+	set_reg(regs, REG1(insn), (int)val);
+
+	return 0;
+}
+
+static int emu_mvfacmi_a1(unsigned short insn, struct pt_regs *regs)
+{
+	unsigned long val;
+
+	val = (regs->acc1h << 16) | (regs->acc1l >> 16);
+	set_reg(regs, REG1(insn), (int)val);
+
+	return 0;
+}
+
+static int emu_m32r2(unsigned short insn, struct pt_regs *regs)
+{
+	int res = -1;
+
+	if ((insn & 0x7fff) == ISA_NOP)	/* nop */
+		return 0;
+
+	switch (insn & 0x7000) {
+	case ISA_ADDI:		/* addi Rdest, #imm8 */
+		res = emu_addi(insn, regs);
+		break;
+	case ISA_LDI:		/* ldi Rdest, #imm8 */
+		res = emu_ldi(insn, regs);
+		break;
+	default:
+		break;
+	}
+
+	if (!res)
+		return 0;
+
+	switch (insn & 0x70f0) {
+	case ISA_ADD:		/* add Rdest, Rsrc */
+		res = emu_add(insn, regs);
+		break;
+	case ISA_ADDX:		/* addx Rdest, Rsrc */
+		res = emu_addx(insn, regs);
+		break;
+	case ISA_AND:		/* and Rdest, Rsrc */
+		res = emu_and(insn, regs);
+		break;
+	case ISA_CMP:		/* cmp Rsrc1, Rsrc2 */
+		res = emu_cmp(insn, regs);
+		break;
+	case ISA_CMPEQ:		/* cmpeq Rsrc1, Rsrc2 */
+		res = emu_cmpeq(insn, regs);
+		break;
+	case ISA_CMPU:		/* cmpu Rsrc1, Rsrc2 */
+		res = emu_cmpu(insn, regs);
+		break;
+	case ISA_CMPZ:		/* cmpz Rsrc */
+		res = emu_cmpz(insn, regs);
+		break;
+	case ISA_MV:		/* mv Rdest, Rsrc */
+		res = emu_mv(insn, regs);
+		break;
+	case ISA_NEG:		/* neg Rdest, Rsrc */
+		res = emu_neg(insn, regs);
+		break;
+	case ISA_NOT:		/* not Rdest, Rsrc */
+		res = emu_not(insn, regs);
+		break;
+	case ISA_OR:		/* or Rdest, Rsrc */
+		res = emu_or(insn, regs);
+		break;
+	case ISA_SUB:		/* sub Rdest, Rsrc */
+		res = emu_sub(insn, regs);
+		break;
+	case ISA_SUBX:		/* subx Rdest, Rsrc */
+		res = emu_subx(insn, regs);
+		break;
+	case ISA_XOR:		/* xor Rdest, Rsrc */
+		res = emu_xor(insn, regs);
+		break;
+	case ISA_MUL:		/* mul Rdest, Rsrc */
+		res = emu_mul(insn, regs);
+		break;
+	case ISA_MULLO_A0:	/* mullo Rsrc1, Rsrc2, A0 */
+		res = emu_mullo_a0(insn, regs);
+		break;
+	case ISA_MULLO_A1:	/* mullo Rsrc1, Rsrc2, A1 */
+		res = emu_mullo_a1(insn, regs);
+		break;
+	default:
+		break;
+	}
+
+	if (!res)
+		return 0;
+
+	switch (insn & 0x70ff) {
+	case ISA_MVFACMI_A0:	/* mvfacmi Rdest */
+		res = emu_mvfacmi_a0(insn, regs);
+		break;
+	case ISA_MVFACMI_A1:	/* mvfacmi Rdest */
+		res = emu_mvfacmi_a1(insn, regs);
+		break;
+	default:
+		break;
+	}
+
+	return res;
+}
+
+#endif	/* CONFIG_ISA_DUAL_ISSUE */
+
+/*
+ * ld   : ?010 dest 1100 src
+ *        0010 dest 1110 src : ld Rdest, @Rsrc+
+ * ldh  : ?010 dest 1010 src
+ * lduh : ?010 dest 1011 src
+ * st   : ?010 src1 0100 src2
+ *        0010 src1 0110 src2 : st Rsrc1, @+Rsrc2
+ *        0010 src1 0111 src2 : st Rsrc1, @-Rsrc2
+ * sth  : ?010 src1 0010 src2
+ */
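+
+/*
+ * Worked example (illustrative): insn16 0x24a5 has the ldh pattern
+ * "?010 dest 1010 src" with dest = 4 and src = 5, i.e. "ldh r4, @r5";
+ * REG1() extracts the dest field and REG2() the src field.
+ */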
+
+static int insn_check(unsigned long insn, struct pt_regs *regs,
+	unsigned char **ucp)
+{
+	int res = 0;
+
+	/*
+	 * 32bit insn
+	 *  ld Rdest, @(disp16, Rsrc)
+	 *  st Rsrc1, @(disp16, Rsrc2)
+	 */
+	if (insn & 0x80000000) {	/* 32bit insn */
+		*ucp += (short)(insn & 0x0000ffff);
+		regs->bpc += 4;
+	} else {			/* 16bit insn */
+#ifdef CONFIG_ISA_DUAL_ISSUE
+		/* parallel exec check */
+		if (!(regs->bpc & 0x2) && insn & 0x8000) {
+			res = emu_m32r2((unsigned short)insn, regs);
+			regs->bpc += 4;
+		} else
+#endif	/* CONFIG_ISA_DUAL_ISSUE */
+			regs->bpc += 2;
+	}
+
+	return res;
+}
+
+static int emu_ld(unsigned long insn32, struct pt_regs *regs)
+{
+	unsigned char *ucp;
+	unsigned long val;
+	unsigned short insn16;
+	int size, src;
+
+	insn16 = insn32 >> 16;
+	src = REG2(insn16);
+	ucp = (unsigned char *)get_reg(regs, src);
+
+	if (insn_check(insn32, regs, &ucp))
+		return -1;
+
+	size = insn16 & 0x0040 ? 4 : 2;
+	if (copy_from_user(&val, ucp, size))
+		return -1;
+
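+	/*
+	 * copy_from_user() filled 'val' from its most-significant end
+	 * (big-endian), so a 2-byte load leaves the halfword in the
+	 * upper half and it must be shifted down.
+	 */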
+	if (size == 2)
+		val >>= 16;
+
+	/* ldh sign check */
+	if ((insn16 & 0x00f0) == 0x00a0 && (val & 0x8000))
+		val |= 0xffff0000;
+
+	set_reg(regs, REG1(insn16), val);
+
+	/* ld increment check */
+	if ((insn16 & 0xf0f0) == ISA_LD2)	/* ld Rdest, @Rsrc+ */
+		set_reg(regs, src, (unsigned long)(ucp + 4));
+
+	return 0;
+}
+
+static int emu_st(unsigned long insn32, struct pt_regs *regs)
+{
+	unsigned char *ucp;
+	unsigned long val;
+	unsigned short insn16;
+	int size, src2;
+
+	insn16 = insn32 >> 16;
+	src2 = REG2(insn16);
+
+	ucp = (unsigned char *)get_reg(regs, src2);
+
+	if (insn_check(insn32, regs, &ucp))
+		return -1;
+
+	size = insn16 & 0x0040 ? 4 : 2;
+	val = get_reg(regs, REG1(insn16));
+	if (size == 2)
+		val <<= 16;
+
+	/* st inc/dec check */
+	if ((insn16 & 0xf0e0) == 0x2060) {
+		if (insn16 & 0x0010)
+			ucp -= 4;
+		else
+			ucp += 4;
+
+		set_reg(regs, src2, (unsigned long)ucp);
+	}
+
+	if (copy_to_user(ucp, &val, size))
+		return -1;
+
+	/* sth inc check */
+	if ((insn16 & 0xf0f0) == ISA_STH2) {
+		ucp += 2;
+		set_reg(regs, src2, (unsigned long)ucp);
+	}
+
+	return 0;
+}
+
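+/*
+ * Entry point for the address-error exception path: returns 0 when the
+ * faulting load/store has been emulated (and bpc advanced past it),
+ * -1 when the caller should fall back to raising an exception.
+ */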
+int handle_unaligned_access(unsigned long insn32, struct pt_regs *regs)
+{
+	unsigned short insn16;
+	int res;
+
+	insn16 = insn32 >> 16;
+
+	/* ld or st check */
+	if ((insn16 & 0x7000) != 0x2000)
+		return -1;
+
+	/* insn alignment check */
+	if ((insn16 & 0x8000) && (regs->bpc & 3))
+		return -1;
+
+	if (insn16 & 0x0080)	/* ld */
+		res = emu_ld(insn32, regs);
+	else			/* st */
+		res = emu_st(insn32, regs);
+
+	return res;
+}
+
diff --git a/arch/m32r/kernel/asm-offsets.c b/arch/m32r/kernel/asm-offsets.c
new file mode 100644
index 000000000..cd3d2fc9c
--- /dev/null
+++ b/arch/m32r/kernel/asm-offsets.c
@@ -0,0 +1,14 @@
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+
+int foo(void)
+{
+	OFFSET(TI_TASK, thread_info, task);
+	OFFSET(TI_FLAGS, thread_info, flags);
+	OFFSET(TI_STATUS, thread_info, status);
+	OFFSET(TI_CPU, thread_info, cpu);
+	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+
+	return 0;
+}
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
new file mode 100644
index 000000000..c639bfa32
--- /dev/null
+++ b/arch/m32r/kernel/entry.S
@@ -0,0 +1,552 @@
+/*
+ *  linux/arch/m32r/kernel/entry.S
+ *
+ *  Copyright (c) 2001, 2002  Hirokazu Takata, Hitoshi Yamamoto, H. Kondo
+ *  Copyright (c) 2003  Hitoshi Yamamoto
+ *  Copyright (c) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ *
+ *  Taken from i386 version.
+ *    Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * Stack layout in 'ret_from_system_call':
+ * 	ptrace needs to have all regs on the stack.
+ *	if the order here is changed, it needs to be
+ *	updated in fork.c:copy_thread, signal.c:do_signal,
+ *	ptrace.c and ptrace.h
+ *
+ * M32R/M32Rx/M32R2
+ *       @(sp)      - r4
+ *       @(0x04,sp) - r5
+ *       @(0x08,sp) - r6
+ *       @(0x0c,sp) - *pt_regs
+ *       @(0x10,sp) - r0
+ *       @(0x14,sp) - r1
+ *       @(0x18,sp) - r2
+ *       @(0x1c,sp) - r3
+ *       @(0x20,sp) - r7
+ *       @(0x24,sp) - r8
+ *       @(0x28,sp) - r9
+ *       @(0x2c,sp) - r10
+ *       @(0x30,sp) - r11
+ *       @(0x34,sp) - r12
+ *       @(0x38,sp) - syscall_nr
+ *       @(0x3c,sp) - acc0h
+ *       @(0x40,sp) - acc0l
+ *       @(0x44,sp) - acc1h		; ISA_DSP_LEVEL2 only
+ *       @(0x48,sp) - acc1l		; ISA_DSP_LEVEL2 only
+ *       @(0x4c,sp) - psw
+ *       @(0x50,sp) - bpc
+ *       @(0x54,sp) - bbpsw
+ *       @(0x58,sp) - bbpc
+ *       @(0x5c,sp) - spu (cr3)
+ *       @(0x60,sp) - fp (r13)
+ *       @(0x64,sp) - lr (r14)
+ *       @(0x68,sp) - spi (cr2)
+ *       @(0x6c,sp) - orig_r0
+ */
+
+#include <linux/linkage.h>
+#include <asm/irq.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/m32r.h>
+#include <asm/mmu_context.h>
+#include <asm/asm-offsets.h>
+
+#if !defined(CONFIG_MMU)
+#define sys_madvise		sys_ni_syscall
+#define sys_readahead		sys_ni_syscall
+#define sys_mprotect		sys_ni_syscall
+#define sys_msync		sys_ni_syscall
+#define sys_mlock		sys_ni_syscall
+#define sys_munlock		sys_ni_syscall
+#define sys_mlockall		sys_ni_syscall
+#define sys_munlockall		sys_ni_syscall
+#define sys_mremap		sys_ni_syscall
+#define sys_mincore		sys_ni_syscall
+#define sys_remap_file_pages	sys_ni_syscall
+#endif /* CONFIG_MMU */
+
+#define R4(reg)			@reg
+#define R5(reg)			@(0x04,reg)
+#define R6(reg)			@(0x08,reg)
+#define PTREGS(reg)		@(0x0C,reg)
+#define R0(reg)			@(0x10,reg)
+#define R1(reg)			@(0x14,reg)
+#define R2(reg)			@(0x18,reg)
+#define R3(reg)			@(0x1C,reg)
+#define R7(reg)			@(0x20,reg)
+#define R8(reg)			@(0x24,reg)
+#define R9(reg)			@(0x28,reg)
+#define R10(reg)		@(0x2C,reg)
+#define R11(reg)		@(0x30,reg)
+#define R12(reg)		@(0x34,reg)
+#define SYSCALL_NR(reg)		@(0x38,reg)
+#define ACC0H(reg)		@(0x3C,reg)
+#define ACC0L(reg)		@(0x40,reg)
+#define ACC1H(reg)		@(0x44,reg)
+#define ACC1L(reg)		@(0x48,reg)
+#define PSW(reg)		@(0x4C,reg)
+#define BPC(reg)		@(0x50,reg)
+#define BBPSW(reg)		@(0x54,reg)
+#define BBPC(reg)		@(0x58,reg)
+#define SPU(reg)		@(0x5C,reg)
+#define FP(reg)			@(0x60,reg)  /* FP = R13 */
+#define LR(reg)			@(0x64,reg)
+#define SP(reg)			@(0x68,reg)
+#define ORIG_R0(reg)		@(0x6C,reg)
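+
+/*
+ * These offsets must stay in sync with the stack layout documented at
+ * the top of this file (and with struct pt_regs).
+ */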
+
+#define nr_syscalls ((syscall_table_size)/4)
+
+#ifdef CONFIG_PREEMPT
+#define preempt_stop(x)		DISABLE_INTERRUPTS(x)
+#else
+#define preempt_stop(x)
+#define resume_kernel		restore_all
+#endif
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg)	GET_THREAD_INFO reg
+	.macro GET_THREAD_INFO reg
+	ldi	\reg, #-THREAD_SIZE
+	and	\reg, sp
+	.endm
+
+ENTRY(ret_from_kernel_thread)
+	pop	r0
+	bl	schedule_tail
+	GET_THREAD_INFO(r8)
+	ld	r0, R0(r8)
+	ld	r1, R1(r8)
+	jl	r1
+	bra	syscall_exit
+
+ENTRY(ret_from_fork)
+	pop	r0
+	bl	schedule_tail
+	GET_THREAD_INFO(r8)
+	bra	syscall_exit
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+	; userspace resumption stub bypassing syscall exit tracing
+	ALIGN
+ret_from_exception:
+	preempt_stop(r4)
+ret_from_intr:
+	ld	r4, PSW(sp)
+#ifdef CONFIG_ISA_M32R2
+	and3	r4, r4, #0x8800		; check BSM and BPM bits
+#else
+	and3	r4, r4, #0x8000		; check BSM bit
+#endif
+	beqz	r4, resume_kernel
+resume_userspace:
+	DISABLE_INTERRUPTS(r4)		; make sure we don't miss an interrupt
+					; setting need_resched or sigpending
+					; between sampling and the iret
+	GET_THREAD_INFO(r8)
+	ld	r9, @(TI_FLAGS, r8)
+	and3	r4, r9, #_TIF_WORK_MASK	; is there any work to be done on
+					; int/exception return?
+	bnez	r4, work_pending
+	bra	restore_all
+
+#ifdef CONFIG_PREEMPT
+ENTRY(resume_kernel)
+	GET_THREAD_INFO(r8)
+	ld	r9, @(TI_PRE_COUNT, r8)	; non-zero preempt_count ?
+	bnez	r9, restore_all
+need_resched:
+	ld	r9, @(TI_FLAGS, r8)	; need_resched set ?
+	and3	r4, r9, #_TIF_NEED_RESCHED
+	beqz	r4, restore_all
+	ld	r4, PSW(sp)		; interrupts off (exception path) ?
+	and3	r4, r4, #0x4000
+	beqz	r4, restore_all
+	bl	preempt_schedule_irq
+	bra	need_resched
+#endif
+
+	; system call handler stub
+ENTRY(system_call)
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+	ENABLE_INTERRUPTS(r4)		; Enable interrupt
+	st	sp, PTREGS(sp)		; implicit pt_regs parameter
+	cmpui	r7, #NR_syscalls
+	bnc	syscall_badsys
+	st	r7, SYSCALL_NR(sp)	; syscall_nr
+					; system call tracing in operation
+	GET_THREAD_INFO(r8)
+	ld	r9, @(TI_FLAGS, r8)
+	and3	r4, r9, #_TIF_SYSCALL_TRACE
+	bnez	r4, syscall_trace_entry
+syscall_call:
+	slli	r7, #2			; table jump for the system call
+	LDIMM	(r4, sys_call_table)
+	add	r7, r4
+	ld	r7, @r7
+	jl	r7			; execute system call
+	st	r0, R0(sp)		; save the return value
+syscall_exit:
+	DISABLE_INTERRUPTS(r4)		; make sure we don't miss an interrupt
+					; setting need_resched or sigpending
+					; between sampling and the iret
+	ld	r9, @(TI_FLAGS, r8)
+	and3	r4, r9, #_TIF_ALLWORK_MASK	; current->work
+	bnez	r4, syscall_exit_work
+restore_all:
+	RESTORE_ALL
+
+	# perform work that needs to be done immediately before resumption
+	# r9 : flags
+	ALIGN
+work_pending:
+	and3	r4, r9, #_TIF_NEED_RESCHED
+	beqz	r4, work_notifysig
+work_resched:
+	bl	schedule
+	DISABLE_INTERRUPTS(r4)		; make sure we don't miss an interrupt
+					; setting need_resched or sigpending
+					; between sampling and the iret
+	ld	r9, @(TI_FLAGS, r8)
+	and3	r4, r9, #_TIF_WORK_MASK	; is there any work to be done other
+					; than syscall tracing?
+	beqz	r4, restore_all
+	and3	r4, r4, #_TIF_NEED_RESCHED
+	bnez	r4, work_resched
+
+work_notifysig:				; deal with pending signals and
+					; notify-resume requests
+	mv	r0, sp			; arg1 : struct pt_regs *regs
+	mv	r1, r9			; arg2 : __u32 thread_info_flags
+	bl	do_notify_resume
+	bra	resume_userspace
+
+	; perform syscall exit tracing
+	ALIGN
+syscall_trace_entry:
+	ldi	r4, #-ENOSYS
+	st	r4, R0(sp)
+	bl	do_syscall_trace
+	ld	r0, ORIG_R0(sp)
+	ld	r1, R1(sp)
+	ld	r2, R2(sp)
+	ld	r3, R3(sp)
+	ld	r4, R4(sp)
+	ld	r5, R5(sp)
+	ld	r6, R6(sp)
+	ld	r7, SYSCALL_NR(sp)
+	cmpui	r7, #NR_syscalls
+	bc	syscall_call
+	bra	syscall_exit
+
+	; perform syscall exit tracing
+	ALIGN
+syscall_exit_work:
+	ld	r9, @(TI_FLAGS, r8)
+	and3	r4, r9, #_TIF_SYSCALL_TRACE
+	beqz	r4, work_pending
+	ENABLE_INTERRUPTS(r4)		; could let do_syscall_trace() call
+					; schedule() instead
+	bl	do_syscall_trace
+	bra	resume_userspace
+
+	ALIGN
+syscall_fault:
+	SAVE_ALL
+	GET_THREAD_INFO(r8)
+	ldi	r4, #-EFAULT
+	st	r4, R0(sp)
+	bra	resume_userspace
+
+	ALIGN
+syscall_badsys:
+	ldi	r4, #-ENOSYS
+	st	r4, R0(sp)
+	bra	resume_userspace
+
+	.global	eit_vector
+
+	.equ ei_vec_table, eit_vector + 0x0200
+
+/*
+ * EI handler routine
+ */
+ENTRY(ei_handler)
+#if defined(CONFIG_CHIP_M32700)
+	; WORKAROUND: forcibly clear the SM bit and use the kernel stack (SPI).
+	SWITCH_TO_KERNEL_STACK
+#endif
+	SAVE_ALL
+	mv	r1, sp			; arg1(regs)
+	; get ICU status
+	seth	r0, #shigh(M32R_ICU_ISTS_ADDR)
+	ld	r0, @(low(M32R_ICU_ISTS_ADDR),r0)
+	push	r0
+#if defined(CONFIG_SMP)
+	/*
+	 * If IRQ == 0            --> Nothing to do,   don't write IMASK
+	 * If IRQ == IPI          --> Do IPI handler,  don't write IMASK
+	 * If IRQ != 0 and != IPI --> Do do_IRQ(),     write IMASK
+	 */
+	slli	r0, #4
+	srli	r0, #24			; r0(irq_num<<2)
+	;; check whether an IRQ is pending
+#if defined(CONFIG_CHIP_M32700)
+	/* WORKAROUND: IMASK bug M32700-TS1, TS2 chip. */
+	bnez	r0, 0f
+	ld24	r14, #0x00070000
+	seth	r0, #shigh(M32R_ICU_IMASK_ADDR)
+	st	r14, @(low(M32R_ICU_IMASK_ADDR),r0)
+	bra	1f
+	.fillinsn
+0:
+#endif /* CONFIG_CHIP_M32700 */
+	beqz	r0, 1f			; if (!irq_num) goto exit
+	;; IPI check
+	cmpi	r0, #(M32R_IRQ_IPI0<<2)	; ISN < IPI0 check
+	bc	2f
+	cmpi	r0, #((M32R_IRQ_IPI7+1)<<2)	; ISN > IPI7 check
+	bnc	2f
+	LDIMM	(r2, ei_vec_table)
+	add	r2, r0
+	ld	r2, @r2
+	beqz	r2, 1f			; if (no IPI handler) goto exit
+	mv	r0, r1			; arg0(regs)
+	jl	r2
+	.fillinsn
+1:
+	addi	sp, #4
+	bra	restore_all
+	.fillinsn
+2:
+	srli	r0, #2
+#else /* not CONFIG_SMP */
+	srli	r0, #22			; r0(irq)
+#endif /* not CONFIG_SMP */
+
+#if defined(CONFIG_PLAT_HAS_INT1ICU)
+	add3	r2, r0, #-(M32R_IRQ_INT1)	; INT1# interrupt
+	bnez	r2, 3f
+	seth	r0, #shigh(M32R_INT1ICU_ISTS)
+	lduh	r0, @(low(M32R_INT1ICU_ISTS),r0)	; bit10-6 : ISN
+	slli	r0, #21
+	srli	r0, #27				; ISN
+	addi	r0, #(M32R_INT1ICU_IRQ_BASE)
+	bra	check_end
+	.fillinsn
+3:
+#endif /* CONFIG_PLAT_HAS_INT1ICU */
+#if defined(CONFIG_PLAT_HAS_INT0ICU)
+	add3	r2, r0, #-(M32R_IRQ_INT0)	; INT0# interrupt
+	bnez	r2, 4f
+	seth	r0, #shigh(M32R_INT0ICU_ISTS)
+	lduh	r0, @(low(M32R_INT0ICU_ISTS),r0)	; bit10-6 : ISN
+	slli	r0, #21
+	srli	r0, #27				; ISN
+	add3	r0, r0, #(M32R_INT0ICU_IRQ_BASE)
+	bra	check_end
+	.fillinsn
+4:
+#endif /* CONFIG_PLAT_HAS_INT0ICU */
+#if defined(CONFIG_PLAT_HAS_INT2ICU)
+	add3	r2, r0, #-(M32R_IRQ_INT2)	; INT2# interrupt
+	bnez	r2, 5f
+	seth	r0, #shigh(M32R_INT2ICU_ISTS)
+	lduh	r0, @(low(M32R_INT2ICU_ISTS),r0)	; bit10-6 : ISN
+	slli	r0, #21
+	srli	r0, #27				; ISN
+	add3	r0, r0, #(M32R_INT2ICU_IRQ_BASE)
+	; bra	check_end
+	.fillinsn
+5:
+#endif /* CONFIG_PLAT_HAS_INT2ICU */
+
+check_end:
+	bl	do_IRQ
+	pop	r14
+	seth	r0, #shigh(M32R_ICU_IMASK_ADDR)
+	st	r14, @(low(M32R_ICU_IMASK_ADDR),r0)
+	bra  ret_from_intr
+
+/*
+ * Default EIT handler
+ */
+	ALIGN
+int_msg:
+	.asciz  "Unknown interrupt\n"
+	.byte	0
+
+ENTRY(default_eit_handler)
+	push	r0
+	mvfc	r0, psw
+	push	r1
+	push	r2
+	push	r3
+	push	r0
+	LDIMM	(r0, __KERNEL_DS)
+	mv	r0, r1
+	mv	r0, r2
+	LDIMM	(r0, int_msg)
+	bl	printk
+	pop	r0
+	pop	r3
+	pop	r2
+	pop	r1
+	mvtc	r0, psw
+	pop	r0
+infinite_loop:
+	bra	infinite_loop
+
+#ifdef CONFIG_MMU
+/*
+ * Access Exception handler
+ */
+ENTRY(ace_handler)
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+
+	seth	r2, #shigh(MMU_REG_BASE)	/* Check status register */
+	ld	r4, @(low(MESTS_offset),r2)
+	st	r4, @(low(MESTS_offset),r2)
+	srl3	r1, r4, #4
+#ifdef CONFIG_CHIP_M32700
+	and3	r1, r1, #0x0000ffff
+	; WORKAROUND: ignore TME bit for the M32700(TS1).
+#endif /* CONFIG_CHIP_M32700 */
+	beqz	r1, inst
+operand:
+	ld	r2, @(low(MDEVA_offset),r2)	; set address
+	srli	r1, #1
+	bra	1f
+inst:
+	and3	r1, r4, #2
+	srli	r1, #1
+	or3	r1, r1, #8
+	mvfc	r2, bpc				; set address
+	.fillinsn
+1:
+	mvfc	r3, psw
+	mv	r0, sp
+	and3	r3, r3, #0x800
+	srli	r3, #9
+	or	r1, r3
+	/*
+	 * do_page_fault():
+	 *    r0 : struct pt_regs *regs
+	 *    r1 : unsigned long error-code
+	 *    r2 : unsigned long address
+	 * error-code:
+	 *    +------+------+------+------+
+	 *    | bit3 | bit2 | bit1 | bit0 |
+	 *    +------+------+------+------+
+	 *    bit 3 == 0: data access,    1: instruction fetch
+	 *    bit 2 == 0: kernel mode,    1: user mode
+	 *    bit 1 == 0: read,           1: write
+	 *    bit 0 == 0: no page found,  1: protection fault
+	 *
+	 */
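+	/*
+	 * e.g. an error-code of 6 (0110b) denotes a user-mode data
+	 * write for which no page was found -- an ordinary write fault.
+	 */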
+	bl	do_page_fault
+	bra	ret_from_intr
+#endif  /* CONFIG_MMU */
+
+
+ENTRY(alignment_check)
+	/* void alignment_check(int error_code) */
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+	ldi	r1, #0x30			; error_code
+	mv	r0, sp				; pt_regs
+	bl	do_alignment_check
+error_code:
+	bra	ret_from_exception
+
+ENTRY(rie_handler)
+	/* void rie_handler(int error_code) */
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+	ldi	r1, #0x20			; error_code
+	mv	r0, sp				; pt_regs
+	bl	do_rie_handler
+	bra	error_code
+
+ENTRY(pie_handler)
+	/* void pie_handler(int error_code) */
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+	ldi	r1, #0				; error_code ; FIXME
+	mv	r0, sp				; pt_regs
+	bl	do_pie_handler
+	bra	error_code
+
+ENTRY(debug_trap)
+	/* void debug_trap(void) */
+	.global	withdraw_debug_trap
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+	mv	r0, sp				; pt_regs
+	bl	withdraw_debug_trap
+	ldi	r1, #0				; error_code
+	mv	r0, sp				; pt_regs
+	bl	do_debug_trap
+	bra	error_code
+
+ENTRY(ill_trap)
+	/* void ill_trap(void) */
+	SWITCH_TO_KERNEL_STACK
+	SAVE_ALL
+	ldi	r1, #0				; error_code ; FIXME
+	mv	r0, sp				; pt_regs
+	bl	do_ill_trap
+	bra	error_code
+
+ENTRY(cache_flushing_handler)
+	/* void _flush_cache_all(void); */
+	.global	_flush_cache_all
+	SWITCH_TO_KERNEL_STACK
+	push	r0
+	push	r1
+	push	r2
+	push	r3
+	push	r4
+	push	r5
+	push	r6
+	push	r7
+	push	lr
+	bl	_flush_cache_all
+	pop	lr
+	pop	r7
+	pop	r6
+	pop	r5
+	pop	r4
+	pop	r3
+	pop	r2
+	pop	r1
+	pop	r0
+	rte
+
+	.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S
new file mode 100644
index 000000000..a46652dd8
--- /dev/null
+++ b/arch/m32r/kernel/head.S
@@ -0,0 +1,283 @@
+/*
+ *  linux/arch/m32r/kernel/head.S
+ *
+ *  M32R startup code.
+ *
+ *  Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
+ *                            Hitoshi Yamamoto
+ */
+
+#include <linux/init.h>
+__INIT
+__INITDATA
+
+	.text
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/assembler.h>
+#include <asm/m32r.h>
+#include <asm/mmu_context.h>
+
+/*
+ * References to members of the boot_cpu_data structure.
+ */
+__HEAD
+	.global	start_kernel
+	.global __bss_start
+	.global _end
+ENTRY(stext)
+ENTRY(_stext)
+	/* Set up the boot stack pointers (SPI/SPU) */
+	LDIMM	(r0, spi_stack_top)
+	LDIMM	(r1, spu_stack_top)
+	mvtc	r0, spi
+	mvtc	r1, spu
+
+	/* Initialize PSW */
+	ldi	r0, #0x0000		/* use SPI, disable EI */
+	mvtc	r0, psw
+
+	/* Set up the stack pointer */
+	LDIMM	(r0, stack_start)
+	ld	r0, @r0
+	mvtc	r0, spi
+
+/*
+ * Clear BSS first so that there are no surprises...
+ */
+#ifdef CONFIG_ISA_DUAL_ISSUE
+
+	LDIMM	(r2, __bss_start)
+	LDIMM	(r3, _end)
+	sub	r3, r2		; BSS size in bytes
+	; R4 = BSS size in 16-byte units (rounded down)
+	mv	r4, r3		    ||	ldi	r1, #0
+	srli	r4, #4		    ||	addi	r2, #-4
+	beqz	r4, .Lendloop1
+.Lloop1:
+#ifndef CONFIG_CHIP_M32310
+	; Touch memory for the no-write-allocating cache.
+	ld	r0, @(4,r2)
+#endif
+	st	r1, @+r2	    ||	addi	r4, #-1
+	st	r1, @+r2
+	st	r1, @+r2
+	st	r1, @+r2	    ||	cmpeq	r1, r4	; R4 = 0?
+	bnc	.Lloop1
+.Lendloop1:
+	and3	r4, r3, #15
+	addi	r2, #4
+	beqz	r4, .Lendloop2
+.Lloop2:
+	stb	r1, @r2		    ||	addi	r4, #-1
+	addi	r2, #1
+	bnez	r4, .Lloop2
+.Lendloop2:
+
+#else /* not CONFIG_ISA_DUAL_ISSUE */
+
+	LDIMM	(r2, __bss_start)
+	LDIMM	(r3, _end)
+	sub	r3, r2		; BSS size in bytes
+	mv	r4, r3
+	srli	r4, #2		; R4 = BSS size in longwords (rounded down)
+	ldi	r1, #0		; clear R1 for longwords store
+	addi	r2, #-4		; account for pre-inc store
+	beqz	r4, .Lendloop1	; any more to go?
+.Lloop1:
+	st	r1, @+r2	; yep, zero out another longword
+	addi	r4, #-1		; decrement count
+	bnez	r4, .Lloop1	; go do some more
+.Lendloop1:
+	and3	r4, r3, #3	; get no. of remaining BSS bytes to clear
+	addi	r2, #4		; account for pre-inc store
+	beqz	r4, .Lendloop2	; any more to go?
+.Lloop2:
+	stb	r1, @r2		; yep, zero out another byte
+	addi	r2, #1		; bump address
+	addi	r4, #-1		; decrement count
+	bnez	r4, .Lloop2	; go do some more
+.Lendloop2:
+
+#endif /* not CONFIG_ISA_DUAL_ISSUE */
+
+#if 0  /* M32R_FIXME */
+/*
+ * Copy data segment from ROM to RAM.
+ */
+	.global ROM_D, TOP_DATA, END_DATA
+
+	LDIMM	(r1, ROM_D)
+	LDIMM	(r2, TOP_DATA)
+	LDIMM	(r3, END_DATA)
+	addi	r2, #-4
+	addi	r3, #-4
+loop1:
+	ld	r0, @r1+
+	st	r0, @+r2
+	cmp	r2, r3
+	bc	loop1
+#endif /* 0 */
+
+/* Jump to kernel */
+	LDIMM	(r2, start_kernel)
+	jl	r2
+	.fillinsn
+1:
+	bra	1b		; main should never return here, but
+				; just in case, we know what happens.
+
+#ifdef CONFIG_SMP
+/*
+ * AP startup routine
+ */
+	.global	eit_vector
+ENTRY(startup_AP)
+;; setup EVB
+	LDIMM  (r4, eit_vector)
+	mvtc   r4, cr5
+
+;; enable MMU
+	LDIMM	(r2, init_tlb)
+	jl	r2
+	seth  r4, #high(MATM)
+	or3   r4, r4, #low(MATM)
+	ldi   r5, #0x01
+	st    r5, @r4            ; Set MATM Reg(T bit ON)
+	ld    r6, @r4            ; MATM Check
+	LDIMM (r5, 1f)
+	jmp   r5                 ; enable MMU
+	nop
+	.fillinsn
+1:
+;; ISN check
+	ld    r6, @r4            ; MATM Check
+	seth  r4, #high(M32R_ICU_ISTS_ADDR)
+	or3   r4, r4, #low(M32R_ICU_ISTS_ADDR)
+	ld    r5, @r4		; Read ISTSi reg.
+	mv    r6, r5
+	slli  r5, #13  ; PIML check
+	srli  r5, #13  ;
+	seth  r4, #high(M32R_ICU_IMASK_ADDR)
+	or3   r4, r4, #low(M32R_ICU_IMASK_ADDR)
+	st    r5, @r4		; Write IMASKi reg.
+	slli  r6, #4   ; ISN check
+	srli  r6, #26  ;
+	seth  r4, #high(M32R_IRQ_IPI5)
+	or3   r4, r4, #low(M32R_IRQ_IPI5)
+	bne   r4, r6, 2f  ; if (ISN != CPU_BOOT_IPI) goto sleep;
+
+;; check cpu_bootout_map and set cpu_bootin_map
+	LDIMM (r4, cpu_bootout_map)
+	ld    r4, @r4
+	seth  r5, #high(M32R_CPUID_PORTL)
+	or3   r5, r5, #low(M32R_CPUID_PORTL)
+	ld    r5, @r5
+	ldi   r6, #1
+	sll   r6, r5
+	and   r4, r6
+	beqz  r4, 2f
+	LDIMM (r4, cpu_bootin_map)
+	ld    r5, @r4
+	or    r5, r6
+	st    r6, @r4
+
+;; clear PSW
+	ldi   r4, #0
+	mvtc  r4, psw
+
+;; setup SPI
+	LDIMM (r4, stack_start)
+	ld    r4, @r4
+	mvtc  r4, spi
+
+;; setup BPC (start_secondary)
+	LDIMM (r4, start_secondary)
+	mvtc  r4, bpc
+
+	rte  ; goto startup_secondary
+	nop
+	nop
+
+	.fillinsn
+2:
+	;; disable MMU
+	seth  r4, #high(MATM)
+	or3   r4, r4, #low(MATM)
+	ldi   r5, #0
+	st    r5, @r4            ; Set MATM Reg(T bit OFF)
+	ld    r6, @r4            ; MATM Check
+	LDIMM (r4, 3f)
+	seth  r5, #high(__PAGE_OFFSET)
+	or3   r5, r5, #low(__PAGE_OFFSET)
+	not   r5, r5
+	and   r4, r5
+	jmp   r4                 ; disable MMU
+	nop
+	.fillinsn
+3:
+	;; SLEEP and wait IPI
+	LDIMM (r4, AP_loop)
+	seth  r5, #high(__PAGE_OFFSET)
+	or3   r5, r5, #low(__PAGE_OFFSET)
+	not   r5, r5
+	and   r4, r5
+	jmp   r4
+	nop
+	nop
+#endif  /* CONFIG_SMP */
+
+	.text
+ENTRY(stack_start)
+	.long	init_thread_union+8192
+	.long	__KERNEL_DS
+
+/*
+ * This is initialized to create an identity mapping at 0-4M (for bootup
+ * purposes) and another mapping of the 0-4M area at virtual address
+ * PAGE_OFFSET.
+ */
+	.text
+
+#define  MOUNT_ROOT_RDONLY    1
+#define  RAMDISK_FLAGS        0		; 1024KB
+#define  ORIG_ROOT_DEV        0x0100	; /dev/ram0 (major:01, minor:00)
+#define  LOADER_TYPE          1		; (??? - non-zero value seems
+					; to be needed to boot from initrd)
+
+#define  COMMAND_LINE ""
+
+	.section	.empty_zero_page, "aw"
+ENTRY(empty_zero_page)
+	.long	MOUNT_ROOT_RDONLY		/* offset: +0x00 */
+	.long	RAMDISK_FLAGS
+	.long	ORIG_ROOT_DEV
+	.long	LOADER_TYPE
+	.long	0	/* INITRD_START */	/* +0x10 */
+	.long	0	/* INITRD_SIZE */
+	.long	0	/* CPU_CLOCK */
+	.long	0	/* BUS_CLOCK */
+	.long	0	/* TIMER_DIVIDE */	/* +0x20 */
+	.balign	256,0
+	.asciz	COMMAND_LINE
+	.byte	0
+	.balign	4096,0,4096
+
+/*------------------------------------------------------------------------
+ * Stack area
+ */
+	.section .init.data, "aw"
+	ALIGN
+	.global spi_stack_top
+	.zero	1024
+spi_stack_top:
+
+	.section .init.data, "aw"
+	ALIGN
+	.global spu_stack_top
+	.zero	1024
+spu_stack_top:
+
+	.end
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
new file mode 100644
index 000000000..c7272b894
--- /dev/null
+++ b/arch/m32r/kernel/irq.c
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/m32r/kernel/irq.c
+ *
+ *  Copyright (c) 2003, 2004  Hitoshi Yamamoto
+ *  Copyright (c) 2004  Hirokazu Takata <takata at linux-m32r.org>
+ */
+
+/*
+ *	linux/arch/i386/kernel/irq.c
+ *
+ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file contains the lowest level m32r-specific interrupt
+ * entry and irq statistics code. All the remaining irq logic is
+ * done by the generic kernel/irq/ code and in the
+ * m32r-specific irq controller code.
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+/*
+ * do_IRQ handles all normal device IRQs (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	/* FIXME M32R */
+#endif
+	generic_handle_irq(irq);
+	irq_exit();
+	set_irq_regs(old_regs);
+
+	return 1;
+}
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
new file mode 100644
index 000000000..b727e693c
--- /dev/null
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -0,0 +1,88 @@
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+
+/* platform dependent support */
+EXPORT_SYMBOL(boot_cpu_data);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+
+EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__generic_copy_from_user);
+EXPORT_SYMBOL(__generic_copy_to_user);
+EXPORT_SYMBOL(strnlen_user);
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_CHIP_M32700_TS1
+extern void *dcache_dummy;
+EXPORT_SYMBOL(dcache_dummy);
+#endif
+EXPORT_SYMBOL(cpu_data);
+
+/* TLB flushing */
+EXPORT_SYMBOL(smp_flush_tlb_page);
+#endif
+
+/* compiler generated symbol */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __lshldi3(void);
+extern void __lshrdi3(void);
+extern void __muldi3(void);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+
+/* memory and string operations */
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(empty_zero_page);
+
+EXPORT_SYMBOL(_inb);
+EXPORT_SYMBOL(_inw);
+EXPORT_SYMBOL(_inl);
+EXPORT_SYMBOL(_outb);
+EXPORT_SYMBOL(_outw);
+EXPORT_SYMBOL(_outl);
+EXPORT_SYMBOL(_inb_p);
+EXPORT_SYMBOL(_inw_p);
+EXPORT_SYMBOL(_inl_p);
+EXPORT_SYMBOL(_outb_p);
+EXPORT_SYMBOL(_outw_p);
+EXPORT_SYMBOL(_outl_p);
+EXPORT_SYMBOL(_insb);
+EXPORT_SYMBOL(_insw);
+EXPORT_SYMBOL(_insl);
+EXPORT_SYMBOL(_outsb);
+EXPORT_SYMBOL(_outsw);
+EXPORT_SYMBOL(_outsl);
+EXPORT_SYMBOL(_readb);
+EXPORT_SYMBOL(_readw);
+EXPORT_SYMBOL(_readl);
+EXPORT_SYMBOL(_writeb);
+EXPORT_SYMBOL(_writew);
+EXPORT_SYMBOL(_writel);
+
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
new file mode 100644
index 000000000..38233b659
--- /dev/null
+++ b/arch/m32r/kernel/module.c
@@ -0,0 +1,203 @@
+/*  Kernel module help for M32R.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+#define COPY_UNALIGNED_WORD(sw, tw, align) \
+{ \
+	void *__s = &(sw), *__t = &(tw); \
+	unsigned short *__s2 = __s, *__t2 =__t; \
+	unsigned char *__s1 = __s, *__t1 =__t; \
+	switch ((align)) \
+	{ \
+	case 0: \
+		*(unsigned long *) __t = *(unsigned long *) __s; \
+		break; \
+	case 2: \
+		*__t2++ = *__s2++; \
+		*__t2 = *__s2; \
+		break; \
+	default: \
+		*__t1++ = *__s1++; \
+		*__t1++ = *__s1++; \
+		*__t1++ = *__s1++; \
+		*__t1 = *__s1; \
+		break; \
+	} \
+}
+
+#define COPY_UNALIGNED_HWORD(sw, tw, align) \
+  { \
+    void *__s = &(sw), *__t = &(tw); \
+    unsigned short *__s2 = __s, *__t2 =__t; \
+    unsigned char *__s1 = __s, *__t1 =__t; \
+    switch ((align)) \
+    { \
+    case 0: \
+      *__t2 = *__s2; \
+      break; \
+    default: \
+      *__t1++ = *__s1++; \
+      *__t1 = *__s1; \
+      break; \
+    } \
+  }
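+
+/*
+ * Example (illustrative): with align == 2 the word copy above is done
+ * as two halfword accesses, so patching a 2-byte-aligned instruction
+ * never performs a misaligned 32-bit access.
+ */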
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		   const char *strtab,
+		   unsigned int symindex,
+		   unsigned int relsec,
+		   struct module *me)
+{
+	unsigned int i;
+	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	Elf32_Addr relocation;
+	uint32_t *location;
+	uint32_t value;
+	unsigned short *hlocation;
+	unsigned short hvalue;
+	int svalue;
+	int align;
+
+	DEBUGP("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+		relocation = sym->st_value + rel[i].r_addend;
+		align = (int)location & 3;
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_M32R_32_RELA:
+			COPY_UNALIGNED_WORD(*location, value, align);
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		case R_M32R_HI16_ULO_RELA:
+			COPY_UNALIGNED_WORD(*location, value, align);
+			relocation = (relocation >> 16) & 0xffff;
+			/* the RELA relocation field must hold 0 */
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		case R_M32R_HI16_SLO_RELA:
+			COPY_UNALIGNED_WORD(*location, value, align);
+			if (relocation & 0x8000)
+				relocation += 0x10000;
+			relocation = (relocation >> 16) & 0xffff;
+			/* the RELA relocation field must hold 0 */
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		case R_M32R_16_RELA:
+			hlocation = (unsigned short *)location;
+			relocation = relocation & 0xffff;
+			/* the RELA relocation field must hold 0 */
+			hvalue = relocation;
+			/* halfword store: a word copy here would clobber
+			   the halfword next to the target */
+			COPY_UNALIGNED_HWORD(hvalue, *hlocation, align);
+			break;
+		case R_M32R_SDA16_RELA:
+		case R_M32R_LO16_RELA:
+			COPY_UNALIGNED_WORD(*location, value, align);
+			relocation = relocation & 0xffff;
+			/* the RELA relocation field must hold 0 */
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		case R_M32R_24_RELA:
+			COPY_UNALIGNED_WORD(*location, value, align);
+			relocation = relocation & 0xffffff;
+			/* the RELA relocation field must hold 0 */
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		case R_M32R_18_PCREL_RELA:
+			relocation = (relocation - (Elf32_Addr) location);
+			if (relocation < -0x20000 || 0x1fffc < relocation) {
+				printk(KERN_ERR "module %s: relocation overflow: %u\n",
+				       me->name, relocation);
+				return -ENOEXEC;
+			}
+			COPY_UNALIGNED_WORD(*location, value, align);
+			if (value & 0xffff) {
+				/* the RELA relocation field must hold 0 */
+				printk(KERN_ERR "module %s: illegal relocation field: %u\n",
+				       me->name, value);
+				return -ENOEXEC;
+			}
+			relocation = (relocation >> 2) & 0xffff;
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		case R_M32R_10_PCREL_RELA:
+			hlocation = (unsigned short *)location;
+			relocation = (relocation - (Elf32_Addr) location);
+			COPY_UNALIGNED_HWORD(*hlocation, hvalue, align);
+			svalue = (int)hvalue;
+			svalue = (signed char)svalue << 2;
+			relocation += svalue;
+			relocation = (relocation >> 2) & 0xff;
+			hvalue = hvalue & 0xff00;
+			hvalue += relocation;
+			COPY_UNALIGNED_HWORD(hvalue, *hlocation, align);
+			break;
+		case R_M32R_26_PCREL_RELA:
+			relocation = (relocation - (Elf32_Addr) location);
+			if (relocation < -0x2000000 || 0x1fffffc < relocation) {
+				printk(KERN_ERR "module %s: relocation overflow: %u\n",
+				       me->name, relocation);
+				return -ENOEXEC;
+			}
+			COPY_UNALIGNED_WORD(*location, value, align);
+			if (value & 0xffffff) {
+				/* the RELA relocation field must hold 0 */
+				printk(KERN_ERR "module %s: illegal relocation field: %u\n",
+				       me->name, value);
+				return -ENOEXEC;
+			}
+			relocation = (relocation >> 2) & 0xffffff;
+			value += relocation;
+			COPY_UNALIGNED_WORD(value, *location, align);
+			break;
+		default:
+			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+			       me->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
new file mode 100644
index 000000000..e69221d58
--- /dev/null
+++ b/arch/m32r/kernel/process.c
@@ -0,0 +1,167 @@
+/*
+ *  linux/arch/m32r/kernel/process.c
+ *
+ *  Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
+ *                            Hitoshi Yamamoto
+ *  Taken from sh version.
+ *    Copyright (C) 1995  Linus Torvalds
+ *    SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+ */
+
+#undef DEBUG_PROCESS
+#ifdef DEBUG_PROCESS
+#define DPRINTK(fmt, args...)  printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \
+  __func__, ##args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/elf.h>
+#include <asm/m32r.h>
+
+#include <linux/err.h>
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+	return tsk->thread.lr;
+}
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_restart(char *__unused)
+{
+#if defined(CONFIG_PLAT_MAPPI3)
+	outw(1, (unsigned long)PLD_REBOOT);
+#endif
+
+	printk("Please push reset button!\n");
+	while (1)
+		cpu_relax();
+}
+
+void machine_halt(void)
+{
+	printk("Please push reset button!\n");
+	while (1)
+		cpu_relax();
+}
+
+void machine_power_off(void)
+{
+	/* M32R_FIXME */
+}
+
+void show_regs(struct pt_regs * regs)
+{
+	printk("\n");
+	show_regs_print_info(KERN_DEFAULT);
+
+	printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
+	  regs->bpc, regs->psw, regs->lr, regs->fp);
+	printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
+	  regs->bbpc, regs->bbpsw, regs->spu, regs->spi);
+	printk("R0 [%08lx]:R1 [%08lx]:R2 [%08lx]:R3 [%08lx]\n", \
+	  regs->r0, regs->r1, regs->r2, regs->r3);
+	printk("R4 [%08lx]:R5 [%08lx]:R6 [%08lx]:R7 [%08lx]\n", \
+	  regs->r4, regs->r5, regs->r6, regs->r7);
+	printk("R8 [%08lx]:R9 [%08lx]:R10[%08lx]:R11[%08lx]\n", \
+	  regs->r8, regs->r9, regs->r10, regs->r11);
+	printk("R12[%08lx]\n", \
+	  regs->r12);
+
+#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
+	printk("ACC0H[%08lx]:ACC0L[%08lx]\n", \
+	  regs->acc0h, regs->acc0l);
+	printk("ACC1H[%08lx]:ACC1L[%08lx]\n", \
+	  regs->acc1h, regs->acc1l);
+#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
+	printk("ACCH[%08lx]:ACCL[%08lx]\n", \
+	  regs->acc0h, regs->acc0l);
+#else
+#error unknown isa configuration
+#endif
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+	/* Nothing to do. */
+	DPRINTK("pid = %d\n", current->pid);
+}
+
+void flush_thread(void)
+{
+	DPRINTK("pid = %d\n", current->pid);
+	memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap));
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+	/* do nothing */
+	DPRINTK("pid = %d\n", dead_task->pid);
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+	return 0; /* Task didn't use the fpu at all. */
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long spu,
+	unsigned long arg, struct task_struct *tsk)
+{
+	struct pt_regs *childregs = task_pt_regs(tsk);
+	extern void ret_from_fork(void);
+	extern void ret_from_kernel_thread(void);
+
+	if (unlikely(tsk->flags & PF_KTHREAD)) {
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->psw = M32R_PSW_BIE;
+		childregs->r1 = spu;	/* fn */
+		childregs->r0 = arg;
+		tsk->thread.lr = (unsigned long)ret_from_kernel_thread;
+	} else {
+		/* Copy registers */
+		*childregs = *current_pt_regs();
+		if (spu)
+			childregs->spu = spu;
+		childregs->r0 = 0;	/* Child gets zero as return value */
+		tsk->thread.lr = (unsigned long)ret_from_fork;
+	}
+	tsk->thread.sp = (unsigned long)childregs;
+
+	return 0;
+}
+
+/*
+ * These bracket the sleeping functions..
+ */
+#define first_sched	((unsigned long) scheduling_functions_start_here)
+#define last_sched	((unsigned long) scheduling_functions_end_here)
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	/* M32R_FIXME */
+	return 0;
+}
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
new file mode 100644
index 000000000..51f5e9aa4
--- /dev/null
+++ b/arch/m32r/kernel/ptrace.c
@@ -0,0 +1,701 @@
+/*
+ * linux/arch/m32r/kernel/ptrace.c
+ *
+ * Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
+ * Copyright (C) 2004  Hirokazu Takata, Kei Sakamoto
+ *
+ * Original x86 implementation:
+ *	By Ross Biro 1/23/92
+ *	edited by Linus Torvalds
+ *
+ * Some code taken from sh version:
+ *   Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
+ * Some code taken from arm version:
+ *   Copyright (C) 2000 Russell King
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/signal.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+
+/*
+ * This routine will get a word off of the process kernel stack.
+ */
+static inline unsigned long int
+get_stack_long(struct task_struct *task, int offset)
+{
+	unsigned long *stack;
+
+	stack = (unsigned long *)task_pt_regs(task);
+
+	return stack[offset];
+}
+
+/*
+ * This routine will put a word on the process kernel stack.
+ */
+static inline int
+put_stack_long(struct task_struct *task, int offset, unsigned long data)
+{
+	unsigned long *stack;
+
+	stack = (unsigned long *)task_pt_regs(task);
+	stack[offset] = data;
+
+	return 0;
+}
+
+static int reg_offset[] = {
+	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
+	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
+};
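+
+/*
+ * reg_offset[] maps an instruction-encoded register number (0-15) to
+ * the word offset used by get_stack_long()/put_stack_long() above.
+ */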
+
+/*
+ * Read the word at offset "off" into the "struct user".  We
+ * actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
+			    unsigned long __user *data)
+{
+	unsigned long tmp;
+#ifndef NO_FPU
+	struct user * dummy = NULL;
+#endif
+
+	if ((off & 3) || off > sizeof(struct user) - 3)
+		return -EIO;
+
+	off >>= 2;
+	switch (off) {
+	case PT_EVB:
+		__asm__ __volatile__ (
+			"mvfc	%0, cr5 \n\t"
+	 		: "=r" (tmp)
+		);
+		break;
+	case PT_CBR: {
+			unsigned long psw;
+			psw = get_stack_long(tsk, PT_PSW);
+			tmp = ((psw >> 8) & 1);
+		}
+		break;
+	case PT_PSW: {
+			unsigned long psw, bbpsw;
+			psw = get_stack_long(tsk, PT_PSW);
+			bbpsw = get_stack_long(tsk, PT_BBPSW);
+			tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
+		}
+		break;
+	case PT_PC:
+		tmp = get_stack_long(tsk, PT_BPC);
+		break;
+	case PT_BPC:
+		off = PT_BBPC;
+		/* fall through */
+	default:
+		if (off < (sizeof(struct pt_regs) >> 2))
+			tmp = get_stack_long(tsk, off);
+#ifndef NO_FPU
+		else if (off >= (long)(&dummy->fpu >> 2) &&
+			 off < (long)(&dummy->u_fpvalid >> 2)) {
+			if (!tsk_used_math(tsk)) {
+				if (off == (long)(&dummy->fpu.fpscr >> 2))
+					tmp = FPSCR_INIT;
+				else
+					tmp = 0;
+			} else
+				tmp = ((long *)(&tsk->thread.fpu >> 2))
+					[off - (long)&dummy->fpu];
+		} else if (off == (long)(&dummy->u_fpvalid >> 2))
+			tmp = !!tsk_used_math(tsk);
+#endif /* not NO_FPU */
+		else
+			tmp = 0;
+	}
+
+	return put_user(tmp, data);
+}
+
+static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
+			     unsigned long data)
+{
+	int ret = -EIO;
+#ifndef NO_FPU
+	struct user * dummy = NULL;
+#endif
+
+	if ((off & 3) || off > sizeof(struct user) - 3)
+		return -EIO;
+
+	off >>= 2;
+	switch (off) {
+	case PT_EVB:
+	case PT_BPC:
+	case PT_SPI:
+		/* Modifying evb, bpc, or spi is not allowed. */
+		ret = 0;
+		break;
+	case PT_PSW:
+	case PT_CBR: {
+			/* Only the cbr bit in psw may be modified */
+			unsigned long psw;
+			psw = get_stack_long(tsk, PT_PSW);
+			psw = (psw & ~0x100) | ((data & 1) << 8);
+			ret = put_stack_long(tsk, PT_PSW, psw);
+		}
+		break;
+	case PT_PC:
+		off = PT_BPC;
+		data &= ~1;
+		/* fall through */
+	default:
+		if (off < (sizeof(struct pt_regs) >> 2))
+			ret = put_stack_long(tsk, off, data);
+#ifndef NO_FPU
+		else if (off >= (long)(&dummy->fpu >> 2) &&
+			 off < (long)(&dummy->u_fpvalid >> 2)) {
+			set_stopped_child_used_math(tsk);
+			((long *)&tsk->thread.fpu)
+				[off - (long)&dummy->fpu] = data;
+			ret = 0;
+		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
+			conditional_stopped_child_used_math(data, tsk);
+			ret = 0;
+		}
+#endif /* not NO_FPU */
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Get all user integer registers.
+ */
+static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(tsk);
+
+	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+}
+
+/*
+ * Set all user integer registers.
+ */
+static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
+{
+	struct pt_regs newregs;
+	int ret;
+
+	ret = -EFAULT;
+	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
+		struct pt_regs *regs = task_pt_regs(tsk);
+		*regs = newregs;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+
+static inline int
+check_condition_bit(struct task_struct *child)
+{
+	return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
+}
+
+static int
+check_condition_src(unsigned long op, unsigned long regno1,
+		    unsigned long regno2, struct task_struct *child)
+{
+	unsigned long reg1, reg2;
+
+	reg2 = get_stack_long(child, reg_offset[regno2]);
+
+	switch (op) {
+	case 0x0: /* BEQ */
+		reg1 = get_stack_long(child, reg_offset[regno1]);
+		return reg1 == reg2;
+	case 0x1: /* BNE */
+		reg1 = get_stack_long(child, reg_offset[regno1]);
+		return reg1 != reg2;
+	case 0x8: /* BEQZ */
+		return reg2 == 0;
+	case 0x9: /* BNEZ */
+		return reg2 != 0;
+	case 0xa: /* BLTZ */
+		return (int)reg2 < 0;
+	case 0xb: /* BGEZ */
+		return (int)reg2 >= 0;
+	case 0xc: /* BLEZ */
+		return (int)reg2 <= 0;
+	case 0xd: /* BGTZ */
+		return (int)reg2 > 0;
+	default:
+		/* never reached */
+		return 0;
+	}
+}
+
+static void
+compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
+			       unsigned long *next_pc,
+			       struct task_struct *child)
+{
+	unsigned long op, op2, op3;
+	unsigned long disp;
+	unsigned long regno;
+	int parallel = 0;
+
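+	/*
+	 * If the MSB of the right (low) halfword is set, the two 16-bit
+	 * halves execute as a parallel pair, so a fall-through advances
+	 * the PC by 4 rather than 2.
+	 */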
+	if (insn & 0x00008000)
+		parallel = 1;
+	if (pc & 3)
+		insn &= 0x7fff;	/* right slot */
+	else
+		insn >>= 16;	/* left slot */
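+	/* Two 16-bit instructions share each word; PC bit 1 selects the slot. */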
+
+	op = (insn >> 12) & 0xf;
+	op2 = (insn >> 8) & 0xf;
+	op3 = (insn >> 4) & 0xf;
+
+	if (op == 0x7) {
+		switch (op2) {
+		case 0xd: /* BNC */
+		case 0x9: /* BNCL */
+			if (!check_condition_bit(child)) {
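+				/* Sign-extend the 8-bit displacement and scale it by 4 */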
+				disp = (long)(insn << 24) >> 22;
+				*next_pc = (pc & ~0x3) + disp;
+				return;
+			}
+			break;
+		case 0x8: /* BCL */
+		case 0xc: /* BC */
+			if (check_condition_bit(child)) {
+				disp = (long)(insn << 24) >> 22;
+				*next_pc = (pc & ~0x3) + disp;
+				return;
+			}
+			break;
+		case 0xe: /* BL */
+		case 0xf: /* BRA */
+			disp = (long)(insn << 24) >> 22;
+			*next_pc = (pc & ~0x3) + disp;
+			return;
+		}
+	} else if (op == 0x1) {
+		switch (op2) {
+		case 0x0:
+			if (op3 == 0xf) { /* TRAP */
+#if 1
+				/* pass through */
+#else
+				/* kernel space is not allowed as next_pc */
+				unsigned long evb;
+				unsigned long trapno;
+				trapno = insn & 0xf;
+				__asm__ __volatile__ (
+					"mvfc %0, cr5\n"
+					: "=r" (evb)
+					:
+				);
+				*next_pc = evb + (trapno << 2);
+				return;
+#endif
+			} else if (op3 == 0xd) { /* RTE */
+				*next_pc = get_stack_long(child, PT_BPC);
+				return;
+			}
+			break;
+		case 0xc: /* JC */
+			if (op3 == 0xc && check_condition_bit(child)) {
+				regno = insn & 0xf;
+				*next_pc = get_stack_long(child,
+							  reg_offset[regno]);
+				return;
+			}
+			break;
+		case 0xd: /* JNC */
+			if (op3 == 0xc && !check_condition_bit(child)) {
+				regno = insn & 0xf;
+				*next_pc = get_stack_long(child,
+							  reg_offset[regno]);
+				return;
+			}
+			break;
+		case 0xe: /* JL */
+		case 0xf: /* JMP */
+			if (op3 == 0xc) { /* JMP */
+				regno = insn & 0xf;
+				*next_pc = get_stack_long(child,
+							  reg_offset[regno]);
+				return;
+			}
+			break;
+		}
+	}
+	if (parallel)
+		*next_pc = pc + 4;
+	else
+		*next_pc = pc + 2;
+}
+
+static void
+compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
+			       unsigned long *next_pc,
+			       struct task_struct *child)
+{
+	unsigned long op;
+	unsigned long op2;
+	unsigned long disp;
+	unsigned long regno1, regno2;
+
+	op = (insn >> 28) & 0xf;
+	if (op == 0xf) { 	/* branch 24-bit relative */
+		op2 = (insn >> 24) & 0xf;
+		switch (op2) {
+		case 0xd:	/* BNC */
+		case 0x9:	/* BNCL */
+			if (!check_condition_bit(child)) {
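+				/* Sign-extend the 24-bit displacement and scale it by 4 */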
+				disp = (long)(insn << 8) >> 6;
+				*next_pc = (pc & ~0x3) + disp;
+				return;
+			}
+			break;
+		case 0x8:	/* BCL */
+		case 0xc:	/* BC */
+			if (check_condition_bit(child)) {
+				disp = (long)(insn << 8) >> 6;
+				*next_pc = (pc & ~0x3) + disp;
+				return;
+			}
+			break;
+		case 0xe:	/* BL */
+		case 0xf:	/* BRA */
+			disp = (long)(insn << 8) >> 6;
+			*next_pc = (pc & ~0x3) + disp;
+			return;
+		}
+	} else if (op == 0xb) { /* branch 16-bit relative */
+		op2 = (insn >> 20) & 0xf;
+		switch (op2) {
+		case 0x0: /* BEQ */
+		case 0x1: /* BNE */
+		case 0x8: /* BEQZ */
+		case 0x9: /* BNEZ */
+		case 0xa: /* BLTZ */
+		case 0xb: /* BGEZ */
+		case 0xc: /* BLEZ */
+		case 0xd: /* BGTZ */
+			regno1 = ((insn >> 24) & 0xf);
+			regno2 = ((insn >> 16) & 0xf);
+			if (check_condition_src(op2, regno1, regno2, child)) {
+				disp = (long)(insn << 16) >> 14;
+				*next_pc = (pc & ~0x3) + disp;
+				return;
+			}
+			break;
+		}
+	}
+	*next_pc = pc + 4;
+}
+
+static inline void
+compute_next_pc(unsigned long insn, unsigned long pc,
+		unsigned long *next_pc, struct task_struct *child)
+{
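+	/* Bit 31 of the fetched word is set only for a 32-bit instruction. */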
+	if (insn & 0x80000000)
+		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
+	else
+		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
+}
+
+static int
+register_debug_trap(struct task_struct *child, unsigned long next_pc,
+	unsigned long next_insn, unsigned long *code)
+{
+	struct debug_trap *p = &child->thread.debug_trap;
+	unsigned long addr = next_pc & ~3;
+
+	if (p->nr_trap == MAX_TRAPS) {
+		printk(KERN_ERR "kernel BUG at %s %d: p->nr_trap = %d\n",
+					__FILE__, __LINE__, p->nr_trap);
+		return -1;
+	}
+	p->addr[p->nr_trap] = addr;
+	p->insn[p->nr_trap] = next_insn;
+	p->nr_trap++;
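+	/*
+	 * Rewrite the word so that the halfword holding next_pc becomes a
+	 * TRAP1 (0x10f1); a 32-bit or parallel instruction in the left
+	 * slot is replaced by TRAP1 || NOP (0x7000) instead.
+	 */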
+	if (next_pc & 3) {
+		*code = (next_insn & 0xffff0000) | 0x10f1;
+		/* xxx --> TRAP1 */
+	} else {
+		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
+			*code = 0x10f17000;
+			/* TRAP1 --> NOP */
+		} else {
+			*code = (next_insn & 0xffff) | 0x10f10000;
+			/* TRAP1 --> xxx */
+		}
+	}
+	return 0;
+}
+
+static int
+unregister_debug_trap(struct task_struct *child, unsigned long addr,
+		      unsigned long *code)
+{
+	struct debug_trap *p = &child->thread.debug_trap;
+	int i;
+
+	/* Search debug trap entry. */
+	for (i = 0; i < p->nr_trap; i++) {
+		if (p->addr[i] == addr)
+			break;
+	}
+	if (i >= p->nr_trap) {
+		/* The trap may have been requested by the debugger itself;
+		 * ptrace should do nothing in this case.
+		 */
+		return 0;
+	}
+
+	/* Recover original instruction code. */
+	*code = p->insn[i];
+
+	/* Shift debug trap entries. */
+	while (i < p->nr_trap - 1) {
+		p->insn[i] = p->insn[i + 1];
+		p->addr[i] = p->addr[i + 1];
+		i++;
+	}
+	p->nr_trap--;
+	return 1;
+}
+
+static void
+unregister_all_debug_traps(struct task_struct *child)
+{
+	struct debug_trap *p = &child->thread.debug_trap;
+	int i;
+
+	for (i = 0; i < p->nr_trap; i++)
+		access_process_vm(child, p->addr[i], &p->insn[i],
+				  sizeof(p->insn[i]), 1);
+	p->nr_trap = 0;
+}
+
+static inline void
+invalidate_cache(void)
+{
+#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
+
+	_flush_cache_copyback_all();
+
+#else	/* ! (CONFIG_CHIP_M32700 || CONFIG_CHIP_OPSP) */
+
+	/* Invalidate cache */
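+	/*
+	 * Per the inline comments below: a byte store to 0xffffffff turns
+	 * the cache off/on, a store to 0xfffffffe starts invalidation,
+	 * and the loop polls until the invalidation completes.
+	 */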
+	__asm__ __volatile__ (
+                "ldi    r0, #-1					\n\t"
+                "ldi    r1, #0					\n\t"
+                "stb    r1, @r0		; cache off		\n\t"
+                ";						\n\t"
+                "ldi    r0, #-2					\n\t"
+                "ldi    r1, #1					\n\t"
+                "stb    r1, @r0		; cache invalidate	\n\t"
+                ".fillinsn					\n"
+                "0:						\n\t"
+                "ldb    r1, @r0		; invalidate check	\n\t"
+                "bnez   r1, 0b					\n\t"
+                ";						\n\t"
+                "ldi    r0, #-1					\n\t"
+                "ldi    r1, #1					\n\t"
+                "stb    r1, @r0		; cache on		\n\t"
+		: : : "r0", "r1", "memory"
+	);
+	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
+	 */
+#endif	/* CONFIG_CHIP_M32700 || CONFIG_CHIP_OPSP */
+}
+
+/* Embed a debug trap (TRAP1) code */
+static int
+embed_debug_trap(struct task_struct *child, unsigned long next_pc)
+{
+	unsigned long next_insn, code;
+	unsigned long addr = next_pc & ~3;
+
+	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+	    != sizeof(next_insn)) {
+		return -1; /* error */
+	}
+
+	/* Set a trap code. */
+	if (register_debug_trap(child, next_pc, next_insn, &code)) {
+		return -1; /* error */
+	}
+	if (access_process_vm(child, addr, &code, sizeof(code), 1)
+	    != sizeof(code)) {
+		return -1; /* error */
+	}
+	return 0; /* success */
+}
+
+void
+withdraw_debug_trap(struct pt_regs *regs)
+{
+	unsigned long addr;
+	unsigned long code;
+
+	addr = (regs->bpc - 2) & ~3;
+	regs->bpc -= 2;
+	if (unregister_debug_trap(current, addr, &code)) {
+		access_process_vm(current, addr, &code, sizeof(code), 1);
+		invalidate_cache();
+	}
+}
+
+void
+init_debug_traps(struct task_struct *child)
+{
+	struct debug_trap *p = &child->thread.debug_trap;
+	int i;
+	p->nr_trap = 0;
+	for (i = 0; i < MAX_TRAPS; i++) {
+		p->addr[i] = 0;
+		p->insn[i] = 0;
+	}
+}
+
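+/*
+ * Single-step is emulated: compute the next PC and plant a TRAP1 there
+ * via embed_debug_trap().
+ */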
+void user_enable_single_step(struct task_struct *child)
+{
+	unsigned long next_pc;
+	unsigned long pc, insn;
+
+	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
+	/* Compute next pc.  */
+	pc = get_stack_long(child, PT_BPC);
+
+	if (access_process_vm(child, pc & ~3, &insn, sizeof(insn), 0)
+	    != sizeof(insn))
+		return;
+
+	compute_next_pc(insn, pc, &next_pc, child);
+	if (next_pc & 0x80000000)
+		return;
+
+	if (embed_debug_trap(child, next_pc))
+		return;
+
+	invalidate_cache();
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	unregister_all_debug_traps(child);
+	invalidate_cache();
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	/* nothing to do.. */
+}
+
+long
+arch_ptrace(struct task_struct *child, long request,
+	    unsigned long addr, unsigned long data)
+{
+	int ret;
+	unsigned long __user *datap = (unsigned long __user *) data;
+
+	switch (request) {
+	/*
+	 * read word at location "addr" in the child process.
+	 */
+	case PTRACE_PEEKTEXT:
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
+		break;
+
+	/*
+	 * read the word at location addr in the USER area.
+	 */
+	case PTRACE_PEEKUSR:
+		ret = ptrace_read_user(child, addr, datap);
+		break;
+
+	/*
+	 * write the word at location addr.
+	 */
+	case PTRACE_POKETEXT:
+	case PTRACE_POKEDATA:
+		ret = generic_ptrace_pokedata(child, addr, data);
+		if (ret == 0 && request == PTRACE_POKETEXT)
+			invalidate_cache();
+		break;
+
+	/*
+	 * write the word at location addr in the USER area.
+	 */
+	case PTRACE_POKEUSR:
+		ret = ptrace_write_user(child, addr, data);
+		break;
+
+	case PTRACE_GETREGS:
+		ret = ptrace_getregs(child, datap);
+		break;
+
+	case PTRACE_SETREGS:
+		ret = ptrace_setregs(child, datap);
+		break;
+
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+/* notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+void do_syscall_trace(void)
+{
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return;
+	if (!(current->ptrace & PT_PTRACED))
+		return;
+	/* the 0x80 provides a way for the tracing parent to distinguish
+	   between a syscall stop and SIGTRAP delivery */
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
new file mode 100644
index 000000000..0392112a5
--- /dev/null
+++ b/arch/m32r/kernel/setup.c
@@ -0,0 +1,420 @@
+/*
+ *  linux/arch/m32r/kernel/setup.c
+ *
+ *  Setup routines for Renesas M32R
+ *
+ *  Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
+ *                            Hitoshi Yamamoto
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/console.h>
+#include <linux/initrd.h>
+#include <linux/major.h>
+#include <linux/root_dev.h>
+#include <linux/seq_file.h>
+#include <linux/timex.h>
+#include <linux/screen_info.h>
+#include <linux/cpu.h>
+#include <linux/nodemask.h>
+#include <linux/pfn.h>
+
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/m32r.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+
+#ifdef CONFIG_MMU
+extern void init_mmu(void);
+#endif
+
+extern char _end[];
+
+/*
+ * Machine setup..
+ */
+struct cpuinfo_m32r boot_cpu_data;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload;	/* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt;	/* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start;	/* starting block # of image */
+#endif
+
+#if defined(CONFIG_VGA_CONSOLE)
+struct screen_info screen_info = {
+	.orig_video_lines      = 25,
+	.orig_video_cols       = 80,
+	.orig_video_mode       = 0,
+	.orig_video_ega_bx     = 0,
+	.orig_video_isVGA      = 1,
+	.orig_video_points     = 8
+};
+#endif
+
+extern int root_mountflags;
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+
+static struct resource data_resource = {
+	.name   = "Kernel data",
+	.start  = 0,
+	.end    = 0,
+	.flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+	.name   = "Kernel code",
+	.start  = 0,
+	.end    = 0,
+	.flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+unsigned long memory_start;
+unsigned long memory_end;
+
+void __init setup_arch(char **);
+int get_cpuinfo(char *);
+
+static inline void parse_mem_cmdline(char **cmdline_p)
+{
+	char c = ' ';
+	char *to = command_line;
+	char *from = COMMAND_LINE;
+	int len = 0;
+	int usermem = 0;
+
+	/* Save unparsed command line copy for /proc/cmdline */
+	memcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+	memory_start = (unsigned long)CONFIG_MEMORY_START+PAGE_OFFSET;
+	memory_end = memory_start+(unsigned long)CONFIG_MEMORY_SIZE;
+
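+	/* Scan the command line for "mem=SIZE", which overrides the
+	   configured memory size. */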
+	for ( ; ; ) {
+		if (c == ' ' && !memcmp(from, "mem=", 4)) {
+			if (to != command_line)
+				to--;
+
+			{
+				unsigned long mem_size;
+
+				usermem = 1;
+				mem_size = memparse(from+4, &from);
+				memory_end = memory_start + mem_size;
+			}
+		}
+		c = *(from++);
+		if (!c)
+			break;
+
+		if (COMMAND_LINE_SIZE <= ++len)
+			break;
+
+		*(to++) = c;
+	}
+	*to = '\0';
+	*cmdline_p = command_line;
+	if (usermem)
+		printk(KERN_INFO "user-defined physical RAM map:\n");
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+static unsigned long __init setup_memory(void)
+{
+	unsigned long start_pfn, max_low_pfn, bootmap_size;
+
+	start_pfn = PFN_UP(__pa(_end));
+	max_low_pfn = PFN_DOWN(__pa(memory_end));
+
+	/*
+	 * Initialize the boot-time allocator (with low memory only):
+	 */
+	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
+		CONFIG_MEMORY_START>>PAGE_SHIFT, max_low_pfn);
+
+	/*
+	 * Register fully available low RAM pages with the bootmem allocator.
+	 */
+	{
+		unsigned long curr_pfn;
+		unsigned long last_pfn;
+		unsigned long pages;
+
+		/*
+		 * We are rounding up the start address of usable memory:
+		 */
+		curr_pfn = PFN_UP(__pa(memory_start));
+
+		/*
+		 * ... and at the end of the usable range downwards:
+		 */
+		last_pfn = PFN_DOWN(__pa(memory_end));
+
+		if (last_pfn > max_low_pfn)
+			last_pfn = max_low_pfn;
+
+		pages = last_pfn - curr_pfn;
+		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
+	}
+
+	/*
+	 * Reserve the kernel text and the bootmem bitmap. We do this in
+	 * two steps (the first step was init_bootmem()) because this
+	 * catches the (definitely buggy) case of accidentally
+	 * initializing the bootmem allocator with an invalid RAM area.
+	 */
+	reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
+		(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
+		- CONFIG_MEMORY_START,
+		BOOTMEM_DEFAULT);
+
+	/*
+	 * reserve physical page 0 - it's a special BIOS page on many boxes,
+	 * enabling clean reboots, SMP operation, laptop functions.
+	 */
+	reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
+
+	/*
+	 * reserve memory hole
+	 */
+#ifdef CONFIG_MEMHOLE
+	reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE,
+			BOOTMEM_DEFAULT);
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (LOADER_TYPE && INITRD_START) {
+		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+			reserve_bootmem(INITRD_START, INITRD_SIZE,
+					BOOTMEM_DEFAULT);
+			initrd_start = INITRD_START + PAGE_OFFSET;
+			initrd_end = initrd_start + INITRD_SIZE;
+			printk("initrd:start[%08lx],size[%08lx]\n",
+				initrd_start, INITRD_SIZE);
+		} else {
+			printk("initrd extends beyond end of memory "
+				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+				INITRD_START + INITRD_SIZE,
+				max_low_pfn << PAGE_SHIFT);
+
+			initrd_start = 0;
+		}
+	}
+#endif
+
+	return max_low_pfn;
+}
+#else	/* CONFIG_DISCONTIGMEM */
+extern unsigned long setup_memory(void);
+#endif	/* CONFIG_DISCONTIGMEM */
+
+void __init setup_arch(char **cmdline_p)
+{
+	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+
+	boot_cpu_data.cpu_clock = M32R_CPUCLK;
+	boot_cpu_data.bus_clock = M32R_BUSCLK;
+	boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;
+
+#ifdef CONFIG_BLK_DEV_RAM
+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+	if (!MOUNT_ROOT_RDONLY)
+		root_mountflags &= ~MS_RDONLY;
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+	conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;
+#endif
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+	nodes_clear(node_online_map);
+	node_set_online(0);
+	node_set_online(1);
+#endif	/* CONFIG_DISCONTIGMEM */
+
+	init_mm.start_code = (unsigned long) _text;
+	init_mm.end_code = (unsigned long) _etext;
+	init_mm.end_data = (unsigned long) _edata;
+	init_mm.brk = (unsigned long) _end;
+
+	code_resource.start = virt_to_phys(_text);
+	code_resource.end = virt_to_phys(_etext)-1;
+	data_resource.start = virt_to_phys(_etext);
+	data_resource.end = virt_to_phys(_edata)-1;
+
+	parse_mem_cmdline(cmdline_p);
+
+	setup_memory();
+
+	paging_init();
+}
+
+static struct cpu cpu_devices[NR_CPUS];
+
+static int __init topology_init(void)
+{
+	int i;
+
+	for_each_present_cpu(i)
+		register_cpu(&cpu_devices[i], i);
+
+	return 0;
+}
+
+subsys_initcall(topology_init);
+
+#ifdef CONFIG_PROC_FS
+/*
+ *	Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	struct cpuinfo_m32r *c = v;
+	unsigned long cpu = c - cpu_data;
+
+#ifdef CONFIG_SMP
+	if (!cpu_online(cpu))
+		return 0;
+#endif	/* CONFIG_SMP */
+
+	seq_printf(m, "processor\t: %ld\n", cpu);
+
+#if defined(CONFIG_CHIP_VDEC2)
+	seq_printf(m, "cpu family\t: VDEC2\n"
+		"cache size\t: Unknown\n");
+#elif defined(CONFIG_CHIP_M32700)
+	seq_printf(m, "cpu family\t: M32700\n"
+		"cache size\t: I-8KB/D-8KB\n");
+#elif defined(CONFIG_CHIP_M32102)
+	seq_printf(m, "cpu family\t: M32102\n"
+		"cache size\t: I-8KB\n");
+#elif defined(CONFIG_CHIP_OPSP)
+	seq_printf(m, "cpu family\t: OPSP\n"
+		"cache size\t: I-8KB/D-8KB\n");
+#elif defined(CONFIG_CHIP_MP)
+	seq_printf(m, "cpu family\t: M32R-MP\n"
+		"cache size\t: I-xxKB/D-xxKB\n");
+#elif  defined(CONFIG_CHIP_M32104)
+	seq_printf(m, "cpu family\t: M32104\n"
+		"cache size\t: I-8KB/D-8KB\n");
+#else
+	seq_printf(m, "cpu family\t: Unknown\n");
+#endif
+	seq_printf(m, "bogomips\t: %lu.%02lu\n",
+		c->loops_per_jiffy/(500000/HZ),
+		(c->loops_per_jiffy/(5000/HZ)) % 100);
+#if defined(CONFIG_PLAT_MAPPI)
+	seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
+#elif defined(CONFIG_PLAT_MAPPI2)
+	seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
+#elif defined(CONFIG_PLAT_MAPPI3)
+	seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n");
+#elif defined(CONFIG_PLAT_M32700UT)
+	seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
+#elif defined(CONFIG_PLAT_OPSPUT)
+	seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
+#elif defined(CONFIG_PLAT_USRV)
+	seq_printf(m, "Machine\t\t: uServer\n");
+#elif defined(CONFIG_PLAT_OAKS32R)
+	seq_printf(m, "Machine\t\t: OAKS32R\n");
+#elif  defined(CONFIG_PLAT_M32104UT)
+	seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n");
+#else
+	seq_printf(m, "Machine\t\t: Unknown\n");
+#endif
+
+#define PRINT_CLOCK(name, value)				\
+	seq_printf(m, name " clock\t: %d.%02dMHz\n",		\
+		((value) / 1000000), ((value) % 1000000)/10000)
+
+	PRINT_CLOCK("CPU", (int)c->cpu_clock);
+	PRINT_CLOCK("Bus", (int)c->bus_clock);
+
+	seq_printf(m, "\n");
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	.start = c_start,
+	.next = c_next,
+	.stop = c_stop,
+	.show = show_cpuinfo,
+};
+#endif	/* CONFIG_PROC_FS */
+
+unsigned long cpu_initialized __initdata = 0;
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process.
+ * We reload it nevertheless: this function acts as a
+ * 'CPU state barrier', and nothing should get across.
+ */
+#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2)	\
+	|| defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
+	|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
+void __init cpu_init (void)
+{
+	int cpu_id = smp_processor_id();
+
+	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+		for ( ; ; )
+			local_irq_enable();
+	}
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+	/* Attach this CPU's idle thread to init_mm */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+	if (current->mm)
+		BUG();
+
+	/* Force FPU initialization */
+	current_thread_info()->status = 0;
+	clear_used_math();
+
+#ifdef CONFIG_MMU
+	/* Set up MMU */
+	init_mmu();
+#endif
+
+	/* Set up ICUIMASK */
+	outl(0x00070000, M32R_ICU_IMASK_PORTL);		/* imask=111 */
+}
+#endif	/* defined(CONFIG_CHIP_VDEC2) ... */
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
new file mode 100644
index 000000000..1c81e24fd
--- /dev/null
+++ b/arch/m32r/kernel/signal.c
@@ -0,0 +1,335 @@
+/*
+ *  linux/arch/m32r/kernel/signal.c
+ *
+ *  Copyright (c) 2003  Hitoshi Yamamoto
+ *
+ *  Taken from i386 version.
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
+ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/tracehook.h>
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct rt_sigframe
+{
+	int sig;
+	struct siginfo __user *pinfo;
+	void __user *puc;
+	struct siginfo info;
+	struct ucontext uc;
+	/* struct _fpstate fpstate; */
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+		   int *r0_p)
+{
+	unsigned int err = 0;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+#define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
+	COPY(r4);
+	COPY(r5);
+	COPY(r6);
+	COPY(pt_regs);
+	/* COPY(r0); Skip r0 */
+	COPY(r1);
+	COPY(r2);
+	COPY(r3);
+	COPY(r7);
+	COPY(r8);
+	COPY(r9);
+	COPY(r10);
+	COPY(r11);
+	COPY(r12);
+	COPY(acc0h);
+	COPY(acc0l);
+	COPY(acc1h);		/* ISA_DSP_LEVEL2 only */
+	COPY(acc1l);		/* ISA_DSP_LEVEL2 only */
+	COPY(psw);
+	COPY(bpc);
+	COPY(bbpsw);
+	COPY(bbpc);
+	COPY(spu);
+	COPY(fp);
+	COPY(lr);
+	COPY(spi);
+#undef COPY
+
+	regs->syscall_nr = -1;	/* disable syscall checks */
+	err |= __get_user(*r0_p, &sc->sc_r0);
+
+	return err;
+}
+
+asmlinkage int
+sys_rt_sigreturn(unsigned long r0, unsigned long r1,
+		 unsigned long r2, unsigned long r3, unsigned long r4,
+		 unsigned long r5, unsigned long r6, struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->spu;
+	sigset_t set;
+	int result;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
+		goto badframe;
+
+	if (restore_altstack(&frame->uc.uc_stack))
+		goto badframe;
+
+	return result;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+	         unsigned long mask)
+{
+	int err = 0;
+
+#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
+	COPY(r4);
+	COPY(r5);
+	COPY(r6);
+	COPY(pt_regs);
+	COPY(r0);
+	COPY(r1);
+	COPY(r2);
+	COPY(r3);
+	COPY(r7);
+	COPY(r8);
+	COPY(r9);
+	COPY(r10);
+	COPY(r11);
+	COPY(r12);
+	COPY(acc0h);
+	COPY(acc0l);
+	COPY(acc1h);		/* ISA_DSP_LEVEL2 only */
+	COPY(acc1l);		/* ISA_DSP_LEVEL2 only */
+	COPY(psw);
+	COPY(bpc);
+	COPY(bbpsw);
+	COPY(bbpc);
+	COPY(spu);
+	COPY(fp);
+	COPY(lr);
+	COPY(spi);
+#undef COPY
+
+	err |= __put_user(mask, &sc->oldmask);
+
+	return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size)
+{
+	return (void __user *)((sigsp(sp, ksig) - frame_size) & -8ul);
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+			  struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	int err = 0;
+	int sig = ksig->sig;
+
+	frame = get_sigframe(ksig, regs->spu, sizeof(*frame));
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		return -EFAULT;
+
+	err |= __put_user(sig, &frame->sig);
+	if (err)
+		return -EFAULT;
+
+	err |= __put_user(&frame->info, &frame->pinfo);
+	err |= __put_user(&frame->uc, &frame->puc);
+	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+	if (err)
+		return -EFAULT;
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __save_altstack(&frame->uc.uc_stack, regs->spu);
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	if (err)
+		return -EFAULT;
+
+	/* Set up to return from userspace.  */
+	regs->lr = (unsigned long)ksig->ka.sa.sa_restorer;
+
+	/* Set up registers for signal handler */
+	regs->spu = (unsigned long)frame;
+	regs->r0 = sig;	/* Arg for signal handler */
+	regs->r1 = (unsigned long)&frame->info;
+	regs->r2 = (unsigned long)&frame->uc;
+	regs->bpc = (unsigned long)ksig->ka.sa.sa_handler;
+
+#if DEBUG_SIG
+	printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
+		current->comm, current->pid, frame, regs->pc);
+#endif
+
+	return 0;
+}
+
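+/*
+ * Rewind the user PC past the syscall instruction so it is re-executed:
+ * a TRAP insn (0x10fx) is 2 bytes, anything else is assumed 4 bytes.
+ */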
+static int prev_insn(struct pt_regs *regs)
+{
+	u16 inst;
+	if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
+		return -EFAULT;
+	if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
+		regs->bpc -= 2;
+	else
+		regs->bpc -= 4;
+	regs->syscall_nr = -1;
+	return 0;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+	int ret;
+
+	/* Are we from a system call? */
+	if (regs->syscall_nr >= 0) {
+		/* If so, check system call restarting.. */
+		switch (regs->r0) {
+			case -ERESTART_RESTARTBLOCK:
+			case -ERESTARTNOHAND:
+				regs->r0 = -EINTR;
+				break;
+
+			case -ERESTARTSYS:
+				if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+					regs->r0 = -EINTR;
+					break;
+				}
+			/* fallthrough */
+			case -ERESTARTNOINTR:
+				regs->r0 = regs->orig_r0;
+				if (prev_insn(regs) < 0)
+					return;
+		}
+	}
+
+	/* Set up the stack frame */
+	ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
+
+	signal_setup_done(ret, ksig, 0);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init with a SIGKILL, even by
+ * mistake.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+	struct ksignal ksig;
+
+	/*
+	 * We want the common case to go fast, which
+	 * is why we may in certain cases get here from
+	 * kernel mode. Just return without doing anything
+	 * if so.
+	 */
+	if (!user_mode(regs))
+		return;
+
+	if (get_signal(&ksig)) {
+		/* Re-enable any watchpoints before delivering the
+		 * signal to user space. The processor register will
+		 * have been cleared if the watchpoint triggered
+		 * inside the kernel.
+		 */
+
+		/* Whee!  Actually deliver the signal.  */
+		handle_signal(&ksig, regs);
+
+		return;
+	}
+
+	/* Did we come from a system call? */
+	if (regs->syscall_nr >= 0) {
+		/* Restart the system call - no handlers present */
+		if (regs->r0 == -ERESTARTNOHAND ||
+		    regs->r0 == -ERESTARTSYS ||
+		    regs->r0 == -ERESTARTNOINTR) {
+			regs->r0 = regs->orig_r0;
+			prev_insn(regs);
+		} else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+			regs->r0 = regs->orig_r0;
+			regs->r7 = __NR_restart_syscall;
+			prev_insn(regs);
+		}
+	}
+	restore_saved_sigmask();
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
+{
+	/* Pending single-step? */
+	if (thread_info_flags & _TIF_SINGLESTEP)
+		clear_thread_flag(TIF_SINGLESTEP);
+
+	/* deal with pending signal delivery */
+	if (thread_info_flags & _TIF_SIGPENDING)
+		do_signal(regs);
+
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+}
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
new file mode 100644
index 000000000..c18ddc74e
--- /dev/null
+++ b/arch/m32r/kernel/smp.c
@@ -0,0 +1,835 @@
+/*
+ *  linux/arch/m32r/kernel/smp.c
+ *
+ *  M32R SMP support routines.
+ *
+ *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
+ *
+ *  Taken from i386 version.
+ *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ *  This code is released under the GNU General Public License version 2 or
+ *  later.
+ */
+
+#undef DEBUG_SMP
+
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <linux/atomic.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/m32r.h>
+#include <asm/tlbflush.h>
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Data structures and variables                                             */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*
+ * For flush_cache_all()
+ */
+static DEFINE_SPINLOCK(flushcache_lock);
+static volatile unsigned long flushcache_cpumask = 0;
+
+/*
+ * For flush_tlb_others()
+ */
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static struct vm_area_struct *flush_vma;
+static volatile unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+#define FLUSH_ALL 0xffffffff
+
+DECLARE_PER_CPU(int, prof_multiplier);
+DECLARE_PER_CPU(int, prof_old_multiplier);
+DECLARE_PER_CPU(int, prof_counter);
+
+extern spinlock_t ipi_lock[];
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Function Prototypes                                                       */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+void smp_reschedule_interrupt(void);
+void smp_flush_cache_all_interrupt(void);
+
+static void flush_tlb_all_ipi(void *);
+static void flush_tlb_others(cpumask_t, struct mm_struct *,
+	struct vm_area_struct *, unsigned long);
+
+void smp_invalidate_interrupt(void);
+
+static void stop_this_cpu(void *);
+
+void smp_ipi_timer_interrupt(struct pt_regs *);
+void smp_local_timer_interrupt(void);
+
+static void send_IPI_allbutself(int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Rescheduling request Routines                                             */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name:         smp_send_reschedule
+ *
+ * Description:  This routine requests another CPU to reschedule.
+ *               1. Send 'RESCHEDULE_IPI' to the target CPU, asking it
+ *                  to execute 'smp_reschedule_interrupt()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    cpu_id - Target CPU ID
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_send_reschedule(int cpu_id)
+{
+	WARN_ON(cpu_is_offline(cpu_id));
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
+}
+
+/*==========================================================================*
+ * Name:         smp_reschedule_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ *               'RESCHEDULE_IPI'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_reschedule_interrupt(void)
+{
+	scheduler_ipi();
+}
+
+/*==========================================================================*
+ * Name:         smp_flush_cache_all
+ *
+ * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
+ *               CPUs in the system.
+ *
+ * Born on Date: 2003-05-28
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_cache_all(void)
+{
+	cpumask_t cpumask;
+	unsigned long *mask;
+
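+	/*
+	 * Mark every other online CPU pending in flushcache_cpumask, IPI
+	 * them, flush locally, then spin until each target clears its
+	 * bit in smp_flush_cache_all_interrupt().
+	 */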
+	preempt_disable();
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
+	spin_lock(&flushcache_lock);
+	mask = cpumask_bits(&cpumask);
+	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
+	_flush_cache_copyback_all();
+	while (flushcache_cpumask)
+		mb();
+	spin_unlock(&flushcache_lock);
+	preempt_enable();
+}
+
+void smp_flush_cache_all_interrupt(void)
+{
+	_flush_cache_copyback_all();
+	clear_bit(smp_processor_id(), &flushcache_cpumask);
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* TLB flush request Routines                                                */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name:         smp_flush_tlb_all
+ *
+ * Description:  This routine flushes all processes' TLBs.
+ *               1. Flush the local TLB ('__flush_tlb_all()').
+ *               2. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_all(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+	local_irq_save(flags);
+	__flush_tlb_all();
+	local_irq_restore(flags);
+	smp_call_function(flush_tlb_all_ipi, NULL, 1);
+	preempt_enable();
+}
+
+/*==========================================================================*
+ * Name:         flush_tlb_all_ipi
+ *
+ * Description:  This routine flushes the local TLB.
+ *               1. Execute '__flush_tlb_all()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *info - not used
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void flush_tlb_all_ipi(void *info)
+{
+	__flush_tlb_all();
+}
+
+/*==========================================================================*
+ * Name:         smp_flush_tlb_mm
+ *
+ * Description:  This routine flushes the specified mm context's TLB entries.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *mm - a pointer to the mm struct whose TLB is flushed
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+	int cpu_id;
+	cpumask_t cpu_mask;
+	unsigned long *mmc;
+	unsigned long flags;
+
+	preempt_disable();
+	cpu_id = smp_processor_id();
+	mmc = &mm->context[cpu_id];
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(cpu_id, &cpu_mask);
+
+	if (*mmc != NO_CONTEXT) {
+		local_irq_save(flags);
+		*mmc = NO_CONTEXT;
+		if (mm == current->mm)
+			activate_context(mm);
+		else
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
+		local_irq_restore(flags);
+	}
+	if (!cpumask_empty(&cpu_mask))
+		flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
+
+	preempt_enable();
+}
+
+/*==========================================================================*
+ * Name:         smp_flush_tlb_range
+ *
+ * Description:  This routine flushes a range of pages.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *vma - a pointer to the vma struct whose mm is flushed
+ *               start - not used
+ *               end - not used
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+	unsigned long end)
+{
+	smp_flush_tlb_mm(vma->vm_mm);
+}
+
+/*==========================================================================*
+ * Name:         smp_flush_tlb_page
+ *
+ * Description:  This routine flushes one page.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *vma - a pointer to the vma struct that includes va
+ *               va - the virtual address whose TLB entry is flushed
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int cpu_id;
+	cpumask_t cpu_mask;
+	unsigned long *mmc;
+	unsigned long flags;
+
+	preempt_disable();
+	cpu_id = smp_processor_id();
+	mmc = &mm->context[cpu_id];
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(cpu_id, &cpu_mask);
+
+#ifdef DEBUG_SMP
+	if (!mm)
+		BUG();
+#endif
+
+	if (*mmc != NO_CONTEXT) {
+		local_irq_save(flags);
+		va &= PAGE_MASK;
+		va |= (*mmc & MMU_CONTEXT_ASID_MASK);
+		__flush_tlb_page(va);
+		local_irq_restore(flags);
+	}
+	if (!cpumask_empty(&cpu_mask))
+		flush_tlb_others(cpu_mask, mm, vma, va);
+
+	preempt_enable();
+}
+
+/*==========================================================================*
+ * Name:         flush_tlb_others
+ *
+ * Description:  This routine requests other CPUs to flush their TLBs.
+ *               1. Set up the parameters.
+ *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, asking
+ *                  them to execute 'smp_invalidate_interrupt()'.
+ *               3. Wait until the other CPUs have finished.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    cpumask - bitmap of target CPUs
+ *               *mm - a pointer to the mm struct whose TLB is flushed
+ *               *vma - a pointer to the vma struct that includes va
+ *               va - the virtual address whose TLB entry is flushed
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+	struct vm_area_struct *vma, unsigned long va)
+{
+	unsigned long *mask;
+#ifdef DEBUG_SMP
+	unsigned long flags;
+	__save_flags(flags);
+	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
+		BUG();
+#endif /* DEBUG_SMP */
+
+	/*
+	 * A couple of (to be removed) sanity checks:
+	 *
+	 * - we do not send IPIs to not-yet booted CPUs.
+	 * - current CPU must not be in mask
+	 * - mask must exist :)
+	 */
+	BUG_ON(cpumask_empty(&cpumask));
+
+	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
+	BUG_ON(!mm);
+
+	/* If a CPU which we ran on has gone down, OK. */
+	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+	if (cpumask_empty(&cpumask))
+		return;
+
+	/*
+	 * I'm not happy about this global shared spinlock in the
+	 * MM hot path, but we'll see how contended it is.
+	 * Temporarily this turns IRQs off, so that lockups are
+	 * detected by the NMI watchdog.
+	 */
+	spin_lock(&tlbstate_lock);
+
+	flush_mm = mm;
+	flush_vma = vma;
+	flush_va = va;
+	mask = cpumask_bits(&cpumask);
+	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+
+	/*
+	 * We have to send the IPI only to
+	 * CPUs affected.
+	 */
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
+
+	while (!cpumask_empty(&flush_cpumask)) {
+		/* nothing. lockup detection does not belong here */
+		mb();
+	}
+
+	flush_mm = NULL;
+	flush_vma = NULL;
+	flush_va = 0;
+	spin_unlock(&tlbstate_lock);
+}
+
+/*==========================================================================*
+ * Name:         smp_invalidate_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ *               'INVALIDATE_TLB_IPI'.
+ *               1. Flush the local TLB.
+ *               2. Report that the TLB flush has finished.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_invalidate_interrupt(void)
+{
+	int cpu_id = smp_processor_id();
+	unsigned long *mmc = &flush_mm->context[cpu_id];
+
+	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
+		return;
+
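+	/*
+	 * FLUSH_ALL drops this CPU's context for the mm (forcing a new
+	 * ASID via activate_context() if it is the current mm); otherwise
+	 * invalidate just the one ASID-tagged page.
+	 */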
+	if (flush_va == FLUSH_ALL) {
+		*mmc = NO_CONTEXT;
+		if (flush_mm == current->active_mm)
+			activate_context(flush_mm);
+		else
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
+	} else {
+		unsigned long va = flush_va;
+
+		if (*mmc != NO_CONTEXT) {
+			va &= PAGE_MASK;
+			va |= (*mmc & MMU_CONTEXT_ASID_MASK);
+			__flush_tlb_page(va);
+		}
+	}
+	cpumask_clear_cpu(cpu_id, &flush_cpumask);
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Stop CPU request Routines                                                 */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name:         smp_send_stop
+ *
+ * Description:  This routine requests that all other CPUs stop.
+ *               1. Request other CPUs to execute 'stop_this_cpu()'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_send_stop(void)
+{
+	smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+/*==========================================================================*
+ * Name:         stop_this_cpu
+ *
+ * Description:  This routine halts the CPU.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void stop_this_cpu(void *dummy)
+{
+	int cpu_id = smp_processor_id();
+
+	/*
+	 * Remove this CPU:
+	 */
+	set_cpu_online(cpu_id, false);
+
+	/*
+	 * PSW IE = 1;
+	 * IMASK = 0;
+	 * goto SLEEP
+	 */
+	local_irq_disable();
+	outl(0, M32R_ICU_IMASK_PORTL);
+	inl(M32R_ICU_IMASK_PORTL);	/* dummy read */
+	local_irq_enable();
+
+	for ( ; ; );
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
+}
+
+/*==========================================================================*
+ * Name:         smp_call_function_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ *               'CALL_FUNCTION_IPI'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_call_function_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_interrupt();
+	irq_exit();
+}
+
+void smp_call_function_single_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	irq_exit();
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Timer Routines                                                            */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name:         smp_send_timer
+ *
+ * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
+ *               in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_send_timer(void)
+{
+	send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
+}
+
+/*==========================================================================*
+ * Name:         smp_ipi_timer_interrupt
+ *
+ * Description:  This routine executes on the CPU that received
+ *               'LOCAL_TIMER_IPI'.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *regs - a pointer to the saved register info
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+void smp_ipi_timer_interrupt(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+	smp_local_timer_interrupt();
+	irq_exit();
+	set_irq_regs(old_regs);
+}
+
+/*==========================================================================*
+ * Name:         smp_local_timer_interrupt
+ *
+ * Description:  Local timer interrupt handler. It does both profiling and
+ *               process statistics/rescheduling.
+ *               We do profiling in every local tick, statistics/rescheduling
+ *               happen only every 'profiling multiplier' ticks. The default
+ *               multiplier is 1 and it can be changed by writing the new
+ *               multiplier value into /proc/profile.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *regs - a pointer to the saved register info
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Original:     arch/i386/kernel/apic.c
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy  use per_cpu structure.
+ *==========================================================================*/
+void smp_local_timer_interrupt(void)
+{
+	int user = user_mode(get_irq_regs());
+	int cpu_id = smp_processor_id();
+
+	/*
+	 * The profiling function is SMP safe. (nothing can mess
+	 * around with "current", and the profiling counters are
+	 * updated with atomic operations). This is especially
+	 * useful with a profiling multiplier != 1
+	 */
+
+	profile_tick(CPU_PROFILING);
+
+	if (--per_cpu(prof_counter, cpu_id) <= 0) {
+		/*
+		 * The multiplier may have changed since the last time we got
+		 * to this point as a result of the user writing to
+		 * /proc/profile. In this case we need to adjust the APIC
+		 * timer accordingly.
+		 *
+		 * Interrupts are already masked off at this point.
+		 */
+		per_cpu(prof_counter, cpu_id)
+			= per_cpu(prof_multiplier, cpu_id);
+		if (per_cpu(prof_counter, cpu_id)
+			!= per_cpu(prof_old_multiplier, cpu_id))
+		{
+			per_cpu(prof_old_multiplier, cpu_id)
+				= per_cpu(prof_counter, cpu_id);
+		}
+
+		update_process_times(user);
+	}
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Send IPI Routines                                                         */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name:         send_IPI_allbutself
+ *
+ * Description:  This routine sends an IPI to all other CPUs in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    ipi_num - Number of IPI
+ *               try -  0 : Send the IPI unconditionally (spin until the
+ *                          previous IPI has been accepted).
+ *                     !0 : Do not send when the target CPU has not yet
+ *                          accepted the previous IPI.
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void send_IPI_allbutself(int ipi_num, int try)
+{
+	cpumask_t cpumask;
+
+	cpumask_copy(&cpumask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &cpumask);
+
+	send_IPI_mask(&cpumask, ipi_num, try);
+}
+
+/*==========================================================================*
+ * Name:         send_IPI_mask
+ *
+ * Description:  This routine sends an IPI to the specified CPUs in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    cpu_mask - Bitmap of target CPUs logical ID
+ *               ipi_num - Number of IPI
+ *               try -  0 : Send the IPI unconditionally (spin until the
+ *                          previous IPI has been accepted).
+ *                     !0 : Do not send when the target CPU has not yet
+ *                          accepted the previous IPI.
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
+{
+	cpumask_t physid_mask, tmp;
+	int cpu_id, phys_id;
+	int num_cpus = num_online_cpus();
+
+	if (num_cpus <= 1)	/* NO MP */
+		return;
+
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
+
+	cpumask_clear(&physid_mask);
+	for_each_cpu(cpu_id, cpumask) {
+		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
+			cpumask_set_cpu(phys_id, &physid_mask);
+	}
+
+	send_IPI_mask_phys(&physid_mask, ipi_num, try);
+}
+
+/*==========================================================================*
+ * Name:         send_IPI_mask_phys
+ *
+ * Description:  This routine sends an IPI to other CPUs in the system.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    cpu_mask - Bitmap of target CPUs physical ID
+ *               ipi_num - Number of IPI
+ *               try -  0 : Send the IPI unconditionally (spin until the
+ *                          previous IPI has been accepted).
+ *                     !0 : Do not send when the target CPU has not yet
+ *                          accepted the previous IPI.
+ *
+ * Returns:      IPICRi register value.
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ *
+ *==========================================================================*/
+unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
+	int try)
+{
+	spinlock_t *ipilock;
+	volatile unsigned long *ipicr_addr;
+	unsigned long ipicr_val;
+	unsigned long my_physid_mask;
+	unsigned long mask = cpumask_bits(physid_mask)[0];
+
+
+		BUG();
+	if (ipi_num >= NR_IPIS || ipi_num < 0)
+		BUG();
+
+	mask <<= IPI_SHIFT;
+	ipilock = &ipi_lock[ipi_num];
+	ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
+		+ (ipi_num << 2));
+	my_physid_mask = ~(1 << smp_processor_id());
+
+	/*
+	 * lock ipi_lock[i]
+	 * check IPICRi == 0
+	 * write IPICRi (send IPIi)
+	 * unlock ipi_lock[i]
+	 */
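+	/*
+	 * The asm below spins while a previous IPI to any target is still
+	 * pending in IPICRi; with try != 0 it gives up instead of
+	 * waiting, leaving this IPI unsent.
+	 */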
+	spin_lock(ipilock);
+	__asm__ __volatile__ (
+		";; CHECK IPICRi == 0		\n\t"
+		".fillinsn			\n"
+		"1:				\n\t"
+		"ld	%0, @%1			\n\t"
+		"and	%0, %4			\n\t"
+		"beqz	%0, 2f			\n\t"
+		"bnez	%3, 3f			\n\t"
+		"bra	1b			\n\t"
+		";; WRITE IPICRi (send IPIi)	\n\t"
+		".fillinsn			\n"
+		"2:				\n\t"
+		"st	%2, @%1			\n\t"
+		".fillinsn			\n"
+		"3:				\n\t"
+		: "=&r"(ipicr_val)
+		: "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
+		: "memory"
+	);
+	spin_unlock(ipilock);
+
+	return ipicr_val;
+}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
new file mode 100644
index 000000000..a46846754
--- /dev/null
+++ b/arch/m32r/kernel/smpboot.c
@@ -0,0 +1,626 @@
+/*
+ *  linux/arch/m32r/kernel/smpboot.c
+ *    orig : i386 2.4.10
+ *
+ *  M32R SMP booting functions
+ *
+ *  Copyright (c) 2001, 2002, 2003  Hitoshi Yamamoto
+ *
+ *  Taken from i386 version.
+ *	  (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ *	  (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *
+ *	Much of the core SMP work is based on previous work by Thomas Radke, to
+ *	whom a great many thanks are extended.
+ *
+ *	Thanks to Intel for making available several different Pentium,
+ *	Pentium Pro and Pentium-II/Xeon MP machines.
+ *	Original development of Linux SMP code supported by Caldera.
+ *
+ *	This code is released under the GNU General Public License version 2 or
+ *	later.
+ *
+ *	Fixes
+ *		Felix Koop	:	NR_CPUS used properly
+ *		Jose Renau	:	Handle single CPU case.
+ *		Alan Cox	:	By repeated request
+ *					8) - Total BogoMIP report.
+ *		Greg Wright	:	Fix for kernel stacks panic.
+ *		Erich Boleyn	:	MP v1.4 and additional changes.
+ *	Matthias Sattler	:	Changes for 2.1 kernel map.
+ *	Michel Lespinasse	:	Changes for 2.1 kernel map.
+ *	Michael Chastain	:	Change trampoline.S to gnu as.
+ *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
+ *		Ingo Molnar	:	Added APIC timers, based on code
+ *					from Jose Renau
+ *		Ingo Molnar	:	various cleanups and rewrites
+ *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
+ *		Martin J. Bligh	: 	Added support for multi-quad systems
+ */
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(x...) printk(x)
+#else
+#define Dprintk(x...)
+#endif
+
+extern cpumask_t cpu_initialized;
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Data structures and variables                                             */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/* Processor that is doing the boot up */
+static unsigned int bsp_phys_id = -1;
+
+/* Bitmask of physically existing CPUs */
+physid_mask_t phys_cpu_present_map;
+
+cpumask_t cpu_bootout_map;
+cpumask_t cpu_bootin_map;
+static cpumask_t cpu_callin_map;
+cpumask_t cpu_callout_map;
+EXPORT_SYMBOL(cpu_callout_map);
+
+/* Per CPU bogomips and other parameters */
+struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount;
+static cpumask_t smp_commenced_mask;
+
+extern struct {
+	void * spi;
+	unsigned short ss;
+} stack_start;
+
+/* which physical ID maps to which logical CPU number */
+static volatile int physid_2_cpu[NR_CPUS];
+#define physid_to_cpu(physid)	physid_2_cpu[physid]
+
+/* which logical CPU number maps to which physical ID */
+volatile int cpu_2_physid[NR_CPUS];
+
+DEFINE_PER_CPU(int, prof_multiplier) = 1;
+DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
+DEFINE_PER_CPU(int, prof_counter) = 1;
+
+spinlock_t ipi_lock[NR_IPIS];
+
+static unsigned int calibration_result;
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Function Prototypes                                                       */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+static void init_ipi_lock(void);
+static void do_boot_cpu(int);
+
+int start_secondary(void *);
+static void smp_callin(void);
+static void smp_online(void);
+
+static void show_mp_info(int);
+static void smp_store_cpu_info(int);
+static void show_cpu_info(int);
+int setup_profiling_timer(unsigned int);
+static void init_cpu_to_physid(void);
+static void map_cpu_to_physid(int, int);
+static void unmap_cpu_to_physid(int, int);
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Boot up APs Routines : BSP                                                */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+void smp_prepare_boot_cpu(void)
+{
+	bsp_phys_id = hard_smp_processor_id();
+	physid_set(bsp_phys_id, phys_cpu_present_map);
+	set_cpu_online(0, true);	/* BSP's cpu_id == 0 */
+	cpumask_set_cpu(0, &cpu_callout_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
+
+	/*
+	 * Initialize the logical to physical CPU number mapping
+	 */
+	init_cpu_to_physid();
+	map_cpu_to_physid(0, bsp_phys_id);
+	current_thread_info()->cpu = 0;
+}
+
+/*==========================================================================*
+ * Name:         smp_prepare_cpus (old smp_boot_cpus)
+ *
+ * Description:  This routine boots up the APs.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy  modify for linux-2.5.69
+ *
+ *==========================================================================*/
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int phys_id;
+	unsigned long nr_cpu;
+
+	nr_cpu = inl(M32R_FPGA_NUM_OF_CPUS_PORTL);
+	if (nr_cpu > NR_CPUS) {
+		printk(KERN_INFO "NUM_OF_CPUS reg. value [%lu] > NR_CPUS [%d]\n",
+			nr_cpu, NR_CPUS);
+		goto smp_done;
+	}
+	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
+		physid_set(phys_id, phys_cpu_present_map);
+#ifndef CONFIG_HOTPLUG_CPU
+	init_cpu_present(cpu_possible_mask);
+#endif
+
+	show_mp_info(nr_cpu);
+
+	init_ipi_lock();
+
+	/*
+	 * Setup boot CPU information
+	 */
+	smp_store_cpu_info(0); /* Final full version of the data */
+
+	/*
+	 * If SMP should be disabled, then really disable it!
+	 */
+	if (!max_cpus) {
+		printk(KERN_INFO "SMP mode deactivated by command line.\n");
+		goto smp_done;
+	}
+
+	/*
+	 * Now scan the CPU present map and fire up the other CPUs.
+	 */
+	Dprintk("CPU present map : %lx\n", physids_coerce(phys_cpu_present_map));
+
+	for (phys_id = 0 ; phys_id < NR_CPUS ; phys_id++) {
+		/*
+		 * Don't even attempt to start the boot CPU!
+		 */
+		if (phys_id == bsp_phys_id)
+			continue;
+
+		if (!physid_isset(phys_id, phys_cpu_present_map))
+			continue;
+
+		if (max_cpus <= cpucount + 1)
+			continue;
+
+		do_boot_cpu(phys_id);
+
+		/*
+		 * Make sure we unmap all failed CPUs
+		 */
+		if (physid_to_cpu(phys_id) == -1) {
+			physid_clear(phys_id, phys_cpu_present_map);
+			printk("phys CPU#%d not responding - "
+				"cannot use it.\n", phys_id);
+		}
+	}
+
+smp_done:
+	Dprintk("Boot done.\n");
+}
+
+/*
+ * init_ipi_lock : Initialize IPI locks.
+ */
+static void __init init_ipi_lock(void)
+{
+	int ipi;
+
+	for (ipi = 0 ; ipi < NR_IPIS ; ipi++)
+		spin_lock_init(&ipi_lock[ipi]);
+}
+
+/*==========================================================================*
+ * Name:         do_boot_cpu
+ *
+ * Description:  This routine boots up one AP.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    phys_id - Target CPU physical ID
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy  modify for linux-2.5.69
+ *
+ *==========================================================================*/
+static void __init do_boot_cpu(int phys_id)
+{
+	struct task_struct *idle;
+	unsigned long send_status, boot_status;
+	int timeout, cpu_id;
+
+	cpu_id = ++cpucount;
+
+	/*
+	 * We can't use kernel_thread() here since we must avoid
+	 * rescheduling the child.
+	 */
+	idle = fork_idle(cpu_id);
+	if (IS_ERR(idle))
+		panic("failed fork for CPU#%d.", cpu_id);
+
+	idle->thread.lr = (unsigned long)start_secondary;
+
+	map_cpu_to_physid(cpu_id, phys_id);
+
+	/* So we see what's up */
+	printk("Booting processor %d/%d\n", phys_id, cpu_id);
+	stack_start.spi = (void *)idle->thread.sp;
+	task_thread_info(idle)->cpu = cpu_id;
+
+	/*
+	 * Send Startup IPI
+	 *   1.IPI received by CPU#(phys_id).
+	 *   2.CPU#(phys_id) enter startup_AP (arch/m32r/kernel/head.S)
+	 *   3.CPU#(phys_id) enter start_secondary()
+	 */
+	send_status = 0;
+	boot_status = 0;
+
+	cpumask_set_cpu(phys_id, &cpu_bootout_map);
+
+	/* Send Startup IPI */
+	send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
+
+	Dprintk("Waiting for send to finish...\n");
+	timeout = 0;
+
+	/* Wait 100[ms] */
+	do {
+		Dprintk("+");
+		udelay(1000);
+		send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
+	} while (send_status && (timeout++ < 100));
+
+	Dprintk("After Startup.\n");
+
+	if (!send_status) {
+		/*
+		 * allow APs to start initializing.
+		 */
+		Dprintk("Before Callout %d.\n", cpu_id);
+		cpumask_set_cpu(cpu_id, &cpu_callout_map);
+		Dprintk("After Callout %d.\n", cpu_id);
+
+		/*
+		 * Wait 5s total for a response
+		 */
+		for (timeout = 0; timeout < 5000; timeout++) {
+			if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
+				break;	/* It has booted */
+			udelay(1000);
+		}
+
+		if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
+			/* number CPUs logically, starting from 1 (BSP is 0) */
+			Dprintk("OK.\n");
+		} else {
+			boot_status = 1;
+			printk("Not responding.\n");
+		}
+	} else
+		printk("IPI never delivered???\n");
+
+	if (send_status || boot_status) {
+		unmap_cpu_to_physid(cpu_id, phys_id);
+		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+		cpumask_clear_cpu(cpu_id, &cpu_initialized);
+		cpucount--;
+	}
+}
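+
+/*
+ * Boot handshake summary (BSP side above; the AP side is smp_callin(),
+ * and cpu_bootin_map is set early in the AP bring-up path):
+ *
+ *	BSP: sends CPU_BOOT_IPI		->  AP enters start_secondary()
+ *	AP : sets cpu_bootin_map	->  BSP's send_status poll succeeds
+ *	BSP: sets cpu_callout_map	->  AP leaves its callout wait loop
+ *	AP : sets cpu_callin_map	->  BSP declares the CPU booted
+ *
+ * Each step is a plain cpumask store polled by the other side in a
+ * bounded delay loop, so a wedged AP only costs the timeout.
+ */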
+
+int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
+{
+	int timeout;
+
+	cpumask_set_cpu(cpu_id, &smp_commenced_mask);
+
+	/*
+	 * Wait 5s total for a response
+	 */
+	for (timeout = 0; timeout < 5000; timeout++) {
+		if (cpu_online(cpu_id))
+			break;
+		udelay(1000);
+	}
+	if (!cpu_online(cpu_id))
+		BUG();
+
+	return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	int cpu_id, timeout;
+	unsigned long bogosum = 0;
+
+	for (timeout = 0; timeout < 5000; timeout++) {
+		if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
+			break;
+		udelay(1000);
+	}
+	if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
+		BUG();
+
+	for_each_online_cpu(cpu_id)
+		show_cpu_info(cpu_id);
+
+	/*
+	 * Allow the user to impress friends.
+	 */
+	Dprintk("Before bogomips.\n");
+	if (cpucount) {
+		for_each_cpu(cpu_id, cpu_online_mask)
+			bogosum += cpu_data[cpu_id].loops_per_jiffy;
+
+		printk(KERN_INFO "Total of %d processors activated "
+			"(%lu.%02lu BogoMIPS).\n", cpucount + 1,
+			bogosum / (500000 / HZ),
+			(bogosum / (5000 / HZ)) % 100);
+		Dprintk("Before bogocount - setting activated=1.\n");
+	}
+}
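+
+/*
+ * The BogoMIPS figure printed above is bogosum * HZ / 500000, split
+ * into an integer part and two fractional digits.  Assumed example:
+ * with HZ=100 and two CPUs at loops_per_jiffy = 250000 each, bogosum
+ * is 500000 and the report reads "Total of 2 processors activated
+ * (100.00 BogoMIPS)."
+ */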
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Activate a secondary processor Routines                                   */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+
+/*==========================================================================*
+ * Name:         start_secondary
+ *
+ * Description:  This routine activates a secondary processor.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    *unused - currently unused.
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy  modify for linux-2.5.69
+ *
+ *==========================================================================*/
+int __init start_secondary(void *unused)
+{
+	cpu_init();
+	preempt_disable();
+	smp_callin();
+	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
+		cpu_relax();
+
+	smp_online();
+
+	/*
+	 * low-memory mappings have been cleared, flush them from
+	 * the local TLBs too.
+	 */
+	local_flush_tlb_all();
+
+	cpu_startup_entry(CPUHP_ONLINE);
+	return 0;
+}
+
+/*==========================================================================*
+ * Name:         smp_callin
+ *
+ * Description:  This routine performs the call-in handshake on a secondary processor.
+ *
+ * Born on Date: 2002.02.05
+ *
+ * Arguments:    NONE
+ *
+ * Returns:      void (cannot fail)
+ *
+ * Modification log:
+ * Date       Who Description
+ * ---------- --- --------------------------------------------------------
+ * 2003-06-24 hy  modify for linux-2.5.69
+ *
+ *==========================================================================*/
+static void __init smp_callin(void)
+{
+	int phys_id = hard_smp_processor_id();
+	int cpu_id = smp_processor_id();
+	unsigned long timeout;
+
+	if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
+		printk("huh, phys CPU#%d, CPU#%d already present??\n",
+			phys_id, cpu_id);
+		BUG();
+	}
+	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpu_id, phys_id);
+
+	/* Waiting 2s total for startup (udelay is not yet working) */
+	timeout = jiffies + (2 * HZ);
+	while (time_before(jiffies, timeout)) {
+		/* Has the boot CPU finished its STARTUP sequence? */
+		if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
+			break;
+		cpu_relax();
+	}
+
+	if (!time_before(jiffies, timeout)) {
+		printk("BUG: CPU#%d started up but did not get a callout!\n",
+			cpu_id);
+		BUG();
+	}
+
+	/* Allow the master to continue. */
+	cpumask_set_cpu(cpu_id, &cpu_callin_map);
+}
+
+static void __init smp_online(void)
+{
+	int cpu_id = smp_processor_id();
+
+	notify_cpu_starting(cpu_id);
+
+	local_irq_enable();
+
+	/* Get our bogomips. */
+	calibrate_delay();
+
+	/* Save our processor parameters */
+	smp_store_cpu_info(cpu_id);
+
+	set_cpu_online(cpu_id, true);
+}
+
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+/* Boot up CPUs common Routines                                              */
+/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
+static void __init show_mp_info(int nr_cpu)
+{
+	int i;
+	char cpu_model0[17], cpu_model1[17], cpu_ver[9];
+
+	strncpy(cpu_model0, (char *)M32R_FPGA_CPU_NAME_ADDR, 16);
+	strncpy(cpu_model1, (char *)M32R_FPGA_MODEL_ID_ADDR, 16);
+	strncpy(cpu_ver, (char *)M32R_FPGA_VERSION_ADDR, 8);
+
+	cpu_model0[16] = '\0';
+	for (i = 15 ; i >= 0 ; i--) {
+		if (cpu_model0[i] != ' ')
+			break;
+		cpu_model0[i] = '\0';
+	}
+	cpu_model1[16] = '\0';
+	for (i = 15 ; i >= 0 ; i--) {
+		if (cpu_model1[i] != ' ')
+			break;
+		cpu_model1[i] = '\0';
+	}
+	cpu_ver[8] = '\0';
+	for (i = 7 ; i >= 0 ; i--) {
+		if (cpu_ver[i] != ' ')
+			break;
+		cpu_ver[i] = '\0';
+	}
+
+	printk(KERN_INFO "M32R-mp information\n");
+	printk(KERN_INFO "  On-chip CPUs : %d\n", nr_cpu);
+	printk(KERN_INFO "  CPU model : %s/%s(%s)\n", cpu_model0,
+		cpu_model1, cpu_ver);
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+static void __init smp_store_cpu_info(int cpu_id)
+{
+	struct cpuinfo_m32r *ci = cpu_data + cpu_id;
+
+	*ci = boot_cpu_data;
+	ci->loops_per_jiffy = loops_per_jiffy;
+}
+
+static void __init show_cpu_info(int cpu_id)
+{
+	struct cpuinfo_m32r *ci = &cpu_data[cpu_id];
+
+	printk("CPU#%d : ", cpu_id);
+
+#define PRINT_CLOCK(name, value) \
+	printk(name " clock %d.%02dMHz", \
+		((value) / 1000000), ((value) % 1000000) / 10000)
+
+	PRINT_CLOCK("CPU", (int)ci->cpu_clock);
+	PRINT_CLOCK(", Bus", (int)ci->bus_clock);
+	printk(", loops_per_jiffy[%ld]\n", ci->loops_per_jiffy);
+}
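+
+/*
+ * PRINT_CLOCK renders a Hz value as MHz with two decimal places: an
+ * assumed cpu_clock of 66666666 prints as "CPU clock 66.66MHz",
+ * since 66666666/1000000 = 66 and (66666666%1000000)/10000 = 66.
+ */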
+
+/*
+ * the frequency of the profiling timer can be changed
+ * by writing a multiplier value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	int i;
+
+	/*
+	 * Sanity check. [at least 500 APIC cycles should be
+	 * between APIC interrupts as a rule of thumb, to avoid
+	 * irqs flooding us]
+	 */
+	if (!multiplier || (calibration_result / multiplier < 500))
+		return -EINVAL;
+
+	/*
+	 * Set the new multiplier for each CPU. CPUs don't start using the
+	 * new values until the next timer interrupt in which they do process
+	 * accounting. At that time they also adjust their APIC timers
+	 * accordingly.
+	 */
+	for_each_possible_cpu(i)
+		per_cpu(prof_multiplier, i) = multiplier;
+
+	return 0;
+}
+
+/* Initialize all maps between logical cpu numbers and physical IDs */
+static void __init init_cpu_to_physid(void)
+{
+	int  i;
+
+	for (i = 0 ; i < NR_CPUS ; i++) {
+		cpu_2_physid[i] = -1;
+		physid_2_cpu[i] = -1;
+	}
+}
+
+/*
+ * set up a mapping between a logical cpu number and a physical ID
+ */
+static void __init map_cpu_to_physid(int cpu_id, int phys_id)
+{
+	physid_2_cpu[phys_id] = cpu_id;
+	cpu_2_physid[cpu_id] = phys_id;
+}
+
+/*
+ * undo the mapping between a logical cpu number and a physical ID
+ */
+static void __init unmap_cpu_to_physid(int cpu_id, int phys_id)
+{
+	physid_2_cpu[phys_id] = -1;
+	cpu_2_physid[cpu_id] = -1;
+}
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
new file mode 100644
index 000000000..c3fdd632f
--- /dev/null
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -0,0 +1,90 @@
+/*
+ * linux/arch/m32r/kernel/sys_m32r.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/M32R platform.
+ *
+ * Taken from i386 version.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/ipc.h>
+
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>
+#include <asm/unistd.h>
+
+/*
+ * sys_tas() - test-and-set
+ */
+asmlinkage int sys_tas(int __user *addr)
+{
+	int oldval;
+
+	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
+		return -EFAULT;
+
+	/* atomic operation:
+	 *   oldval = *addr; *addr = 1;
+	 */
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r4", "%1")
+		"	.fillinsn\n"
+		"1:\n"
+		"	lock	%0, @%1	    ->	unlock	%2, @%1\n"
+		"2:\n"
+		/* NOTE:
+		 *   The m32r processor can accept interrupts only
+		 *   at the 32-bit instruction boundary.
+		 *   So, in the above code, the "unlock" instruction
+		 *   can be executed continuously after the "lock"
+		 *   instruction execution without any interruptions.
+		 */
+		".section .fixup,\"ax\"\n"
+		"	.balign 4\n"
+		"3:	ldi	%0, #%3\n"
+		"	seth	r14, #high(2b)\n"
+		"	or3	r14, r14, #low(2b)\n"
+		"	jmp	r14\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"	.balign 4\n"
+		"	.long 1b,3b\n"
+		".previous\n"
+		: "=&r" (oldval)
+		: "r" (addr), "r" (1), "i"(-EFAULT)
+		: "r14", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		  , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+	);
+
+	return oldval;
+}
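+
+/*
+ * Roughly equivalent C for the asm above.  The "lock -> unlock"
+ * instruction pair leaves no interrupt window in between, which is
+ * what makes the load and the store one atomic step:
+ *
+ *	oldval = *addr;		(lock:   load the old value)
+ *	*addr = 1;		(unlock: store the new value)
+ *	return oldval;		(-EFAULT via the fixup on a bad address)
+ */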
+
+asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
+{
+	/* This should flush more selectively ... */
+	_flush_cache_all();
+	return 0;
+}
+
+asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
+{
+	/* Not implemented yet. */
+	return -ENOSYS;
+}
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
new file mode 100644
index 000000000..f365c1979
--- /dev/null
+++ b/arch/m32r/kernel/syscall_table.S
@@ -0,0 +1,327 @@
+ENTRY(sys_call_table)
+	.long sys_restart_syscall	/* 0  -  old "setup()" system call */
+	.long sys_exit
+	.long sys_fork
+	.long sys_read
+	.long sys_write
+	.long sys_open			/* 5 */
+	.long sys_close
+	.long sys_waitpid
+	.long sys_creat
+	.long sys_link
+	.long sys_unlink		/* 10 */
+	.long sys_execve
+	.long sys_chdir
+	.long sys_time
+	.long sys_mknod
+	.long sys_chmod			/* 15 */
+	.long sys_ni_syscall		/* lchown16 syscall holder */
+	.long sys_ni_syscall		/* old break syscall holder */
+	.long sys_ni_syscall		/* old stat syscall holder */
+	.long sys_lseek
+	.long sys_getpid		/* 20 */
+	.long sys_mount
+	.long sys_oldumount
+	.long sys_ni_syscall		/* setuid16 syscall holder */
+	.long sys_ni_syscall		/* getuid16 syscall holder */
+	.long sys_stime			/* 25 */
+	.long sys_ptrace
+	.long sys_alarm
+	.long sys_ni_syscall		/* old fstat syscall holder */
+	.long sys_pause
+	.long sys_utime			/* 30 */
+	.long sys_ni_syscall		/* old stty syscall holder */
+	.long sys_cachectl		/* for M32R */ /* old gtty syscall holder */
+	.long sys_access
+	.long sys_ni_syscall		/* nice	syscall holder */
+	.long sys_ni_syscall		/* 35  -  old ftime syscall holder */
+	.long sys_sync
+	.long sys_kill
+	.long sys_rename
+	.long sys_mkdir
+	.long sys_rmdir			/* 40 */
+	.long sys_dup
+	.long sys_pipe
+	.long sys_times
+	.long sys_ni_syscall		/* old prof syscall holder */
+	.long sys_brk			/* 45 */
+	.long sys_ni_syscall		/* setgid16 syscall holder */
+	.long sys_getgid		/* will be unused */
+	.long sys_ni_syscall		/* signal syscall holder */
+	.long sys_ni_syscall		/* geteuid16  syscall holder */
+	.long sys_ni_syscall		/* 50 - getegid16 syscall holder */
+	.long sys_acct
+	.long sys_umount		/* recycled never used phys() */
+	.long sys_ni_syscall		/* old lock syscall holder */
+	.long sys_ioctl
+	.long sys_fcntl			/* 55 - will be unused */
+	.long sys_ni_syscall		/* mpx syscall holder */
+	.long sys_setpgid
+	.long sys_ni_syscall		/* old ulimit syscall holder */
+	.long sys_ni_syscall		/* sys_olduname */
+	.long sys_umask			/* 60 */
+	.long sys_chroot
+	.long sys_ustat
+	.long sys_dup2
+	.long sys_getppid
+	.long sys_getpgrp		/* 65 */
+	.long sys_setsid
+	.long sys_ni_syscall		/* sigaction syscall holder */
+	.long sys_ni_syscall		/* sgetmask syscall holder */
+	.long sys_ni_syscall		/* ssetmask syscall holder */
+	.long sys_ni_syscall		/* 70 - setreuid16 syscall holder */
+	.long sys_ni_syscall		/* setregid16 syscall holder */
+	.long sys_ni_syscall		/* sigsuspend syscall holder */
+	.long sys_ni_syscall		/* sigpending syscall holder */
+	.long sys_sethostname
+	.long sys_setrlimit		/* 75 */
+	.long sys_getrlimit		/* will be unused */
+	.long sys_getrusage
+	.long sys_gettimeofday
+	.long sys_settimeofday
+	.long sys_ni_syscall		/* 80 - getgroups16 syscall holder */
+	.long sys_ni_syscall		/* setgroups16 syscall holder */
+	.long sys_ni_syscall		/* sys_oldselect */
+	.long sys_symlink
+	.long sys_ni_syscall		/* old lstat syscall holder */
+	.long sys_readlink		/* 85 */
+	.long sys_uselib
+	.long sys_swapon
+	.long sys_reboot
+	.long sys_ni_syscall		/* readdir syscall holder */
+	.long sys_ni_syscall		/* 90 - old_mmap syscall holder */
+	.long sys_munmap
+	.long sys_truncate
+	.long sys_ftruncate
+	.long sys_fchmod
+	.long sys_ni_syscall		/* 95 - fchown16 syscall holder */
+	.long sys_getpriority
+	.long sys_setpriority
+	.long sys_ni_syscall		/* old profil syscall holder */
+	.long sys_statfs
+	.long sys_fstatfs		/* 100 */
+	.long sys_ni_syscall		/* ioperm syscall holder */
+	.long sys_socketcall
+	.long sys_syslog
+	.long sys_setitimer
+	.long sys_getitimer		/* 105 */
+	.long sys_newstat
+	.long sys_newlstat
+	.long sys_newfstat
+	.long sys_ni_syscall		/* old uname syscall holder */
+	.long sys_ni_syscall		/* 110  -  iopl syscall holder */
+	.long sys_vhangup
+	.long sys_ni_syscall		/* idle syscall holder */
+	.long sys_ni_syscall		/* vm86old syscall holder */
+	.long sys_wait4
+	.long sys_swapoff		/* 115 */
+	.long sys_sysinfo
+	.long sys_ipc
+	.long sys_fsync
+	.long sys_ni_syscall		/* sigreturn syscall holder */
+	.long sys_clone			/* 120 */
+	.long sys_setdomainname
+	.long sys_newuname
+	.long sys_ni_syscall		/* modify_ldt syscall holder */
+	.long sys_adjtimex
+	.long sys_mprotect		/* 125 */
+	.long sys_ni_syscall		/* sigprocmask syscall holder */
+	.long sys_ni_syscall		/* create_module syscall holder */
+	.long sys_init_module
+	.long sys_delete_module
+	.long sys_ni_syscall		/* 130 - get_kernel_syms */
+	.long sys_quotactl
+	.long sys_getpgid
+	.long sys_fchdir
+	.long sys_bdflush
+	.long sys_sysfs			/* 135 */
+	.long sys_personality
+	.long sys_ni_syscall		/* afs_syscall syscall holder */
+	.long sys_ni_syscall		/* setfsuid16 syscall holder */
+	.long sys_ni_syscall		/* setfsgid16 syscall holder */
+	.long sys_llseek		/* 140 */
+	.long sys_getdents
+	.long sys_select
+	.long sys_flock
+	.long sys_msync
+	.long sys_readv			/* 145 */
+	.long sys_writev
+	.long sys_getsid
+	.long sys_fdatasync
+	.long sys_sysctl
+	.long sys_mlock			/* 150 */
+	.long sys_munlock
+	.long sys_mlockall
+	.long sys_munlockall
+	.long sys_sched_setparam
+	.long sys_sched_getparam	/* 155 */
+	.long sys_sched_setscheduler
+	.long sys_sched_getscheduler
+	.long sys_sched_yield
+	.long sys_sched_get_priority_max
+	.long sys_sched_get_priority_min	/* 160 */
+	.long sys_sched_rr_get_interval
+	.long sys_nanosleep
+	.long sys_mremap
+	.long sys_ni_syscall		/* setresuid16 syscall holder */
+	.long sys_ni_syscall		/* 165 - getresuid16 syscall holder */
+	.long sys_tas			/* vm86 syscall holder */
+	.long sys_ni_syscall		/* query_module syscall holder */
+	.long sys_poll
+	.long sys_ni_syscall		/* was nfsservctl */
+	.long sys_setresgid		/* 170 */
+	.long sys_getresgid
+	.long sys_prctl
+	.long sys_rt_sigreturn
+	.long sys_rt_sigaction
+	.long sys_rt_sigprocmask	/* 175 */
+	.long sys_rt_sigpending
+	.long sys_rt_sigtimedwait
+	.long sys_rt_sigqueueinfo
+	.long sys_rt_sigsuspend
+	.long sys_pread64		/* 180 */
+	.long sys_pwrite64
+	.long sys_ni_syscall		/* chown16 syscall holder */
+	.long sys_getcwd
+	.long sys_capget
+	.long sys_capset		/* 185 */
+	.long sys_sigaltstack
+	.long sys_sendfile
+	.long sys_ni_syscall		/* streams1 */
+	.long sys_ni_syscall		/* streams2 */
+	.long sys_vfork			/* 190 */
+	.long sys_getrlimit
+	.long sys_mmap_pgoff
+	.long sys_truncate64
+	.long sys_ftruncate64
+	.long sys_stat64		/* 195 */
+	.long sys_lstat64
+	.long sys_fstat64
+	.long sys_lchown
+	.long sys_getuid
+	.long sys_getgid		/* 200 */
+	.long sys_geteuid
+	.long sys_getegid
+	.long sys_setreuid
+	.long sys_setregid
+	.long sys_getgroups		/* 205 */
+	.long sys_setgroups
+	.long sys_fchown
+	.long sys_setresuid
+	.long sys_getresuid
+	.long sys_setresgid		/* 210 */
+	.long sys_getresgid
+	.long sys_chown
+	.long sys_setuid
+	.long sys_setgid
+	.long sys_setfsuid		/* 215 */
+	.long sys_setfsgid
+	.long sys_pivot_root
+	.long sys_mincore
+	.long sys_madvise
+	.long sys_getdents64		/* 220 */
+	.long sys_fcntl64
+	.long sys_ni_syscall		/* reserved for TUX */
+	.long sys_ni_syscall		/* Reserved for Security */
+	.long sys_gettid
+	.long sys_readahead		/* 225 */
+	.long sys_setxattr
+	.long sys_lsetxattr
+	.long sys_fsetxattr
+	.long sys_getxattr
+	.long sys_lgetxattr		/* 230 */
+	.long sys_fgetxattr
+	.long sys_listxattr
+	.long sys_llistxattr
+	.long sys_flistxattr
+	.long sys_removexattr		/* 235 */
+	.long sys_lremovexattr
+	.long sys_fremovexattr
+	.long sys_tkill
+	.long sys_sendfile64
+	.long sys_futex			/* 240 */
+	.long sys_sched_setaffinity
+	.long sys_sched_getaffinity
+	.long sys_ni_syscall		/* reserved for "set_thread_area" system call */
+	.long sys_ni_syscall		/* reserved for "get_thread_area" system call */
+	.long sys_io_setup		/* 245 */
+	.long sys_io_destroy
+	.long sys_io_getevents
+	.long sys_io_submit
+	.long sys_io_cancel
+	.long sys_fadvise64		/* 250 */
+	.long sys_ni_syscall
+	.long sys_exit_group
+	.long sys_lookup_dcookie
+	.long sys_epoll_create
+	.long sys_epoll_ctl		/* 255 */
+	.long sys_epoll_wait
+	.long sys_remap_file_pages
+	.long sys_set_tid_address
+	.long sys_timer_create
+	.long sys_timer_settime		/* 260 */
+	.long sys_timer_gettime
+	.long sys_timer_getoverrun
+	.long sys_timer_delete
+	.long sys_clock_settime
+	.long sys_clock_gettime		/* 265 */
+	.long sys_clock_getres
+	.long sys_clock_nanosleep
+	.long sys_statfs64
+	.long sys_fstatfs64
+	.long sys_tgkill		/* 270 */
+	.long sys_utimes
+	.long sys_fadvise64_64
+	.long sys_ni_syscall		/* Reserved for sys_vserver */
+	.long sys_ni_syscall		/* Reserved for sys_mbind */
+	.long sys_ni_syscall		/* Reserved for sys_get_mempolicy */
+	.long sys_ni_syscall		/* Reserved for sys_set_mempolicy */
+	.long sys_mq_open
+	.long sys_mq_unlink
+	.long sys_mq_timedsend
+	.long sys_mq_timedreceive	/* 280 */
+	.long sys_mq_notify
+	.long sys_mq_getsetattr
+	.long sys_ni_syscall		/* reserved for kexec */
+	.long sys_waitid
+	.long sys_ni_syscall		/* 285 */ /* available */
+	.long sys_add_key
+	.long sys_request_key
+	.long sys_keyctl
+	.long sys_ioprio_set
+	.long sys_ioprio_get		/* 290 */
+	.long sys_inotify_init
+	.long sys_inotify_add_watch
+	.long sys_inotify_rm_watch
+	.long sys_migrate_pages
+	.long sys_openat		/* 295 */
+	.long sys_mkdirat
+	.long sys_mknodat
+	.long sys_fchownat
+	.long sys_futimesat
+	.long sys_fstatat64		/* 300 */
+	.long sys_unlinkat
+	.long sys_renameat
+	.long sys_linkat
+	.long sys_symlinkat
+	.long sys_readlinkat		/* 305 */
+	.long sys_fchmodat
+	.long sys_faccessat
+	.long sys_pselect6
+	.long sys_ppoll
+	.long sys_unshare		/* 310 */
+	.long sys_set_robust_list
+	.long sys_get_robust_list
+	.long sys_splice
+	.long sys_sync_file_range
+	.long sys_tee			/* 315 */
+	.long sys_vmsplice
+	.long sys_move_pages
+	.long sys_getcpu
+	.long sys_epoll_pwait
+	.long sys_utimensat		/* 320 */
+	.long sys_signalfd
+	.long sys_ni_syscall
+	.long sys_eventfd
+	.long sys_fallocate
+	.long sys_setns			/* 325 */
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
new file mode 100644
index 000000000..093f2761a
--- /dev/null
+++ b/arch/m32r/kernel/time.c
@@ -0,0 +1,198 @@
+/*
+ *  linux/arch/m32r/kernel/time.c
+ *
+ *  Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
+ *                            Hitoshi Yamamoto
+ *  Taken from i386 version.
+ *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *    Copyright (C) 1996, 1997, 1998  Ralf Baechle
+ *
+ *  This file contains the time handling details for PC-style clocks as
+ *  found in some MIPS systems.
+ *
+ *  Some code taken from sh version.
+ *    Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
+ *    Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
+ */
+
+#undef  DEBUG_TIMER
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+
+#include <asm/io.h>
+#include <asm/m32r.h>
+
+#include <asm/hw_irq.h>
+
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif  /* pc-style 'CMOS' RTC support */
+
+#ifdef CONFIG_SMP
+extern void smp_local_timer_interrupt(void);
+#endif
+
+#define TICK_SIZE	(tick_nsec / 1000)
+
+/*
+ * Change this if you have some constant time drift
+ */
+
+/* This is for machines which generate the exact clock. */
+#define USECS_PER_JIFFY (1000000/HZ)
+
+static unsigned long latch;
+
+static u32 m32r_gettimeoffset(void)
+{
+	unsigned long  elapsed_time = 0;  /* [us] */
+
+#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
+	|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
+	|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
+#ifndef CONFIG_SMP
+
+	unsigned long count;
+
+	/* timer count may underflow right here */
+	count = inl(M32R_MFT2CUT_PORTL);
+
+	if (inl(M32R_ICU_CR18_PORTL) & 0x00000100)	/* underflow check */
+		count = 0;
+
+	count = (latch - count) * TICK_SIZE;
+	elapsed_time = DIV_ROUND_CLOSEST(count, latch);
+	/* NOTE: LATCH is equal to the "interval" value (= reload count). */
+
+#else /* CONFIG_SMP */
+	unsigned long count;
+	static unsigned long p_jiffies = -1;
+	static unsigned long p_count = 0;
+
+	/* timer count may underflow right here */
+	count = inl(M32R_MFT2CUT_PORTL);
+
+	if (jiffies == p_jiffies && count > p_count)
+		count = 0;
+
+	p_jiffies = jiffies;
+	p_count = count;
+
+	count = (latch - count) * TICK_SIZE;
+	elapsed_time = DIV_ROUND_CLOSEST(count, latch);
+	/* NOTE: LATCH is equal to the "interval" value (= reload count). */
+#endif /* CONFIG_SMP */
+#elif defined(CONFIG_CHIP_M32310)
+#warning do_gettimeoffset not implemented
+#else
+#error no chip configuration
+#endif
+
+	return elapsed_time * 1000;
+}
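+
+/*
+ * Worked example with assumed numbers: at HZ=100, TICK_SIZE is
+ * tick_nsec/1000 = 10000 usec per tick.  If latch were 10000 and the
+ * down-counter read 7500, then (10000 - 7500) * 10000 / 10000 =
+ * 2500 usec have passed since the last tick, which the function
+ * returns as 2500000 nsec.
+ */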
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "xtime_update()" routine every clocktick
+ */
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+#ifndef CONFIG_SMP
+	profile_tick(CPU_PROFILING);
+#endif
+	xtime_update(1);
+
+#ifndef CONFIG_SMP
+	update_process_times(user_mode(get_irq_regs()));
+#endif
+	/* As we return to user mode, fire off the other CPU schedulers...
+	   this is basically because we don't yet share IRQs around.
+	   This message is rigged to be safe on the 386 - basically it's
+	   a hack, so don't look too closely for now... */
+
+#ifdef CONFIG_SMP
+	smp_local_timer_interrupt();
+	smp_send_timer();
+#endif
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction irq0 = {
+	.handler = timer_interrupt,
+	.name = "MFT2",
+};
+
+void read_persistent_clock(struct timespec *ts)
+{
+	unsigned int epoch, year, mon, day, hour, min, sec;
+
+	sec = min = hour = day = mon = year = 0;
+	epoch = 0;
+
+	year = 23;
+	mon = 4;
+	day = 17;
+
+	/* Attempt to guess the epoch.  This is the same heuristic as in rtc.c
+	   so no stupid things will happen to timekeeping.  Who knows, maybe
+	   Ultrix also uses 1952 as epoch ...  */
+	if (year > 10 && year < 44)
+		epoch = 1980;
+	else if (year < 96)
+		epoch = 1952;
+	year += epoch;
+
+	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+	ts->tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+}
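+
+/*
+ * With the hard-coded year of 23 above, 10 < 23 < 44 selects the 1980
+ * epoch, so the persistent clock is always reported as 2003-04-17.
+ */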
+
+
+void __init time_init(void)
+{
+	arch_gettimeoffset = m32r_gettimeoffset;
+
+#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
+	|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
+	|| defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
+
+	/* M32102 MFT setup */
+	setup_irq(M32R_IRQ_MFT2, &irq0);
+	{
+		unsigned long bus_clock;
+		unsigned short divide;
+
+		bus_clock = boot_cpu_data.bus_clock;
+		divide = boot_cpu_data.timer_divide;
+		latch = DIV_ROUND_CLOSEST(bus_clock/divide, HZ);
+
+		printk("Timer start : latch = %lu\n", latch);
+
+		outl((M32R_MFTMOD_CC_MASK | M32R_MFTMOD_TCCR \
+			|M32R_MFTMOD_CSSEL011), M32R_MFT2MOD_PORTL);
+		outl(latch, M32R_MFT2RLD_PORTL);
+		outl(latch, M32R_MFT2CUT_PORTL);
+		outl(0, M32R_MFT2CMPRLD_PORTL);
+		outl((M32R_MFTCR_MFT2MSK|M32R_MFTCR_MFT2EN), M32R_MFTCR_PORTL);
+	}
+
+#elif defined(CONFIG_CHIP_M32310)
+#warning time_init not implemented
+#else
+#error no chip configuration
+#endif
+}
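+
+/*
+ * Latch arithmetic, with assumed numbers: a 50 MHz bus clock and a
+ * timer_divide of 128 give 50000000/128 = 390625 timer ticks per
+ * second, so at HZ=100 the reload value above would be
+ * DIV_ROUND_CLOSEST(390625, 100) = 3906.
+ */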
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
new file mode 100644
index 000000000..a7a424f85
--- /dev/null
+++ b/arch/m32r/kernel/traps.c
@@ -0,0 +1,319 @@
+/*
+ *  linux/arch/m32r/kernel/traps.c
+ *
+ *  Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
+ *                            Hitoshi Yamamoto
+ */
+
+/*
+ * 'traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/stddef.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/atomic.h>
+
+#include <asm/smp.h>
+
+#include <linux/module.h>
+
+asmlinkage void alignment_check(void);
+asmlinkage void ei_handler(void);
+asmlinkage void rie_handler(void);
+asmlinkage void debug_trap(void);
+asmlinkage void cache_flushing_handler(void);
+asmlinkage void ill_trap(void);
+
+#ifdef CONFIG_SMP
+extern void smp_reschedule_interrupt(void);
+extern void smp_invalidate_interrupt(void);
+extern void smp_call_function_interrupt(void);
+extern void smp_ipi_timer_interrupt(void);
+extern void smp_flush_cache_all_interrupt(void);
+extern void smp_call_function_single_interrupt(void);
+
+/*
+ * for Boot AP function
+ */
+asm (
+	"	.section .eit_vector4,\"ax\"	\n"
+	"	.global _AP_RE			\n"
+	"	.global startup_AP		\n"
+	"_AP_RE:				\n"
+	"	.fill 32, 4, 0			\n"
+	"_AP_EI: bra	startup_AP		\n"
+	"	.previous			\n"
+);
+#endif  /* CONFIG_SMP */
+
+extern unsigned long	eit_vector[];
+#define BRA_INSN(func, entry)	\
+	((((unsigned long)func - (unsigned long)eit_vector - entry*4)/4) \
+	 + 0xff000000UL)
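+
+/*
+ * BRA_INSN builds a 32-bit M32R "bra" instruction word: 0xff000000 is
+ * the branch opcode, and the low 24 bits hold the displacement from
+ * the vector slot to the handler, counted in 4-byte words.  Assumed
+ * example: a handler 0x200 bytes above eit_vector installed in entry 8
+ * encodes (0x200 - 8*4)/4 = 0x78, giving the word 0xff000078.
+ */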
+
+static void set_eit_vector_entries(void)
+{
+	extern void default_eit_handler(void);
+	extern void system_call(void);
+	extern void pie_handler(void);
+	extern void ace_handler(void);
+	extern void tme_handler(void);
+	extern void _flush_cache_copyback_all(void);
+
+	eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
+	eit_vector[1] = BRA_INSN(default_eit_handler, 1);
+	eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
+	eit_vector[5] = BRA_INSN(default_eit_handler, 5);
+	eit_vector[8] = BRA_INSN(rie_handler, 8);
+	eit_vector[12] = BRA_INSN(alignment_check, 12);
+	eit_vector[16] = BRA_INSN(ill_trap, 16);
+	eit_vector[17] = BRA_INSN(debug_trap, 17);
+	eit_vector[18] = BRA_INSN(system_call, 18);
+	eit_vector[19] = BRA_INSN(ill_trap, 19);
+	eit_vector[20] = BRA_INSN(ill_trap, 20);
+	eit_vector[21] = BRA_INSN(ill_trap, 21);
+	eit_vector[22] = BRA_INSN(ill_trap, 22);
+	eit_vector[23] = BRA_INSN(ill_trap, 23);
+	eit_vector[24] = BRA_INSN(ill_trap, 24);
+	eit_vector[25] = BRA_INSN(ill_trap, 25);
+	eit_vector[26] = BRA_INSN(ill_trap, 26);
+	eit_vector[27] = BRA_INSN(ill_trap, 27);
+	eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
+	eit_vector[29] = BRA_INSN(ill_trap, 29);
+	eit_vector[30] = BRA_INSN(ill_trap, 30);
+	eit_vector[31] = BRA_INSN(ill_trap, 31);
+	eit_vector[32] = BRA_INSN(ei_handler, 32);
+	eit_vector[64] = BRA_INSN(pie_handler, 64);
+#ifdef CONFIG_MMU
+	eit_vector[68] = BRA_INSN(ace_handler, 68);
+	eit_vector[72] = BRA_INSN(tme_handler, 72);
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_SMP
+	eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
+	eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
+	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
+	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
+	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
+	eit_vector[189] = 0;	/* CPU_BOOT_IPI */
+	eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
+	eit_vector[191] = 0;
+#endif
+	_flush_cache_copyback_all();
+}
+
+void __init trap_init(void)
+{
+	set_eit_vector_entries();
+
+	/*
+	 * Should be a barrier for any external CPU state.
+	 */
+	cpu_init();
+}
+
+static int kstack_depth_to_print = 24;
+
+static void show_trace(struct task_struct *task, unsigned long *stack)
+{
+	unsigned long addr;
+
+	if (!stack)
+		stack = (unsigned long *)&stack;
+
+	printk("Call Trace: ");
+	while (!kstack_end(stack)) {
+		addr = *stack++;
+		if (__kernel_text_address(addr))
+			printk("[<%08lx>] %pSR\n", addr, (void *)addr);
+	}
+	printk("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+	unsigned long  *stack;
+	int  i;
+
+	/*
+	 * debugging aid: "show_stack(NULL);" prints the
+	 * back trace for this cpu.
+	 */
+
+	if (sp == NULL) {
+		if (task)
+			sp = (unsigned long *)task->thread.sp;
+		else
+			sp = (unsigned long *)&sp;
+	}
+
+	stack = sp;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (kstack_end(stack))
+			break;
+		if (i && ((i % 4) == 0))
+			printk("\n       ");
+		printk("%08lx ", *stack++);
+	}
+	printk("\n");
+	show_trace(task, sp);
+}
+
+static void show_registers(struct pt_regs *regs)
+{
+	int i = 0;
+	int in_kernel = 1;
+	unsigned long sp;
+
+	printk("CPU:    %d\n", smp_processor_id());
+	show_regs(regs);
+
+	sp = (unsigned long) (1+regs);
+	if (user_mode(regs)) {
+		in_kernel = 0;
+		sp = regs->spu;
+		printk("SPU: %08lx\n", sp);
+	} else {
+		printk("SPI: %08lx\n", sp);
+	}
+	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
+		current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
+
+	/*
+	 * When in-kernel, we also print out the stack and code at the
+	 * time of the fault..
+	 */
+	if (in_kernel) {
+		printk("\nStack: ");
+		show_stack(current, (unsigned long*) sp);
+
+		printk("\nCode: ");
+		if (regs->bpc < PAGE_OFFSET)
+			goto bad;
+
+		for (i = 0; i < 20; i++) {
+			unsigned char c;
+			if (__get_user(c, &((unsigned char*)regs->bpc)[i])) {
+bad:
+				printk(" Bad PC value.");
+				break;
+			}
+			printk("%02x ", c);
+		}
+	}
+	printk("\n");
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+	printk("%s: %04lx\n", str, err & 0xffff);
+	show_registers(regs);
+	bust_spinlocks(0);
+	spin_unlock_irq(&die_lock);
+	do_exit(SIGSEGV);
+}
+
+static __inline__ void die_if_kernel(const char * str,
+	struct pt_regs * regs, long err)
+{
+	if (!user_mode(regs))
+		die(str, regs, err);
+}
+
+static __inline__ void do_trap(int trapnr, int signr, const char * str,
+	struct pt_regs * regs, long error_code, siginfo_t *info)
+{
+	if (user_mode(regs)) {
+		/* trap_signal */
+		struct task_struct *tsk = current;
+		tsk->thread.error_code = error_code;
+		tsk->thread.trap_no = trapnr;
+		if (info)
+			force_sig_info(signr, info, tsk);
+		else
+			force_sig(signr, tsk);
+		return;
+	} else {
+		/* kernel_trap */
+		if (!fixup_exception(regs))
+			die(str, regs, error_code);
+		return;
+	}
+}
+
+#define DO_ERROR(trapnr, signr, str, name) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+	do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
+}
+
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+	siginfo_t info; \
+	info.si_signo = signr; \
+	info.si_errno = 0; \
+	info.si_code = sicode; \
+	info.si_addr = (void __user *)siaddr; \
+	do_trap(trapnr, signr, str, regs, error_code, &info); \
+}
+
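+/*
+ * Expansion example: the DO_ERROR(1, SIGTRAP, ...) line below becomes
+ *
+ *	asmlinkage void do_debug_trap(struct pt_regs *regs,
+ *				      long error_code)
+ *	{
+ *		do_trap(1, SIGTRAP, NULL, regs, error_code, NULL);
+ *	}
+ *
+ * so a user-mode fault raises the signal while a kernel-mode fault
+ * goes through fixup_exception() or die().
+ */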
+DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
+DO_ERROR_INFO(0x20, SIGILL,  "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
+DO_ERROR_INFO(0x100, SIGILL,  "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
+DO_ERROR_INFO(-1, SIGILL,  "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)
+
+extern int handle_unaligned_access(unsigned long, struct pt_regs *);
+
+/* This code taken from arch/sh/kernel/traps.c */
+asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
+{
+	mm_segment_t oldfs;
+	unsigned long insn;
+	int tmp;
+
+	oldfs = get_fs();
+
+	if (user_mode(regs)) {
+		local_irq_enable();
+		current->thread.error_code = error_code;
+		current->thread.trap_no = 0x17;
+
+		set_fs(USER_DS);
+		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
+			set_fs(oldfs);
+			goto uspace_segv;
+		}
+		tmp = handle_unaligned_access(insn, regs);
+		set_fs(oldfs);
+
+		if (!tmp)
+			return;
+
+	uspace_segv:
+		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+			"access\n", current->comm);
+		force_sig(SIGSEGV, current);
+	} else {
+		set_fs(KERNEL_DS);
+		if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
+			set_fs(oldfs);
+			die("insn faulting in do_address_error", regs, 0);
+		}
+		handle_unaligned_access(insn, regs);
+		set_fs(oldfs);
+	}
+}
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..018e4a711
--- /dev/null
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -0,0 +1,77 @@
+/* ld script to make M32R Linux kernel
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/addrspace.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(m32r)
+#if defined(__LITTLE_ENDIAN__)
+       jiffies = jiffies_64;
+#else
+       jiffies = jiffies_64 + 4;
+#endif
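+
+/* "jiffies" aliases the least-significant 32 bits of jiffies_64:
+   offset 0 on little-endian, offset 4 on big-endian. */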
+
+kernel_entry = boot - 0x80000000;
+ENTRY(kernel_entry)
+
+SECTIONS
+{
+  . = CONFIG_MEMORY_START + __PAGE_OFFSET;
+  eit_vector = .;
+
+  . = . + 0x1000;
+  .empty_zero_page : { *(.empty_zero_page) } = 0
+
+  /* read-only */
+  _text = .;			/* Text and read-only data */
+  .boot : { *(.boot) } = 0
+  .text : {
+	HEAD_TEXT
+	TEXT_TEXT
+	SCHED_TEXT
+	LOCK_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} = 0x9090
+#ifdef CONFIG_SMP
+  . = ALIGN(65536);
+  .eit_vector4 : { *(.eit_vector4) }
+#endif
+  _etext = .;			/* End of text section */
+
+  EXCEPTION_TABLE(16)
+  NOTES
+
+  _sdata = .;			/* Start of data section */
+  RODATA
+  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
+  _edata = .;			/* End of data section */
+
+  /* will be freed after init */
+  . = ALIGN(PAGE_SIZE);		/* Init code and data */
+  __init_begin = .;
+  INIT_TEXT_SECTION(PAGE_SIZE)
+  INIT_DATA_SECTION(16)
+  PERCPU_SECTION(32)
+  . = ALIGN(PAGE_SIZE);
+  __init_end = .;
+  /* freed after init ends here */
+
+  BSS_SECTION(0, 0, 4)
+
+  _end = . ;
+
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+
+  /* Sections to be discarded */
+  DISCARDS
+}
-- 
cgit v1.2.3-54-g00ecf