author     Avi Kivity <avi@qumranet.com>   2008-06-25 16:17:36 +0300
committer  Avi Kivity <avi@qumranet.com>   2008-06-25 16:17:36 +0300
commit     da17c8501b5829a7745937341cc1881f53971208 (patch)
tree       a094ad030d3490a9799697a661df0859760b12a1
parent     ccde0f40bcdf553df1f7aeb72b5f33773ea32938 (diff)
parent     ebbd0ffddd67e70180235561b150aa02897992b1 (diff)
Merge branch 'qemu-cvs' (kvm-71-rc1)
Conflicts:

	qemu/block.c
	qemu/configure
	qemu/cpu-defs.h
	qemu/cutils.c
	qemu/exec-all.h
	qemu/exec.c
	qemu/monitor.c
	qemu/target-i386/helper.c
	qemu/vl.c

Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  Changelog  2
-rw-r--r--  Makefile  2
-rw-r--r--  Makefile.target  10
-rw-r--r--  audio/alsaaudio.c  204
-rw-r--r--  audio/audio.c  6
-rw-r--r--  audio/mixeng.c  2
-rw-r--r--  audio/mixeng_template.h  10
-rw-r--r--  audio/ossaudio.c  6
-rw-r--r--  block-qcow.c  45
-rw-r--r--  block.c  27
-rw-r--r--  block.h  2
-rwxr-xr-x  configure  24
-rw-r--r--  cpu-all.h  27
-rw-r--r--  cpu-defs.h  13
-rw-r--r--  cpu-exec.c  37
-rw-r--r--  cutils.c  35
-rw-r--r--  dyngen-exec.h  6
-rw-r--r--  dyngen.c  8
-rw-r--r--  exec-all.h  246
-rw-r--r--  exec.c  416
-rw-r--r--  gdbstub.c  96
-rw-r--r--  host-utils.h  2
-rw-r--r--  hw/ac97.c  102
-rw-r--r--  hw/audiodev.h  3
-rw-r--r--  hw/cs4231a.c  674
-rw-r--r--  hw/devices.h  7
-rw-r--r--  hw/eccmemctl.c  62
-rw-r--r--  hw/es1370.c  10
-rw-r--r--  hw/etraxfs.c  55
-rw-r--r--  hw/etraxfs.h  42
-rw-r--r--  hw/etraxfs_dma.c  122
-rw-r--r--  hw/etraxfs_pic.c  53
-rw-r--r--  hw/etraxfs_timer.c  31
-rw-r--r--  hw/firmware_abi.h  2
-rw-r--r--  hw/gus.c  38
-rw-r--r--  hw/gusemu.h  4
-rw-r--r--  hw/gusemu_mixer.c  6
-rw-r--r--  hw/hw.h  5
-rw-r--r--  hw/musicpal.c  6
-rw-r--r--  hw/pc.c  8
-rw-r--r--  hw/pflash_cfi01.c  8
-rw-r--r--  hw/pflash_cfi02.c  12
-rw-r--r--  hw/slavio_serial.c  2
-rw-r--r--  hw/sun4m.c  34
-rw-r--r--  hw/sun4u.c  32
-rw-r--r--  hw/tc6393xb.c  299
-rw-r--r--  hw/tcx.c  4
-rw-r--r--  hw/tosa.c  38
-rw-r--r--  kqemu.c  7
-rw-r--r--  linux-user/elfload.c  4
-rw-r--r--  linux-user/main.c  184
-rw-r--r--  linux-user/mmap.c  62
-rw-r--r--  linux-user/qemu.h  14
-rw-r--r--  linux-user/signal.c  4
-rw-r--r--  linux-user/syscall.c  153
-rw-r--r--  monitor.c  106
-rw-r--r--  nbd.c  2
-rw-r--r--  osdep.h  4
-rw-r--r--  qemu-doc.texi  9
-rw-r--r--  qemu-img.c  47
-rw-r--r--  qemu-img.texi  8
-rw-r--r--  qemu-lock.h  249
-rw-r--r--  qemu-malloc.c  59
-rw-r--r--  softmmu_template.h  48
-rw-r--r--  target-cris/cpu.h  15
-rw-r--r--  target-cris/helper.c  19
-rw-r--r--  target-cris/helper.h  1
-rw-r--r--  target-cris/machine.c  88
-rw-r--r--  target-cris/op_helper.c  31
-rw-r--r--  target-cris/translate.c  334
-rw-r--r--  target-i386/helper.c  41
-rw-r--r--  target-i386/helper.h  4
-rw-r--r--  target-i386/op_helper.c  27
-rw-r--r--  target-i386/translate.c  14
-rw-r--r--  target-mips/TODO  12
-rw-r--r--  target-mips/cpu.h  13
-rw-r--r--  target-mips/exec.h  175
-rw-r--r--  target-mips/fop_template.c  104
-rw-r--r--  target-mips/helper.h  270
-rw-r--r--  target-mips/op.c  2219
-rw-r--r--  target-mips/op_helper.c  1611
-rw-r--r--  target-mips/op_mem.c  300
-rw-r--r--  target-mips/translate.c  3801
-rw-r--r--  target-ppc/translate.c  2
-rw-r--r--  target-sparc/TODO  4
-rw-r--r--  target-sparc/cpu.h  34
-rw-r--r--  target-sparc/helper.c  123
-rw-r--r--  target-sparc/machine.c  8
-rw-r--r--  target-sparc/op_helper.c  24
-rw-r--r--  target-sparc/translate.c  232
-rw-r--r--  tcg/ppc/tcg-target.c  235
-rw-r--r--  tcg/ppc/tcg-target.h  1
-rw-r--r--  tcg/tcg.c  6
-rw-r--r--  tests/cris/Makefile  4
-rw-r--r--  tests/cris/check_movei.s  3
-rw-r--r--  tests/cris/check_swap.c  1
-rw-r--r--  tests/cris/check_xarith.s  26
-rw-r--r--  tests/cris/crisutils.h  2
-rw-r--r--  tests/cris/sys.c  11
-rw-r--r--  tests/cris/testutils.inc  8
-rw-r--r--  vl.c  27
101 files changed, 7564 insertions, 6111 deletions
diff --git a/Changelog b/Changelog
index 0ed5bcbc0..44c79942c 100644
--- a/Changelog
+++ b/Changelog
@@ -2,6 +2,8 @@
- Braille support (Samuel Thibault)
- Freecom MusicPal system emulation (Jan Kiszka)
- OMAP242x and Nokia N800, N810 machines (Andrzej Zaborowski)
+ - EsounD audio driver (Frederick Reeve)
+ - Gravis Ultrasound GF1 sound card (Tibor "TS" Schütz)
version 0.9.1:
diff --git a/Makefile b/Makefile
index a2a74c18b..024ddfc71 100644
--- a/Makefile
+++ b/Makefile
@@ -39,7 +39,7 @@ recurse-all: $(SUBDIR_RULES)
#######################################################################
# BLOCK_OBJS is code used by both qemu system emulation and qemu-img
-BLOCK_OBJS=cutils.o
+BLOCK_OBJS=cutils.o qemu-malloc.o
BLOCK_OBJS+=block-cow.o block-qcow.o aes.o block-vmdk.o block-cloop.o
BLOCK_OBJS+=block-dmg.o block-bochs.o block-vpc.o block-vvfat.o
BLOCK_OBJS+=block-qcow2.o block-parallels.o
diff --git a/Makefile.target b/Makefile.target
index 9fb40a44b..77b230119 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -113,6 +113,7 @@ endif
ifeq ($(ARCH),ppc)
CPPFLAGS+= -D__powerpc__
+OP_CFLAGS+= -mlongcall
endif
ifeq ($(ARCH),sparc)
@@ -441,7 +442,7 @@ LDFLAGS+=-Wl,-T,$(SRC_PATH)/$(ARCH)el.ld
endif
endif
-OBJS= main.o syscall.o strace.o mmap.o signal.o path.o osdep.o thunk.o \
+OBJS= main.o syscall.o strace.o mmap.o signal.o path.o thunk.o \
elfload.o linuxload.o uaccess.o
LIBS+= $(AIOLIBS)
ifdef TARGET_HAS_BFLT
@@ -498,7 +499,7 @@ LDFLAGS+=-Wl,-segaddr,__STD_PROG_ZONE,0x1000 -image_base 0x0e000000
LIBS+=-lmx
-OBJS= main.o commpage.o machload.o mmap.o osdep.o signal.o syscall.o thunk.o
+OBJS= main.o commpage.o machload.o mmap.o signal.o syscall.o thunk.o
OBJS+= libqemu.a
@@ -551,6 +552,9 @@ endif
ifdef CONFIG_GUS
SOUND_HW += gus.o gusemu_hal.o gusemu_mixer.o
endif
+ifdef CONFIG_CS4231A
+SOUND_HW += cs4231a.o
+endif
ifdef CONFIG_KVM_KERNEL_INC
CFLAGS += -I $(CONFIG_KVM_KERNEL_INC)
@@ -671,7 +675,7 @@ OBJS+= arm-semi.o
OBJS+= pxa2xx.o pxa2xx_pic.o pxa2xx_gpio.o pxa2xx_timer.o pxa2xx_dma.o
OBJS+= pxa2xx_lcd.o pxa2xx_mmci.o pxa2xx_pcmcia.o pxa2xx_keypad.o
OBJS+= pflash_cfi01.o gumstix.o
-OBJS+= zaurus.o ide.o serial.o nand.o ecc.o spitz.o tosa.o
+OBJS+= zaurus.o ide.o serial.o nand.o ecc.o spitz.o tosa.o tc6393xb.o
OBJS+= omap1.o omap_lcdc.o omap_dma.o omap_clk.o omap_mmc.o omap_i2c.o
OBJS+= omap2.o omap_dss.o
OBJS+= palm.o tsc210x.o
diff --git a/audio/alsaaudio.c b/audio/alsaaudio.c
index 43cfa258d..c926cae38 100644
--- a/audio/alsaaudio.c
+++ b/audio/alsaaudio.c
@@ -58,37 +58,15 @@ static struct {
int period_size_out_overridden;
int verbose;
} conf = {
-#define DEFAULT_BUFFER_SIZE 1024
-#define DEFAULT_PERIOD_SIZE 256
-#ifdef HIGH_LATENCY
- .size_in_usec_in = 1,
- .size_in_usec_out = 1,
-#endif
.pcm_name_out = "default",
.pcm_name_in = "default",
-#ifdef HIGH_LATENCY
- .buffer_size_in = 400000,
- .period_size_in = 400000 / 4,
- .buffer_size_out = 400000,
- .period_size_out = 400000 / 4,
-#else
- .buffer_size_in = DEFAULT_BUFFER_SIZE * 4,
- .period_size_in = DEFAULT_PERIOD_SIZE * 4,
- .buffer_size_out = DEFAULT_BUFFER_SIZE,
- .period_size_out = DEFAULT_PERIOD_SIZE,
- .buffer_size_in_overridden = 0,
- .buffer_size_out_overridden = 0,
- .period_size_in_overridden = 0,
- .period_size_out_overridden = 0,
-#endif
- .threshold = 0,
- .verbose = 0
};
struct alsa_params_req {
int freq;
snd_pcm_format_t fmt;
int nchannels;
+ int size_in_usec;
unsigned int buffer_size;
unsigned int period_size;
};
@@ -286,17 +264,16 @@ static int alsa_open (int in, struct alsa_params_req *req,
snd_pcm_t *handle;
snd_pcm_hw_params_t *hw_params;
int err;
+ int size_in_usec;
unsigned int freq, nchannels;
const char *pcm_name = in ? conf.pcm_name_in : conf.pcm_name_out;
- unsigned int period_size, buffer_size;
snd_pcm_uframes_t obt_buffer_size;
const char *typ = in ? "ADC" : "DAC";
snd_pcm_format_t obtfmt;
freq = req->freq;
- period_size = req->period_size;
- buffer_size = req->buffer_size;
nchannels = req->nchannels;
+ size_in_usec = req->size_in_usec;
snd_pcm_hw_params_alloca (&hw_params);
@@ -356,130 +333,77 @@ static int alsa_open (int in, struct alsa_params_req *req,
goto err;
}
- if (!((in && conf.size_in_usec_in) || (!in && conf.size_in_usec_out))) {
- if (!buffer_size) {
- buffer_size = DEFAULT_BUFFER_SIZE;
- period_size= DEFAULT_PERIOD_SIZE;
- }
- }
+ if (req->buffer_size) {
+ unsigned long obt;
- if (buffer_size) {
- if ((in && conf.size_in_usec_in) || (!in && conf.size_in_usec_out)) {
- if (period_size) {
- err = snd_pcm_hw_params_set_period_time_near (
- handle,
- hw_params,
- &period_size,
- 0
- );
- if (err < 0) {
- alsa_logerr2 (err, typ,
- "Failed to set period time %d\n",
- req->period_size);
- goto err;
- }
- }
+ if (size_in_usec) {
+ int dir = 0;
+ unsigned int btime = req->buffer_size;
err = snd_pcm_hw_params_set_buffer_time_near (
handle,
hw_params,
- &buffer_size,
- 0
+ &btime,
+ &dir
);
-
- if (err < 0) {
- alsa_logerr2 (err, typ,
- "Failed to set buffer time %d\n",
- req->buffer_size);
- goto err;
- }
+ obt = btime;
}
else {
- int dir;
- snd_pcm_uframes_t minval;
-
- if (period_size) {
- minval = period_size;
- dir = 0;
-
- err = snd_pcm_hw_params_get_period_size_min (
- hw_params,
- &minval,
- &dir
- );
- if (err < 0) {
- alsa_logerr (
- err,
- "Could not get minmal period size for %s\n",
- typ
- );
- }
- else {
- if (period_size < minval) {
- if ((in && conf.period_size_in_overridden)
- || (!in && conf.period_size_out_overridden)) {
- dolog ("%s period size(%d) is less "
- "than minmal period size(%ld)\n",
- typ,
- period_size,
- minval);
- }
- period_size = minval;
- }
- }
+ snd_pcm_uframes_t bsize = req->buffer_size;
- err = snd_pcm_hw_params_set_period_size (
- handle,
- hw_params,
- period_size,
- 0
- );
- if (err < 0) {
- alsa_logerr2 (err, typ, "Failed to set period size %d\n",
- req->period_size);
- goto err;
- }
- }
+ err = snd_pcm_hw_params_set_buffer_size_near (
+ handle,
+ hw_params,
+ &bsize
+ );
+ obt = bsize;
+ }
+ if (err < 0) {
+ alsa_logerr2 (err, typ, "Failed to set buffer %s to %d\n",
+ size_in_usec ? "time" : "size", req->buffer_size);
+ goto err;
+ }
+
+ if (obt - req->buffer_size)
+ dolog ("Requested buffer %s %u was rejected, using %lu\n",
+ size_in_usec ? "time" : "size", req->buffer_size, obt);
+ }
+
+ if (req->period_size) {
+ unsigned long obt;
+
+ if (size_in_usec) {
+ int dir = 0;
+ unsigned int ptime = req->period_size;
- minval = buffer_size;
- err = snd_pcm_hw_params_get_buffer_size_min (
+ err = snd_pcm_hw_params_set_period_time_near (
+ handle,
hw_params,
- &minval
+ &ptime,
+ &dir
);
- if (err < 0) {
- alsa_logerr (err, "Could not get minmal buffer size for %s\n",
- typ);
- }
- else {
- if (buffer_size < minval) {
- if ((in && conf.buffer_size_in_overridden)
- || (!in && conf.buffer_size_out_overridden)) {
- dolog (
- "%s buffer size(%d) is less "
- "than minimal buffer size(%ld)\n",
- typ,
- buffer_size,
- minval
- );
- }
- buffer_size = minval;
- }
- }
+ obt = ptime;
+ }
+ else {
+ snd_pcm_uframes_t psize = req->period_size;
- err = snd_pcm_hw_params_set_buffer_size (
+ err = snd_pcm_hw_params_set_buffer_size_near (
handle,
hw_params,
- buffer_size
+ &psize
);
- if (err < 0) {
- alsa_logerr2 (err, typ, "Failed to set buffer size %d\n",
- req->buffer_size);
- goto err;
- }
+ obt = psize;
}
- }
- else {
- dolog ("warning: Buffer size is not set\n");
+
+ if (err < 0) {
+ alsa_logerr2 (err, typ, "Failed to set period %s to %d\n",
+ size_in_usec ? "time" : "size", req->period_size);
+ goto err;
+ }
+
+ if (obt - req->period_size)
+ dolog ("Requested period %s %u was rejected, using %lu\n",
+ size_in_usec ? "time" : "size", req->period_size, obt);
}
err = snd_pcm_hw_params (handle, hw_params);
@@ -697,6 +621,7 @@ static int alsa_init_out (HWVoiceOut *hw, audsettings_t *as)
req.nchannels = as->nchannels;
req.period_size = conf.period_size_out;
req.buffer_size = conf.buffer_size_out;
+ req.size_in_usec = conf.size_in_usec_in;
if (alsa_open (0, &req, &obt, &handle)) {
return -1;
@@ -774,6 +699,7 @@ static int alsa_init_in (HWVoiceIn *hw, audsettings_t *as)
req.nchannels = as->nchannels;
req.period_size = conf.period_size_in;
req.buffer_size = conf.buffer_size_in;
+ req.size_in_usec = conf.size_in_usec_in;
if (alsa_open (1, &req, &obt, &handle)) {
return -1;
@@ -953,16 +879,20 @@ static struct audio_option alsa_options[] = {
{"DAC_SIZE_IN_USEC", AUD_OPT_BOOL, &conf.size_in_usec_out,
"DAC period/buffer size in microseconds (otherwise in frames)", NULL, 0},
{"DAC_PERIOD_SIZE", AUD_OPT_INT, &conf.period_size_out,
- "DAC period size", &conf.period_size_out_overridden, 0},
+ "DAC period size (0 to go with system default)",
+ &conf.period_size_out_overridden, 0},
{"DAC_BUFFER_SIZE", AUD_OPT_INT, &conf.buffer_size_out,
- "DAC buffer size", &conf.buffer_size_out_overridden, 0},
+ "DAC buffer size (0 to go with system default)",
+ &conf.buffer_size_out_overridden, 0},
{"ADC_SIZE_IN_USEC", AUD_OPT_BOOL, &conf.size_in_usec_in,
"ADC period/buffer size in microseconds (otherwise in frames)", NULL, 0},
{"ADC_PERIOD_SIZE", AUD_OPT_INT, &conf.period_size_in,
- "ADC period size", &conf.period_size_in_overridden, 0},
+ "ADC period size (0 to go with system default)",
+ &conf.period_size_in_overridden, 0},
{"ADC_BUFFER_SIZE", AUD_OPT_INT, &conf.buffer_size_in,
- "ADC buffer size", &conf.buffer_size_in_overridden, 0},
+ "ADC buffer size (0 to go with system default)",
+ &conf.buffer_size_in_overridden, 0},
{"THRESHOLD", AUD_OPT_INT, &conf.threshold,
"(undocumented)", NULL, 0},
diff --git a/audio/audio.c b/audio/audio.c
index c178584ca..46b39df76 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -549,6 +549,12 @@ static void audio_print_settings (audsettings_t *as)
case AUD_FMT_U16:
AUD_log (NULL, "U16");
break;
+ case AUD_FMT_S32:
+ AUD_log (NULL, "S32");
+ break;
+ case AUD_FMT_U32:
+ AUD_log (NULL, "U32");
+ break;
default:
AUD_log (NULL, "invalid(%d)", as->fmt);
break;
diff --git a/audio/mixeng.c b/audio/mixeng.c
index b668c524e..5e0426cb6 100644
--- a/audio/mixeng.c
+++ b/audio/mixeng.c
@@ -28,8 +28,6 @@
#define AUDIO_CAP "mixeng"
#include "audio_int.h"
-#define NOVOL
-
/* 8 bit */
#define ENDIAN_CONVERSION natural
#define ENDIAN_CONVERT(v) (v)
diff --git a/audio/mixeng_template.h b/audio/mixeng_template.h
index d726441e2..21eef58bf 100644
--- a/audio/mixeng_template.h
+++ b/audio/mixeng_template.h
@@ -31,14 +31,14 @@
#define HALF (IN_MAX >> 1)
#endif
-#ifdef NOVOL
-#define VOL(a, b) a
-#else
+#ifdef CONFIG_MIXEMU
#ifdef FLOAT_MIXENG
#define VOL(a, b) ((a) * (b))
#else
#define VOL(a, b) ((a) * (b)) >> 32
#endif
+#else
+#define VOL(a, b) a
#endif
#define ET glue (ENDIAN_CONVERSION, glue (_, IN_T))
@@ -113,7 +113,7 @@ static void glue (glue (conv_, ET), _to_stereo)
{
st_sample_t *out = dst;
IN_T *in = (IN_T *) src;
-#ifndef NOVOL
+#ifdef CONFIG_MIXEMU
if (vol->mute) {
mixeng_clear (dst, samples);
return;
@@ -133,7 +133,7 @@ static void glue (glue (conv_, ET), _to_mono)
{
st_sample_t *out = dst;
IN_T *in = (IN_T *) src;
-#ifndef NOVOL
+#ifdef CONFIG_MIXEMU
if (vol->mute) {
mixeng_clear (dst, samples);
return;
diff --git a/audio/ossaudio.c b/audio/ossaudio.c
index 2a300c21d..a29b4bc17 100644
--- a/audio/ossaudio.c
+++ b/audio/ossaudio.c
@@ -254,6 +254,12 @@ static int oss_open (int in, struct oss_params *req,
goto err;
}
+ if (!abinfo.fragstotal || !abinfo.fragsize) {
+ AUD_log (AUDIO_CAP, "Returned bogus buffer information(%d, %d) for %s\n",
+ abinfo.fragstotal, abinfo.fragsize, typ);
+ goto err;
+ }
+
obt->fmt = fmt;
obt->nchannels = nchannels;
obt->freq = freq;
diff --git a/block-qcow.c b/block-qcow.c
index 0ac2b42b4..1fecf3078 100644
--- a/block-qcow.c
+++ b/block-qcow.c
@@ -339,33 +339,28 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
return -1;
} else {
cluster_offset = bdrv_getlength(s->hd);
- if (allocate == 1) {
- /* round to cluster size */
- cluster_offset = (cluster_offset + s->cluster_size - 1) &
- ~(s->cluster_size - 1);
- bdrv_truncate(s->hd, cluster_offset + s->cluster_size);
- /* if encrypted, we must initialize the cluster
- content which won't be written */
- if (s->crypt_method &&
- (n_end - n_start) < s->cluster_sectors) {
- uint64_t start_sect;
- start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
- memset(s->cluster_data + 512, 0x00, 512);
- for(i = 0; i < s->cluster_sectors; i++) {
- if (i < n_start || i >= n_end) {
- encrypt_sectors(s, start_sect + i,
- s->cluster_data,
- s->cluster_data + 512, 1, 1,
- &s->aes_encrypt_key);
- if (bdrv_pwrite(s->hd, cluster_offset + i * 512,
- s->cluster_data, 512) != 512)
- return -1;
- }
+ /* round to cluster size */
+ cluster_offset = (cluster_offset + s->cluster_size - 1) &
+ ~(s->cluster_size - 1);
+ bdrv_truncate(s->hd, cluster_offset + s->cluster_size);
+ /* if encrypted, we must initialize the cluster
+ content which won't be written */
+ if (s->crypt_method &&
+ (n_end - n_start) < s->cluster_sectors) {
+ uint64_t start_sect;
+ start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
+ memset(s->cluster_data + 512, 0x00, 512);
+ for(i = 0; i < s->cluster_sectors; i++) {
+ if (i < n_start || i >= n_end) {
+ encrypt_sectors(s, start_sect + i,
+ s->cluster_data,
+ s->cluster_data + 512, 1, 1,
+ &s->aes_encrypt_key);
+ if (bdrv_pwrite(s->hd, cluster_offset + i * 512,
+ s->cluster_data, 512) != 512)
+ return -1;
}
}
- } else {
- cluster_offset |= QCOW_OFLAG_COMPRESSED |
- (uint64_t)compressed_size << (63 - s->cluster_bits);
}
}
/* update L2 table */
diff --git a/block.c b/block.c
index 0e9577c16..b438f1db1 100644
--- a/block.c
+++ b/block.c
@@ -1024,6 +1024,33 @@ void bdrv_flush_all(void)
bdrv_iterate_writeable(bdrv_flush);
}
+/*
+ * Returns true iff the specified sector is present in the disk image. Drivers
+ * not implementing the functionality are assumed to not support backing files,
+ * hence all their sectors are reported as allocated.
+ *
+ * 'pnum' is set to the number of sectors (including and immediately following
+ * the specified sector) that are known to be in the same
+ * allocated/unallocated state.
+ *
+ * 'nb_sectors' is the max value 'pnum' should be set to.
+ */
+int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
+ int *pnum)
+{
+ int64_t n;
+ if (!bs->drv->bdrv_is_allocated) {
+ if (sector_num >= bs->total_sectors) {
+ *pnum = 0;
+ return 0;
+ }
+ n = bs->total_sectors - sector_num;
+ *pnum = (n < nb_sectors) ? (n) : (nb_sectors);
+ return 1;
+ }
+ return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);
+}
+
#ifndef QEMU_IMG
void bdrv_info(void)
{
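
The new bdrv_is_allocated() above gives callers a generic way to ask which sector runs of an image are backed by data. As a rough usage sketch (not part of this patch; dump_allocation_map() is a hypothetical helper and the include path is assumed), a tool in the qemu-img mould could walk an open image like this:

    #include <stdio.h>
    #include <inttypes.h>
    #include "block.h"   /* assumed QEMU block-layer header */

    /* Print which 512-byte sector runs of an already-opened image are
       allocated, using the fallback or driver-specific implementation of
       bdrv_is_allocated(). */
    static void dump_allocation_map(BlockDriverState *bs)
    {
        int64_t total_sectors = bdrv_getlength(bs) >> 9;  /* bytes -> sectors */
        int64_t sector = 0;
        int n;

        while (sector < total_sectors) {
            int64_t remaining = total_sectors - sector;
            int chunk = remaining > 65536 ? 65536 : (int)remaining;
            int allocated = bdrv_is_allocated(bs, sector, chunk, &n);

            printf("sectors %" PRId64 "-%" PRId64 ": %s\n",
                   sector, sector + n - 1,
                   allocated ? "allocated" : "unallocated");
            sector += n;
        }
    }
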
diff --git a/block.h b/block.h
index 3dd429195..79f91e2c6 100644
--- a/block.h
+++ b/block.h
@@ -100,6 +100,8 @@ int qemu_key_check(BlockDriverState *bs, const char *name);
/* Ensure contents are flushed to disk. */
void bdrv_flush(BlockDriverState *bs);
+int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
+ int *pnum);
#define BDRV_TYPE_HD 0
#define BDRV_TYPE_CDROM 1
diff --git a/configure b/configure
index addd6e8cd..8a37b2864 100755
--- a/configure
+++ b/configure
@@ -90,6 +90,7 @@ slirp="yes"
adlib="no"
ac97="no"
gus="no"
+cs4231a="no"
oss="no"
dsound="no"
coreaudio="no"
@@ -116,6 +117,7 @@ build_docs="no"
uname_release=""
curses="yes"
nptl="yes"
+mixemu="no"
cpu_emulation="yes"
device_tree_support=""
@@ -303,6 +305,8 @@ for opt do
;;
--enable-gus) gus="yes"
;;
+ --enable-cs4231a) cs4231a="yes"
+ ;;
--disable-kqemu) kqemu="no"
;;
--disable-kvm) kvm="no"
@@ -353,6 +357,8 @@ for opt do
;;
--disable-nptl) nptl="no"
;;
+ --enable-mixemu) mixemu="yes"
+ ;;
--disable-cpu-emulation) cpu_emulation="no"
;;
--disable-libfdt) device_tree_support="no"
@@ -444,11 +450,13 @@ echo " --enable-mingw32 enable Win32 cross compilation with mingw32"
echo " --enable-adlib enable Adlib emulation"
echo " --enable-ac97 enable AC97 emulation"
echo " --enable-gus enable Gravis Ultrasound emulation"
+echo " --enable-cs4231a enable CS4231A emulation"
echo " --enable-coreaudio enable Coreaudio audio driver"
echo " --enable-alsa enable ALSA audio driver"
echo " --enable-esd enable EsoundD audio driver"
echo " --enable-fmod enable FMOD audio driver"
echo " --enable-dsound enable DirectSound audio driver"
+echo " --enable-mixemu enable mixer emulation"
echo " --disable-brlapi disable BrlAPI"
echo " --disable-vnc-tls disable TLS encryption for VNC server"
echo " --disable-curses disable curses output"
@@ -898,10 +906,12 @@ echo "mingw32 support $mingw32"
echo "Adlib support $adlib"
echo "AC97 support $ac97"
echo "GUS support $gus"
+echo "CS4231A support $cs4231a"
echo "CoreAudio support $coreaudio"
echo "ALSA support $alsa"
echo "EsounD support $esd"
echo "DSound support $dsound"
+echo "Mixer emulation $mixemu"
if test "$fmod" = "yes"; then
if test -z $fmod_lib || test -z $fmod_inc; then
echo
@@ -1111,6 +1121,10 @@ if test "$gus" = "yes" ; then
echo "CONFIG_GUS=yes" >> $config_mak
echo "#define CONFIG_GUS 1" >> $config_h
fi
+if test "$cs4231a" = "yes" ; then
+ echo "CONFIG_CS4231A=yes" >> $config_mak
+ echo "#define CONFIG_CS4231A 1" >> $config_h
+fi
if test "$oss" = "yes" ; then
echo "CONFIG_OSS=yes" >> $config_mak
echo "#define CONFIG_OSS 1" >> $config_h
@@ -1131,6 +1145,10 @@ if test "$dsound" = "yes" ; then
echo "CONFIG_DSOUND=yes" >> $config_mak
echo "#define CONFIG_DSOUND 1" >> $config_h
fi
+if test "$mixemu" = "yes" ; then
+ echo "CONFIG_MIXEMU=yes" >> $config_mak
+ echo "#define CONFIG_MIXEMU 1" >> $config_h
+fi
if test "$fmod" = "yes" ; then
echo "CONFIG_FMOD=yes" >> $config_mak
echo "CONFIG_FMOD_LIB=$fmod_lib" >> $config_mak
@@ -1374,28 +1392,22 @@ case "$target_cpu" in
;;
mips|mipsel)
echo "TARGET_ARCH=mips" >> $config_mak
- echo "CONFIG_DYNGEN_OP=yes" >> $config_mak
echo "#define TARGET_ARCH \"mips\"" >> $config_h
echo "#define TARGET_MIPS 1" >> $config_h
echo "#define TARGET_ABI_MIPSO32 1" >> $config_h
- echo "#define CONFIG_DYNGEN_OP 1" >> $config_h
;;
mipsn32|mipsn32el)
echo "TARGET_ARCH=mipsn32" >> $config_mak
- echo "CONFIG_DYNGEN_OP=yes" >> $config_mak
echo "#define TARGET_ARCH \"mipsn32\"" >> $config_h
echo "#define TARGET_MIPS 1" >> $config_h
echo "#define TARGET_ABI_MIPSN32 1" >> $config_h
- echo "#define CONFIG_DYNGEN_OP 1" >> $config_h
;;
mips64|mips64el)
echo "TARGET_ARCH=mips64" >> $config_mak
- echo "CONFIG_DYNGEN_OP=yes" >> $config_mak
echo "#define TARGET_ARCH \"mips64\"" >> $config_h
echo "#define TARGET_MIPS 1" >> $config_h
echo "#define TARGET_MIPS64 1" >> $config_h
echo "#define TARGET_ABI_MIPSN64 1" >> $config_h
- echo "#define CONFIG_DYNGEN_OP 1" >> $config_h
;;
ppc)
echo "TARGET_ARCH=ppc" >> $config_mak
diff --git a/cpu-all.h b/cpu-all.h
index 07633cb90..b0c113f7a 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -797,7 +797,7 @@ extern CPUState *cpu_single_env;
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
void cpu_watchpoint_remove_all(CPUState *env);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
@@ -869,21 +869,34 @@ extern ram_addr_t ram_size;
extern uint8_t *bios_mem;
/* physical memory access */
-#define TLB_INVALID_MASK (1 << 3)
-#define IO_MEM_SHIFT 4
+
+/* MMIO pages are identified by a combination of an IO device index and
+ 3 flags. The ROMD code stores the page ram offset in iotlb entry,
+ so only a limited number of ids are available. */
+
+#define IO_MEM_SHIFT 3
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
-#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
-/* acts like a ROM when read and like a device when written. As an
- exception, the write memory callback gets the ram offset instead of
- the physical address */
+#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
+
+/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
#define IO_MEM_SUBWIDTH (4)
+/* Flags stored in the low bits of the TLB virtual address. These are
+ defined so that fast path ram access is all zeros. */
+/* Zero if TLB entry is valid. */
+#define TLB_INVALID_MASK (1 << 3)
+/* Set if TLB entry references a clean RAM page. The iotlb entry will
+ contain the page physical address. */
+#define TLB_NOTDIRTY (1 << 4)
+/* Set if TLB entry is an IO callback. */
+#define TLB_MMIO (1 << 5)
+
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
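
The comment block above is the key to the reworked softmmu path: flag bits live in the low bits of the TLB virtual address and are chosen so that a plain, writable, dirty RAM page stores nothing but the page-aligned address. A rough illustration (not from this commit; tlb_hit_fast_path() is hypothetical, not the actual softmmu_template.h code) of why that keeps the fast path down to one masked compare:

    /* For an aligned access, the bits of 'addr' below TARGET_PAGE_BITS that
       survive the mask are zero, so any of TLB_INVALID_MASK, TLB_NOTDIRTY or
       TLB_MMIO set in the stored address forces a mismatch and the access
       falls back to the slow path (I/O callback, dirty tracking, etc.). */
    static inline int tlb_hit_fast_path(target_ulong addr,
                                        target_ulong tlb_addr_write,
                                        int access_size)
    {
        return (addr & (TARGET_PAGE_MASK | (access_size - 1))) == tlb_addr_write;
    }
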
diff --git a/cpu-defs.h b/cpu-defs.h
index c4bfd1dda..169568cd8 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -106,16 +106,17 @@ typedef uint64_t target_phys_addr_t;
#endif
typedef struct CPUTLBEntry {
- /* bit 31 to TARGET_PAGE_BITS : virtual address
- bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT : if non zero, memory io
- zone number
+ /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+ bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+ go directly to ram.
bit 3 : indicates that the entry is invalid
bit 2..0 : zero
*/
target_ulong addr_read;
target_ulong addr_write;
target_ulong addr_code;
- /* addend to virtual address to get physical address */
+ /* Addend to virtual address to get physical address. IO accesses
+ use the corresponding iotlb value. */
#if TARGET_PHYS_ADDR_BITS == 64
/* on i386 Linux make sure it is aligned */
target_phys_addr_t addend __attribute__((aligned(8)));
@@ -143,6 +144,7 @@ typedef struct CPUTLBEntry {
int halted; /* TRUE if the CPU is in suspend state */ \
/* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
/* buffer for temporaries in the code generator */ \
long temp_buf[CPU_TEMP_BUF_NLONGS]; \
@@ -155,7 +157,7 @@ typedef struct CPUTLBEntry {
\
struct { \
target_ulong vaddr; \
- target_phys_addr_t addend; \
+ int type; /* PAGE_READ/PAGE_WRITE */ \
} watchpoint[MAX_WATCHPOINTS]; \
int nb_watchpoints; \
int watchpoint_hit; \
@@ -166,6 +168,7 @@ typedef struct CPUTLBEntry {
\
void *next_cpu; /* next CPU sharing TB cache */ \
int cpu_index; /* CPU index (informative) */ \
+ int running; /* Nonzero if cpu is currently running(usermode). */ \
int thread_id; \
/* user data */ \
void *opaque; \
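
The new 'type' field holds the PAGE_READ/PAGE_WRITE flags a watchpoint should trap on, matching the extra argument cpu_watchpoint_insert() now takes (see cpu-all.h above). A hypothetical one-liner, not part of the patch, showing how a debugger front end could register a write-only watchpoint:

    /* Illustrative only: fire on guest writes to guest_vaddr, let reads pass. */
    static int add_write_watchpoint(CPUState *env, target_ulong guest_vaddr)
    {
        return cpu_watchpoint_insert(env, guest_vaddr, PAGE_WRITE);
    }
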
diff --git a/cpu-exec.c b/cpu-exec.c
index e8d1ccef5..7af8bba46 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -48,7 +48,6 @@
#endif
int tb_invalidated_flag;
-static unsigned long next_tb;
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
@@ -97,8 +96,6 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
uint8_t *tc_ptr;
- spin_lock(&tb_lock);
-
tb_invalidated_flag = 0;
regs_to_env(); /* XXX: do it just before cpu_gen_code() */
@@ -159,7 +156,6 @@ static TranslationBlock *tb_find_slow(target_ulong pc,
found:
/* we add the TB in the virtual pc hash table */
env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
- spin_unlock(&tb_lock);
return tb;
}
@@ -221,7 +217,7 @@ static inline TranslationBlock *tb_find_fast(void)
cs_base = 0;
pc = env->pc;
#elif defined(TARGET_CRIS)
- flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
+ flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
flags |= env->dslot;
cs_base = 0;
pc = env->pc;
@@ -236,14 +232,6 @@ static inline TranslationBlock *tb_find_fast(void)
if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags, 0)) {
tb = tb_find_slow(pc, cs_base, flags);
- /* Note: we do it here to avoid a gcc bug on Mac OS X when
- doing it in tb_find_slow */
- if (tb_invalidated_flag) {
- /* as some TB could have been invalidated because
- of memory exceptions while generating the code, we
- must recompute the hash index here */
- next_tb = 0;
- }
}
return tb;
}
@@ -257,6 +245,7 @@ int cpu_exec(CPUState *env1)
int ret, interrupt_request;
TranslationBlock *tb;
uint8_t *tc_ptr;
+ unsigned long next_tb;
if (cpu_halted(env1) == EXCP_HALTED)
return EXCP_HALTED;
@@ -527,7 +516,15 @@ int cpu_exec(CPUState *env1)
next_tb = 0;
}
#elif defined(TARGET_CRIS)
- if (interrupt_request & CPU_INTERRUPT_HARD) {
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && (env->pregs[PR_CCS] & I_FLAG)) {
+ env->exception_index = EXCP_IRQ;
+ do_interrupt(env);
+ next_tb = 0;
+ }
+ if (interrupt_request & CPU_INTERRUPT_NMI
+ && (env->pregs[PR_CCS] & M_FLAG)) {
+ env->exception_index = EXCP_NMI;
do_interrupt(env);
next_tb = 0;
}
@@ -592,7 +589,16 @@ int cpu_exec(CPUState *env1)
#endif
}
#endif
+ spin_lock(&tb_lock);
tb = tb_find_fast();
+ /* Note: we do it here to avoid a gcc bug on Mac OS X when
+ doing it in tb_find_slow */
+ if (tb_invalidated_flag) {
+ /* as some TB could have been invalidated because
+ of memory exceptions while generating the code, we
+ must recompute the hash index here */
+ next_tb = 0;
+ }
#ifdef DEBUG_EXEC
if ((loglevel & CPU_LOG_EXEC)) {
fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
@@ -609,11 +615,10 @@ int cpu_exec(CPUState *env1)
(env->kqemu_enabled != 2) &&
#endif
tb->page_addr[1] == -1) {
- spin_lock(&tb_lock);
tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
- spin_unlock(&tb_lock);
}
}
+ spin_unlock(&tb_lock);
tc_ptr = tb->tc_ptr;
env->current_tb = tb;
/* execute the generated code */
diff --git a/cutils.c b/cutils.c
index 9848d38ae..3731021dc 100644
--- a/cutils.c
+++ b/cutils.c
@@ -108,41 +108,6 @@ int hex2bin(char ch)
return -1;
}
-void *get_mmap_addr(unsigned long size)
-{
- return NULL;
-}
-
-void qemu_free(void *ptr)
-{
- free(ptr);
-}
-
-void *qemu_malloc(size_t size)
-{
- return malloc(size);
-}
-
-void *qemu_mallocz(size_t size)
-{
- void *ptr;
- ptr = qemu_malloc(size);
- if (!ptr)
- return NULL;
- memset(ptr, 0, size);
- return ptr;
-}
-
-char *qemu_strdup(const char *str)
-{
- char *ptr;
- ptr = qemu_malloc(strlen(str) + 1);
- if (!ptr)
- return NULL;
- strcpy(ptr, str);
- return ptr;
-}
-
char *urldecode(const char *ptr)
{
char *ret;
diff --git a/dyngen-exec.h b/dyngen-exec.h
index f51d36328..92c1ae8e6 100644
--- a/dyngen-exec.h
+++ b/dyngen-exec.h
@@ -36,9 +36,9 @@ typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
// Linux/Sparc64 defines uint64_t
-#if !(defined (__sparc_v9__) && defined(__linux__))
+#if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
/* XXX may be done for all 64 bits targets ? */
-#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__)
+#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__)
typedef unsigned long uint64_t;
#else
typedef unsigned long long uint64_t;
@@ -54,7 +54,7 @@ typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
// Linux/Sparc64 defines int64_t
-#if !(defined (__sparc_v9__) && defined(__linux__))
+#if !(defined (__sparc_v9__) && defined(__linux__)) && !(defined(__APPLE__) && defined(__x86_64__))
#if defined (__x86_64__) || defined(__ia64) || defined(__s390x__) || defined(__alpha__)
typedef signed long int64_t;
#else
diff --git a/dyngen.c b/dyngen.c
index d05c623d7..2b70ded3a 100644
--- a/dyngen.c
+++ b/dyngen.c
@@ -1963,6 +1963,14 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
break;
case R_PPC_REL24:
/* warning: must be at 32 MB distancy */
+ fprintf(outfile, "{\n"
+ " long disp = (%s - (long)(gen_code_ptr + %d) + %d);\n"
+ " if ((disp << 6) >> 6 != disp) {;\n"
+ " fprintf(stderr, \"Branch target is too far away\\n\");"
+ " abort();\n"
+ " }\n"
+ "}\n",
+ relname, reloc_offset, addend);
fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = (*(uint32_t *)(gen_code_ptr + %d) & ~0x03fffffc) | ((%s - (long)(gen_code_ptr + %d) + %d) & 0x03fffffc);\n",
reloc_offset, reloc_offset, relname, reloc_offset, addend);
break;
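
The check emitted above guards the R_PPC_REL24 relocation: the PowerPC "b" instruction holds a signed 26-bit, word-aligned displacement, roughly +/-32 MB, and "(disp << 6) >> 6 != disp" asks whether the displacement survives sign-extension from bit 25 (for R_PPC_REL24 the host is 32-bit PowerPC, where long is 32 bits wide). A small stand-alone illustration, not part of the patch, written with fully defined arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Equivalent to the emitted "(disp << 6) >> 6 != disp" test: keep the
       low 26 bits, sign-extend from bit 25, and see whether the value is
       unchanged. */
    static int fits_rel24(int32_t disp)
    {
        int32_t low26 = disp & 0x03ffffff;
        int32_t sext  = (low26 ^ 0x02000000) - 0x02000000;
        return sext == disp;
    }

    int main(void)
    {
        printf("%d\n", fits_rel24(0x01fffffc));   /* largest forward branch: 1 */
        printf("%d\n", fits_rel24(0x02000000));   /* one word past +32 MB: 0 */
        printf("%d\n", fits_rel24(-0x02000000));  /* most negative in-range: 1 */
        return 0;
    }
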
diff --git a/exec-all.h b/exec-all.h
index 582277f81..62a939487 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -184,32 +184,37 @@ extern int code_gen_max_blocks;
#if defined(USE_DIRECT_JUMP)
#if defined(__powerpc__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop);
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
- uint32_t val, *ptr;
+ /* This must be in concord with INDEX_op_goto_tb inside tcg_out_op */
+ uint32_t *ptr;
long disp = addr - jmp_addr;
+ unsigned long patch_size;
ptr = (uint32_t *)jmp_addr;
- val = *ptr;
if ((disp << 6) >> 6 != disp) {
- uint16_t *p1;
-
- p1 = (uint16_t *) ptr;
- *ptr = (val & ~0x03fffffc) | 4;
- p1[3] = addr >> 16;
- p1[5] = addr & 0xffff;
+ ptr[0] = 0x3c000000 | (addr >> 16); /* lis 0,addr@ha */
+ ptr[1] = 0x60000000 | (addr & 0xffff); /* la 0,addr@l(0) */
+ ptr[2] = 0x7c0903a6; /* mtctr 0 */
+ ptr[3] = 0x4e800420; /* brctr */
+ patch_size = 16;
} else {
/* patch the branch destination */
- val = (val & ~0x03fffffc) | (disp & 0x03fffffc);
- *ptr = val;
+ if (disp != 16) {
+ *ptr = 0x48000000 | (disp & 0x03fffffc); /* b disp */
+ patch_size = 4;
+ } else {
+ ptr[0] = 0x60000000; /* nop */
+ ptr[1] = 0x60000000;
+ ptr[2] = 0x60000000;
+ ptr[3] = 0x60000000;
+ patch_size = 16;
+ }
}
/* flush icache */
- asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
- asm volatile ("sync" : : : "memory");
- asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
- asm volatile ("sync" : : : "memory");
- asm volatile ("isync" : : : "memory");
+ flush_icache_range(jmp_addr, jmp_addr + patch_size);
}
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
@@ -297,216 +302,7 @@ extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
-#if defined(__hppa__)
-
-typedef int spinlock_t[4];
-
-#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
-
-static inline void resetlock (spinlock_t *p)
-{
- (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
-}
-
-#else
-
-typedef int spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED 0
-
-static inline void resetlock (spinlock_t *p)
-{
- *p = SPIN_LOCK_UNLOCKED;
-}
-
-#endif
-
-#if defined(__powerpc__)
-static inline int testandset (int *p)
-{
- int ret;
- __asm__ __volatile__ (
- "0: lwarx %0,0,%1\n"
- " xor. %0,%3,%0\n"
- " bne 1f\n"
- " stwcx. %2,0,%1\n"
- " bne- 0b\n"
- "1: "
- : "=&r" (ret)
- : "r" (p), "r" (1), "r" (0)
- : "cr0", "memory");
- return ret;
-}
-#elif defined(__i386__)
-static inline int testandset (int *p)
-{
- long int readval = 0;
-
- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
- : "+m" (*p), "+a" (readval)
- : "r" (1)
- : "cc");
- return readval;
-}
-#elif defined(__x86_64__)
-static inline int testandset (int *p)
-{
- long int readval = 0;
-
- __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
- : "+m" (*p), "+a" (readval)
- : "r" (1)
- : "cc");
- return readval;
-}
-#elif defined(__s390__)
-static inline int testandset (int *p)
-{
- int ret;
-
- __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
- " jl 0b"
- : "=&d" (ret)
- : "r" (1), "a" (p), "0" (*p)
- : "cc", "memory" );
- return ret;
-}
-#elif defined(__alpha__)
-static inline int testandset (int *p)
-{
- int ret;
- unsigned long one;
-
- __asm__ __volatile__ ("0: mov 1,%2\n"
- " ldl_l %0,%1\n"
- " stl_c %2,%1\n"
- " beq %2,1f\n"
- ".subsection 2\n"
- "1: br 0b\n"
- ".previous"
- : "=r" (ret), "=m" (*p), "=r" (one)
- : "m" (*p));
- return ret;
-}
-#elif defined(__sparc__)
-static inline int testandset (int *p)
-{
- int ret;
-
- __asm__ __volatile__("ldstub [%1], %0"
- : "=r" (ret)
- : "r" (p)
- : "memory");
-
- return (ret ? 1 : 0);
-}
-#elif defined(__arm__)
-static inline int testandset (int *spinlock)
-{
- register unsigned int ret;
- __asm__ __volatile__("swp %0, %1, [%2]"
- : "=r"(ret)
- : "0"(1), "r"(spinlock));
-
- return ret;
-}
-#elif defined(__mc68000)
-static inline int testandset (int *p)
-{
- char ret;
- __asm__ __volatile__("tas %1; sne %0"
- : "=r" (ret)
- : "m" (p)
- : "cc","memory");
- return ret;
-}
-#elif defined(__hppa__)
-
-/* Because malloc only guarantees 8-byte alignment for malloc'd data,
- and GCC only guarantees 8-byte alignment for stack locals, we can't
- be assured of 16-byte alignment for atomic lock data even if we
- specify "__attribute ((aligned(16)))" in the type declaration. So,
- we use a struct containing an array of four ints for the atomic lock
- type and dynamically select the 16-byte aligned int from the array
- for the semaphore. */
-#define __PA_LDCW_ALIGNMENT 16
-static inline void *ldcw_align (void *p) {
- unsigned long a = (unsigned long)p;
- a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
- return (void *)a;
-}
-
-static inline int testandset (spinlock_t *p)
-{
- unsigned int ret;
- p = ldcw_align(p);
- __asm__ __volatile__("ldcw 0(%1),%0"
- : "=r" (ret)
- : "r" (p)
- : "memory" );
- return !ret;
-}
-
-#elif defined(__ia64)
-
-#include "ia64intrin.h"
-static inline int testandset (int *p)
-{
- return (int)cmpxchg_acq(p,0,1);
-}
-#elif defined(__mips__)
-static inline int testandset (int *p)
-{
- int ret;
-
- __asm__ __volatile__ (
- " .set push \n"
- " .set noat \n"
- " .set mips2 \n"
- "1: li $1, 1 \n"
- " ll %0, %1 \n"
- " sc $1, %1 \n"
- " beqz $1, 1b \n"
- " .set pop "
- : "=r" (ret), "+R" (*p)
- :
- : "memory");
-
- return ret;
-}
-#else
-#error unimplemented CPU support
-#endif
-
-#if defined(CONFIG_USER_ONLY)
-static inline void spin_lock(spinlock_t *lock)
-{
- while (testandset(lock));
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
- resetlock(lock);
-}
-
-static inline int spin_trylock(spinlock_t *lock)
-{
- return !testandset(lock);
-}
-#else
-static inline void spin_lock(spinlock_t *lock)
-{
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-}
-
-static inline int spin_trylock(spinlock_t *lock)
-{
- return 1;
-}
-#endif
+#include "qemu-lock.h"
extern spinlock_t tb_lock;
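
The spinlock type, testandset() implementations and spin_lock()/spin_unlock() helpers removed above move into the new qemu-lock.h header (per the #include added here and the 249-line qemu-lock.h entry in the diffstat). A minimal usage sketch, not taken from the patch, of how the primitives combine; under CONFIG_USER_ONLY the lock really spins on testandset(), while the system emulator compiles the lock functions to no-ops:

    static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

    static void locked_increment(int *counter)
    {
        spin_lock(&example_lock);    /* busy-waits on testandset() in user mode */
        (*counter)++;                /* critical section */
        spin_unlock(&example_lock);  /* resetlock() marks the lock free again */
    }
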
diff --git a/exec.c b/exec.c
index 544b963e5..867be048a 100644
--- a/exec.c
+++ b/exec.c
@@ -100,6 +100,7 @@ unsigned long code_gen_buffer_size;
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
+#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
@@ -107,6 +108,7 @@ uint8_t *phys_ram_dirty;
uint8_t *bios_mem;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
+#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
@@ -126,7 +128,7 @@ typedef struct PageDesc {
} PageDesc;
typedef struct PhysPageDesc {
- /* offset in host memory of the page + io_index in the low 12 bits */
+ /* offset in host memory of the page + io_index in the low bits */
ram_addr_t phys_offset;
} PhysPageDesc;
@@ -144,8 +146,6 @@ typedef struct PhysPageDesc {
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
-static void io_mem_init(void);
-
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
@@ -155,12 +155,15 @@ unsigned long qemu_host_page_mask;
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
+#if !defined(CONFIG_USER_ONLY)
+static void io_mem_init(void);
+
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+static int io_mem_nb;
char io_mem_used[IO_MEM_NB_ENTRIES];
-#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif
@@ -268,13 +271,33 @@ static inline PageDesc *page_find_alloc(target_ulong index)
{
PageDesc **lp, *p;
+#if TARGET_LONG_BITS > 32
+ /* Host memory outside guest VM. For 32-bit targets we have already
+ excluded high addresses. */
+ if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
+ return NULL;
+#endif
lp = &l1_map[index >> L2_BITS];
p = *lp;
if (!p) {
/* allocate if not found */
- p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
- memset(p, 0, sizeof(PageDesc) * L2_SIZE);
+#if defined(CONFIG_USER_ONLY)
+ unsigned long addr;
+ size_t len = sizeof(PageDesc) * L2_SIZE;
+ /* Don't use qemu_malloc because it may recurse. */
+ p = mmap(0, len, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
*lp = p;
+ addr = h2g(p);
+ if (addr == (target_ulong)addr) {
+ page_set_flags(addr & TARGET_PAGE_MASK,
+ TARGET_PAGE_ALIGN(addr + len),
+ PAGE_RESERVED);
+ }
+#else
+ p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
+ *lp = p;
+#endif
}
return p + (index & (L2_SIZE - 1));
}
@@ -415,7 +438,9 @@ void cpu_exec_init_all(unsigned long tb_size)
code_gen_alloc(tb_size);
code_gen_ptr = code_gen_buffer;
page_init();
+#if !defined(CONFIG_USER_ONLY)
io_mem_init();
+#endif
}
void cpu_exec_init(CPUState *env)
@@ -709,10 +734,9 @@ static void build_page_bitmap(PageDesc *p)
int n, tb_start, tb_end;
TranslationBlock *tb;
- p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
+ p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
if (!p->code_bitmap)
return;
- memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
tb = p->first_tb;
while (tb != NULL) {
@@ -1196,7 +1220,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
#endif
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
int i;
@@ -1209,6 +1233,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
i = env->nb_watchpoints++;
env->watchpoint[i].vaddr = addr;
+ env->watchpoint[i].type = type;
tlb_flush_page(env, addr);
/* FIXME: This flush is needed because of the hack to make memory ops
terminate the TB. It can be removed once the proper IO trap and
@@ -1361,13 +1386,22 @@ void cpu_set_log_filename(const char *filename)
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
+#if !defined(USE_NPTL)
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+#endif
+ /* FIXME: This is probably not threadsafe. A different thread could
+ be in the middle of a read-modify-write operation. */
env->interrupt_request |= mask;
if (kvm_enabled() && !qemu_kvm_irqchip_in_kernel())
kvm_update_interrupt_request(env);
-
+#if defined(USE_NPTL)
+ /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
+ problem and hope the cpu will stop of its own accord. For userspace
+ emulation this often isn't actually as bad as it sounds. Often
+ signals are used primarily to interrupt blocking syscalls. */
+#else
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
tb = env->current_tb;
@@ -1376,6 +1410,7 @@ void cpu_interrupt(CPUState *env, int mask)
tb_reset_jump_recursive(tb);
resetlock(&interrupt_lock);
}
+#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
@@ -1548,9 +1583,6 @@ void tlb_flush(CPUState *env, int flush_global)
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
-#if !defined(CONFIG_SOFTMMU)
- munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
-#endif
#ifdef USE_KQEMU
if (env->kqemu_enabled) {
kqemu_flush(env, flush_global);
@@ -1597,10 +1629,6 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
tlb_flush_jmp_cache(env, addr);
-#if !defined(CONFIG_SOFTMMU)
- if (addr < MMAP_AREA_END)
- munmap((void *)addr, TARGET_PAGE_SIZE);
-#endif
#ifdef USE_KQEMU
if (env->kqemu_enabled) {
kqemu_flush_page(env, addr);
@@ -1632,7 +1660,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if ((addr - start) < length) {
- tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+ tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
}
}
}
@@ -1686,34 +1714,6 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
#endif
#endif
}
-
-#if !defined(CONFIG_SOFTMMU)
- /* XXX: this is expensive */
- {
- VirtPageDesc *p;
- int j;
- target_ulong addr;
-
- for(i = 0; i < L1_SIZE; i++) {
- p = l1_virt_map[i];
- if (p) {
- addr = i << (TARGET_PAGE_BITS + L2_BITS);
- for(j = 0; j < L2_SIZE; j++) {
- if (p->valid_tag == virt_valid_tag &&
- p->phys_addr >= start && p->phys_addr < end &&
- (p->prot & PROT_WRITE)) {
- if (addr < MMAP_AREA_END) {
- mprotect((void *)addr, TARGET_PAGE_SIZE,
- p->prot & ~PROT_WRITE);
- }
- }
- addr += TARGET_PAGE_SIZE;
- p++;
- }
- }
- }
- }
-#endif
}
int cpu_physical_memory_set_dirty_tracking(int enable)
@@ -1739,7 +1739,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
tlb_entry->addend - (unsigned long)phys_ram_base;
if (!cpu_physical_memory_is_dirty(ram_addr)) {
- tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
+ tlb_entry->addr_write |= TLB_NOTDIRTY;
}
}
}
@@ -1762,33 +1762,26 @@ void cpu_tlb_update_dirty(CPUState *env)
#endif
}
-static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
- unsigned long start)
+static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
- unsigned long addr;
- if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
- addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
- if (addr == start) {
- tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
- }
- }
+ if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
+ tlb_entry->addr_write = vaddr;
}
-/* update the TLB corresponding to virtual page vaddr and phys addr
- addr so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env,
- unsigned long addr, target_ulong vaddr)
+/* update the TLB corresponding to virtual page vaddr
+ so that it is no longer dirty */
+static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
int i;
- addr &= TARGET_PAGE_MASK;
+ vaddr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_set_dirty1(&env->tlb_table[0][i], addr);
- tlb_set_dirty1(&env->tlb_table[1][i], addr);
+ tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
+ tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
- tlb_set_dirty1(&env->tlb_table[2][i], addr);
+ tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
- tlb_set_dirty1(&env->tlb_table[3][i], addr);
+ tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
@@ -1805,10 +1798,12 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
unsigned long pd;
unsigned int index;
target_ulong address;
+ target_ulong code_address;
target_phys_addr_t addend;
int ret;
CPUTLBEntry *te;
int i;
+ target_phys_addr_t iotlb;
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
@@ -1822,154 +1817,73 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
#endif
ret = 0;
-#if !defined(CONFIG_SOFTMMU)
- if (is_softmmu)
-#endif
- {
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
- /* IO memory case */
- address = vaddr | pd;
- addend = paddr;
- } else {
- /* standard memory */
- address = vaddr;
- addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
- }
-
- /* Make accesses to pages with watchpoints go via the
- watchpoint trap routines. */
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
- if (address & ~TARGET_PAGE_MASK) {
- env->watchpoint[i].addend = 0;
- address = vaddr | io_mem_watch;
- } else {
- env->watchpoint[i].addend = pd - paddr +
- (unsigned long) phys_ram_base;
- /* TODO: Figure out how to make read watchpoints coexist
- with code. */
- pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
- }
- }
- }
-
- index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- addend -= vaddr;
- te = &env->tlb_table[mmu_idx][index];
- te->addend = addend;
- if (prot & PAGE_READ) {
- te->addr_read = address;
- } else {
- te->addr_read = -1;
+ address = vaddr;
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
+ /* IO memory case (romd handled later) */
+ address |= TLB_MMIO;
+ }
+ addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
+ if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
+ /* Normal RAM. */
+ iotlb = pd & TARGET_PAGE_MASK;
+ if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
+ iotlb |= IO_MEM_NOTDIRTY;
+ else
+ iotlb |= IO_MEM_ROM;
+ } else {
+ /* IO handlers are currently passed a physical address.
+ It would be nice to pass an offset from the base address
+ of that region. This would avoid having to special case RAM,
+ and avoid full address decoding in every device.
+ We can't use the high bits of pd for this because
+ IO_MEM_ROMD uses these as a ram address. */
+ iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
+ }
+
+ code_address = address;
+ /* Make accesses to pages with watchpoints go via the
+ watchpoint trap routines. */
+ for (i = 0; i < env->nb_watchpoints; i++) {
+ if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
+ iotlb = io_mem_watch + paddr;
+ /* TODO: The memory case can be optimized by not trapping
+ reads of pages with a write breakpoint. */
+ address |= TLB_MMIO;
}
+ }
- if (prot & PAGE_EXEC) {
- te->addr_code = address;
- } else {
- te->addr_code = -1;
- }
- if (prot & PAGE_WRITE) {
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
- (pd & IO_MEM_ROMD)) {
- /* write access calls the I/O callback */
- te->addr_write = vaddr |
- (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
- } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
- !cpu_physical_memory_is_dirty(pd)) {
- te->addr_write = vaddr | IO_MEM_NOTDIRTY;
- } else {
- te->addr_write = address;
- }
- } else {
- te->addr_write = -1;
- }
+ index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ env->iotlb[mmu_idx][index] = iotlb - vaddr;
+ te = &env->tlb_table[mmu_idx][index];
+ te->addend = addend - vaddr;
+ if (prot & PAGE_READ) {
+ te->addr_read = address;
+ } else {
+ te->addr_read = -1;
}
-#if !defined(CONFIG_SOFTMMU)
- else {
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
- /* IO access: no mapping is done as it will be handled by the
- soft MMU */
- if (!(env->hflags & HF_SOFTMMU_MASK))
- ret = 2;
- } else {
- void *map_addr;
- if (vaddr >= MMAP_AREA_END) {
- ret = 2;
- } else {
- if (prot & PROT_WRITE) {
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
-#if defined(TARGET_HAS_SMC) || 1
- first_tb ||
-#endif
- ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
- !cpu_physical_memory_is_dirty(pd))) {
- /* ROM: we do as if code was inside */
- /* if code is present, we only map as read only and save the
- original mapping */
- VirtPageDesc *vp;
-
- vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
- vp->phys_addr = pd;
- vp->prot = prot;
- vp->valid_tag = virt_valid_tag;
- prot &= ~PAGE_WRITE;
- }
- }
- map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
- MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
- if (map_addr == MAP_FAILED) {
- cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
- paddr, vaddr);
- }
- }
+ if (prot & PAGE_EXEC) {
+ te->addr_code = code_address;
+ } else {
+ te->addr_code = -1;
+ }
+ if (prot & PAGE_WRITE) {
+ if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+ (pd & IO_MEM_ROMD)) {
+ /* Write access calls the I/O callback. */
+ te->addr_write = address | TLB_MMIO;
+ } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+ !cpu_physical_memory_is_dirty(pd)) {
+ te->addr_write = address | TLB_NOTDIRTY;
+ } else {
+ te->addr_write = address;
}
+ } else {
+ te->addr_write = -1;
}
-#endif
return ret;
}
-/* called from signal handler: invalidate the code and unprotect the
- page. Return TRUE if the fault was succesfully handled. */
-int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
-{
-#if !defined(CONFIG_SOFTMMU)
- VirtPageDesc *vp;
-
-#if defined(DEBUG_TLB)
- printf("page_unprotect: addr=0x%08x\n", addr);
-#endif
- addr &= TARGET_PAGE_MASK;
-
- /* if it is not mapped, no need to worry here */
- if (addr >= MMAP_AREA_END)
- return 0;
- vp = virt_page_find(addr >> TARGET_PAGE_BITS);
- if (!vp)
- return 0;
- /* NOTE: in this case, validate_tag is _not_ tested as it
- validates only the code TLB */
- if (vp->valid_tag != virt_valid_tag)
- return 0;
- if (!(vp->prot & PAGE_WRITE))
- return 0;
-#if defined(DEBUG_TLB)
- printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
- addr, vp->phys_addr, vp->prot);
-#endif
- if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
- cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
- (unsigned long)addr, vp->prot);
- /* set the dirty bit */
- phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
- /* flush the code inside */
- tb_invalidate_phys_page(vp->phys_addr, pc, puc);
- return 1;
-#else
- return 0;
-#endif
-}
-
#else
void tlb_flush(CPUState *env, int flush_global)
@@ -2053,9 +1967,12 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
end = TARGET_PAGE_ALIGN(end);
if (flags & PAGE_WRITE)
flags |= PAGE_WRITE_ORG;
- spin_lock(&tb_lock);
for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
p = page_find_alloc(addr >> TARGET_PAGE_BITS);
+ /* We may be called for host regions that are outside guest
+ address space. */
+ if (!p)
+ return;
/* if the write protection is set, then we invalidate the code
inside */
if (!(p->flags & PAGE_WRITE) &&
@@ -2065,7 +1982,6 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
}
p->flags = flags;
}
- spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
@@ -2159,6 +2075,7 @@ static inline void tlb_set_dirty(CPUState *env,
}
#endif /* defined(CONFIG_USER_ONLY) */
+#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
@@ -2326,11 +2243,10 @@ static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
unassigned_mem_writeb,
};
-static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
+ uint32_t val)
{
- unsigned long ram_addr;
int dirty_flags;
- ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
@@ -2338,7 +2254,7 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stb_p((uint8_t *)(long)addr, val);
+ stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2349,14 +2265,13 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}
-static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
+ uint32_t val)
{
- unsigned long ram_addr;
int dirty_flags;
- ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
@@ -2364,7 +2279,7 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stw_p((uint8_t *)(long)addr, val);
+ stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2375,14 +2290,13 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}
-static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
+ uint32_t val)
{
- unsigned long ram_addr;
int dirty_flags;
- ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
@@ -2390,7 +2304,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stl_p((uint8_t *)(long)addr, val);
+ stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
@@ -2401,7 +2315,7 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
@@ -2416,68 +2330,63 @@ static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
notdirty_mem_writel,
};
-#if defined(CONFIG_SOFTMMU)
+/* Generate a debug exception if a watchpoint has been hit. */
+static void check_watchpoint(int offset, int flags)
+{
+ CPUState *env = cpu_single_env;
+ target_ulong vaddr;
+ int i;
+
+ vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
+ for (i = 0; i < env->nb_watchpoints; i++) {
+ if (vaddr == env->watchpoint[i].vaddr
+ && (env->watchpoint[i].type & flags)) {
+ env->watchpoint_hit = i + 1;
+ cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+ break;
+ }
+ }
+}
+
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
return ldub_phys(addr);
}
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
return lduw_phys(addr);
}
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
return ldl_phys(addr);
}
-/* Generate a debug exception if a watchpoint has been hit.
- Returns the real physical address of the access. addr will be a host
- address in case of a RAM location. */
-static target_ulong check_watchpoint(target_phys_addr_t addr)
-{
- CPUState *env = cpu_single_env;
- target_ulong watch;
- target_ulong retaddr;
- int i;
-
- retaddr = addr;
- for (i = 0; i < env->nb_watchpoints; i++) {
- watch = env->watchpoint[i].vaddr;
- if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
- retaddr = addr - env->watchpoint[i].addend;
- if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
- cpu_single_env->watchpoint_hit = i + 1;
- cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
- break;
- }
- }
- }
- return retaddr;
-}
-
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- addr = check_watchpoint(addr);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
stb_phys(addr, val);
}
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- addr = check_watchpoint(addr);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
stw_phys(addr, val);
}
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- addr = check_watchpoint(addr);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
stl_phys(addr, val);
}
@@ -2492,7 +2401,6 @@ static CPUWriteMemoryFunc *watch_mem_write[3] = {
watch_mem_writew,
watch_mem_writel,
};
-#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
unsigned int len)
@@ -2664,10 +2572,8 @@ static void io_mem_init(void)
for (i=0; i<5; i++)
io_mem_used[i] = 1;
-#if defined(CONFIG_SOFTMMU)
- io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
+ io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
watch_mem_write, NULL);
-#endif
/* alloc dirty bits array */
phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
@@ -2730,6 +2636,8 @@ CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
return io_mem_read[io_index >> IO_MEM_SHIFT];
}
+#endif /* !defined(CONFIG_USER_ONLY) */
+
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
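The new watchpoint path above rebuilds the guest virtual address from the page part of the faulting write address plus the in-page offset of the I/O access, then filters by access type before raising the debug interrupt. A minimal standalone sketch of that matching rule, using illustrative stand-in types rather than the QEMU structures:

/* Hedged sketch of the check_watchpoint() matching rule.
   watchpoint_t, wp_list and nb_wp are illustrative stand-ins. */
typedef struct {
    unsigned long vaddr;  /* guest virtual address being watched */
    int type;             /* PAGE_READ / PAGE_WRITE style flags */
} watchpoint_t;

static int watchpoint_match(const watchpoint_t *wp_list, int nb_wp,
                            unsigned long write_vaddr,  /* env->mem_write_vaddr */
                            unsigned long page_mask,    /* TARGET_PAGE_MASK */
                            int offset, int flags)
{
    unsigned long vaddr = (write_vaddr & page_mask) + offset;
    int i;

    for (i = 0; i < nb_wp; i++) {
        if (vaddr == wp_list[i].vaddr && (wp_list[i].type & flags))
            return i + 1;   /* like env->watchpoint_hit */
    }
    return 0;               /* no hit */
}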
diff --git a/gdbstub.c b/gdbstub.c
index 3cec6e29f..86428d1d0 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -470,40 +470,49 @@ static void cpu_gdb_write_registers(CPUState *env, uint8_t *mem_buf, int size)
ppc_store_xer(env, tswapl(registers[101]));
}
#elif defined (TARGET_SPARC)
+#ifdef TARGET_ABI32
+#define tswap_abi(val) tswap32(val &0xffffffff)
+#else
+#define tswap_abi(val) tswapl(val)
+#endif
static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
{
+#ifdef TARGET_ABI32
+ abi_ulong *registers = (abi_ulong *)mem_buf;
+#else
target_ulong *registers = (target_ulong *)mem_buf;
+#endif
int i;
/* fill in g0..g7 */
for(i = 0; i < 8; i++) {
- registers[i] = tswapl(env->gregs[i]);
+ registers[i] = tswap_abi(env->gregs[i]);
}
/* fill in register window */
for(i = 0; i < 24; i++) {
- registers[i + 8] = tswapl(env->regwptr[i]);
+ registers[i + 8] = tswap_abi(env->regwptr[i]);
}
-#ifndef TARGET_SPARC64
+#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
/* fill in fprs */
for (i = 0; i < 32; i++) {
- registers[i + 32] = tswapl(*((uint32_t *)&env->fpr[i]));
+ registers[i + 32] = tswap_abi(*((uint32_t *)&env->fpr[i]));
}
/* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
- registers[64] = tswapl(env->y);
+ registers[64] = tswap_abi(env->y);
{
- target_ulong tmp;
+ uint32_t tmp;
- tmp = GET_PSR(env);
- registers[65] = tswapl(tmp);
+ tmp = GET_PSR(env);
+ registers[65] = tswap32(tmp);
}
- registers[66] = tswapl(env->wim);
- registers[67] = tswapl(env->tbr);
- registers[68] = tswapl(env->pc);
- registers[69] = tswapl(env->npc);
- registers[70] = tswapl(env->fsr);
+ registers[66] = tswap_abi(env->wim);
+ registers[67] = tswap_abi(env->tbr);
+ registers[68] = tswap_abi(env->pc);
+ registers[69] = tswap_abi(env->npc);
+ registers[70] = tswap_abi(env->fsr);
registers[71] = 0; /* csr */
registers[72] = 0;
- return 73 * sizeof(target_ulong);
+ return 73 * sizeof(uint32_t);
#else
/* fill in fprs */
for (i = 0; i < 64; i += 2) {
@@ -528,30 +537,34 @@ static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
static void cpu_gdb_write_registers(CPUState *env, uint8_t *mem_buf, int size)
{
+#ifdef TARGET_ABI32
+ abi_ulong *registers = (abi_ulong *)mem_buf;
+#else
target_ulong *registers = (target_ulong *)mem_buf;
+#endif
int i;
/* fill in g0..g7 */
for(i = 0; i < 7; i++) {
- env->gregs[i] = tswapl(registers[i]);
+ env->gregs[i] = tswap_abi(registers[i]);
}
/* fill in register window */
for(i = 0; i < 24; i++) {
- env->regwptr[i] = tswapl(registers[i + 8]);
+ env->regwptr[i] = tswap_abi(registers[i + 8]);
}
-#ifndef TARGET_SPARC64
+#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
/* fill in fprs */
for (i = 0; i < 32; i++) {
- *((uint32_t *)&env->fpr[i]) = tswapl(registers[i + 32]);
+ *((uint32_t *)&env->fpr[i]) = tswap_abi(registers[i + 32]);
}
/* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
- env->y = tswapl(registers[64]);
- PUT_PSR(env, tswapl(registers[65]));
- env->wim = tswapl(registers[66]);
- env->tbr = tswapl(registers[67]);
- env->pc = tswapl(registers[68]);
- env->npc = tswapl(registers[69]);
- env->fsr = tswapl(registers[70]);
+ env->y = tswap_abi(registers[64]);
+ PUT_PSR(env, tswap_abi(registers[65]));
+ env->wim = tswap_abi(registers[66]);
+ env->tbr = tswap_abi(registers[67]);
+ env->pc = tswap_abi(registers[68]);
+ env->npc = tswap_abi(registers[69]);
+ env->fsr = tswap_abi(registers[70]);
#else
for (i = 0; i < 64; i += 2) {
uint64_t tmp;
@@ -575,6 +588,7 @@ static void cpu_gdb_write_registers(CPUState *env, uint8_t *mem_buf, int size)
env->y = tswapl(registers[69]);
#endif
}
+#undef tswap_abi
#elif defined (TARGET_ARM)
static int cpu_gdb_read_registers(CPUState *env, uint8_t *mem_buf)
{
@@ -1110,21 +1124,37 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
if (*p == ',')
p++;
len = strtoull(p, (char **)&p, 16);
- if (type == 0 || type == 1) {
+ switch (type) {
+ case 0:
+ case 1:
if (cpu_breakpoint_insert(env, addr) < 0)
goto breakpoint_error;
put_packet(s, "OK");
+ break;
#ifndef CONFIG_USER_ONLY
- } else if (type == 2) {
- if (cpu_watchpoint_insert(env, addr) < 0)
+ case 2:
+ type = PAGE_WRITE;
+ goto insert_watchpoint;
+ case 3:
+ type = PAGE_READ;
+ goto insert_watchpoint;
+ case 4:
+ type = PAGE_READ | PAGE_WRITE;
+ insert_watchpoint:
+ if (cpu_watchpoint_insert(env, addr, type) < 0)
goto breakpoint_error;
put_packet(s, "OK");
+ break;
#endif
- } else {
- breakpoint_error:
- put_packet(s, "E22");
+ default:
+ put_packet(s, "");
+ break;
}
break;
+ breakpoint_error:
+ put_packet(s, "E22");
+ break;
+
case 'z':
type = strtoul(p, (char **)&p, 16);
if (*p == ',')
@@ -1137,12 +1167,12 @@ static int gdb_handle_packet(GDBState *s, CPUState *env, const char *line_buf)
cpu_breakpoint_remove(env, addr);
put_packet(s, "OK");
#ifndef CONFIG_USER_ONLY
- } else if (type == 2) {
+ } else if (type >= 2 && type <= 4) {
cpu_watchpoint_remove(env, addr);
put_packet(s, "OK");
#endif
} else {
- goto breakpoint_error;
+ put_packet(s, "");
}
break;
case 'q':
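The 'Z'/'z' packet handling above maps GDB watchpoint types 2, 3 and 4 onto write, read and access flags before calling cpu_watchpoint_insert(); unknown types get an empty reply. A small sketch of that mapping, with stand-in flag values rather than the real PAGE_READ/PAGE_WRITE constants:

/* Hedged sketch of the GDB watchpoint-type to access-flag mapping.
   The *_SKETCH values are placeholders, not QEMU's definitions. */
#define PAGE_READ_SKETCH  0x1
#define PAGE_WRITE_SKETCH 0x2

enum { GDB_WP_WRITE = 2, GDB_WP_READ = 3, GDB_WP_ACCESS = 4 };

static int gdb_type_to_flags(int type)
{
    switch (type) {
    case GDB_WP_WRITE:  return PAGE_WRITE_SKETCH;
    case GDB_WP_READ:   return PAGE_READ_SKETCH;
    case GDB_WP_ACCESS: return PAGE_READ_SKETCH | PAGE_WRITE_SKETCH;
    default:            return 0;   /* unsupported: reply with an empty packet */
    }
}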
diff --git a/host-utils.h b/host-utils.h
index 0f7744511..b1e799ed3 100644
--- a/host-utils.h
+++ b/host-utils.h
@@ -23,6 +23,8 @@
* THE SOFTWARE.
*/
+#include "osdep.h"
+
#if defined(__x86_64__)
#define __HAVE_FAST_MULU64__
static always_inline void mulu64 (uint64_t *plow, uint64_t *phigh,
diff --git a/hw/ac97.c b/hw/ac97.c
index ba4ea1e51..55c65df4f 100644
--- a/hw/ac97.c
+++ b/hw/ac97.c
@@ -158,6 +158,7 @@ typedef struct AC97LinkState {
SWVoiceIn *voice_pi;
SWVoiceOut *voice_po;
SWVoiceIn *voice_mc;
+ int invalid_freq[3];
uint8_t silence[128];
uint32_t base[2];
int bup_flag;
@@ -360,39 +361,61 @@ static void open_voice (AC97LinkState *s, int index, int freq)
as.fmt = AUD_FMT_S16;
as.endianness = 0;
- switch (index) {
- case PI_INDEX:
- s->voice_pi = AUD_open_in (
- &s->card,
- s->voice_pi,
- "ac97.pi",
- s,
- pi_callback,
- &as
- );
- break;
+ if (freq > 0) {
+ s->invalid_freq[index] = 0;
+ switch (index) {
+ case PI_INDEX:
+ s->voice_pi = AUD_open_in (
+ &s->card,
+ s->voice_pi,
+ "ac97.pi",
+ s,
+ pi_callback,
+ &as
+ );
+ break;
- case PO_INDEX:
- s->voice_po = AUD_open_out (
- &s->card,
- s->voice_po,
- "ac97.po",
- s,
- po_callback,
- &as
- );
- break;
+ case PO_INDEX:
+ s->voice_po = AUD_open_out (
+ &s->card,
+ s->voice_po,
+ "ac97.po",
+ s,
+ po_callback,
+ &as
+ );
+ break;
- case MC_INDEX:
- s->voice_mc = AUD_open_in (
- &s->card,
- s->voice_mc,
- "ac97.mc",
- s,
- mc_callback,
- &as
- );
- break;
+ case MC_INDEX:
+ s->voice_mc = AUD_open_in (
+ &s->card,
+ s->voice_mc,
+ "ac97.mc",
+ s,
+ mc_callback,
+ &as
+ );
+ break;
+ }
+ }
+ else {
+ s->invalid_freq[index] = freq;
+ switch (index) {
+ case PI_INDEX:
+ AUD_close_in (&s->card, s->voice_pi);
+ s->voice_pi = NULL;
+ break;
+
+ case PO_INDEX:
+ AUD_close_out (&s->card, s->voice_po);
+ s->voice_po = NULL;
+ break;
+
+ case MC_INDEX:
+ AUD_close_in (&s->card, s->voice_mc);
+ s->voice_mc = NULL;
+ break;
+ }
}
}
@@ -1065,6 +1088,12 @@ static void transfer_audio (AC97LinkState *s, int index, int elapsed)
AC97BusMasterRegs *r = &s->bm_regs[index];
int written = 0, stop = 0;
+ if (s->invalid_freq[index]) {
+ AUD_log ("ac97", "attempt to use voice %d with invalid frequency %d\n",
+ index, s->invalid_freq[index]);
+ return;
+ }
+
if (r->sr & SR_DCH) {
if (r->cr & CR_RPBM) {
switch (index) {
@@ -1161,6 +1190,8 @@ static void ac97_save (QEMUFile *f, void *opaque)
uint8_t active[LAST_INDEX];
AC97LinkState *s = opaque;
+ pci_device_save (s->pci_dev, f);
+
qemu_put_be32s (f, &s->glob_cnt);
qemu_put_be32s (f, &s->glob_sta);
qemu_put_be32s (f, &s->cas);
@@ -1188,13 +1219,18 @@ static void ac97_save (QEMUFile *f, void *opaque)
static int ac97_load (QEMUFile *f, void *opaque, int version_id)
{
+ int ret;
size_t i;
uint8_t active[LAST_INDEX];
AC97LinkState *s = opaque;
- if (version_id != 1)
+ if (version_id != 2)
return -EINVAL;
+ ret = pci_device_load (s->pci_dev, f);
+ if (ret)
+ return ret;
+
qemu_get_be32s (f, &s->glob_cnt);
qemu_get_be32s (f, &s->glob_sta);
qemu_get_be32s (f, &s->cas);
@@ -1341,7 +1377,7 @@ int ac97_init (PCIBus *bus, AudioState *audio)
pci_register_io_region (&d->dev, 0, 256 * 4, PCI_ADDRESS_SPACE_IO, ac97_map);
pci_register_io_region (&d->dev, 1, 64 * 4, PCI_ADDRESS_SPACE_IO, ac97_map);
- register_savevm ("ac97", 0, 1, ac97_save, ac97_load, s);
+ register_savevm ("ac97", 0, 2, ac97_save, ac97_load, s);
qemu_register_reset (ac97_on_reset, s);
AUD_register_card (audio, "ac97", &s->card);
ac97_on_reset (s);
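The ac97 save/load changes above bump the savevm version to 2 because the PCI device state is now serialized ahead of the audio registers, and loads of any other layout are refused. A minimal sketch of that versioned-snapshot pattern; the device struct and the put/get helpers are placeholders, not the QEMU API:

/* Hedged sketch of a versioned save/load pair. All names are placeholders. */
#include <errno.h>
#include <stdint.h>

struct my_dev { void *bus_dev; uint32_t reg0; };

void bus_state_save(void *f, void *dev);
int  bus_state_load(void *f, void *dev);
void put_u32(void *f, uint32_t v);
uint32_t get_u32(void *f);

#define MY_DEV_VERSION 2

static void my_dev_save(void *f, struct my_dev *s)
{
    bus_state_save(f, s->bus_dev);      /* new in version 2: bus state first */
    put_u32(f, s->reg0);
}

static int my_dev_load(void *f, struct my_dev *s, int version_id)
{
    int ret;

    if (version_id != MY_DEV_VERSION)   /* refuse old or unknown layouts */
        return -EINVAL;

    ret = bus_state_load(f, s->bus_dev);
    if (ret)
        return ret;

    s->reg0 = get_u32(f);
    return 0;
}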
diff --git a/hw/audiodev.h b/hw/audiodev.h
index 3c02f19ff..5f4a21123 100644
--- a/hw/audiodev.h
+++ b/hw/audiodev.h
@@ -12,3 +12,6 @@ int GUS_init (AudioState *s, qemu_irq *pic);
/* ac97.c */
int ac97_init (PCIBus *buf, AudioState *s);
+
+/* cs4231a.c */
+int cs4231a_init (AudioState *s, qemu_irq *pic);
diff --git a/hw/cs4231a.c b/hw/cs4231a.c
new file mode 100644
index 000000000..75529089f
--- /dev/null
+++ b/hw/cs4231a.c
@@ -0,0 +1,674 @@
+/*
+ * QEMU Crystal CS4231 audio chip emulation
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw.h"
+#include "audiodev.h"
+#include "audio/audio.h"
+#include "isa.h"
+#include "qemu-timer.h"
+
+/*
+ Missing features:
+ ADC
+ Loopback
+ Timer
+ ADPCM
+ More...
+*/
+
+/* #define DEBUG */
+/* #define DEBUG_XLAW */
+
+static struct {
+ int irq;
+ int dma;
+ int port;
+ int aci_counter;
+} conf = {9, 3, 0x534, 1};
+
+#ifdef DEBUG
+#define dolog(...) AUD_log ("cs4231a", __VA_ARGS__)
+#else
+#define dolog(...)
+#endif
+
+#define lwarn(...) AUD_log ("cs4231a", "warning: " __VA_ARGS__)
+#define lerr(...) AUD_log ("cs4231a", "error: " __VA_ARGS__)
+
+#define CS_REGS 16
+#define CS_DREGS 32
+
+typedef struct CSState {
+ QEMUSoundCard card;
+ qemu_irq *pic;
+ uint32_t regs[CS_REGS];
+ uint8_t dregs[CS_DREGS];
+ int irq;
+ int dma;
+ int port;
+ int shift;
+ int dma_running;
+ int audio_free;
+ int transferred;
+ int aci_counter;
+ SWVoiceOut *voice;
+ int16_t *tab;
+} CSState;
+
+#define IO_READ_PROTO(name) \
+ static uint32_t name (void *opaque, uint32_t addr)
+
+#define IO_WRITE_PROTO(name) \
+ static void name (void *opaque, uint32_t addr, uint32_t val)
+
+#define GET_SADDR(addr) (addr & 3)
+
+#define MODE2 (1 << 6)
+#define MCE (1 << 6)
+#define PMCE (1 << 4)
+#define CMCE (1 << 5)
+#define TE (1 << 6)
+#define PEN (1 << 0)
+#define INT (1 << 0)
+#define IEN (1 << 1)
+#define PPIO (1 << 6)
+#define PI (1 << 4)
+#define CI (1 << 5)
+#define TI (1 << 6)
+
+enum {
+ Index_Address,
+ Index_Data,
+ Status,
+ PIO_Data
+};
+
+enum {
+ Left_ADC_Input_Control,
+ Right_ADC_Input_Control,
+ Left_AUX1_Input_Control,
+ Right_AUX1_Input_Control,
+ Left_AUX2_Input_Control,
+ Right_AUX2_Input_Control,
+ Left_DAC_Output_Control,
+ Right_DAC_Output_Control,
+ FS_And_Playback_Data_Format,
+ Interface_Configuration,
+ Pin_Control,
+ Error_Status_And_Initialization,
+ MODE_And_ID,
+ Loopback_Control,
+ Playback_Upper_Base_Count,
+ Playback_Lower_Base_Count,
+ Alternate_Feature_Enable_I,
+ Alternate_Feature_Enable_II,
+ Left_Line_Input_Control,
+ Right_Line_Input_Control,
+ Timer_Low_Base,
+ Timer_High_Base,
+ RESERVED,
+ Alternate_Feature_Enable_III,
+ Alternate_Feature_Status,
+ Version_Chip_ID,
+ Mono_Input_And_Output_Control,
+ RESERVED_2,
+ Capture_Data_Format,
+ RESERVED_3,
+ Capture_Upper_Base_Count,
+ Capture_Lower_Base_Count
+};
+
+static int freqs[2][8] = {
+ { 8000, 16000, 27420, 32000, -1, -1, 48000, 9000 },
+ { 5510, 11025, 18900, 22050, 37800, 44100, 33075, 6620 }
+};
+
+/* Tables courtesy http://hazelware.luggle.com/tutorials/mulawcompression.html */
+static int16_t MuLawDecompressTable[256] =
+{
+ -32124,-31100,-30076,-29052,-28028,-27004,-25980,-24956,
+ -23932,-22908,-21884,-20860,-19836,-18812,-17788,-16764,
+ -15996,-15484,-14972,-14460,-13948,-13436,-12924,-12412,
+ -11900,-11388,-10876,-10364, -9852, -9340, -8828, -8316,
+ -7932, -7676, -7420, -7164, -6908, -6652, -6396, -6140,
+ -5884, -5628, -5372, -5116, -4860, -4604, -4348, -4092,
+ -3900, -3772, -3644, -3516, -3388, -3260, -3132, -3004,
+ -2876, -2748, -2620, -2492, -2364, -2236, -2108, -1980,
+ -1884, -1820, -1756, -1692, -1628, -1564, -1500, -1436,
+ -1372, -1308, -1244, -1180, -1116, -1052, -988, -924,
+ -876, -844, -812, -780, -748, -716, -684, -652,
+ -620, -588, -556, -524, -492, -460, -428, -396,
+ -372, -356, -340, -324, -308, -292, -276, -260,
+ -244, -228, -212, -196, -180, -164, -148, -132,
+ -120, -112, -104, -96, -88, -80, -72, -64,
+ -56, -48, -40, -32, -24, -16, -8, 0,
+ 32124, 31100, 30076, 29052, 28028, 27004, 25980, 24956,
+ 23932, 22908, 21884, 20860, 19836, 18812, 17788, 16764,
+ 15996, 15484, 14972, 14460, 13948, 13436, 12924, 12412,
+ 11900, 11388, 10876, 10364, 9852, 9340, 8828, 8316,
+ 7932, 7676, 7420, 7164, 6908, 6652, 6396, 6140,
+ 5884, 5628, 5372, 5116, 4860, 4604, 4348, 4092,
+ 3900, 3772, 3644, 3516, 3388, 3260, 3132, 3004,
+ 2876, 2748, 2620, 2492, 2364, 2236, 2108, 1980,
+ 1884, 1820, 1756, 1692, 1628, 1564, 1500, 1436,
+ 1372, 1308, 1244, 1180, 1116, 1052, 988, 924,
+ 876, 844, 812, 780, 748, 716, 684, 652,
+ 620, 588, 556, 524, 492, 460, 428, 396,
+ 372, 356, 340, 324, 308, 292, 276, 260,
+ 244, 228, 212, 196, 180, 164, 148, 132,
+ 120, 112, 104, 96, 88, 80, 72, 64,
+ 56, 48, 40, 32, 24, 16, 8, 0
+};
+
+static int16_t ALawDecompressTable[256] =
+{
+ -5504, -5248, -6016, -5760, -4480, -4224, -4992, -4736,
+ -7552, -7296, -8064, -7808, -6528, -6272, -7040, -6784,
+ -2752, -2624, -3008, -2880, -2240, -2112, -2496, -2368,
+ -3776, -3648, -4032, -3904, -3264, -3136, -3520, -3392,
+ -22016,-20992,-24064,-23040,-17920,-16896,-19968,-18944,
+ -30208,-29184,-32256,-31232,-26112,-25088,-28160,-27136,
+ -11008,-10496,-12032,-11520,-8960, -8448, -9984, -9472,
+ -15104,-14592,-16128,-15616,-13056,-12544,-14080,-13568,
+ -344, -328, -376, -360, -280, -264, -312, -296,
+ -472, -456, -504, -488, -408, -392, -440, -424,
+ -88, -72, -120, -104, -24, -8, -56, -40,
+ -216, -200, -248, -232, -152, -136, -184, -168,
+ -1376, -1312, -1504, -1440, -1120, -1056, -1248, -1184,
+ -1888, -1824, -2016, -1952, -1632, -1568, -1760, -1696,
+ -688, -656, -752, -720, -560, -528, -624, -592,
+ -944, -912, -1008, -976, -816, -784, -880, -848,
+ 5504, 5248, 6016, 5760, 4480, 4224, 4992, 4736,
+ 7552, 7296, 8064, 7808, 6528, 6272, 7040, 6784,
+ 2752, 2624, 3008, 2880, 2240, 2112, 2496, 2368,
+ 3776, 3648, 4032, 3904, 3264, 3136, 3520, 3392,
+ 22016, 20992, 24064, 23040, 17920, 16896, 19968, 18944,
+ 30208, 29184, 32256, 31232, 26112, 25088, 28160, 27136,
+ 11008, 10496, 12032, 11520, 8960, 8448, 9984, 9472,
+ 15104, 14592, 16128, 15616, 13056, 12544, 14080, 13568,
+ 344, 328, 376, 360, 280, 264, 312, 296,
+ 472, 456, 504, 488, 408, 392, 440, 424,
+ 88, 72, 120, 104, 24, 8, 56, 40,
+ 216, 200, 248, 232, 152, 136, 184, 168,
+ 1376, 1312, 1504, 1440, 1120, 1056, 1248, 1184,
+ 1888, 1824, 2016, 1952, 1632, 1568, 1760, 1696,
+ 688, 656, 752, 720, 560, 528, 624, 592,
+ 944, 912, 1008, 976, 816, 784, 880, 848
+};
+
+static void cs_reset(void *opaque)
+{
+ CSState *s = opaque;
+
+ s->regs[Index_Address] = 0x40;
+ s->regs[Index_Data] = 0x00;
+ s->regs[Status] = 0x00;
+ s->regs[PIO_Data] = 0x00;
+
+ s->dregs[Left_ADC_Input_Control] = 0x00;
+ s->dregs[Right_ADC_Input_Control] = 0x00;
+ s->dregs[Left_AUX1_Input_Control] = 0x88;
+ s->dregs[Right_AUX1_Input_Control] = 0x88;
+ s->dregs[Left_AUX2_Input_Control] = 0x88;
+ s->dregs[Right_AUX2_Input_Control] = 0x88;
+ s->dregs[Left_DAC_Output_Control] = 0x80;
+ s->dregs[Right_DAC_Output_Control] = 0x80;
+ s->dregs[FS_And_Playback_Data_Format] = 0x00;
+ s->dregs[Interface_Configuration] = 0x08;
+ s->dregs[Pin_Control] = 0x00;
+ s->dregs[Error_Status_And_Initialization] = 0x00;
+ s->dregs[MODE_And_ID] = 0x8a;
+ s->dregs[Loopback_Control] = 0x00;
+ s->dregs[Playback_Upper_Base_Count] = 0x00;
+ s->dregs[Playback_Lower_Base_Count] = 0x00;
+ s->dregs[Alternate_Feature_Enable_I] = 0x00;
+ s->dregs[Alternate_Feature_Enable_II] = 0x00;
+ s->dregs[Left_Line_Input_Control] = 0x88;
+ s->dregs[Right_Line_Input_Control] = 0x88;
+ s->dregs[Timer_Low_Base] = 0x00;
+ s->dregs[Timer_High_Base] = 0x00;
+ s->dregs[RESERVED] = 0x00;
+ s->dregs[Alternate_Feature_Enable_III] = 0x00;
+ s->dregs[Alternate_Feature_Status] = 0x00;
+ s->dregs[Version_Chip_ID] = 0xa0;
+ s->dregs[Mono_Input_And_Output_Control] = 0xa0;
+ s->dregs[RESERVED_2] = 0x00;
+ s->dregs[Capture_Data_Format] = 0x00;
+ s->dregs[RESERVED_3] = 0x00;
+ s->dregs[Capture_Upper_Base_Count] = 0x00;
+ s->dregs[Capture_Lower_Base_Count] = 0x00;
+}
+
+static void cs_audio_callback (void *opaque, int free)
+{
+ CSState *s = opaque;
+ s->audio_free = free;
+}
+
+static void cs_reset_voices (CSState *s, uint32_t val)
+{
+ int xtal;
+ audsettings_t as;
+
+#ifdef DEBUG_XLAW
+ if (val == 0 || val == 32)
+ val = (1 << 4) | (1 << 5);
+#endif
+
+ xtal = val & 1;
+ as.freq = freqs[xtal][(val >> 1) & 7];
+
+ if (as.freq == -1) {
+ lerr ("unsupported frequency (val=%#x)\n", val);
+ goto error;
+ }
+
+ as.nchannels = (val & (1 << 4)) ? 2 : 1;
+ as.endianness = 0;
+ s->tab = NULL;
+
+ switch ((val >> 5) & ((s->dregs[MODE_And_ID] & MODE2) ? 7 : 3)) {
+ case 0:
+ as.fmt = AUD_FMT_U8;
+ s->shift = as.nchannels == 2;
+ break;
+
+ case 1:
+ s->tab = MuLawDecompressTable;
+ goto x_law;
+ case 3:
+ s->tab = ALawDecompressTable;
+ x_law:
+ as.fmt = AUD_FMT_S16;
+ as.endianness = AUDIO_HOST_ENDIANNESS;
+ s->shift = as.nchannels == 2;
+ break;
+
+ case 6:
+ as.endianness = 1;
+ case 2:
+ as.fmt = AUD_FMT_S16;
+ s->shift = as.nchannels;
+ break;
+
+ case 7:
+ case 4:
+ lerr ("attempt to use reserved format value (%#x)\n", val);
+ goto error;
+
+ case 5:
+ lerr ("ADPCM 4 bit IMA compatible format is not supported\n");
+ goto error;
+ }
+
+ s->voice = AUD_open_out (
+ &s->card,
+ s->voice,
+ "cs4231a",
+ s,
+ cs_audio_callback,
+ &as
+ );
+
+ if (s->dregs[Interface_Configuration] & PEN) {
+ if (!s->dma_running) {
+ DMA_hold_DREQ (s->dma);
+ AUD_set_active_out (s->voice, 1);
+ s->transferred = 0;
+ }
+ s->dma_running = 1;
+ }
+ else {
+ if (s->dma_running) {
+ DMA_release_DREQ (s->dma);
+ AUD_set_active_out (s->voice, 0);
+ }
+ s->dma_running = 0;
+ }
+ return;
+
+ error:
+ if (s->dma_running) {
+ DMA_release_DREQ (s->dma);
+ AUD_set_active_out (s->voice, 0);
+ }
+}
+
+IO_READ_PROTO (cs_read)
+{
+ CSState *s = opaque;
+ uint32_t saddr, iaddr, ret;
+
+ saddr = GET_SADDR (addr);
+ iaddr = ~0U;
+
+ switch (saddr) {
+ case Index_Address:
+ ret = s->regs[saddr] & ~0x80;
+ break;
+
+ case Index_Data:
+ if (!(s->dregs[MODE_And_ID] & MODE2))
+ iaddr = s->regs[Index_Address] & 0x0f;
+ else
+ iaddr = s->regs[Index_Address] & 0x1f;
+
+ ret = s->dregs[iaddr];
+ if (iaddr == Error_Status_And_Initialization) {
+ /* keep SEAL happy */
+ if (s->aci_counter) {
+ ret |= 1 << 5;
+ s->aci_counter -= 1;
+ }
+ }
+ break;
+
+ default:
+ ret = s->regs[saddr];
+ break;
+ }
+ dolog ("read %d:%d -> %d\n", saddr, iaddr, ret);
+ return ret;
+}
+
+IO_WRITE_PROTO (cs_write)
+{
+ CSState *s = opaque;
+ uint32_t saddr, iaddr;
+
+ saddr = GET_SADDR (addr);
+
+ switch (saddr) {
+ case Index_Address:
+ if (!(s->regs[Index_Address] & MCE) && (val & MCE)
+ && (s->dregs[Interface_Configuration] & (3 << 3)))
+ s->aci_counter = conf.aci_counter;
+
+ s->regs[Index_Address] = val & ~(1 << 7);
+ break;
+
+ case Index_Data:
+ if (!(s->dregs[MODE_And_ID] & MODE2))
+ iaddr = s->regs[Index_Address] & 0x0f;
+ else
+ iaddr = s->regs[Index_Address] & 0x1f;
+
+ switch (iaddr) {
+ case RESERVED:
+ case RESERVED_2:
+ case RESERVED_3:
+ lwarn ("attempt to write %#x to reserved indirect register %d\n",
+ val, iaddr);
+ break;
+
+ case FS_And_Playback_Data_Format:
+ if (s->regs[Index_Address] & MCE) {
+ cs_reset_voices (s, val);
+ }
+ else {
+ if (s->dregs[Alternate_Feature_Status] & PMCE) {
+ val = (val & ~0x0f) | (s->dregs[iaddr] & 0x0f);
+ cs_reset_voices (s, val);
+ }
+ else {
+ lwarn ("[P]MCE(%#x, %#x) is not set, val=%#x\n",
+ s->regs[Index_Address],
+ s->dregs[Alternate_Feature_Status],
+ val);
+ break;
+ }
+ }
+ s->dregs[iaddr] = val;
+ break;
+
+ case Interface_Configuration:
+ val &= ~(1 << 5); /* D5 is reserved */
+ s->dregs[iaddr] = val;
+ if (val & PPIO) {
+ lwarn ("PIO is not supported (%#x)\n", val);
+ break;
+ }
+ if (val & PEN) {
+ if (!s->dma_running) {
+ cs_reset_voices (s, s->dregs[FS_And_Playback_Data_Format]);
+ }
+ }
+ else {
+ if (s->dma_running) {
+ DMA_release_DREQ (s->dma);
+ AUD_set_active_out (s->voice, 0);
+ s->dma_running = 0;
+ }
+ }
+ break;
+
+ case Error_Status_And_Initialization:
+ lwarn ("attempt to write to read only register %d\n", iaddr);
+ break;
+
+ case MODE_And_ID:
+ dolog ("val=%#x\n", val);
+ if (val & MODE2)
+ s->dregs[iaddr] |= MODE2;
+ else
+ s->dregs[iaddr] &= ~MODE2;
+ break;
+
+ case Alternate_Feature_Enable_I:
+ if (val & TE)
+ lerr ("timer is not yet supported\n");
+ s->dregs[iaddr] = val;
+ break;
+
+ case Alternate_Feature_Status:
+ if ((s->dregs[iaddr] & PI) && !(val & PI)) {
+ /* XXX: TI CI */
+ qemu_irq_lower (s->pic[s->irq]);
+ s->regs[Status] &= ~INT;
+ }
+ s->dregs[iaddr] = val;
+ break;
+
+ case Version_Chip_ID:
+ lwarn ("write to Version_Chip_ID register %#x\n", val);
+ s->dregs[iaddr] = val;
+ break;
+
+ default:
+ s->dregs[iaddr] = val;
+ break;
+ }
+ dolog ("written value %#x to indirect register %d\n", val, iaddr);
+ break;
+
+ case Status:
+ if (s->regs[Status] & INT) {
+ qemu_irq_lower (s->pic[s->irq]);
+ }
+ s->regs[Status] &= ~INT;
+ s->dregs[Alternate_Feature_Status] &= ~(PI | CI | TI);
+ break;
+
+ case PIO_Data:
+ lwarn ("attempt to write value %#x to PIO register\n", val);
+ break;
+ }
+}
+
+static int cs_write_audio (CSState *s, int nchan, int dma_pos,
+ int dma_len, int len)
+{
+ int temp, net;
+ uint8_t tmpbuf[4096];
+
+ temp = len;
+ net = 0;
+
+ while (temp) {
+ int left = dma_len - dma_pos;
+ int copied;
+ size_t to_copy;
+
+ to_copy = audio_MIN (temp, left);
+ if (to_copy > sizeof (tmpbuf)) {
+ to_copy = sizeof (tmpbuf);
+ }
+
+ copied = DMA_read_memory (nchan, tmpbuf, dma_pos, to_copy);
+ if (s->tab) {
+ int i;
+ int16_t linbuf[4096];
+
+ for (i = 0; i < copied; ++i)
+ linbuf[i] = s->tab[tmpbuf[i]];
+ copied = AUD_write (s->voice, linbuf, copied << 1);
+ copied >>= 1;
+ }
+ else {
+ copied = AUD_write (s->voice, tmpbuf, copied);
+ }
+
+ temp -= copied;
+ dma_pos = (dma_pos + copied) % dma_len;
+ net += copied;
+
+ if (!copied) {
+ break;
+ }
+ }
+
+ return net;
+}
+
+static int cs_dma_read (void *opaque, int nchan, int dma_pos, int dma_len)
+{
+ CSState *s = opaque;
+ int copy, written;
+ int till = -1;
+
+ copy = s->voice ? (s->audio_free >> (s->tab != NULL)) : dma_len;
+
+ if (s->dregs[Pin_Control] & IEN) {
+ till = (s->dregs[Playback_Lower_Base_Count]
+ | (s->dregs[Playback_Upper_Base_Count] << 8)) << s->shift;
+ till -= s->transferred;
+ copy = audio_MIN (till, copy);
+ }
+
+ if ((copy <= 0) || (dma_len <= 0)) {
+ return dma_pos;
+ }
+
+ written = cs_write_audio (s, nchan, dma_pos, dma_len, copy);
+
+ dma_pos = (dma_pos + written) % dma_len;
+ s->audio_free -= (written << (s->tab != NULL));
+
+ if (written == till) {
+ s->regs[Status] |= INT;
+ s->dregs[Alternate_Feature_Status] |= PI;
+ s->transferred = 0;
+ qemu_irq_raise (s->pic[s->irq]);
+ }
+ else {
+ s->transferred += written;
+ }
+
+ return dma_pos;
+}
+
+static void cs_save(QEMUFile *f, void *opaque)
+{
+ CSState *s = opaque;
+ unsigned int i;
+ uint32_t val;
+
+ for (i = 0; i < CS_REGS; i++)
+ qemu_put_be32s(f, &s->regs[i]);
+
+ qemu_put_buffer(f, s->dregs, CS_DREGS);
+ val = s->dma_running; qemu_put_be32s(f, &val);
+ val = s->audio_free; qemu_put_be32s(f, &val);
+ val = s->transferred; qemu_put_be32s(f, &val);
+ val = s->aci_counter; qemu_put_be32s(f, &val);
+}
+
+static int cs_load(QEMUFile *f, void *opaque, int version_id)
+{
+ CSState *s = opaque;
+ unsigned int i;
+ uint32_t val, dma_running;
+
+ if (version_id > 1)
+ return -EINVAL;
+
+ for (i = 0; i < CS_REGS; i++)
+ qemu_get_be32s(f, &s->regs[i]);
+
+ qemu_get_buffer(f, s->dregs, CS_DREGS);
+
+ qemu_get_be32s(f, &dma_running);
+ qemu_get_be32s(f, &val); s->audio_free = val;
+ qemu_get_be32s(f, &val); s->transferred = val;
+ qemu_get_be32s(f, &val); s->aci_counter = val;
+ if (dma_running && (s->dregs[Interface_Configuration] & PEN))
+ cs_reset_voices (s, s->dregs[FS_And_Playback_Data_Format]);
+ return 0;
+}
+
+int cs4231a_init (AudioState *audio, qemu_irq *pic)
+{
+ int i;
+ CSState *s;
+
+ if (!audio) {
+ lerr ("No audio state\n");
+ return -1;
+ }
+
+ s = qemu_mallocz (sizeof (*s));
+ if (!s) {
+ lerr ("Could not allocate memory for cs4231a (%zu bytes)\n",
+ sizeof (*s));
+ return -1;
+ }
+
+ s->pic = pic;
+ s->irq = conf.irq;
+ s->dma = conf.dma;
+ s->port = conf.port;
+
+ for (i = 0; i < 4; i++) {
+ register_ioport_write (s->port + i, 1, 1, cs_write, s);
+ register_ioport_read (s->port + i, 1, 1, cs_read, s);
+ }
+
+ DMA_register_channel (s->dma, cs_dma_read, s);
+
+ register_savevm ("cs4231a", 0, 1, cs_save, cs_load, s);
+ qemu_register_reset (cs_reset, s);
+ cs_reset (s);
+
+ AUD_register_card (audio,"cs4231a", &s->card);
+ return 0;
+}
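In cs_write_audio() above, µ-law and A-law bytes are expanded to 16-bit linear PCM through a 256-entry lookup table before being handed to the audio layer, which is why the byte count is doubled (copied << 1) around the AUD_write call. A standalone sketch of that expansion step, assuming a precomputed decode table like the ones above:

/* Hedged sketch: 8-bit companded samples -> 16-bit linear via table lookup. */
#include <stdint.h>
#include <stddef.h>

static size_t expand_xlaw(const int16_t table[256],
                          const uint8_t *in, int16_t *out, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++)
        out[i] = table[in[i]];
    return n * sizeof(int16_t);   /* bytes produced, i.e. n << 1 */
}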
diff --git a/hw/devices.h b/hw/devices.h
index 948abc5fa..45fead9c2 100644
--- a/hw/devices.h
+++ b/hw/devices.h
@@ -64,4 +64,11 @@ int tusb6010_sync_io(struct tusb_s *s);
int tusb6010_async_io(struct tusb_s *s);
void tusb6010_power(struct tusb_s *s, int on);
+/* tc6393xb.c */
+struct tc6393xb_s;
+struct tc6393xb_s *tc6393xb_init(uint32_t base, qemu_irq irq);
+void tc6393xb_gpio_out_set(struct tc6393xb_s *s, int line,
+ qemu_irq handler);
+qemu_irq *tc6393xb_gpio_in_get(struct tc6393xb_s *s);
+
#endif
diff --git a/hw/eccmemctl.c b/hw/eccmemctl.c
index fe7f27e06..5ee50aee0 100644
--- a/hw/eccmemctl.c
+++ b/hw/eccmemctl.c
@@ -40,16 +40,16 @@
* SMC (version 0, implementation 2) SS-10SX and SS-20
*/
-/* Register offsets */
-#define ECC_MER 0 /* Memory Enable Register */
-#define ECC_MDR 4 /* Memory Delay Register */
-#define ECC_MFSR 8 /* Memory Fault Status Register */
-#define ECC_VCR 12 /* Video Configuration Register */
-#define ECC_MFAR0 16 /* Memory Fault Address Register 0 */
-#define ECC_MFAR1 20 /* Memory Fault Address Register 1 */
-#define ECC_DR 24 /* Diagnostic Register */
-#define ECC_ECR0 28 /* Event Count Register 0 */
-#define ECC_ECR1 32 /* Event Count Register 1 */
+/* Register indexes */
+#define ECC_MER 0 /* Memory Enable Register */
+#define ECC_MDR 1 /* Memory Delay Register */
+#define ECC_MFSR 2 /* Memory Fault Status Register */
+#define ECC_VCR 3 /* Video Configuration Register */
+#define ECC_MFAR0 4 /* Memory Fault Address Register 0 */
+#define ECC_MFAR1 5 /* Memory Fault Address Register 1 */
+#define ECC_DR 6 /* Diagnostic Register */
+#define ECC_ECR0 7 /* Event Count Register 0 */
+#define ECC_ECR1 8 /* Event Count Register 1 */
/* ECC fault control register */
#define ECC_MER_EE 0x00000001 /* Enable ECC checking */
@@ -129,34 +129,34 @@ static void ecc_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
ECCState *s = opaque;
- switch (addr & ECC_ADDR_MASK) {
+ switch ((addr & ECC_ADDR_MASK) >> 2) {
case ECC_MER:
- s->regs[0] = (s->regs[0] & (ECC_MER_VER | ECC_MER_IMPL)) |
- (val & ~(ECC_MER_VER | ECC_MER_IMPL));
+ s->regs[ECC_MER] = (s->regs[ECC_MER] & (ECC_MER_VER | ECC_MER_IMPL)) |
+ (val & ~(ECC_MER_VER | ECC_MER_IMPL));
DPRINTF("Write memory enable %08x\n", val);
break;
case ECC_MDR:
- s->regs[1] = val & ECC_MDR_MASK;
+ s->regs[ECC_MDR] = val & ECC_MDR_MASK;
DPRINTF("Write memory delay %08x\n", val);
break;
case ECC_MFSR:
- s->regs[2] = val;
+ s->regs[ECC_MFSR] = val;
DPRINTF("Write memory fault status %08x\n", val);
break;
case ECC_VCR:
- s->regs[3] = val;
+ s->regs[ECC_VCR] = val;
DPRINTF("Write slot configuration %08x\n", val);
break;
case ECC_DR:
- s->regs[6] = val;
+ s->regs[ECC_DR] = val;
DPRINTF("Write diagnosiic %08x\n", val);
break;
case ECC_ECR0:
- s->regs[7] = val;
+ s->regs[ECC_ECR0] = val;
DPRINTF("Write event count 1 %08x\n", val);
break;
case ECC_ECR1:
- s->regs[7] = val;
+ s->regs[ECC_ECR0] = val;
DPRINTF("Write event count 2 %08x\n", val);
break;
}
@@ -167,41 +167,41 @@ static uint32_t ecc_mem_readl(void *opaque, target_phys_addr_t addr)
ECCState *s = opaque;
uint32_t ret = 0;
- switch (addr & ECC_ADDR_MASK) {
+ switch ((addr & ECC_ADDR_MASK) >> 2) {
case ECC_MER:
- ret = s->regs[0];
+ ret = s->regs[ECC_MER];
DPRINTF("Read memory enable %08x\n", ret);
break;
case ECC_MDR:
- ret = s->regs[1];
+ ret = s->regs[ECC_MDR];
DPRINTF("Read memory delay %08x\n", ret);
break;
case ECC_MFSR:
- ret = s->regs[2];
+ ret = s->regs[ECC_MFSR];
DPRINTF("Read memory fault status %08x\n", ret);
break;
case ECC_VCR:
- ret = s->regs[3];
+ ret = s->regs[ECC_VCR];
DPRINTF("Read slot configuration %08x\n", ret);
break;
case ECC_MFAR0:
- ret = s->regs[4];
+ ret = s->regs[ECC_MFAR0];
DPRINTF("Read memory fault address 0 %08x\n", ret);
break;
case ECC_MFAR1:
- ret = s->regs[5];
+ ret = s->regs[ECC_MFAR1];
DPRINTF("Read memory fault address 1 %08x\n", ret);
break;
case ECC_DR:
- ret = s->regs[6];
+ ret = s->regs[ECC_DR];
DPRINTF("Read diagnostic %08x\n", ret);
break;
case ECC_ECR0:
- ret = s->regs[7];
+ ret = s->regs[ECC_ECR0];
DPRINTF("Read event count 1 %08x\n", ret);
break;
case ECC_ECR1:
- ret = s->regs[7];
+ ret = s->regs[ECC_ECR0];
DPRINTF("Read event count 2 %08x\n", ret);
break;
}
@@ -281,7 +281,6 @@ static void ecc_save(QEMUFile *f, void *opaque)
static void ecc_reset(void *opaque)
{
ECCState *s = opaque;
- int i;
s->regs[ECC_MER] &= (ECC_MER_VER | ECC_MER_IMPL);
s->regs[ECC_MER] |= ECC_MER_MRR;
@@ -293,9 +292,6 @@ static void ecc_reset(void *opaque)
s->regs[ECC_DR] = 0;
s->regs[ECC_ECR0] = 0;
s->regs[ECC_ECR1] = 0;
-
- for (i = 1; i < ECC_NREGS; i++)
- s->regs[i] = 0;
}
void * ecc_init(target_phys_addr_t base, qemu_irq irq, uint32_t version)
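The eccmemctl rework above switches the register decode from byte offsets to word indexes: the MMIO offset is masked and shifted right by two, so the same symbolic names index regs[] directly in both the read and write paths. A compact sketch of that decode; the mask value here is illustrative, not the real ECC_ADDR_MASK:

/* Hedged sketch of byte-offset -> register-index decode. */
#include <stdint.h>

#define ADDR_MASK_SKETCH 0x3f              /* illustrative only */

static uint32_t reg_read_sketch(const uint32_t *regs, uint64_t addr)
{
    unsigned idx = (addr & ADDR_MASK_SKETCH) >> 2;   /* 4 bytes per register */
    return regs[idx];   /* caller provides regs[] covering the masked range */
}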
diff --git a/hw/es1370.c b/hw/es1370.c
index 754f621a1..a2a017bd5 100644
--- a/hw/es1370.c
+++ b/hw/es1370.c
@@ -936,6 +936,7 @@ static void es1370_save (QEMUFile *f, void *opaque)
ES1370State *s = opaque;
size_t i;
+ pci_device_save (s->pci_dev, f);
for (i = 0; i < NB_CHANNELS; ++i) {
struct chan *d = &s->chan[i];
qemu_put_be32s (f, &d->shift);
@@ -953,13 +954,18 @@ static void es1370_save (QEMUFile *f, void *opaque)
static int es1370_load (QEMUFile *f, void *opaque, int version_id)
{
+ int ret;
uint32_t ctl, sctl;
ES1370State *s = opaque;
size_t i;
- if (version_id != 1)
+ if (version_id != 2)
return -EINVAL;
+ ret = pci_device_load (s->pci_dev, f);
+ if (ret)
+ return ret;
+
for (i = 0; i < NB_CHANNELS; ++i) {
struct chan *d = &s->chan[i];
qemu_get_be32s (f, &d->shift);
@@ -1056,7 +1062,7 @@ int es1370_init (PCIBus *bus, AudioState *audio)
s->pci_dev = &d->dev;
pci_register_io_region (&d->dev, 0, 256, PCI_ADDRESS_SPACE_IO, es1370_map);
- register_savevm ("es1370", 0, 1, es1370_save, es1370_load, s);
+ register_savevm ("es1370", 0, 2, es1370_save, es1370_load, s);
qemu_register_reset (es1370_on_reset, s);
AUD_register_card (audio, "es1370", &s->card);
diff --git a/hw/etraxfs.c b/hw/etraxfs.c
index 942892c01..0efcd831f 100644
--- a/hw/etraxfs.c
+++ b/hw/etraxfs.c
@@ -30,21 +30,11 @@
#include "devices.h"
#include "boards.h"
-#include "etraxfs_dma.h"
-
-/* Init functions for different blocks. */
-extern qemu_irq *etraxfs_pic_init(CPUState *env, target_phys_addr_t base);
-void etraxfs_timer_init(CPUState *env, qemu_irq *irqs,
- target_phys_addr_t base);
-void *etraxfs_eth_init(NICInfo *nd, CPUState *env,
- qemu_irq *irq, target_phys_addr_t base);
-void etraxfs_ser_init(CPUState *env, qemu_irq *irq, CharDriverState *chr,
- target_phys_addr_t base);
+#include "etraxfs.h"
#define FLASH_SIZE 0x2000000
#define INTMEM_SIZE (128 * 1024)
-static void *etraxfs_dmac;
static uint32_t bootstrap_pc;
static void main_cpu_reset(void *opaque)
@@ -63,7 +53,8 @@ void bareetraxfs_init (ram_addr_t ram_size, int vga_ram_size,
const char *initrd_filename, const char *cpu_model)
{
CPUState *env;
- qemu_irq *pic;
+ struct etraxfs_pic *pic;
+ void *etraxfs_dmac;
struct etraxfs_dma_client *eth[2] = {NULL, NULL};
int kernel_size;
int i;
@@ -76,7 +67,7 @@ void bareetraxfs_init (ram_addr_t ram_size, int vga_ram_size,
cpu_model = "crisv32";
}
env = cpu_init(cpu_model);
-/* register_savevm("cpu", 0, 3, cpu_save, cpu_load, env); */
+ register_savevm("cpu", 0, 1, cpu_save, cpu_load, env);
qemu_register_reset(main_cpu_reset, env);
/* allocate RAM */
@@ -110,13 +101,13 @@ void bareetraxfs_init (ram_addr_t ram_size, int vga_ram_size,
etraxfs_dmac = etraxfs_dmac_init(env, 0xb0000000, 10);
for (i = 0; i < 10; i++) {
/* On ETRAX, odd numbered channels are inputs. */
- etraxfs_dmac_connect(etraxfs_dmac, i, pic + 7 + i, i & 1);
+ etraxfs_dmac_connect(etraxfs_dmac, i, pic->irq + 7 + i, i & 1);
}
/* Add the two ethernet blocks. */
- eth[0] = etraxfs_eth_init(&nd_table[0], env, pic + 25, 0xb0034000);
+ eth[0] = etraxfs_eth_init(&nd_table[0], env, pic->irq + 25, 0xb0034000);
if (nb_nics > 1)
- eth[1] = etraxfs_eth_init(&nd_table[1], env, pic + 26, 0xb0036000);
+ eth[1] = etraxfs_eth_init(&nd_table[1], env, pic->irq + 26, 0xb0036000);
/* The DMA Connector block is missing, hardwire things for now. */
etraxfs_dmac_connect_client(etraxfs_dmac, 0, eth[0]);
@@ -127,30 +118,31 @@ void bareetraxfs_init (ram_addr_t ram_size, int vga_ram_size,
}
/* 2 timers. */
- etraxfs_timer_init(env, pic + 0x1b, 0xb001e000);
- etraxfs_timer_init(env, pic + 0x1b, 0xb005e000);
+ etraxfs_timer_init(env, pic->irq + 0x1b, pic->nmi + 1, 0xb001e000);
+ etraxfs_timer_init(env, pic->irq + 0x1b, pic->nmi + 1, 0xb005e000);
for (i = 0; i < 4; i++) {
if (serial_hds[i]) {
- etraxfs_ser_init(env, pic + 0x14 + i,
+ etraxfs_ser_init(env, pic->irq + 0x14 + i,
serial_hds[i], 0xb0026000 + i * 0x2000);
}
}
if (kernel_filename) {
-#if 1
+ uint64_t entry;
/* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis
devboard SDK. */
kernel_size = load_elf(kernel_filename, 0,
- &bootstrap_pc, NULL, NULL);
-#else
- /* Takes a kimage from the axis devboard SDK. */
- kernel_size = load_image(kernel_filename, phys_ram_base + 0x4000);
- bootstrap_pc = 0x40004000;
- /* magic for boot. */
- env->regs[8] = 0x56902387;
- env->regs[9] = 0x40004000 + kernel_size;
-#endif
+ &entry, NULL, NULL);
+ bootstrap_pc = entry;
+ if (kernel_size < 0) {
+ /* Takes a kimage from the axis devboard SDK. */
+ kernel_size = load_image(kernel_filename, phys_ram_base + 0x4000);
+ bootstrap_pc = 0x40004000;
+ /* magic for boot. */
+ env->regs[8] = 0x56902387;
+ env->regs[9] = 0x40004000 + kernel_size;
+ }
}
env->pc = bootstrap_pc;
@@ -158,11 +150,6 @@ void bareetraxfs_init (ram_addr_t ram_size, int vga_ram_size,
printf ("ram size =%ld\n", ram_size);
}
-void DMA_run(void)
-{
- etraxfs_dmac_run(etraxfs_dmac);
-}
-
QEMUMachine bareetraxfs_machine = {
"bareetraxfs",
"Bare ETRAX FS board",
diff --git a/hw/etraxfs.h b/hw/etraxfs.h
new file mode 100644
index 000000000..0c9fdbb60
--- /dev/null
+++ b/hw/etraxfs.h
@@ -0,0 +1,42 @@
+/*
+ * QEMU ETRAX System Emulator
+ *
+ * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "etraxfs_dma.h"
+
+struct etraxfs_pic
+{
+ qemu_irq *irq;
+ qemu_irq *nmi;
+ qemu_irq *guru;
+
+ void *internal;
+};
+
+struct etraxfs_pic *etraxfs_pic_init(CPUState *env, target_phys_addr_t base);
+void etraxfs_timer_init(CPUState *env, qemu_irq *irqs, qemu_irq *nmi,
+ target_phys_addr_t base);
+void *etraxfs_eth_init(NICInfo *nd, CPUState *env,
+ qemu_irq *irq, target_phys_addr_t base);
+void etraxfs_ser_init(CPUState *env, qemu_irq *irq, CharDriverState *chr,
+ target_phys_addr_t base);
diff --git a/hw/etraxfs_dma.c b/hw/etraxfs_dma.c
index a090a6d6b..776cfb9cf 100644
--- a/hw/etraxfs_dma.c
+++ b/hw/etraxfs_dma.c
@@ -267,22 +267,33 @@ static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);
/* Load and decode. FIXME: handle endianness. */
- D(printf("%s addr=%x\n", __func__, addr));
+ D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
cpu_physical_memory_read (addr,
(void *) &ctrl->channels[c].current_d,
sizeof ctrl->channels[c].current_d);
D(dump_d(c, &ctrl->channels[c].current_d));
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
- (uint32_t)ctrl->channels[c].current_d.buf;
+ ctrl->channels[c].regs[RW_DATA] = addr;
+}
+
+static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
+{
+ target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);
+
+ /* Encode and store. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
+ D(dump_d(c, &ctrl->channels[c].current_d));
+ cpu_physical_memory_write (addr,
+ (void *) &ctrl->channels[c].current_c,
+ sizeof ctrl->channels[c].current_c);
}
static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);
- /* Load and decode. FIXME: handle endianness. */
- D(printf("%s addr=%x\n", __func__, addr));
+ /* Encode and store. FIXME: handle endianness. */
+ D(printf("%s ch=%d addr=%x\n", __func__, c, addr));
cpu_physical_memory_write (addr,
(void *) &ctrl->channels[c].current_d,
sizeof ctrl->channels[c].current_d);
@@ -325,20 +336,23 @@ static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
/* If the current descriptor cleared the eol flag and we had already
reached eol state, do the continue. */
if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
- D(printf("continue %d ok %x\n", c,
+ D(printf("continue %d ok %p\n", c,
ctrl->channels[c].current_d.next));
ctrl->channels[c].regs[RW_SAVED_DATA] =
(uint32_t) ctrl->channels[c].current_d.next;
channel_load_d(ctrl, c);
channel_start(ctrl, c);
}
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
+ (uint32_t) ctrl->channels[c].current_d.buf;
}
static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
unsigned int cmd = v & ((1 << 10) - 1);
- D(printf("%s cmd=%x\n", __func__, cmd));
+ D(printf("%s ch=%d cmd=%x pc=%x\n",
+ __func__, c, cmd, ctrl->env->pc));
if (cmd & regk_dma_load_d) {
channel_load_d(ctrl, c);
if (cmd & regk_dma_burst)
@@ -347,6 +361,7 @@ static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
if (cmd & regk_dma_load_c) {
channel_load_c(ctrl, c);
+ channel_start(ctrl, c);
}
}
@@ -381,11 +396,30 @@ static void channel_out_run(struct fs_dma_ctrl *ctrl, int c)
saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
- D(printf("buf=%x after=%x saved_data_buf=%x\n",
+ D(fprintf(logfile, "ch=%d buf=%x after=%x saved_data_buf=%x\n",
+ c,
(uint32_t)ctrl->channels[c].current_d.buf,
(uint32_t)ctrl->channels[c].current_d.after,
saved_data_buf));
+ len = (uint32_t) ctrl->channels[c].current_d.after;
+ len -= saved_data_buf;
+
+ if (len > sizeof buf)
+ len = sizeof buf;
+ cpu_physical_memory_read (saved_data_buf, buf, len);
+
+ D(printf("channel %d pushes %x %u bytes\n", c,
+ saved_data_buf, len));
+
+ if (ctrl->channels[c].client->client.push)
+ ctrl->channels[c].client->client.push(
+ ctrl->channels[c].client->client.opaque, buf, len);
+ else
+ printf("WARNING: DMA ch%d dataloss, no attached client.\n", c);
+
+ saved_data_buf += len;
+
if (saved_data_buf == (uint32_t)ctrl->channels[c].current_d.after) {
/* Done. Step to next. */
if (ctrl->channels[c].current_d.out_eop) {
@@ -402,36 +436,26 @@ static void channel_out_run(struct fs_dma_ctrl *ctrl, int c)
if (ctrl->channels[c].current_d.eol) {
D(printf("channel %d EOL\n", c));
ctrl->channels[c].eol = 1;
+
+ /* Mark the context as disabled. */
+ ctrl->channels[c].current_c.dis = 1;
+ channel_store_c(ctrl, c);
+
channel_stop(ctrl, c);
} else {
ctrl->channels[c].regs[RW_SAVED_DATA] =
(uint32_t) ctrl->channels[c].current_d.next;
/* Load new descriptor. */
channel_load_d(ctrl, c);
+ saved_data_buf = (uint32_t)
+ ctrl->channels[c].current_d.buf;
}
channel_store_d(ctrl, c);
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
D(dump_d(c, &ctrl->channels[c].current_d));
- return;
}
-
- len = (uint32_t) ctrl->channels[c].current_d.after;
- len -= saved_data_buf;
-
- if (len > sizeof buf)
- len = sizeof buf;
- cpu_physical_memory_read (saved_data_buf, buf, len);
-
- D(printf("channel %d pushes %x %u bytes\n", c,
- saved_data_buf, len));
- /* TODO: Push content. */
- if (ctrl->channels[c].client->client.push)
- ctrl->channels[c].client->client.push(
- ctrl->channels[c].client->client.opaque, buf, len);
- else
- printf("WARNING: DMA ch%d dataloss, no attached client.\n", c);
-
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF] += len;
+ ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
}
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
@@ -482,14 +506,19 @@ static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
if (ctrl->channels[c].current_d.eol) {
D(printf("channel %d EOL\n", c));
ctrl->channels[c].eol = 1;
+
+ /* Mark the context as disabled. */
+ ctrl->channels[c].current_c.dis = 1;
+ channel_store_c(ctrl, c);
+
channel_stop(ctrl, c);
} else {
ctrl->channels[c].regs[RW_SAVED_DATA] =
(uint32_t) ctrl->channels[c].current_d.next;
/* Load new descriptor. */
channel_load_d(ctrl, c);
- saved_data_buf =
- ctrl->channels[c].regs[RW_SAVED_DATA_BUF];
+ saved_data_buf = (uint32_t)
+ ctrl->channels[c].current_d.buf;
}
}
@@ -522,21 +551,21 @@ dma_readl (void *opaque, target_phys_addr_t addr)
/* Make addr relative to this instances base. */
c = fs_channel(ctrl->base, addr);
- addr &= 0x1fff;
+ addr &= 0x1fff;
switch (addr)
- {
+ {
case RW_STAT:
r = ctrl->channels[c].state & 7;
r |= ctrl->channels[c].eol << 5;
r |= ctrl->channels[c].stream_cmd_src << 8;
break;
- default:
+ default:
r = ctrl->channels[c].regs[addr];
D(printf ("%s c=%d addr=%x pc=%x\n",
- __func__, c, addr, env->pc));
- break;
- }
+ __func__, c, addr, ctrl->env->pc));
+ break;
+ }
return r;
}
@@ -559,9 +588,9 @@ dma_writel (void *opaque, target_phys_addr_t addr, uint32_t value)
c = fs_channel(ctrl->base, addr);
addr &= 0x1fff;
switch (addr)
- {
+ {
case RW_DATA:
- printf("RW_DATA=%x\n", value);
+ ctrl->channels[c].regs[addr] = value;
break;
case RW_CFG:
@@ -590,13 +619,15 @@ dma_writel (void *opaque, target_phys_addr_t addr, uint32_t value)
case RW_STREAM_CMD:
ctrl->channels[c].regs[addr] = value;
+ D(printf("stream_cmd ch=%d pc=%x\n",
+ c, ctrl->env->pc));
channel_stream_cmd(ctrl, c, value);
break;
- default:
- D(printf ("%s c=%d %x %x pc=%x\n",
- __func__, c, addr, value, env->pc));
- break;
+ default:
+ D(printf ("%s c=%d %x %x pc=%x\n",
+ __func__, c, addr, value, ctrl->env->pc));
+ break;
}
}
@@ -658,6 +689,13 @@ void etraxfs_dmac_connect_client(void *opaque, int c,
}
+static void *etraxfs_dmac;
+void DMA_run(void)
+{
+ if (etraxfs_dmac)
+ etraxfs_dmac_run(etraxfs_dmac);
+}
+
void *etraxfs_dmac_init(CPUState *env,
target_phys_addr_t base, int nr_channels)
{
@@ -686,6 +724,8 @@ void *etraxfs_dmac_init(CPUState *env,
ctrl->channels[i].regmap);
}
+ /* Hax, we only support one DMA controller at a time. */
+ etraxfs_dmac = ctrl;
return ctrl;
err:
qemu_free(ctrl->channels);
diff --git a/hw/etraxfs_pic.c b/hw/etraxfs_pic.c
index 7022c9953..d145bec17 100644
--- a/hw/etraxfs_pic.c
+++ b/hw/etraxfs_pic.c
@@ -24,6 +24,7 @@
#include <stdio.h>
#include "hw.h"
+#include "etraxfs.h"
#define D(x)
@@ -143,7 +144,7 @@ void irq_info(void)
{
}
-static void etraxfs_pic_handler(void *opaque, int irq, int level)
+static void irq_handler(void *opaque, int irq, int level)
{
struct fs_pic_state_t *fs = (void *)opaque;
CPUState *env = fs->env;
@@ -187,22 +188,56 @@ static void etraxfs_pic_handler(void *opaque, int irq, int level)
}
}
-qemu_irq *etraxfs_pic_init(CPUState *env, target_phys_addr_t base)
+static void nmi_handler(void *opaque, int irq, int level)
+{
+ struct fs_pic_state_t *fs = (void *)opaque;
+ CPUState *env = fs->env;
+ uint32_t mask;
+
+ mask = 1 << irq;
+ if (level)
+ fs->r_nmi |= mask;
+ else
+ fs->r_nmi &= ~mask;
+
+ if (fs->r_nmi)
+ cpu_interrupt(env, CPU_INTERRUPT_NMI);
+ else
+ cpu_reset_interrupt(env, CPU_INTERRUPT_NMI);
+}
+
+static void guru_handler(void *opaque, int irq, int level)
+{
+ struct fs_pic_state_t *fs = (void *)opaque;
+ CPUState *env = fs->env;
+ cpu_abort(env, "%s unsupported exception\n", __func__);
+
+}
+
+
+struct etraxfs_pic *etraxfs_pic_init(CPUState *env, target_phys_addr_t base)
{
- struct fs_pic_state_t *fs;
- qemu_irq *pic;
+ struct fs_pic_state_t *fs = NULL;
+ struct etraxfs_pic *pic = NULL;
int intr_vect_regs;
- fs = qemu_mallocz(sizeof *fs);
- if (!fs)
- return NULL;
- fs->env = env;
+ pic = qemu_mallocz(sizeof *pic);
+ pic->internal = fs = qemu_mallocz(sizeof *fs);
+ if (!fs || !pic)
+ goto err;
- pic = qemu_allocate_irqs(etraxfs_pic_handler, fs, 30);
+ fs->env = env;
+ pic->irq = qemu_allocate_irqs(irq_handler, fs, 30);
+ pic->nmi = qemu_allocate_irqs(nmi_handler, fs, 2);
+ pic->guru = qemu_allocate_irqs(guru_handler, fs, 1);
intr_vect_regs = cpu_register_io_memory(0, pic_read, pic_write, fs);
cpu_register_physical_memory(base, 0x14, intr_vect_regs);
fs->base = base;
return pic;
+ err:
+ free(pic);
+ free(fs);
+ return NULL;
}
diff --git a/hw/etraxfs_timer.c b/hw/etraxfs_timer.c
index e0fde9c2c..e996c57f6 100644
--- a/hw/etraxfs_timer.c
+++ b/hw/etraxfs_timer.c
@@ -46,6 +46,7 @@
struct fs_timer_t {
CPUState *env;
qemu_irq *irq;
+ qemu_irq *nmi;
target_phys_addr_t base;
QEMUBH *bh_t0;
@@ -56,6 +57,8 @@ struct fs_timer_t {
ptimer_state *ptimer_wd;
struct timeval last;
+ int wd_hits;
+
/* Control registers. */
uint32_t rw_tmr0_div;
uint32_t r_tmr0_data;
@@ -129,6 +132,7 @@ static void update_ctrl(struct fs_timer_t *t, int tnum)
unsigned int freq_hz;
unsigned int div;
uint32_t ctrl;
+
ptimer_state *timer;
if (tnum == 0) {
@@ -163,8 +167,8 @@ static void update_ctrl(struct fs_timer_t *t, int tnum)
D(printf ("freq_hz=%d div=%d\n", freq_hz, div));
div = div * TIMER_SLOWDOWN;
- div >>= 15;
- freq_hz >>= 15;
+ div >>= 10;
+ freq_hz >>= 10;
ptimer_set_freq(timer, freq_hz);
ptimer_set_limit(timer, div, 0);
@@ -216,7 +220,18 @@ static void timer1_hit(void *opaque)
static void watchdog_hit(void *opaque)
{
- qemu_system_reset_request();
+ struct fs_timer_t *t = opaque;
+ if (t->wd_hits == 0) {
+ /* real hw gives a single tick before resetting but we are
+ a bit friendlier to compensate for our slower execution. */
+ ptimer_set_count(t->ptimer_wd, 10);
+ ptimer_run(t->ptimer_wd, 1);
+ qemu_irq_raise(t->nmi[0]);
+ }
+ else
+ qemu_system_reset_request();
+
+ t->wd_hits++;
}
static inline void timer_watchdog_update(struct fs_timer_t *t, uint32_t value)
@@ -235,7 +250,12 @@ static inline void timer_watchdog_update(struct fs_timer_t *t, uint32_t value)
return;
D(printf("en=%d new_key=%x oldkey=%x cmd=%d cnt=%d\n",
- wd_en, new_key, wd_key, wd_cmd, wd_cnt));
+ wd_en, new_key, wd_key, new_cmd, wd_cnt));
+
+ if (t->wd_hits)
+ qemu_irq_lower(t->nmi[0]);
+
+ t->wd_hits = 0;
ptimer_set_freq(t->ptimer_wd, 760);
if (wd_cnt == 0)
@@ -320,7 +340,7 @@ static void etraxfs_timer_reset(void *opaque)
qemu_irq_lower(t->irq[0]);
}
-void etraxfs_timer_init(CPUState *env, qemu_irq *irqs,
+void etraxfs_timer_init(CPUState *env, qemu_irq *irqs, qemu_irq *nmi,
target_phys_addr_t base)
{
static struct fs_timer_t *t;
@@ -337,6 +357,7 @@ void etraxfs_timer_init(CPUState *env, qemu_irq *irqs,
t->ptimer_t1 = ptimer_init(t->bh_t1);
t->ptimer_wd = ptimer_init(t->bh_wd);
t->irq = irqs;
+ t->nmi = nmi;
t->env = env;
t->base = base;
diff --git a/hw/firmware_abi.h b/hw/firmware_abi.h
index 499949363..713484d41 100644
--- a/hw/firmware_abi.h
+++ b/hw/firmware_abi.h
@@ -139,7 +139,7 @@ OpenBIOS_finish_partition(struct OpenBIOS_nvpart_v1 *header, uint32_t size)
}
static inline uint32_t
-OpenBIOS_set_var(uint8_t *nvram, uint32_t addr, const unsigned char *str)
+OpenBIOS_set_var(uint8_t *nvram, uint32_t addr, const char *str)
{
uint32_t len;
diff --git a/hw/gus.c b/hw/gus.c
index 57753a7f5..9ff00d791 100644
--- a/hw/gus.c
+++ b/hw/gus.c
@@ -58,7 +58,7 @@ typedef struct GUSState {
QEMUSoundCard card;
int freq;
int pos, left, shift, irqs;
- uint16_t *mixbuf;
+ GUSsample *mixbuf;
uint8_t himem[1024 * 1024 + 32 + 4096];
int samples;
SWVoiceOut *voice;
@@ -198,7 +198,7 @@ void GUS_dmarequest (GUSEmuState *der)
int GUS_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len)
{
GUSState *s = opaque;
- int8_t tmpbuf[4096];
+ char tmpbuf[4096];
int pos = dma_pos, mode, left = dma_len - dma_pos;
ldebug ("read DMA %#x %d\n", dma_pos, dma_len);
@@ -220,6 +220,38 @@ int GUS_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len)
return dma_len;
}
+static void GUS_save (QEMUFile *f, void *opaque)
+{
+ int32_t val;
+ GUSState *s = opaque;
+
+ val = s->pos; qemu_put_be32s (f, &val);
+ val = s->left; qemu_put_be32s (f, &val);
+ val = s->shift; qemu_put_be32s (f, &val);
+ val = s->irqs; qemu_put_be32s (f, &val);
+ val = s->samples; qemu_put_be32s (f, &val);
+ qemu_put_be64s (f, &s->last_ticks);
+ qemu_put_buffer (f, s->himem, sizeof (s->himem));
+}
+
+static int GUS_load (QEMUFile *f, void *opaque, int version_id)
+{
+ int32_t val;
+ GUSState *s = opaque;
+
+ if (version_id != 2)
+ return -EINVAL;
+
+ qemu_get_be32s (f, &val); s->pos = val;
+ qemu_get_be32s (f, &val); s->left = val;
+ qemu_get_be32s (f, &val); s->shift = val;
+ qemu_get_be32s (f, &val); s->irqs = val;
+ qemu_get_be32s (f, &val); s->samples = val;
+ qemu_get_be64s (f, &s->last_ticks);
+ qemu_get_buffer (f, s->himem, sizeof (s->himem));
+ return 0;
+}
+
int GUS_init (AudioState *audio, qemu_irq *pic)
{
GUSState *s;
@@ -296,5 +328,7 @@ int GUS_init (AudioState *audio, qemu_irq *pic)
s->pic = pic;
AUD_set_active_out (s->voice, 1);
+
+ register_savevm ("gus", 0, 2, GUS_save, GUS_load, s);
return 0;
}
diff --git a/hw/gusemu.h b/hw/gusemu.h
index 2e9c1c0c7..a64300a09 100644
--- a/hw/gusemu.h
+++ b/hw/gusemu.h
@@ -32,12 +32,14 @@
typedef unsigned short GUSword;
typedef unsigned int GUSdword;
typedef signed char GUSchar;
+ typedef signed short GUSsample;
#else
#include <stdint.h>
typedef int8_t GUSchar;
typedef uint8_t GUSbyte;
typedef uint16_t GUSword;
typedef uint32_t GUSdword;
+ typedef int16_t GUSsample;
#endif
typedef struct _GUSEmuState
@@ -91,7 +93,7 @@ void gus_dma_transferdata(GUSEmuState *state, char *dma_addr, unsigned int count
/* If the interrupts are asynchronous, it may be needed to use a separate thread mixing into a temporary */
/* audio buffer in order to avoid quality loss caused by large numsamples and elapsed_time values. */
-void gus_mixvoices(GUSEmuState *state, unsigned int playback_freq, unsigned int numsamples, short *bufferpos);
+void gus_mixvoices(GUSEmuState *state, unsigned int playback_freq, unsigned int numsamples, GUSsample *bufferpos);
/* recommended range: 10 < numsamples < 100 */
/* lower values may result in increased rounding error, higher values often cause audible timing delays */
diff --git a/hw/gusemu_mixer.c b/hw/gusemu_mixer.c
index b3b5aa5d7..6d8d9ced1 100644
--- a/hw/gusemu_mixer.c
+++ b/hw/gusemu_mixer.c
@@ -33,7 +33,7 @@
/* samples are always 16bit stereo (4 bytes each, first right then left interleaved) */
void gus_mixvoices(GUSEmuState * state, unsigned int playback_freq, unsigned int numsamples,
- short *bufferpos)
+ GUSsample *bufferpos)
{
/* note that byte registers are stored in the upper half of each voice register! */
GUSbyte *gusptr;
@@ -170,8 +170,8 @@ void gus_mixvoices(GUSEmuState * state, unsigned int playback_freq, unsigned int
}
/* mix samples into buffer */
- *(bufferpos + 2 * sample) += (short) ((sample1 * PanningPos) >> 4); /* right */
- *(bufferpos + 2 * sample + 1) += (short) ((sample1 * (15 - PanningPos)) >> 4); /* left */
+ *(bufferpos + 2 * sample) += (GUSsample) ((sample1 * PanningPos) >> 4); /* right */
+ *(bufferpos + 2 * sample + 1) += (GUSsample) ((sample1 * (15 - PanningPos)) >> 4); /* left */
}
/* write back voice and volume */
GUSvoice(wVSRCurrVol) = Volume32 / 32;
diff --git a/hw/hw.h b/hw/hw.h
index aabf06966..8bf2e0b74 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -101,9 +101,8 @@ void qemu_register_reset(QEMUResetHandler *func, void *opaque);
/* handler to set the boot_device for a specific type of QEMUMachine */
/* return 0 if success */
-typedef int QEMUBootSetHandler(const char *boot_device);
-extern QEMUBootSetHandler *qemu_boot_set_handler;
-void qemu_register_boot_set(QEMUBootSetHandler *func);
+typedef int QEMUBootSetHandler(void *opaque, const char *boot_device);
+void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque);
/* These should really be in isa.h, but are here to make pc.h happy. */
typedef void (IOPortWriteFunc)(void *opaque, uint32_t address, uint32_t data);
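The hw.h change above threads an opaque pointer through the boot-device callback, so a handler no longer has to reach for a global to find its device (pc.c previously used the rtc_state global for exactly that). Both sides of the new contract look roughly like this; the device type and handler body are placeholders, the prototypes are the ones declared in this hunk:

    /* machine side: the handler receives whatever it was registered with */
    static int my_boot_set(void *opaque, const char *boot_device)
    {
        MyNvramState *nvram = opaque;   /* hypothetical device state */

        /* ... store the new boot order in the device ... */
        return 0;                       /* 0 means success */
    }

    /* called once the device exists, e.g. from the machine init code */
    qemu_register_boot_set(my_boot_set, nvram);

    /* monitor side (see the monitor.c hunk later in this patch) */
    if (qemu_boot_set_handler)
        res = qemu_boot_set_handler(boot_opaque, bootdevice);

The pc.c, sun4m.c and sun4u.c hunks below are the concrete versions of the machine side.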
diff --git a/hw/musicpal.c b/hw/musicpal.c
index 211c84936..b0fcee25b 100644
--- a/hw/musicpal.c
+++ b/hw/musicpal.c
@@ -1504,12 +1504,6 @@ static void musicpal_init(ram_addr_t ram_size, int vga_ram_size,
qemu_add_kbd_event_handler(musicpal_key_event, pic[MP_GPIO_IRQ]);
- /*
- * Wait a bit to catch menu button during U-Boot start-up
- * (to trigger emergency update).
- */
- sleep(1);
-
mv88w8618_eth_init(&nd_table[0], MP_ETH_BASE, pic[MP_ETH_IRQ]);
mixer_i2c = musicpal_audio_init(MP_AUDIO_BASE, pic[MP_AUDIO_IRQ]);
diff --git a/hw/pc.c b/hw/pc.c
index 42c26872d..6334c769b 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -195,10 +195,10 @@ static int boot_device2nibble(char boot_device)
/* copy/pasted from cmos_init, should be made a general function
and used there as well */
-int pc_boot_set(const char *boot_device)
+static int pc_boot_set(void *opaque, const char *boot_device)
{
#define PC_MAX_BOOT_DEVICES 3
- RTCState *s = rtc_state;
+ RTCState *s = (RTCState *)opaque;
int nbds, bds[3] = { 0, };
int i;
@@ -797,8 +797,6 @@ static void pc_init1(ram_addr_t ram_size, int vga_ram_size,
below_4g_mem_size = ram_size;
}
- qemu_register_boot_set(pc_boot_set);
-
linux_boot = (kernel_filename != NULL);
/* init CPUs */
@@ -999,6 +997,8 @@ static void pc_init1(ram_addr_t ram_size, int vga_ram_size,
rtc_state = rtc_init(0x70, i8259[8]);
+ qemu_register_boot_set(pc_boot_set, rtc_state);
+
register_ioport_read(0x92, 1, 1, ioport92_read, NULL);
register_ioport_write(0x92, 1, 1, ioport92_write, NULL);
diff --git a/hw/pflash_cfi01.c b/hw/pflash_cfi01.c
index c360179c3..771ea854d 100644
--- a/hw/pflash_cfi01.c
+++ b/hw/pflash_cfi01.c
@@ -202,14 +202,8 @@ static void pflash_write (pflash_t *pfl, target_ulong offset, uint32_t value,
uint8_t *p;
uint8_t cmd;
- /* WARNING: when the memory area is in ROMD mode, the offset is a
- ram offset, not a physical address */
cmd = value;
-
- if (pfl->wcycle == 0)
- offset -= (target_ulong)(long)pfl->storage;
- else
- offset -= pfl->base;
+ offset -= pfl->base;
DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d wcycle 0x%x\n",
__func__, offset, value, width, pfl->wcycle);
diff --git a/hw/pflash_cfi02.c b/hw/pflash_cfi02.c
index 1daaac3d8..5530841bf 100644
--- a/hw/pflash_cfi02.c
+++ b/hw/pflash_cfi02.c
@@ -112,13 +112,12 @@ static uint32_t pflash_read (pflash_t *pfl, uint32_t offset, int width)
DPRINTF("%s: offset " TARGET_FMT_lx "\n", __func__, offset);
ret = -1;
+ offset -= pfl->base;
if (pfl->rom_mode) {
- offset -= (uint32_t)(long)pfl->storage;
/* Lazy reset of to ROMD mode */
if (pfl->wcycle == 0)
pflash_register_memory(pfl, 1);
- } else
- offset -= pfl->base;
+ }
offset &= pfl->chip_len - 1;
boff = offset & 0xFF;
if (pfl->width == 2)
@@ -242,12 +241,7 @@ static void pflash_write (pflash_t *pfl, uint32_t offset, uint32_t value,
}
DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d %d\n", __func__,
offset, value, width, pfl->wcycle);
- /* WARNING: when the memory area is in ROMD mode, the offset is a
- ram offset, not a physical address */
- if (pfl->rom_mode)
- offset -= (uint32_t)(long)pfl->storage;
- else
- offset -= pfl->base;
+ offset -= pfl->base;
offset &= pfl->chip_len - 1;
DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d\n", __func__,
diff --git a/hw/slavio_serial.c b/hw/slavio_serial.c
index 37424c6cd..1ef3c119e 100644
--- a/hw/slavio_serial.c
+++ b/hw/slavio_serial.c
@@ -770,7 +770,7 @@ static const uint8_t e0_keycodes[128] = {
0, 0, 0, 0, 0, 0, 0, 68, 69, 70, 0, 91, 0, 93, 0, 112,
113, 114, 94, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 3, 25, 26, 49, 52, 72, 73, 97, 99, 111, 118, 120, 122, 67, 0,
};
static void sunkbd_event(void *opaque, int ch)
diff --git a/hw/sun4m.c b/hw/sun4m.c
index 734b576aa..e1ff225e3 100644
--- a/hw/sun4m.c
+++ b/hw/sun4m.c
@@ -149,6 +149,26 @@ void DMA_register_channel (int nchan,
{
}
+static int nvram_boot_set(void *opaque, const char *boot_device)
+{
+ unsigned int i;
+ uint8_t image[sizeof(ohwcfg_v3_t)];
+ ohwcfg_v3_t *header = (ohwcfg_v3_t *)&image;
+ m48t59_t *nvram = (m48t59_t *)opaque;
+
+ for (i = 0; i < sizeof(image); i++)
+ image[i] = m48t59_read(nvram, i) & 0xff;
+
+ strcpy((char *)header->boot_devices, boot_device);
+ header->nboot_devices = strlen(boot_device) & 0xff;
+ header->crc = cpu_to_be16(OHW_compute_crc(header, 0x00, 0xF8));
+
+ for (i = 0; i < sizeof(image); i++)
+ m48t59_write(nvram, i, image[i]);
+
+ return 0;
+}
+
extern int nographic;
static void nvram_init(m48t59_t *nvram, uint8_t *macaddr, const char *cmdline,
@@ -167,17 +187,17 @@ static void nvram_init(m48t59_t *nvram, uint8_t *macaddr, const char *cmdline,
memset(image, '\0', sizeof(image));
// Try to match PPC NVRAM
- strcpy(header->struct_ident, "QEMU_BIOS");
+ strcpy((char *)header->struct_ident, "QEMU_BIOS");
header->struct_version = cpu_to_be32(3); /* structure v3 */
header->nvram_size = cpu_to_be16(0x2000);
header->nvram_arch_ptr = cpu_to_be16(sizeof(ohwcfg_v3_t));
header->nvram_arch_size = cpu_to_be16(sizeof(struct sparc_arch_cfg));
- strcpy(header->arch, arch);
+ strcpy((char *)header->arch, arch);
header->nb_cpus = smp_cpus & 0xff;
header->RAM0_base = 0;
header->RAM0_size = cpu_to_be64((uint64_t)RAM_size);
- strcpy(header->boot_devices, boot_devices);
+ strcpy((char *)header->boot_devices, boot_devices);
header->nboot_devices = strlen(boot_devices) & 0xff;
header->kernel_image = cpu_to_be64((uint64_t)KERNEL_LOAD_ADDR);
header->kernel_size = cpu_to_be64((uint64_t)kernel_size);
@@ -230,6 +250,8 @@ static void nvram_init(m48t59_t *nvram, uint8_t *macaddr, const char *cmdline,
for (i = 0; i < sizeof(image); i++)
m48t59_write(nvram, i, image[i]);
+
+ qemu_register_boot_set(nvram_boot_set, nvram);
}
static void *slavio_intctl;
@@ -404,7 +426,7 @@ static void sun4m_hw_init(const struct hwdef *hwdef, ram_addr_t RAM_size,
qemu_register_reset(secondary_cpu_reset, env);
env->halted = 1;
}
- register_savevm("cpu", i, 3, cpu_save, cpu_load, env);
+ register_savevm("cpu", i, 4, cpu_save, cpu_load, env);
cpu_irqs[i] = qemu_allocate_irqs(cpu_set_irq, envs[i], MAX_PILS);
env->prom_addr = hwdef->slavio_base;
}
@@ -579,7 +601,7 @@ static void sun4c_hw_init(const struct hwdef *hwdef, ram_addr_t RAM_size,
cpu_sparc_set_id(env, 0);
qemu_register_reset(main_cpu_reset, env);
- register_savevm("cpu", 0, 3, cpu_save, cpu_load, env);
+ register_savevm("cpu", 0, 4, cpu_save, cpu_load, env);
cpu_irqs = qemu_allocate_irqs(cpu_set_irq, env, MAX_PILS);
env->prom_addr = hwdef->slavio_base;
@@ -1391,7 +1413,7 @@ static void sun4d_hw_init(const struct sun4d_hwdef *hwdef, ram_addr_t RAM_size,
qemu_register_reset(secondary_cpu_reset, env);
env->halted = 1;
}
- register_savevm("cpu", i, 3, cpu_save, cpu_load, env);
+ register_savevm("cpu", i, 4, cpu_save, cpu_load, env);
cpu_irqs[i] = qemu_allocate_irqs(cpu_set_irq, envs[i], MAX_PILS);
env->prom_addr = hwdef->slavio_base;
}
diff --git a/hw/sun4u.c b/hw/sun4u.c
index 98bed4b3d..985efd9dd 100644
--- a/hw/sun4u.c
+++ b/hw/sun4u.c
@@ -68,10 +68,30 @@ void DMA_register_channel (int nchan,
{
}
+static int nvram_boot_set(void *opaque, const char *boot_device)
+{
+ unsigned int i;
+ uint8_t image[sizeof(ohwcfg_v3_t)];
+ ohwcfg_v3_t *header = (ohwcfg_v3_t *)&image;
+ m48t59_t *nvram = (m48t59_t *)opaque;
+
+ for (i = 0; i < sizeof(image); i++)
+ image[i] = m48t59_read(nvram, i) & 0xff;
+
+ strcpy((char *)header->boot_devices, boot_device);
+ header->nboot_devices = strlen(boot_device) & 0xff;
+ header->crc = cpu_to_be16(OHW_compute_crc(header, 0x00, 0xF8));
+
+ for (i = 0; i < sizeof(image); i++)
+ m48t59_write(nvram, i, image[i]);
+
+ return 0;
+}
+
extern int nographic;
static int sun4u_NVRAM_set_params (m48t59_t *nvram, uint16_t NVRAM_size,
- const unsigned char *arch,
+ const char *arch,
ram_addr_t RAM_size,
const char *boot_devices,
uint32_t kernel_image, uint32_t kernel_size,
@@ -90,17 +110,17 @@ static int sun4u_NVRAM_set_params (m48t59_t *nvram, uint16_t NVRAM_size,
memset(image, '\0', sizeof(image));
// Try to match PPC NVRAM
- strcpy(header->struct_ident, "QEMU_BIOS");
+ strcpy((char *)header->struct_ident, "QEMU_BIOS");
header->struct_version = cpu_to_be32(3); /* structure v3 */
header->nvram_size = cpu_to_be16(NVRAM_size);
header->nvram_arch_ptr = cpu_to_be16(sizeof(ohwcfg_v3_t));
header->nvram_arch_size = cpu_to_be16(sizeof(struct sparc_arch_cfg));
- strcpy(header->arch, arch);
+ strcpy((char *)header->arch, arch);
header->nb_cpus = smp_cpus & 0xff;
header->RAM0_base = 0;
header->RAM0_size = cpu_to_be64((uint64_t)RAM_size);
- strcpy(header->boot_devices, boot_devices);
+ strcpy((char *)header->boot_devices, boot_devices);
header->nboot_devices = strlen(boot_devices) & 0xff;
header->kernel_image = cpu_to_be64((uint64_t)kernel_image);
header->kernel_size = cpu_to_be64((uint64_t)kernel_size);
@@ -155,6 +175,8 @@ static int sun4u_NVRAM_set_params (m48t59_t *nvram, uint16_t NVRAM_size,
for (i = 0; i < sizeof(image); i++)
m48t59_write(nvram, i, image[i]);
+ qemu_register_boot_set(nvram_boot_set, nvram);
+
return 0;
}
@@ -260,7 +282,7 @@ static void sun4u_init(ram_addr_t RAM_size, int vga_ram_size,
bh = qemu_bh_new(hstick_irq, env);
env->hstick = ptimer_init(bh);
ptimer_set_period(env->hstick, 1ULL);
- register_savevm("cpu", 0, 3, cpu_save, cpu_load, env);
+ register_savevm("cpu", 0, 4, cpu_save, cpu_load, env);
qemu_register_reset(main_cpu_reset, env);
main_cpu_reset(env);
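sun4m and sun4u gain an identical nvram_boot_set handler: it reads the ohwcfg_v3_t header back out of the M48T59 NVRAM byte by byte, rewrites the boot_devices field, and refreshes the checksum before writing the image back. Judging by the arguments to OHW_compute_crc, the CRC covers header offsets 0x00 through 0xF8, so any in-place edit inside that range has to be followed by a CRC refresh. A compressed sketch of that read-modify-write cycle (same calls as the hunks above, with an example boot order string):

    uint8_t image[sizeof(ohwcfg_v3_t)];
    ohwcfg_v3_t *header = (ohwcfg_v3_t *)&image;
    unsigned int i;

    for (i = 0; i < sizeof(image); i++)          /* read the header out */
        image[i] = m48t59_read(nvram, i) & 0xff;

    strcpy((char *)header->boot_devices, "cd");  /* example boot order */
    header->nboot_devices = strlen("cd") & 0xff;
    header->crc = cpu_to_be16(OHW_compute_crc(header, 0x00, 0xF8));

    for (i = 0; i < sizeof(image); i++)          /* write it back */
        m48t59_write(nvram, i, image[i]);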
diff --git a/hw/tc6393xb.c b/hw/tc6393xb.c
new file mode 100644
index 000000000..6db6dcb33
--- /dev/null
+++ b/hw/tc6393xb.c
@@ -0,0 +1,299 @@
+/*
+ * Toshiba TC6393XB I/O Controller.
+ * Found in Sharp Zaurus SL-6000 (tosa) or some
+ * Toshiba e-Series PDAs.
+ *
+ * Most features are currently unsupported!!!
+ *
+ * This code is licensed under the GNU GPL v2.
+ */
+#include "hw.h"
+#include "pxa.h"
+#include "devices.h"
+
+#define TC6393XB_GPIOS 16
+
+#define SCR_REVID 0x08 /* b Revision ID */
+#define SCR_ISR 0x50 /* b Interrupt Status */
+#define SCR_IMR 0x52 /* b Interrupt Mask */
+#define SCR_IRR 0x54 /* b Interrupt Routing */
+#define SCR_GPER 0x60 /* w GP Enable */
+#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
+#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
+#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
+#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
+#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
+#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
+#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
+#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
+#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
+#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
+#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
+#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
+#define SCR_CCR 0x98 /* w Clock Control */
+#define SCR_PLL2CR 0x9a /* w PLL2 Control */
+#define SCR_PLL1CR 0x9c /* l PLL1 Control */
+#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
+#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
+#define SCR_FER 0xe0 /* b Function Enable */
+#define SCR_MCR 0xe4 /* w Mode Control */
+#define SCR_CONFIG 0xfc /* b Configuration Control */
+#define SCR_DEBUG 0xff /* b Debug */
+
+struct tc6393xb_s {
+ target_phys_addr_t target_base;
+ struct {
+ uint8_t ISR;
+ uint8_t IMR;
+ uint8_t IRR;
+ uint16_t GPER;
+ uint8_t GPI_SR[3];
+ uint8_t GPI_IMR[3];
+ uint8_t GPI_EDER[3];
+ uint8_t GPI_LIR[3];
+ uint8_t GP_IARCR[3];
+ uint8_t GP_IARLCR[3];
+ uint8_t GPI_BCR[3];
+ uint16_t GPA_IARCR;
+ uint16_t GPA_IARLCR;
+ uint16_t CCR;
+ uint16_t PLL2CR;
+ uint32_t PLL1CR;
+ uint8_t DIARCR;
+ uint8_t DBOCR;
+ uint8_t FER;
+ uint16_t MCR;
+ uint8_t CONFIG;
+ uint8_t DEBUG;
+ } scr;
+ uint32_t gpio_dir;
+ uint32_t gpio_level;
+ uint32_t prev_level;
+ qemu_irq handler[TC6393XB_GPIOS];
+ qemu_irq *gpio_in;
+};
+
+qemu_irq *tc6393xb_gpio_in_get(struct tc6393xb_s *s)
+{
+ return s->gpio_in;
+}
+
+static void tc6393xb_gpio_set(void *opaque, int line, int level)
+{
+// struct tc6393xb_s *s = opaque;
+
+ if (line >= TC6393XB_GPIOS) {
+ printf("%s: No GPIO pin %i\n", __FUNCTION__, line);
+ return;
+ }
+
+ // FIXME: how does the chip reflect the GPIO input level change?
+}
+
+void tc6393xb_gpio_out_set(struct tc6393xb_s *s, int line,
+ qemu_irq handler)
+{
+ if (line >= TC6393XB_GPIOS) {
+ fprintf(stderr, "TC6393xb: no GPIO pin %d\n", line);
+ return;
+ }
+
+ s->handler[line] = handler;
+}
+
+static void tc6393xb_gpio_handler_update(struct tc6393xb_s *s)
+{
+ uint32_t level, diff;
+ int bit;
+
+ level = s->gpio_level & s->gpio_dir;
+
+ for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) {
+ bit = ffs(diff) - 1;
+ qemu_set_irq(s->handler[bit], (level >> bit) & 1);
+ }
+
+ s->prev_level = level;
+}
+
+#define SCR_REG_B(N) \
+ case SCR_ ##N: return s->scr.N
+#define SCR_REG_W(N) \
+ case SCR_ ##N: return s->scr.N; \
+ case SCR_ ##N + 1: return s->scr.N >> 8;
+#define SCR_REG_L(N) \
+ case SCR_ ##N: return s->scr.N; \
+ case SCR_ ##N + 1: return s->scr.N >> 8; \
+ case SCR_ ##N + 2: return s->scr.N >> 16; \
+ case SCR_ ##N + 3: return s->scr.N >> 24;
+#define SCR_REG_A(N) \
+ case SCR_ ##N(0): return s->scr.N[0]; \
+ case SCR_ ##N(1): return s->scr.N[1]; \
+ case SCR_ ##N(2): return s->scr.N[2]
+
+static uint32_t tc6393xb_readb(void *opaque, target_phys_addr_t addr)
+{
+ struct tc6393xb_s *s = opaque;
+ addr -= s->target_base;
+ switch (addr) {
+ case SCR_REVID:
+ return 3;
+ case SCR_REVID+1:
+ return 0;
+ SCR_REG_B(ISR);
+ SCR_REG_B(IMR);
+ SCR_REG_B(IRR);
+ SCR_REG_W(GPER);
+ SCR_REG_A(GPI_SR);
+ SCR_REG_A(GPI_IMR);
+ SCR_REG_A(GPI_EDER);
+ SCR_REG_A(GPI_LIR);
+ case SCR_GPO_DSR(0):
+ case SCR_GPO_DSR(1):
+ case SCR_GPO_DSR(2):
+ return (s->gpio_level >> ((addr - SCR_GPO_DSR(0)) * 8)) & 0xff;
+ case SCR_GPO_DOECR(0):
+ case SCR_GPO_DOECR(1):
+ case SCR_GPO_DOECR(2):
+ return (s->gpio_dir >> ((addr - SCR_GPO_DOECR(0)) * 8)) & 0xff;
+ SCR_REG_A(GP_IARCR);
+ SCR_REG_A(GP_IARLCR);
+ SCR_REG_A(GPI_BCR);
+ SCR_REG_W(GPA_IARCR);
+ SCR_REG_W(GPA_IARLCR);
+ SCR_REG_W(CCR);
+ SCR_REG_W(PLL2CR);
+ SCR_REG_L(PLL1CR);
+ SCR_REG_B(DIARCR);
+ SCR_REG_B(DBOCR);
+ SCR_REG_B(FER);
+ SCR_REG_W(MCR);
+ SCR_REG_B(CONFIG);
+ SCR_REG_B(DEBUG);
+ }
+ fprintf(stderr, "tc6393xb: unhandled read at %08x\n", (uint32_t) addr);
+ return 0;
+}
+#undef SCR_REG_B
+#undef SCR_REG_W
+#undef SCR_REG_L
+#undef SCR_REG_A
+
+#define SCR_REG_B(N) \
+ case SCR_ ##N: s->scr.N = value; break;
+#define SCR_REG_W(N) \
+ case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); break; \
+ case SCR_ ##N + 1: s->scr.N = (s->scr.N & 0xff) | (value << 8); break
+#define SCR_REG_L(N) \
+ case SCR_ ##N: s->scr.N = (s->scr.N & ~0xff) | (value & 0xff); break; \
+ case SCR_ ##N + 1: s->scr.N = (s->scr.N & ~(0xff << 8)) | (value & (0xff << 8)); break; \
+ case SCR_ ##N + 2: s->scr.N = (s->scr.N & ~(0xff << 16)) | (value & (0xff << 16)); break; \
+ case SCR_ ##N + 3: s->scr.N = (s->scr.N & ~(0xff << 24)) | (value & (0xff << 24)); break;
+#define SCR_REG_A(N) \
+ case SCR_ ##N(0): s->scr.N[0] = value; break; \
+ case SCR_ ##N(1): s->scr.N[1] = value; break; \
+ case SCR_ ##N(2): s->scr.N[2] = value; break
+
+static void tc6393xb_writeb(void *opaque, target_phys_addr_t addr, uint32_t value)
+{
+ struct tc6393xb_s *s = opaque;
+ addr -= s->target_base;
+ switch (addr) {
+ SCR_REG_B(ISR);
+ SCR_REG_B(IMR);
+ SCR_REG_B(IRR);
+ SCR_REG_W(GPER);
+ SCR_REG_A(GPI_SR);
+ SCR_REG_A(GPI_IMR);
+ SCR_REG_A(GPI_EDER);
+ SCR_REG_A(GPI_LIR);
+ case SCR_GPO_DSR(0):
+ case SCR_GPO_DSR(1):
+ case SCR_GPO_DSR(2):
+ s->gpio_level = (s->gpio_level & ~(0xff << ((addr - SCR_GPO_DSR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DSR(0))*8));
+ tc6393xb_gpio_handler_update(s);
+ break;
+ case SCR_GPO_DOECR(0):
+ case SCR_GPO_DOECR(1):
+ case SCR_GPO_DOECR(2):
+ s->gpio_dir = (s->gpio_dir & ~(0xff << ((addr - SCR_GPO_DOECR(0))*8))) | ((value & 0xff) << ((addr - SCR_GPO_DOECR(0))*8));
+ tc6393xb_gpio_handler_update(s);
+ break;
+ SCR_REG_A(GP_IARCR);
+ SCR_REG_A(GP_IARLCR);
+ SCR_REG_A(GPI_BCR);
+ SCR_REG_W(GPA_IARCR);
+ SCR_REG_W(GPA_IARLCR);
+ SCR_REG_W(CCR);
+ SCR_REG_W(PLL2CR);
+ SCR_REG_L(PLL1CR);
+ SCR_REG_B(DIARCR);
+ SCR_REG_B(DBOCR);
+ SCR_REG_B(FER);
+ SCR_REG_W(MCR);
+ SCR_REG_B(CONFIG);
+ SCR_REG_B(DEBUG);
+ default:
+ fprintf(stderr, "tc6393xb: unhandled write at %08x: %02x\n",
+ (uint32_t) addr, value & 0xff);
+ break;
+ }
+}
+#undef SCR_REG_B
+#undef SCR_REG_W
+#undef SCR_REG_L
+#undef SCR_REG_A
+
+static uint32_t tc6393xb_readw(void *opaque, target_phys_addr_t addr)
+{
+ return (tc6393xb_readb(opaque, addr) & 0xff) |
+ (tc6393xb_readb(opaque, addr + 1) << 8);
+}
+
+static uint32_t tc6393xb_readl(void *opaque, target_phys_addr_t addr)
+{
+ return (tc6393xb_readb(opaque, addr) & 0xff) |
+ ((tc6393xb_readb(opaque, addr + 1) & 0xff) << 8) |
+ ((tc6393xb_readb(opaque, addr + 2) & 0xff) << 16) |
+ ((tc6393xb_readb(opaque, addr + 3) & 0xff) << 24);
+}
+
+static void tc6393xb_writew(void *opaque, target_phys_addr_t addr, uint32_t value)
+{
+ tc6393xb_writeb(opaque, addr, value);
+ tc6393xb_writeb(opaque, addr + 1, value >> 8);
+}
+
+static void tc6393xb_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
+{
+ tc6393xb_writeb(opaque, addr, value);
+ tc6393xb_writeb(opaque, addr + 1, value >> 8);
+ tc6393xb_writeb(opaque, addr + 2, value >> 16);
+ tc6393xb_writeb(opaque, addr + 3, value >> 24);
+}
+
+struct tc6393xb_s *tc6393xb_init(uint32_t base, qemu_irq irq)
+{
+ int iomemtype;
+ struct tc6393xb_s *s;
+ CPUReadMemoryFunc *tc6393xb_readfn[] = {
+ tc6393xb_readb,
+ tc6393xb_readw,
+ tc6393xb_readl,
+ };
+ CPUWriteMemoryFunc *tc6393xb_writefn[] = {
+ tc6393xb_writeb,
+ tc6393xb_writew,
+ tc6393xb_writel,
+ };
+
+ s = (struct tc6393xb_s *) qemu_mallocz(sizeof(struct tc6393xb_s));
+ s->target_base = base;
+ s->gpio_in = qemu_allocate_irqs(tc6393xb_gpio_set, s, TC6393XB_GPIOS);
+
+ iomemtype = cpu_register_io_memory(0, tc6393xb_readfn,
+ tc6393xb_writefn, s);
+ cpu_register_physical_memory(s->target_base, 0x200000, iomemtype);
+
+ return s;
+}
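The new tc6393xb.c model only covers the system configuration registers and the GPIO block, but it already exposes the hooks a board needs: tc6393xb_init() maps the 2 MB register window, tc6393xb_gpio_in_get() hands back the controller's input lines, and tc6393xb_gpio_out_set() lets the board attach handlers to its outputs. A board would wire it up roughly like this (the tosa.c hunk below does only the first call, passing NULL for the IRQ; the GPIO routing shown here is an illustration with a hypothetical handler, not something this patch adds):

    /* map the controller at its physical base address */
    struct tc6393xb_s *tc = tc6393xb_init(0x10000000, NULL);

    /* route one of its GPIO outputs (0..TC6393XB_GPIOS-1) to a board handler */
    tc6393xb_gpio_out_set(tc, 2, my_board_handler);

    /* drive one of its GPIO inputs from board logic */
    qemu_irq *tc_gpio_in = tc6393xb_gpio_in_get(tc);
    qemu_set_irq(tc_gpio_in[5], 1);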
diff --git a/hw/tcx.c b/hw/tcx.c
index c72b99924..a63b44188 100644
--- a/hw/tcx.c
+++ b/hw/tcx.c
@@ -186,6 +186,8 @@ static void tcx_update_display(void *opaque)
if (ts->ds->depth == 0)
return;
+ if (ts->ds->width != ts->width || ts->ds->height != ts->height)
+ dpy_resize(ts->ds, ts->width, ts->height);
page = ts->vram_offset;
y_start = -1;
page_min = 0xffffffff;
@@ -264,6 +266,8 @@ static void tcx24_update_display(void *opaque)
if (ts->ds->depth != 32)
return;
+ if (ts->ds->width != ts->width || ts->ds->height != ts->height)
+ dpy_resize(ts->ds, ts->width, ts->height);
page = ts->vram_offset;
page24 = ts->vram24_offset;
cpage = ts->cplane_offset;
diff --git a/hw/tosa.c b/hw/tosa.c
index 97d41ece8..f67d67e19 100644
--- a/hw/tosa.c
+++ b/hw/tosa.c
@@ -12,6 +12,7 @@
#include "pxa.h"
#include "arm-misc.h"
#include "sysemu.h"
+#include "devices.h"
#include "sharpsl.h"
#include "pcmcia.h"
#include "block.h"
@@ -31,41 +32,6 @@
#define TOSA_GPIO_SD_WP (TOSA_SCOOP_GPIO_BASE + 3)
#define TOSA_GPIO_PWR_ON (TOSA_SCOOP_GPIO_BASE + 4)
-struct tc6393xb_s {
- target_phys_addr_t target_base;
-};
-
-static uint32_t tc6393xb_readb(void *opaque, target_phys_addr_t addr)
-{
- return 3;
-}
-static void tc6393xb_writeb(void *opaque, target_phys_addr_t addr,
- uint32_t value)
-{
-}
-static void tosa_tc6393xb_register(struct pxa2xx_state_s *cpu)
-{
- int iomemtype;
- struct tc6393xb_s *s;
- CPUReadMemoryFunc *tc6393xb_readfn[] = {
- tc6393xb_readb,
- tc6393xb_readb,
- tc6393xb_readb,
- };
- CPUWriteMemoryFunc *tc6393xb_writefn[] = {
- tc6393xb_writeb,
- tc6393xb_writeb,
- tc6393xb_writeb,
- };
-
- s = (struct tc6393xb_s *) qemu_mallocz(sizeof(struct tc6393xb_s));
- s->target_base = 0x10000000;
-
- iomemtype = cpu_register_io_memory(0, tc6393xb_readfn,
- tc6393xb_writefn, s);
- cpu_register_physical_memory(s->target_base, 0x200000, iomemtype);
-}
-
static void tosa_microdrive_attach(struct pxa2xx_state_s *cpu)
{
struct pcmcia_card_s *md;
@@ -132,7 +98,7 @@ static void tosa_init(ram_addr_t ram_size, int vga_ram_size,
cpu_register_physical_memory(0, TOSA_ROM,
qemu_ram_alloc(TOSA_ROM) | IO_MEM_ROM);
- tosa_tc6393xb_register(cpu);
+ tc6393xb_init(0x10000000, NULL);
scp0 = scoop_init(cpu, 0, 0x08800000);
scp1 = scoop_init(cpu, 1, 0x14800040);
diff --git a/kqemu.c b/kqemu.c
index 540aa3cc5..4783aa2a0 100644
--- a/kqemu.c
+++ b/kqemu.c
@@ -166,14 +166,19 @@ int kqemu_init(CPUState *env)
FILE_SHARE_READ | FILE_SHARE_WRITE,
NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
NULL);
+ if (kqemu_fd == KQEMU_INVALID_FD) {
+ fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
+ KQEMU_DEVICE, GetLastError());
+ return -1;
+ }
#else
kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
-#endif
if (kqemu_fd == KQEMU_INVALID_FD) {
fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
KQEMU_DEVICE, strerror(errno));
return -1;
}
+#endif
version = 0;
#ifdef _WIN32
DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index ca0023e62..67b7535d4 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -89,7 +89,7 @@ enum {
static const char *get_elf_platform(void)
{
static char elf_platform[] = "i386";
- int family = (global_env->cpuid_version >> 8) & 0xff;
+ int family = (thread_env->cpuid_version >> 8) & 0xff;
if (family > 6)
family = 6;
if (family >= 3)
@@ -101,7 +101,7 @@ static const char *get_elf_platform(void)
static uint32_t get_elf_hwcap(void)
{
- return global_env->cpuid_features;
+ return thread_env->cpuid_features;
}
#ifdef TARGET_X86_64
diff --git a/linux-user/main.c b/linux-user/main.c
index 21c2d3ba0..a4ffea3fd 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -26,6 +26,8 @@
#include "qemu.h"
#include "qemu-common.h"
+/* For tb_lock */
+#include "exec-all.h"
#define DEBUG_LOGFILE "/tmp/qemu.log"
@@ -123,6 +125,135 @@ int64_t cpu_get_real_ticks(void)
#endif
+#if defined(USE_NPTL)
+/***********************************************************/
+/* Helper routines for implementing atomic operations. */
+
+/* To implement exclusive operations we force all cpus to synchronise.
+ We don't require a full sync, only that no cpus are executing guest code.
+ The alternative is to map target atomic ops onto host equivalents,
+ which requires quite a lot of per host/target work. */
+static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static int pending_cpus;
+
+/* Make sure everything is in a consistent state for calling fork(). */
+void fork_start(void)
+{
+ mmap_fork_start();
+ pthread_mutex_lock(&tb_lock);
+ pthread_mutex_lock(&exclusive_lock);
+}
+
+void fork_end(int child)
+{
+ if (child) {
+ /* Child processes created by fork() only have a single thread.
+ Discard information about the parent threads. */
+ first_cpu = thread_env;
+ thread_env->next_cpu = NULL;
+ pending_cpus = 0;
+ pthread_mutex_init(&exclusive_lock, NULL);
+ pthread_cond_init(&exclusive_cond, NULL);
+ pthread_cond_init(&exclusive_resume, NULL);
+ pthread_mutex_init(&tb_lock, NULL);
+ } else {
+ pthread_mutex_unlock(&exclusive_lock);
+ pthread_mutex_unlock(&tb_lock);
+ }
+ mmap_fork_end(child);
+}
+
+/* Wait for pending exclusive operations to complete. The exclusive lock
+ must be held. */
+static inline void exclusive_idle(void)
+{
+ while (pending_cpus) {
+ pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+ }
+}
+
+/* Start an exclusive operation.
+ Must only be called from outside cpu_arm_exec. */
+static inline void start_exclusive(void)
+{
+ CPUState *other;
+ pthread_mutex_lock(&exclusive_lock);
+ exclusive_idle();
+
+ pending_cpus = 1;
+ /* Make all other cpus stop executing. */
+ for (other = first_cpu; other; other = other->next_cpu) {
+ if (other->running) {
+ pending_cpus++;
+ cpu_interrupt(other, CPU_INTERRUPT_EXIT);
+ }
+ }
+ if (pending_cpus > 1) {
+ pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+ }
+}
+
+/* Finish an exclusive operation. */
+static inline void end_exclusive(void)
+{
+ pending_cpus = 0;
+ pthread_cond_broadcast(&exclusive_resume);
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Wait for exclusive ops to finish, and begin cpu execution. */
+static inline void cpu_exec_start(CPUState *env)
+{
+ pthread_mutex_lock(&exclusive_lock);
+ exclusive_idle();
+ env->running = 1;
+ pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops. */
+static inline void cpu_exec_end(CPUState *env)
+{
+ pthread_mutex_lock(&exclusive_lock);
+ env->running = 0;
+ if (pending_cpus > 1) {
+ pending_cpus--;
+ if (pending_cpus == 1) {
+ pthread_cond_signal(&exclusive_cond);
+ }
+ }
+ exclusive_idle();
+ pthread_mutex_unlock(&exclusive_lock);
+}
+#else /* if !USE_NPTL */
+/* These are no-ops because we are not threadsafe. */
+static inline void cpu_exec_start(CPUState *env)
+{
+}
+
+static inline void cpu_exec_end(CPUState *env)
+{
+}
+
+static inline void start_exclusive(void)
+{
+}
+
+static inline void end_exclusive(void)
+{
+}
+
+void fork_start(void)
+{
+}
+
+void fork_end(int child)
+{
+}
+#endif
+
+
#ifdef TARGET_I386
/***********************************************************/
/* CPUX86 core interface */
@@ -378,8 +509,11 @@ do_kernel_trap(CPUARMState *env)
/* ??? No-op. Will need to do better for SMP. */
break;
case 0xffff0fc0: /* __kernel_cmpxchg */
- /* ??? This is not really atomic. However we don't support
- threads anyway, so it doesn't realy matter. */
+ /* XXX: This only works between threads, not between processes.
+ It's probably possible to implement this with native host
+ operations. However things like ldrex/strex are much harder so
+ there's not much point trying. */
+ start_exclusive();
cpsr = cpsr_read(env);
addr = env->regs[2];
/* FIXME: This should SEGV if the access fails. */
@@ -396,6 +530,7 @@ do_kernel_trap(CPUARMState *env)
cpsr &= ~CPSR_C;
}
cpsr_write(env, cpsr, CPSR_C);
+ end_exclusive();
break;
case 0xffff0fe0: /* __kernel_get_tls */
env->regs[0] = env->cp15.c13_tls2;
@@ -422,7 +557,9 @@ void cpu_loop(CPUARMState *env)
uint32_t addr;
for(;;) {
+ cpu_exec_start(env);
trapnr = cpu_arm_exec(env);
+ cpu_exec_end(env);
switch(trapnr) {
case EXCP_UDEF:
{
@@ -626,11 +763,11 @@ void cpu_loop(CPUARMState *env)
can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
- index = (index + cwp * 16) & (16 * NWINDOWS - 1);
+ index = (index + cwp * 16) % (16 * env->nwindows);
/* wrap handling : if cwp is on the last window, then we use the
registers 'after' the end */
- if (index < 8 && env->cwp == (NWINDOWS - 1))
- index += (16 * NWINDOWS);
+ if (index < 8 && env->cwp == env->nwindows - 1)
+ index += 16 * env->nwindows;
return index;
}
@@ -642,8 +779,8 @@ static inline void save_window_offset(CPUSPARCState *env, int cwp1)
sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#if defined(DEBUG_WIN)
- printf("win_overflow: sp_ptr=0x%x save_cwp=%d\n",
- (int)sp_ptr, cwp1);
+ printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
+ sp_ptr, cwp1);
#endif
for(i = 0; i < 16; i++) {
/* FIXME - what to do if put_user() fails? */
@@ -656,12 +793,12 @@ static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
unsigned int new_wim;
- new_wim = ((env->wim >> 1) | (env->wim << (NWINDOWS - 1))) &
- ((1LL << NWINDOWS) - 1);
- save_window_offset(env, (env->cwp - 2) & (NWINDOWS - 1));
+ new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
+ ((1LL << env->nwindows) - 1);
+ save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
env->wim = new_wim;
#else
- save_window_offset(env, (env->cwp - 2) & (NWINDOWS - 1));
+ save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
env->cansave++;
env->canrestore--;
#endif
@@ -672,15 +809,15 @@ static void restore_window(CPUSPARCState *env)
unsigned int new_wim, i, cwp1;
abi_ulong sp_ptr;
- new_wim = ((env->wim << 1) | (env->wim >> (NWINDOWS - 1))) &
- ((1LL << NWINDOWS) - 1);
+ new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
+ ((1LL << env->nwindows) - 1);
/* restore the invalid window */
- cwp1 = (env->cwp + 1) & (NWINDOWS - 1);
+ cwp1 = cpu_cwp_inc(env, env->cwp + 1);
sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#if defined(DEBUG_WIN)
- printf("win_underflow: sp_ptr=0x%x load_cwp=%d\n",
- (int)sp_ptr, cwp1);
+ printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
+ sp_ptr, cwp1);
#endif
for(i = 0; i < 16; i++) {
/* FIXME - what to do if get_user() fails? */
@@ -690,8 +827,8 @@ static void restore_window(CPUSPARCState *env)
env->wim = new_wim;
#ifdef TARGET_SPARC64
env->canrestore++;
- if (env->cleanwin < NWINDOWS - 1)
- env->cleanwin++;
+ if (env->cleanwin < env->nwindows - 1)
+ env->cleanwin++;
env->cansave--;
#endif
}
@@ -703,14 +840,14 @@ static void flush_windows(CPUSPARCState *env)
offset = 1;
for(;;) {
/* if restore would invoke restore_window(), then we can stop */
- cwp1 = (env->cwp + offset) & (NWINDOWS - 1);
+ cwp1 = cpu_cwp_inc(env, env->cwp + offset);
if (env->wim & (1 << cwp1))
break;
save_window_offset(env, cwp1);
offset++;
}
/* set wim so that restore will reload the registers */
- cwp1 = (env->cwp + 1) & (NWINDOWS - 1);
+ cwp1 = cpu_cwp_inc(env, env->cwp + 1);
env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
printf("flush_windows: nb=%d\n", offset - 1);
@@ -2044,14 +2181,13 @@ void usage(void)
_exit(1);
}
-/* XXX: currently only used for async signals (see signal.c) */
-CPUState *global_env;
+THREAD CPUState *thread_env;
+/* Assumes contents are already zeroed. */
void init_task_state(TaskState *ts)
{
int i;
- memset(ts, 0, sizeof(TaskState));
ts->used = 1;
ts->first_free = ts->sigqueue_table;
for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
@@ -2203,7 +2339,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
- global_env = env;
+ thread_env = env;
if (getenv("QEMU_STRACE")) {
do_strace = 1;
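The new NPTL block in main.c gives the user-mode emulator a cheap way to emulate guest atomic sequences without mapping them onto host atomics: start_exclusive() interrupts every other emulated CPU and waits until none of them is inside cpu_*_exec(), while each CPU loop brackets the translator with cpu_exec_start()/cpu_exec_end() so that the "running" state stays accurate. The usage pattern, as the ARM cpu_loop and __kernel_cmpxchg changes in this hunk apply it, reduces to roughly:

    /* per-CPU execution loop: mark the CPU as running while it executes TBs */
    for (;;) {
        cpu_exec_start(env);
        trapnr = cpu_arm_exec(env);   /* any cpu_*_exec() is bracketed the same way */
        cpu_exec_end(env);
        /* ... handle trapnr ... */
    }

    /* emulating a guest atomic sequence (must be outside cpu_*_exec()): */
    start_exclusive();                /* stop all other emulated CPUs */
    /* ... read-modify-write guest memory, e.g. the cmpxchg above ... */
    end_exclusive();                  /* let them resume */

As the comment in the hunk notes, this only serializes threads of one QEMU process; atomics shared with a separate process are still not safe.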
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index c0821386d..be1ddb9f2 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -46,6 +46,22 @@ void mmap_unlock(void)
pthread_mutex_unlock(&mmap_mutex);
}
}
+
+/* Grab lock to make sure things are in a consistent state after fork(). */
+void mmap_fork_start(void)
+{
+ if (mmap_lock_count)
+ abort();
+ pthread_mutex_lock(&mmap_mutex);
+}
+
+void mmap_fork_end(int child)
+{
+ if (child)
+ pthread_mutex_init(&mmap_mutex, NULL);
+ else
+ pthread_mutex_unlock(&mmap_mutex);
+}
#else
/* We aren't threadsafe to start with, so no need to worry about locking. */
void mmap_lock(void)
@@ -57,6 +73,52 @@ void mmap_unlock(void)
}
#endif
+void *qemu_vmalloc(size_t size)
+{
+ void *p;
+ unsigned long addr;
+ mmap_lock();
+ /* Use map and mark the pages as used. */
+ p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ addr = (unsigned long)p;
+ if (addr == (target_ulong) addr) {
+ /* Allocated region overlaps guest address space.
+ This may recurse. */
+ page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
+ PAGE_RESERVED);
+ }
+
+ mmap_unlock();
+ return p;
+}
+
+void *qemu_malloc(size_t size)
+{
+ char * p;
+ size += 16;
+ p = qemu_vmalloc(size);
+ *(size_t *)p = size;
+ return p + 16;
+}
+
+/* We use map, which is always zero initialized. */
+void * qemu_mallocz(size_t size)
+{
+ return qemu_malloc(size);
+}
+
+void qemu_free(void *ptr)
+{
+ /* FIXME: We should unmark the reserved pages here. However this gets
+ complicated when one target page spans multiple host pages, so we
+ don't bother. */
+ size_t *p;
+ p = (size_t *)((char *)ptr - 16);
+ munmap(p, *p);
+}
+
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
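The allocator added to mmap.c exists so that memory handed out by the emulator's own malloc is marked PAGE_RESERVED and can never collide with guest mappings. qemu_malloc() over-allocates by 16 bytes, stores the mapping size in that header, and returns the address just past it; qemu_free() walks back to recover the size for munmap(). A sketch of the resulting layout, using the functions defined above:

    /* one allocation:
     *
     *   p -> [ size_t size | padding up to 16 bytes | user data ... ]
     *                                               ^
     *                                               pointer returned = p + 16
     */
    void *data = qemu_malloc(100);                  /* maps 116 bytes, returns p + 16 */
    size_t mapped = *(size_t *)((char *)data - 16); /* the stored size, here 116 */
    qemu_free(data);                                /* munmap(p, mapped) */

The 16-byte header keeps the usual malloc alignment, and qemu_mallocz() can simply alias qemu_malloc() because anonymous mmap memory is already zeroed; per the FIXME, qemu_free() does not clear PAGE_RESERVED again.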
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 81f7fb290..d3a3c3c20 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -37,6 +37,12 @@ typedef target_long abi_long;
#include "target_signal.h"
#include "gdbstub.h"
+#if defined(USE_NPTL)
+#define THREAD __thread
+#else
+#define THREAD
+#endif
+
/* This struct is used to hold certain information about the image.
* Basically, it replicates in user space what would be certain
* task_struct fields in the kernel
@@ -184,12 +190,14 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6);
void gemu_log(const char *fmt, ...) __attribute__((format(printf,1,2)));
-extern CPUState *global_env;
+extern THREAD CPUState *thread_env;
void cpu_loop(CPUState *env);
void init_paths(const char *prefix);
const char *path(const char *pathname);
char *target_strerror(int err);
int get_osversion(void);
+void fork_start(void);
+void fork_end(int child);
extern int loglevel;
extern FILE *logfile;
@@ -235,6 +243,10 @@ int target_msync(abi_ulong start, abi_ulong len, int flags);
extern unsigned long last_brk;
void mmap_lock(void);
void mmap_unlock(void);
+#if defined(USE_NPTL)
+void mmap_fork_start(void);
+void mmap_fork_end(int child);
+#endif
/* user access */
diff --git a/linux-user/signal.c b/linux-user/signal.c
index e6e1a0826..623a5e31c 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -424,9 +424,9 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
fprintf(stderr, "qemu: got signal %d\n", sig);
#endif
host_to_target_siginfo_noswap(&tinfo, info);
- if (queue_signal(global_env, sig, &tinfo) == 1) {
+ if (queue_signal(thread_env, sig, &tinfo) == 1) {
/* interrupt the virtual CPU as soon as possible */
- cpu_interrupt(global_env, CPU_INTERRUPT_EXIT);
+ cpu_interrupt(thread_env, CPU_INTERRUPT_EXIT);
}
}
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 25b9e5a65..cd90946ae 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -27,6 +27,7 @@
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
+#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
@@ -74,6 +75,11 @@
#if defined(USE_NPTL)
#include <linux/futex.h>
+#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
+ CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
+#else
+/* XXX: Hardcode the above values. */
+#define CLONE_NPTL_FLAGS2 0
#endif
//#define DEBUG
@@ -2706,6 +2712,48 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
#endif /* defined(TARGET_I386) */
+#if defined(USE_NPTL)
+
+#define NEW_STACK_SIZE PTHREAD_STACK_MIN
+
+static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
+typedef struct {
+ CPUState *env;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ pthread_t thread;
+ uint32_t tid;
+ abi_ulong child_tidptr;
+ abi_ulong parent_tidptr;
+ sigset_t sigmask;
+} new_thread_info;
+
+static void *clone_func(void *arg)
+{
+ new_thread_info *info = arg;
+ CPUState *env;
+
+ env = info->env;
+ thread_env = env;
+ info->tid = gettid();
+ if (info->child_tidptr)
+ put_user_u32(info->tid, info->child_tidptr);
+ if (info->parent_tidptr)
+ put_user_u32(info->tid, info->parent_tidptr);
+ /* Enable signals. */
+ sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
+ /* Signal to the parent that we're ready. */
+ pthread_mutex_lock(&info->mutex);
+ pthread_cond_broadcast(&info->cond);
+ pthread_mutex_unlock(&info->mutex);
+ /* Wait until the parent has finished initializing the tls state. */
+ pthread_mutex_lock(&clone_lock);
+ pthread_mutex_unlock(&clone_lock);
+ cpu_loop(env);
+ /* never exits */
+ return NULL;
+}
+#else
/* this stack is the equivalent of the kernel stack associated with a
thread/process */
#define NEW_STACK_SIZE 8192
@@ -2717,26 +2765,29 @@ static int clone_func(void *arg)
/* never exits */
return 0;
}
+#endif
/* do_fork() Must return host values and target errnos (unlike most
do_*() functions). */
-int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp)
+static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
+ abi_ulong parent_tidptr, target_ulong newtls,
+ abi_ulong child_tidptr)
{
int ret;
TaskState *ts;
uint8_t *new_stack;
CPUState *new_env;
+#if defined(USE_NPTL)
+ unsigned int nptl_flags;
+ sigset_t sigmask;
+#endif
if (flags & CLONE_VM) {
#if defined(USE_NPTL)
- /* qemu is not threadsafe. Bail out immediately if application
- tries to create a thread. */
- if (!(flags & CLONE_VFORK)) {
- gemu_log ("clone(CLONE_VM) not supported\n");
- return -EINVAL;
- }
+ new_thread_info info;
+ pthread_attr_t attr;
#endif
- ts = malloc(sizeof(TaskState) + NEW_STACK_SIZE);
+ ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
init_task_state(ts);
new_stack = ts->stack;
/* we create a new CPU instance. */
@@ -2744,19 +2795,94 @@ int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp)
/* Init regs that differ from the parent. */
cpu_clone_regs(new_env, newsp);
new_env->opaque = ts;
+#if defined(USE_NPTL)
+ nptl_flags = flags;
+ flags &= ~CLONE_NPTL_FLAGS2;
+
+ /* TODO: Implement CLONE_CHILD_CLEARTID. */
+ if (nptl_flags & CLONE_SETTLS)
+ cpu_set_tls (new_env, newtls);
+
+ /* Grab a mutex so that thread setup appears atomic. */
+ pthread_mutex_lock(&clone_lock);
+
+ memset(&info, 0, sizeof(info));
+ pthread_mutex_init(&info.mutex, NULL);
+ pthread_mutex_lock(&info.mutex);
+ pthread_cond_init(&info.cond, NULL);
+ info.env = new_env;
+ if (nptl_flags & CLONE_CHILD_SETTID)
+ info.child_tidptr = child_tidptr;
+ if (nptl_flags & CLONE_PARENT_SETTID)
+ info.parent_tidptr = parent_tidptr;
+
+ ret = pthread_attr_init(&attr);
+ ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
+ /* It is not safe to deliver signals until the child has finished
+ initializing, so temporarily block all signals. */
+ sigfillset(&sigmask);
+ sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
+
+ ret = pthread_create(&info.thread, &attr, clone_func, &info);
+
+ sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
+ pthread_attr_destroy(&attr);
+ if (ret == 0) {
+ /* Wait for the child to initialize. */
+ pthread_cond_wait(&info.cond, &info.mutex);
+ ret = info.tid;
+ if (flags & CLONE_PARENT_SETTID)
+ put_user_u32(ret, parent_tidptr);
+ } else {
+ ret = -1;
+ }
+ pthread_mutex_unlock(&info.mutex);
+ pthread_cond_destroy(&info.cond);
+ pthread_mutex_destroy(&info.mutex);
+ pthread_mutex_unlock(&clone_lock);
+#else
+ if (flags & CLONE_NPTL_FLAGS2)
+ return -EINVAL;
+ /* This is probably going to die very quickly, but do it anyway. */
#ifdef __ia64__
ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#else
ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
+#endif
} else {
/* if no CLONE_VM, we consider it is a fork */
- if ((flags & ~CSIGNAL) != 0)
+ if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
return -EINVAL;
+ fork_start();
ret = fork();
+#if defined(USE_NPTL)
+ /* There is a race condition here. The parent process could
+ theoretically read the TID in the child process before the child
+ tid is set. This would require using either ptrace
+ (not implemented) or having *_tidptr to point at a shared memory
+ mapping. We can't repeat the spinlock hack used above because
+ the child process gets its own copy of the lock. */
+ if (ret == 0) {
+ cpu_clone_regs(env, newsp);
+ fork_end(1);
+ /* Child Process. */
+ if (flags & CLONE_CHILD_SETTID)
+ put_user_u32(gettid(), child_tidptr);
+ if (flags & CLONE_PARENT_SETTID)
+ put_user_u32(gettid(), parent_tidptr);
+ ts = (TaskState *)env->opaque;
+ if (flags & CLONE_SETTLS)
+ cpu_set_tls (env, newtls);
+ /* TODO: Implement CLONE_CHILD_CLEARTID. */
+ } else {
+ fork_end(0);
+ }
+#else
if (ret == 0) {
cpu_clone_regs(env, newsp);
}
+#endif
}
return ret;
}
@@ -2944,7 +3070,7 @@ void syscall_init(void)
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
-#ifdef TARGET_WORDS_BIG_ENDIAN
+#ifdef TARGET_WORDS_BIGENDIAN
return ((uint64_t)word0 << 32) | word1;
#else
return ((uint64_t)word1 << 32) | word0;
@@ -3153,7 +3279,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = do_brk(arg1);
break;
case TARGET_NR_fork:
- ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
+ ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
break;
#ifdef TARGET_NR_waitpid
case TARGET_NR_waitpid:
@@ -4531,7 +4657,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = get_errno(fsync(arg1));
break;
case TARGET_NR_clone:
- ret = get_errno(do_fork(cpu_env, arg1, arg2));
+ ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
break;
#ifdef __NR_exit_group
/* new thread calls */
@@ -4967,7 +5093,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_vfork
case TARGET_NR_vfork:
- ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
+ ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
+ 0, 0, 0, 0));
break;
#endif
#ifdef TARGET_NR_ugetrlimit
diff --git a/monitor.c b/monitor.c
index 9cfee4c9e..e0d076c79 100644
--- a/monitor.c
+++ b/monitor.c
@@ -36,11 +36,9 @@
#include "disas.h"
#include "migration.h"
#include <dirent.h>
+#include "qemu-timer.h"
#include "qemu-kvm.h"
-#ifdef CONFIG_PROFILER
-#include "qemu-timer.h" /* for ticks_per_sec */
-#endif
//#define DEBUG
//#define DEBUG_COMPLETION
@@ -423,18 +421,26 @@ static void do_eject(int force, const char *filename)
eject_device(bs, force);
}
-static void do_change_block(const char *device, const char *filename)
+static void do_change_block(const char *device, const char *filename, const char *fmt)
{
BlockDriverState *bs;
+ BlockDriver *drv = NULL;
bs = bdrv_find(device);
if (!bs) {
term_printf("device not found\n");
return;
}
+ if (fmt) {
+ drv = bdrv_find_format(fmt);
+ if (!drv) {
+ term_printf("invalid format %s\n", fmt);
+ return;
+ }
+ }
if (eject_device(bs, 0) < 0)
return;
- bdrv_open(bs, filename, 0);
+ bdrv_open2(bs, filename, 0, drv);
qemu_key_check(bs, filename);
}
@@ -453,12 +459,12 @@ static void do_change_vnc(const char *target)
}
}
-static void do_change(const char *device, const char *target)
+static void do_change(const char *device, const char *target, const char *fmt)
{
if (strcmp(device, "vnc") == 0) {
do_change_vnc(target);
} else {
- do_change_block(device, target);
+ do_change_block(device, target, fmt);
}
}
@@ -923,6 +929,23 @@ static const KeyDef key_defs[] = {
{ 0xd2, "insert" },
{ 0xd3, "delete" },
+#if defined(TARGET_SPARC) && !defined(TARGET_SPARC64)
+ { 0xf0, "stop" },
+ { 0xf1, "again" },
+ { 0xf2, "props" },
+ { 0xf3, "undo" },
+ { 0xf4, "front" },
+ { 0xf5, "copy" },
+ { 0xf6, "open" },
+ { 0xf7, "paste" },
+ { 0xf8, "find" },
+ { 0xf9, "cut" },
+ { 0xfa, "lf" },
+ { 0xfb, "help" },
+ { 0xfc, "meta_l" },
+ { 0xfd, "meta_r" },
+ { 0xfe, "compose" },
+#endif
{ 0, NULL },
};
@@ -944,14 +967,37 @@ static int get_keycode(const char *key)
return -1;
}
-static void do_sendkey(const char *string)
+#define MAX_KEYCODES 16
+static uint8_t keycodes[MAX_KEYCODES];
+static int nb_pending_keycodes;
+static QEMUTimer *key_timer;
+
+static void release_keys(void *opaque)
+{
+ int keycode;
+
+ while (nb_pending_keycodes > 0) {
+ nb_pending_keycodes--;
+ keycode = keycodes[nb_pending_keycodes];
+ if (keycode & 0x80)
+ kbd_put_keycode(0xe0);
+ kbd_put_keycode(keycode | 0x80);
+ }
+}
+
+static void do_sendkey(const char *string, int has_hold_time, int hold_time)
{
- uint8_t keycodes[16];
- int nb_keycodes = 0;
char keyname_buf[16];
char *separator;
int keyname_len, keycode, i;
+ if (nb_pending_keycodes > 0) {
+ qemu_del_timer(key_timer);
+ release_keys(NULL);
+ }
+ if (!has_hold_time)
+ hold_time = 100;
+ i = 0;
while (1) {
separator = strchr(string, '-');
keyname_len = separator ? separator - string : strlen(string);
@@ -961,7 +1007,7 @@ static void do_sendkey(const char *string)
term_printf("invalid key: '%s...'\n", keyname_buf);
return;
}
- if (nb_keycodes == sizeof(keycodes)) {
+ if (i == MAX_KEYCODES) {
term_printf("too many keys\n");
return;
}
@@ -971,26 +1017,23 @@ static void do_sendkey(const char *string)
term_printf("unknown key: '%s'\n", keyname_buf);
return;
}
- keycodes[nb_keycodes++] = keycode;
+ keycodes[i++] = keycode;
}
if (!separator)
break;
string = separator + 1;
}
+ nb_pending_keycodes = i;
/* key down events */
- for(i = 0; i < nb_keycodes; i++) {
+ for (i = 0; i < nb_pending_keycodes; i++) {
keycode = keycodes[i];
if (keycode & 0x80)
kbd_put_keycode(0xe0);
kbd_put_keycode(keycode & 0x7f);
}
- /* key up events */
- for(i = nb_keycodes - 1; i >= 0; i--) {
- keycode = keycodes[i];
- if (keycode & 0x80)
- kbd_put_keycode(0xe0);
- kbd_put_keycode(keycode | 0x80);
- }
+ /* delayed key up events */
+ qemu_mod_timer(key_timer, qemu_get_clock(vm_clock) +
+ muldiv64(ticks_per_sec, hold_time, 1000));
}
static int mouse_button_state;
@@ -1043,12 +1086,22 @@ static void do_ioport_read(int count, int format, int size, int addr, int has_in
suffix, addr, size * 2, val);
}
+/* boot_set handler */
+static QEMUBootSetHandler *qemu_boot_set_handler = NULL;
+static void *boot_opaque;
+
+void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque)
+{
+ qemu_boot_set_handler = func;
+ boot_opaque = opaque;
+}
+
static void do_boot_set(const char *bootdevice)
{
int res;
if (qemu_boot_set_handler) {
- res = qemu_boot_set_handler(bootdevice);
+ res = qemu_boot_set_handler(boot_opaque, bootdevice);
if (res == 0)
term_printf("boot device list now set to %s\n", bootdevice);
else
@@ -1346,8 +1399,8 @@ static term_cmd_t term_cmds[] = {
"", "quit the emulator" },
{ "eject", "-fB", do_eject,
"[-f] device", "eject a removable medium (use -f to force it)" },
- { "change", "BF", do_change,
- "device filename", "change a removable medium" },
+ { "change", "BFs?", do_change,
+ "device filename [format]", "change a removable medium, optional format" },
{ "screendump", "F", do_screen_dump,
"filename", "save screen into PPM image 'filename'" },
{ "logfile", "F", do_logfile,
@@ -1377,8 +1430,8 @@ static term_cmd_t term_cmds[] = {
{ "i", "/ii.", do_ioport_read,
"/fmt addr", "I/O port read" },
- { "sendkey", "s", do_sendkey,
- "keys", "send keys to the VM (e.g. 'sendkey ctrl-alt-f1')" },
+ { "sendkey", "si?", do_sendkey,
+ "keys [hold_ms]", "send keys to the VM (e.g. 'sendkey ctrl-alt-f1', default hold time=100 ms)" },
{ "system_reset", "", do_system_reset,
"", "reset the system" },
{ "system_powerdown", "", do_system_powerdown,
@@ -2694,6 +2747,9 @@ void monitor_init(CharDriverState *hd, int show_banner)
int i;
if (is_first_init) {
+ key_timer = qemu_new_timer(vm_clock, release_keys, NULL);
+ if (!key_timer)
+ return;
for (i = 0; i < MAX_MON; i++) {
monitor_hd[i] = NULL;
}
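The monitor changes make key release asynchronous: do_sendkey() now emits only the key-down events, remembers the pressed keycodes, and arms a vm_clock timer that replays the matching key-up events hold_time milliseconds later (100 ms when no hold time is given), so for example "sendkey ctrl-alt-delete 500" holds the combination for half a second. The deadline computation is the usual vm_clock pattern, restated from the hunk above:

    /* hold_time is in milliseconds; vm_clock advances ticks_per_sec per second,
       so the timer fires at now + ticks_per_sec * hold_time / 1000 */
    qemu_mod_timer(key_timer, qemu_get_clock(vm_clock) +
                              muldiv64(ticks_per_sec, hold_time, 1000));

The same hunk also lets "change" take an optional format argument, which is resolved with bdrv_find_format() before the medium is reopened via bdrv_open2().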
diff --git a/nbd.c b/nbd.c
index fcf033929..c7d63d348 100644
--- a/nbd.c
+++ b/nbd.c
@@ -388,7 +388,7 @@ int nbd_trip(BlockDriverState *bs, int csock, off_t size, uint64_t dev_offset, o
}
if (len > sizeof(data)) {
- LOG("len (%u) is larger than max len (%lu)",
+ LOG("len (%u) is larger than max len (%u)",
len, sizeof(data));
errno = EINVAL;
return -1;
diff --git a/osdep.h b/osdep.h
index 62de45704..a32242e07 100644
--- a/osdep.h
+++ b/osdep.h
@@ -35,9 +35,11 @@
#define always_inline inline
#else
#define always_inline __attribute__ (( always_inline )) __inline__
+#define inline always_inline
#endif
-#endif
+#else
#define inline always_inline
+#endif
#ifdef __i386__
#define REGPARM __attribute((regparm(3)))
diff --git a/qemu-doc.texi b/qemu-doc.texi
index dfb486457..ac3c41718 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -175,13 +175,16 @@ Adlib(OPL2) - Yamaha YM3812 compatible chip
@item
Gravis Ultrasound GF1 sound card
@item
+CS4231A compatible sound card
+@item
PCI UHCI USB controller and a virtual USB hub.
@end itemize
SMP is supported with up to 255 CPUs.
-Note that adlib, ac97 and gus are only available when QEMU was configured
-with --enable-adlib, --enable-ac97 or --enable-gus respectively.
+Note that adlib, ac97, gus and cs4231a are only available when QEMU
+was configured with --enable-adlib, --enable-ac97, --enable-gus or
+--enable-cs4231a respectively.
QEMU uses the PC BIOS from the Bochs project and the Plex86/Bochs LGPL
VGA BIOS.
@@ -191,6 +194,8 @@ QEMU uses YM3812 emulation by Tatsuyuki Satoh.
QEMU uses GUS emulation(GUSEMU32 @url{http://www.deinmeister.de/gusemu/})
by Tibor "TS" Schütz.
+CS4231A is the chip used in Windows Sound System and GUSMAX products.
+
@c man end
@node pcsys_quickstart
diff --git a/qemu-img.c b/qemu-img.c
index b3190d469..e18032f93 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -82,13 +82,17 @@ static void help(void)
"Command syntax:\n"
" create [-e] [-6] [-b base_image] [-f fmt] filename [size]\n"
" commit [-f fmt] filename\n"
- " convert [-c] [-e] [-6] [-f fmt] [-O output_fmt] filename [filename2 [...]] output_filename\n"
+ " convert [-c] [-e] [-6] [-f fmt] [-O output_fmt] [-B output_base_image] filename [filename2 [...]] output_filename\n"
" info [-f fmt] filename\n"
"\n"
"Command parameters:\n"
" 'filename' is a disk image filename\n"
" 'base_image' is the read-only disk image which is used as base for a copy on\n"
" write image; the copy on write image only stores the modified data\n"
+ " 'output_base_image' forces the output image to be created as a copy on write\n"
+ " image of the specified base image; 'output_base_image' should have the same\n"
+ " content as the input's base image, however the path, image format, etc may\n"
+ " differ\n"
" 'fmt' is the disk image format. It is guessed automatically in most cases\n"
" 'size' is the disk image size in kilobytes. Optional suffixes 'M' (megabyte)\n"
" and 'G' (gigabyte) are supported\n"
@@ -377,6 +381,13 @@ static int is_not_zero(const uint8_t *sector, int len)
return 0;
}
+/*
+ * Returns true iff the first sector pointed to by 'buf' contains at least
+ * one non-NUL byte.
+ *
+ * 'pnum' is set to the number of sectors (including and immediately following
+ * the first one) that are known to be in the same allocated/unallocated state.
+ */
static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
{
int v, i;
@@ -400,7 +411,7 @@ static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
static int img_convert(int argc, char **argv)
{
int c, ret, n, n1, bs_n, bs_i, flags, cluster_size, cluster_sectors;
- const char *fmt, *out_fmt, *out_filename;
+ const char *fmt, *out_fmt, *out_baseimg, *out_filename;
BlockDriver *drv;
BlockDriverState **bs, *out_bs;
int64_t total_sectors, nb_sectors, sector_num, bs_offset;
@@ -411,9 +422,10 @@ static int img_convert(int argc, char **argv)
fmt = NULL;
out_fmt = "raw";
+ out_baseimg = NULL;
flags = 0;
for(;;) {
- c = getopt(argc, argv, "f:O:hce6");
+ c = getopt(argc, argv, "f:O:B:hce6");
if (c == -1)
break;
switch(c) {
@@ -426,6 +438,9 @@ static int img_convert(int argc, char **argv)
case 'O':
out_fmt = optarg;
break;
+ case 'B':
+ out_baseimg = optarg;
+ break;
case 'c':
flags |= BLOCK_FLAG_COMPRESS;
break;
@@ -442,6 +457,9 @@ static int img_convert(int argc, char **argv)
if (bs_n < 1) help();
out_filename = argv[argc - 1];
+
+ if (bs_n > 1 && out_baseimg)
+ error("-B makes no sense when concatenating multiple input images");
bs = calloc(bs_n, sizeof(BlockDriverState *));
if (!bs)
@@ -468,7 +486,7 @@ static int img_convert(int argc, char **argv)
if (flags & BLOCK_FLAG_ENCRYPT && flags & BLOCK_FLAG_COMPRESS)
error("Compression and encryption not supported at the same time");
- ret = bdrv_create(drv, out_filename, total_sectors, NULL, flags);
+ ret = bdrv_create(drv, out_filename, total_sectors, out_baseimg, flags);
if (ret < 0) {
if (ret == -ENOTSUP) {
error("Formatting not supported for file format '%s'", fmt);
@@ -547,7 +565,7 @@ static int img_convert(int argc, char **argv)
/* signal EOF to align */
bdrv_write_compressed(out_bs, 0, NULL, 0);
} else {
- sector_num = 0;
+ sector_num = 0; // total number of sectors converted so far
for(;;) {
nb_sectors = total_sectors - sector_num;
if (nb_sectors <= 0)
@@ -570,6 +588,20 @@ static int img_convert(int argc, char **argv)
if (n > bs_offset + bs_sectors - sector_num)
n = bs_offset + bs_sectors - sector_num;
+ /* If the output image is being created as a copy on write image,
+ assume that sectors which are unallocated in the input image
+ are present in both the output's and input's base images (no
+ need to copy them). */
+ if (out_baseimg) {
+ if (!bdrv_is_allocated(bs[bs_i], sector_num - bs_offset, n, &n1)) {
+ sector_num += n1;
+ continue;
+ }
+ /* The next 'n1' sectors are allocated in the input image. Copy
+ only those as they may be followed by unallocated sectors. */
+ n = n1;
+ }
+
if (bdrv_read(bs[bs_i], sector_num - bs_offset, buf, n) < 0)
error("error while reading");
/* NOTE: at the same time we convert, we do not write zero
@@ -577,7 +609,10 @@ static int img_convert(int argc, char **argv)
should add a specific call to have the info to go faster */
buf1 = buf;
while (n > 0) {
- if (is_allocated_sectors(buf1, n, &n1)) {
+ /* If the output image is being created as a copy on write image,
+ copy all sectors even the ones containing only NUL bytes,
+ because they may differ from the sectors in the base image. */
+ if (out_baseimg || is_allocated_sectors(buf1, n, &n1)) {
if (bdrv_write(out_bs, sector_num, buf1, n1) < 0)
error("error while writing");
}
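The -B option changes what qemu-img convert may skip. Without it, runs of all-zero sectors are simply not written, since the destination reads back zeros anyway; with it, sectors that are unallocated in the input are assumed to be supplied by the shared base image and are skipped, while every allocated sector, zero or not, has to be copied because the base image may hold different data there. The helper driving this is is_allocated_sectors(), whose new comment above documents pnum; its typical use is a run-length scan along these lines (a sketch of the loop shape, with 512-byte sectors as elsewhere in qemu-img.c):

    /* walk 'n' sectors at 'buf' in runs of uniform content */
    while (n > 0) {
        int n1;
        if (is_allocated_sectors(buf, n, &n1)) {
            /* n1 sectors carry data: write them out */
        } else {
            /* n1 sectors are all zero: skippable (unless -B was given) */
        }
        buf += n1 * 512;
        n -= n1;
    }

A typical invocation of the new mode would be something like "qemu-img convert -O qcow2 -B base.qcow2 overlay.qcow2 copy.qcow2" (file names are examples).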
diff --git a/qemu-img.texi b/qemu-img.texi
index 89f912df9..1c0504bcf 100644
--- a/qemu-img.texi
+++ b/qemu-img.texi
@@ -10,7 +10,7 @@ The following commands are supported:
@table @option
@item create [-e] [-6] [-b @var{base_image}] [-f @var{fmt}] @var{filename} [@var{size}]
@item commit [-f @var{fmt}] @var{filename}
-@item convert [-c] [-e] [-6] [-f @var{fmt}] @var{filename} [-O @var{output_fmt}] @var{output_filename}
+@item convert [-c] [-e] [-6] [-f @var{fmt}] [-O @var{output_fmt}] [-B @var{output_base_image}] @var{filename} [@var{filename2} [...]] @var{output_filename}
@item info [-f @var{fmt}] @var{filename}
@end table
@@ -21,7 +21,11 @@ Command parameters:
@item base_image
is the read-only disk image which is used as base for a copy on
write image; the copy on write image only stores the modified data
-
+@item output_base_image
+forces the output image to be created as a copy on write
+image of the specified base image; @code{output_base_image} should have the same
+content as the input's base image, however the path, image format, etc may
+differ
@item fmt
is the disk image format. It is guessed automatically in most cases. The following formats are supported:
diff --git a/qemu-lock.h b/qemu-lock.h
new file mode 100644
index 000000000..f565565e5
--- /dev/null
+++ b/qemu-lock.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* Locking primitives. Most of this code should be redundant -
+ system emulation doesn't need/use locking, NPTL userspace uses
+ pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
+ In either case a spinlock is probably the wrong kind of lock.
+ Spinlocks are only good if you know another CPU has the lock and is
+ likely to release it soon. In environments where you have more threads
+ than physical CPUs (the extreme case being a single CPU host) a spinlock
+ simply wastes CPU until the OS decides to preempt it. */
+#if defined(USE_NPTL)
+
+#include <pthread.h>
+#define spin_lock pthread_mutex_lock
+#define spin_unlock pthread_mutex_unlock
+#define spinlock_t pthread_mutex_t
+#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
+
+#else
+
+#if defined(__hppa__)
+
+typedef int spinlock_t[4];
+
+#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }
+
+static inline void resetlock (spinlock_t *p)
+{
+ (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
+}
+
+#else
+
+typedef int spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED 0
+
+static inline void resetlock (spinlock_t *p)
+{
+ *p = SPIN_LOCK_UNLOCKED;
+}
+
+#endif
+
+#if defined(__powerpc__)
+static inline int testandset (int *p)
+{
+ int ret;
+ __asm__ __volatile__ (
+ "0: lwarx %0,0,%1\n"
+ " xor. %0,%3,%0\n"
+ " bne 1f\n"
+ " stwcx. %2,0,%1\n"
+ " bne- 0b\n"
+ "1: "
+ : "=&r" (ret)
+ : "r" (p), "r" (1), "r" (0)
+ : "cr0", "memory");
+ return ret;
+}
+#elif defined(__i386__)
+static inline int testandset (int *p)
+{
+ long int readval = 0;
+
+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
+ : "+m" (*p), "+a" (readval)
+ : "r" (1)
+ : "cc");
+ return readval;
+}
+#elif defined(__x86_64__)
+static inline int testandset (int *p)
+{
+ long int readval = 0;
+
+ __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
+ : "+m" (*p), "+a" (readval)
+ : "r" (1)
+ : "cc");
+ return readval;
+}
+#elif defined(__s390__)
+static inline int testandset (int *p)
+{
+ int ret;
+
+ __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
+ " jl 0b"
+ : "=&d" (ret)
+ : "r" (1), "a" (p), "0" (*p)
+ : "cc", "memory" );
+ return ret;
+}
+#elif defined(__alpha__)
+static inline int testandset (int *p)
+{
+ int ret;
+ unsigned long one;
+
+ __asm__ __volatile__ ("0: mov 1,%2\n"
+ " ldl_l %0,%1\n"
+ " stl_c %2,%1\n"
+ " beq %2,1f\n"
+ ".subsection 2\n"
+ "1: br 0b\n"
+ ".previous"
+ : "=r" (ret), "=m" (*p), "=r" (one)
+ : "m" (*p));
+ return ret;
+}
+#elif defined(__sparc__)
+static inline int testandset (int *p)
+{
+ int ret;
+
+ __asm__ __volatile__("ldstub [%1], %0"
+ : "=r" (ret)
+ : "r" (p)
+ : "memory");
+
+ return (ret ? 1 : 0);
+}
+#elif defined(__arm__)
+static inline int testandset (int *spinlock)
+{
+ register unsigned int ret;
+ __asm__ __volatile__("swp %0, %1, [%2]"
+ : "=r"(ret)
+ : "0"(1), "r"(spinlock));
+
+ return ret;
+}
+#elif defined(__mc68000)
+static inline int testandset (int *p)
+{
+ char ret;
+ __asm__ __volatile__("tas %1; sne %0"
+ : "=r" (ret)
+ : "m" (p)
+ : "cc","memory");
+ return ret;
+}
+#elif defined(__hppa__)
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data,
+ and GCC only guarantees 8-byte alignment for stack locals, we can't
+ be assured of 16-byte alignment for atomic lock data even if we
+   specify "__attribute__ ((aligned(16)))" in the type declaration.  So,
+ we use a struct containing an array of four ints for the atomic lock
+ type and dynamically select the 16-byte aligned int from the array
+ for the semaphore. */
+#define __PA_LDCW_ALIGNMENT 16
+static inline void *ldcw_align (void *p) {
+ unsigned long a = (unsigned long)p;
+ a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
+ return (void *)a;
+}
+
+static inline int testandset (spinlock_t *p)
+{
+ unsigned int ret;
+ p = ldcw_align(p);
+ __asm__ __volatile__("ldcw 0(%1),%0"
+ : "=r" (ret)
+ : "r" (p)
+ : "memory" );
+ return !ret;
+}
+
+#elif defined(__ia64)
+
+#include "ia64intrin.h"
+
+static inline int testandset (int *p)
+{
+ return (int)cmpxchg_acq(p,0,1);
+}
+#elif defined(__mips__)
+static inline int testandset (int *p)
+{
+ int ret;
+
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set noat \n"
+ " .set mips2 \n"
+ "1: li $1, 1 \n"
+ " ll %0, %1 \n"
+ " sc $1, %1 \n"
+ " beqz $1, 1b \n"
+ " .set pop "
+ : "=r" (ret), "+R" (*p)
+ :
+ : "memory");
+
+ return ret;
+}
+#else
+#error unimplemented CPU support
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+static inline void spin_lock(spinlock_t *lock)
+{
+ while (testandset(lock));
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ resetlock(lock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return !testandset(lock);
+}
+#else
+static inline void spin_lock(spinlock_t *lock)
+{
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return 1;
+}
+#endif
+
+#endif
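
A minimal usage sketch of the interface introduced above (the lock name and the
critical section are illustrative only):

    static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;

    static void demo_critical_section(void)
    {
        spin_lock(&demo_lock);
        /* ... touch state shared with another thread ... */
        spin_unlock(&demo_lock);
    }

With USE_NPTL these calls map directly onto pthread mutexes; without it they use
the per-architecture testandset() in user emulation and compile to empty functions
in system emulation, exactly as the header above spells out.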
diff --git a/qemu-malloc.c b/qemu-malloc.c
new file mode 100644
index 000000000..16d3c2e1f
--- /dev/null
+++ b/qemu-malloc.c
@@ -0,0 +1,59 @@
+/*
+ * malloc-like functions for system emulation.
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu-common.h"
+
+void *get_mmap_addr(unsigned long size)
+{
+ return NULL;
+}
+
+void qemu_free(void *ptr)
+{
+ free(ptr);
+}
+
+void *qemu_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+void *qemu_mallocz(size_t size)
+{
+ void *ptr;
+ ptr = qemu_malloc(size);
+ if (!ptr)
+ return NULL;
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+char *qemu_strdup(const char *str)
+{
+ char *ptr;
+ ptr = qemu_malloc(strlen(str) + 1);
+ if (!ptr)
+ return NULL;
+ strcpy(ptr, str);
+ return ptr;
+}
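
A short, hypothetical usage sketch of the wrappers above (the structure and the
string are made up):

    typedef struct Demo {
        char *name;
        int value;
    } Demo;

    static Demo *demo_new(const char *name)
    {
        Demo *d = qemu_mallocz(sizeof(*d));   /* zero-filled allocation */
        if (!d)
            return NULL;
        d->name = qemu_strdup(name);
        return d;
    }

    static void demo_free(Demo *d)
    {
        qemu_free(d->name);   /* qemu_free() is a plain free() wrapper, NULL is safe */
        qemu_free(d);
    }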
diff --git a/softmmu_template.h b/softmmu_template.h
index 0a4bc7e0c..934df5286 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -51,12 +51,13 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int mmu_idx,
void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
- target_ulong tlb_addr)
+ target_ulong addr)
{
DATA_TYPE res;
int index;
+ index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
@@ -81,7 +82,7 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE res;
int index;
target_ulong tlb_addr;
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
void *retaddr;
/* test if there is match for unaligned or IO access */
@@ -90,12 +91,12 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
redo:
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
- res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ addend = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(addend, addr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
@@ -113,7 +114,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
}
} else {
/* the page is not in the TLB : fill it */
@@ -135,19 +137,19 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
{
DATA_TYPE res, res1, res2;
int index, shift;
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
target_ulong tlb_addr, addr1, addr2;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
- res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ addend = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(addend, addr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* slow unaligned access (it spans two pages) */
@@ -166,7 +168,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
res = (DATA_TYPE)res;
} else {
/* unaligned/aligned access in the same page */
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
}
} else {
/* the page is not in the TLB : fill it */
@@ -185,13 +188,14 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
DATA_TYPE val,
- target_ulong tlb_addr,
+ target_ulong addr,
void *retaddr)
{
int index;
+ index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- env->mem_write_vaddr = tlb_addr;
+ env->mem_write_vaddr = addr;
env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
@@ -213,7 +217,7 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
DATA_TYPE val,
int mmu_idx)
{
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
target_ulong tlb_addr;
void *retaddr;
int index;
@@ -222,13 +226,13 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
retaddr = GETPC();
- glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ addend = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(addend, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
retaddr = GETPC();
@@ -245,7 +249,8 @@ void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
do_unaligned_access(addr, 1, mmu_idx, retaddr);
}
#endif
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
}
} else {
/* the page is not in the TLB : fill it */
@@ -265,7 +270,7 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
int mmu_idx,
void *retaddr)
{
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
target_ulong tlb_addr;
int index, i;
@@ -273,12 +278,12 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
- glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ addend = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(addend, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
@@ -295,7 +300,8 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
}
} else {
/* aligned/unaligned access in the same page */
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
}
} else {
/* the page is not in the TLB : fill it */
diff --git a/target-cris/cpu.h b/target-cris/cpu.h
index a4f016f12..447e780b2 100644
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -29,12 +29,11 @@
#define ELF_MACHINE EM_CRIS
-#define EXCP_MMU_EXEC 0
-#define EXCP_MMU_READ 1
-#define EXCP_MMU_WRITE 2
-#define EXCP_MMU_FLUSH 3
-#define EXCP_MMU_FAULT 4
-#define EXCP_BREAK 16 /* trap. */
+#define EXCP_NMI 1
+#define EXCP_GURU 2
+#define EXCP_BUSFAULT 3
+#define EXCP_IRQ 4
+#define EXCP_BREAK 5
/* Register aliases. R0 - R15 */
#define R_FP 8
@@ -54,11 +53,14 @@
#define PR_EBP 9
#define PR_ERP 10
#define PR_SRP 11
+#define PR_NRP 12
#define PR_CCS 13
#define PR_USP 14
#define PR_SPC 15
/* CPU flags. */
+#define Q_FLAG 0x80000000
+#define M_FLAG 0x40000000
#define S_FLAG 0x200
#define R_FLAG 0x100
#define P_FLAG 0x80
@@ -154,7 +156,6 @@ typedef struct CPUCRISState {
uint32_t lo;
} tlbsets[2][4][16];
- int features;
int user_mode_only;
CPU_COMMON
diff --git a/target-cris/helper.c b/target-cris/helper.c
index c16a58a3b..a29e55c05 100644
--- a/target-cris/helper.c
+++ b/target-cris/helper.c
@@ -78,13 +78,13 @@ int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
miss = cris_mmu_translate(&res, env, address, rw, mmu_idx);
if (miss)
{
- if (env->exception_index == EXCP_MMU_FAULT)
+ if (env->exception_index == EXCP_BUSFAULT)
cpu_abort(env,
"CRIS: Illegal recursive bus fault."
"addr=%x rw=%d\n",
address, rw);
- env->exception_index = EXCP_MMU_FAULT;
+ env->exception_index = EXCP_BUSFAULT;
env->fault_vector = res.bf_vec;
r = 1;
}
@@ -120,17 +120,20 @@ void do_interrupt(CPUState *env)
env->pregs[PR_ERP] = env->pc + 2;
break;
- case EXCP_MMU_FAULT:
+ case EXCP_NMI:
+ /* NMI is hardwired to vector zero. */
+ ex_vec = 0;
+ env->pregs[PR_CCS] &= ~M_FLAG;
+ env->pregs[PR_NRP] = env->pc;
+ break;
+
+ case EXCP_BUSFAULT:
ex_vec = env->fault_vector;
env->pregs[PR_ERP] = env->pc;
break;
default:
- /* Is the core accepting interrupts? */
- if (!(env->pregs[PR_CCS] & I_FLAG))
- return;
- /* The interrupt controller gives us the
- vector. */
+ /* The interrupt controller gives us the vector. */
ex_vec = env->interrupt_vector;
/* Normal interrupts are taken between
TB's. env->pc is valid here. */
diff --git a/target-cris/helper.h b/target-cris/helper.h
index 34f6831f4..4c96f0487 100644
--- a/target-cris/helper.h
+++ b/target-cris/helper.h
@@ -5,6 +5,7 @@ void TCG_HELPER_PROTO helper_tlb_flush_pid(uint32_t pid);
void TCG_HELPER_PROTO helper_dump(uint32_t a0, uint32_t a1, uint32_t a2);
void TCG_HELPER_PROTO helper_dummy(void);
void TCG_HELPER_PROTO helper_rfe(void);
+void TCG_HELPER_PROTO helper_rfn(void);
void TCG_HELPER_PROTO helper_store(uint32_t a0);
void TCG_HELPER_PROTO helper_movl_sreg_reg (uint32_t sreg, uint32_t reg);
diff --git a/target-cris/machine.c b/target-cris/machine.c
index cbfa645b4..3e152e9fb 100644
--- a/target-cris/machine.c
+++ b/target-cris/machine.c
@@ -5,3 +5,91 @@ void register_machines(void)
{
qemu_register_machine(&bareetraxfs_machine);
}
+
+void cpu_save(QEMUFile *f, void *opaque)
+{
+ CPUCRISState *env = opaque;
+ int i;
+ int s;
+ int mmu;
+
+ for (i = 0; i < 16; i++)
+ qemu_put_be32(f, env->regs[i]);
+ for (i = 0; i < 16; i++)
+ qemu_put_be32(f, env->pregs[i]);
+
+ qemu_put_be32(f, env->pc);
+ qemu_put_be32(f, env->ksp);
+
+ qemu_put_be32(f, env->dslot);
+ qemu_put_be32(f, env->btaken);
+ qemu_put_be32(f, env->btarget);
+
+ qemu_put_be32(f, env->cc_op);
+ qemu_put_be32(f, env->cc_mask);
+ qemu_put_be32(f, env->cc_dest);
+ qemu_put_be32(f, env->cc_src);
+ qemu_put_be32(f, env->cc_result);
+ qemu_put_be32(f, env->cc_size);
+ qemu_put_be32(f, env->cc_x);
+
+        for (s = 0; s < 4; s++) {
+ for (i = 0; i < 16; i++)
+ qemu_put_be32(f, env->sregs[s][i]);
+ }
+
+ qemu_put_be32(f, env->mmu_rand_lfsr);
+ for (mmu = 0; mmu < 2; mmu++) {
+                for (s = 0; s < 4; s++) {
+ for (i = 0; i < 16; i++) {
+ qemu_put_be32(f, env->tlbsets[mmu][s][i].lo);
+ qemu_put_be32(f, env->tlbsets[mmu][s][i].hi);
+ }
+ }
+ }
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ CPUCRISState *env = opaque;
+ int i;
+ int s;
+ int mmu;
+
+ for (i = 0; i < 16; i++)
+ env->regs[i] = qemu_get_be32(f);
+ for (i = 0; i < 16; i++)
+ env->pregs[i] = qemu_get_be32(f);
+
+ env->pc = qemu_get_be32(f);
+ env->ksp = qemu_get_be32(f);
+
+ env->dslot = qemu_get_be32(f);
+ env->btaken = qemu_get_be32(f);
+ env->btarget = qemu_get_be32(f);
+
+ env->cc_op = qemu_get_be32(f);
+ env->cc_mask = qemu_get_be32(f);
+ env->cc_dest = qemu_get_be32(f);
+ env->cc_src = qemu_get_be32(f);
+ env->cc_result = qemu_get_be32(f);
+ env->cc_size = qemu_get_be32(f);
+ env->cc_x = qemu_get_be32(f);
+
+        for (s = 0; s < 4; s++) {
+ for (i = 0; i < 16; i++)
+ env->sregs[s][i] = qemu_get_be32(f);
+ }
+
+ env->mmu_rand_lfsr = qemu_get_be32(f);
+ for (mmu = 0; mmu < 2; mmu++) {
+                for (s = 0; s < 4; s++) {
+ for (i = 0; i < 16; i++) {
+ env->tlbsets[mmu][s][i].lo = qemu_get_be32(f);
+ env->tlbsets[mmu][s][i].hi = qemu_get_be32(f);
+ }
+ }
+ }
+
+ return 0;
+}
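
For context, cpu_save() and cpu_load() above appear to match the generic snapshot
handler signatures, so they would be wired up through the usual registration call;
a hedged sketch (the "cpu" id string and the version number are assumptions, not
part of this patch):

    /* typically done once per CPU during machine init (sketch) */
    register_savevm("cpu", 0, 1, cpu_save, cpu_load, env);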
diff --git a/target-cris/op_helper.c b/target-cris/op_helper.c
index ebff31a1d..0e7d3c37f 100644
--- a/target-cris/op_helper.c
+++ b/target-cris/op_helper.c
@@ -24,6 +24,10 @@
#include "mmu.h"
#include "helper.h"
+#define D(x)
+
+#if !defined(CONFIG_USER_ONLY)
+
#define MMUSUFFIX _mmu
#define SHIFT 0
@@ -38,8 +42,6 @@
#define SHIFT 3
#include "softmmu_template.h"
-#define D(x)
-
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
@@ -78,6 +80,8 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
env = saved_env;
}
+#endif
+
void helper_raise_exception(uint32_t index)
{
env->exception_index = index;
@@ -202,6 +206,8 @@ static void cris_ccs_rshift(CPUState *env)
void helper_rfe(void)
{
+ int rflag = env->pregs[PR_CCS] & R_FLAG;
+
D(fprintf(logfile, "rfe: erp=%x pid=%x ccs=%x btarget=%x\n",
env->pregs[PR_ERP], env->pregs[PR_PID],
env->pregs[PR_CCS],
@@ -210,10 +216,29 @@ void helper_rfe(void)
cris_ccs_rshift(env);
/* RFE sets the P_FLAG only if the R_FLAG is not set. */
- if (!(env->pregs[PR_CCS] & R_FLAG))
+ if (!rflag)
env->pregs[PR_CCS] |= P_FLAG;
}
+void helper_rfn(void)
+{
+ int rflag = env->pregs[PR_CCS] & R_FLAG;
+
+ D(fprintf(logfile, "rfn: erp=%x pid=%x ccs=%x btarget=%x\n",
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->btarget));
+
+ cris_ccs_rshift(env);
+
+ /* Set the P_FLAG only if the R_FLAG is not set. */
+ if (!rflag)
+ env->pregs[PR_CCS] |= P_FLAG;
+
+ /* Always set the M flag. */
+ env->pregs[PR_CCS] |= M_FLAG;
+}
+
void helper_store(uint32_t a0)
{
if (env->pregs[PR_CCS] & P_FLAG )
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 615a3fcac..cd0c2e15f 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -73,6 +73,7 @@ TCGv cc_op;
TCGv cc_size;
TCGv cc_mask;
+TCGv env_btaken;
TCGv env_btarget;
TCGv env_pc;
@@ -104,9 +105,16 @@ typedef struct DisasContext {
int flags_x;
int clear_x; /* Clear x after this insn? */
- int user; /* user or kernel mode. */
+ int cpustate_changed;
+ unsigned int tb_flags; /* tb dependent flags. */
int is_jmp;
+#define JMP_NOJMP 0
+#define JMP_DIRECT 1
+#define JMP_INDIRECT 2
+ int jmp; /* 0=nojmp, 1=direct, 2=indirect. */
+ uint32_t jmp_pc;
+
int delayed_branch;
struct TranslationBlock *tb;
@@ -207,8 +215,10 @@ static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
tcg_gen_andi_tl(cpu_PR[r], tn, 3);
else {
tcg_gen_mov_tl(cpu_PR[r], tn);
- if (r == PR_PID)
+ if (r == PR_PID)
tcg_gen_helper_0_1(helper_tlb_flush_pid, tn);
+ else if (r == PR_CCS)
+ dc->cpustate_changed = 1;
}
}
@@ -610,7 +620,7 @@ static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
btaken = tcg_temp_new(TCG_TYPE_TL);
/* Conditional jmp. */
- t_gen_mov_TN_env(btaken, btaken);
+ tcg_gen_mov_tl(btaken, env_btaken);
tcg_gen_mov_tl(env_pc, pc_false);
tcg_gen_brcondi_tl(TCG_COND_EQ, btaken, 0, l1);
tcg_gen_mov_tl(env_pc, pc_true);
@@ -628,7 +638,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb((long)tb + n);
} else {
- tcg_gen_mov_tl(env_pc, cpu_T[0]);
+ tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(0);
}
}
@@ -648,6 +658,9 @@ static int sign_extend(unsigned int val, unsigned int width)
static inline void cris_clear_x_flag(DisasContext *dc)
{
+ if (dc->flagx_known && dc->flags_x)
+ dc->flags_uptodate = 0;
+
dc->flagx_known = 1;
dc->flags_x = 0;
}
@@ -715,6 +728,15 @@ static void cris_evaluate_flags(DisasContext *dc)
}
break;
}
+ if (dc->flagx_known) {
+ if (dc->flags_x)
+ tcg_gen_ori_tl(cpu_PR[PR_CCS],
+ cpu_PR[PR_CCS], X_FLAG);
+ else
+ tcg_gen_andi_tl(cpu_PR[PR_CCS],
+ cpu_PR[PR_CCS], ~X_FLAG);
+ }
+
dc->flags_uptodate = 1;
}
}
@@ -723,6 +745,11 @@ static void cris_cc_mask(DisasContext *dc, unsigned int mask)
{
uint32_t ovl;
+ if (!mask) {
+ dc->update_cc = 0;
+ return;
+ }
+
/* Check if we need to evaluate the condition codes due to
CC overlaying. */
ovl = (dc->cc_mask ^ mask) & ~mask;
@@ -732,11 +759,6 @@ static void cris_cc_mask(DisasContext *dc, unsigned int mask)
}
dc->cc_mask = mask;
dc->update_cc = 1;
-
- if (mask == 0)
- dc->update_cc = 0;
- else
- dc->flags_uptodate = 0;
}
static void cris_update_cc_op(DisasContext *dc, int op, int size)
@@ -942,7 +964,7 @@ static int arith_cc(DisasContext *dc)
static void gen_tst_cc (DisasContext *dc, int cond)
{
- int arith_opt;
+ int arith_opt, move_opt;
/* TODO: optimize more condition codes. */
@@ -955,9 +977,10 @@ static void gen_tst_cc (DisasContext *dc, int cond)
* code is true.
*/
arith_opt = arith_cc(dc) && !dc->flags_uptodate;
+ move_opt = (dc->cc_op == CC_OP_MOVE) && !dc->flags_uptodate;
switch (cond) {
case CC_EQ:
- if (arith_opt) {
+ if (arith_opt || move_opt) {
/* If cc_result is zero, T0 should be
non-zero otherwise T0 should be zero. */
int l1;
@@ -975,7 +998,7 @@ static void gen_tst_cc (DisasContext *dc, int cond)
}
break;
case CC_NE:
- if (arith_opt)
+ if (arith_opt || move_opt)
tcg_gen_mov_tl(cpu_T[0], cc_result);
else {
cris_evaluate_flags(dc);
@@ -990,8 +1013,7 @@ static void gen_tst_cc (DisasContext *dc, int cond)
break;
case CC_CC:
cris_evaluate_flags(dc);
- tcg_gen_xori_tl(cpu_T[0], cpu_PR[PR_CCS],
- C_FLAG);
+ tcg_gen_xori_tl(cpu_T[0], cpu_PR[PR_CCS], C_FLAG);
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], C_FLAG);
break;
case CC_VS:
@@ -1005,9 +1027,17 @@ static void gen_tst_cc (DisasContext *dc, int cond)
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], V_FLAG);
break;
case CC_PL:
- if (arith_opt)
- tcg_gen_shli_tl(cpu_T[0], cc_result, 31);
- else {
+ if (arith_opt || move_opt) {
+ int bits = 31;
+
+ if (dc->cc_size == 1)
+ bits = 7;
+ else if (dc->cc_size == 2)
+ bits = 15;
+
+ tcg_gen_shri_tl(cpu_T[0], cc_result, bits);
+ tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
+ } else {
cris_evaluate_flags(dc);
tcg_gen_xori_tl(cpu_T[0], cpu_PR[PR_CCS],
N_FLAG);
@@ -1015,9 +1045,15 @@ static void gen_tst_cc (DisasContext *dc, int cond)
}
break;
case CC_MI:
- if (arith_opt) {
- tcg_gen_shli_tl(cpu_T[0], cc_result, 31);
- tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
+ if (arith_opt || move_opt) {
+ int bits = 31;
+
+ if (dc->cc_size == 1)
+ bits = 7;
+ else if (dc->cc_size == 2)
+ bits = 15;
+
+                        tcg_gen_shri_tl(cpu_T[0], cc_result, bits);
}
else {
cris_evaluate_flags(dc);
@@ -1121,28 +1157,46 @@ static void gen_tst_cc (DisasContext *dc, int cond)
};
}
-static void cris_prepare_cc_branch (DisasContext *dc, int offset, int cond)
+static void cris_store_direct_jmp(DisasContext *dc)
+{
+ /* Store the direct jmp state into the cpu-state. */
+ if (dc->jmp == JMP_DIRECT) {
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ tcg_gen_movi_tl(env_btaken, 1);
+ }
+}
+
+static void cris_prepare_cc_branch (DisasContext *dc,
+ int offset, int cond)
{
/* This helps us re-schedule the micro-code to insns in delay-slots
before the actual jump. */
dc->delayed_branch = 2;
+ dc->jmp_pc = dc->pc + offset;
+
if (cond != CC_A)
{
+ dc->jmp = JMP_INDIRECT;
gen_tst_cc (dc, cond);
- t_gen_mov_env_TN(btaken, cpu_T[0]);
- } else
- t_gen_mov_env_TN(btaken, tcg_const_tl(1));
- tcg_gen_movi_tl(env_btarget, dc->pc + offset);
+ tcg_gen_mov_tl(env_btaken, cpu_T[0]);
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ } else {
+ /* Allow chaining. */
+ dc->jmp = JMP_DIRECT;
+ }
}
-/* Dynamic jumps, when the dest is in a live reg for example. */
-void cris_prepare_dyn_jmp (DisasContext *dc)
+/* jumps, when the dest is in a live reg for example. Direct should be set
+ when the dest addr is constant to allow tb chaining. */
+static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
{
/* This helps us re-schedule the micro-code to insns in delay-slots
before the actual jump. */
dc->delayed_branch = 2;
- t_gen_mov_env_TN(btaken, tcg_const_tl(1));
+ dc->jmp = type;
+ if (type == JMP_INDIRECT)
+ tcg_gen_movi_tl(env_btaken, 1);
}
void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
@@ -1150,6 +1204,11 @@ void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
{
int mem_index = cpu_mmu_index(dc->env);
+ /* If we get a fault on a delayslot we must keep the jmp state in
+ the cpu-state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1)
+ cris_store_direct_jmp(dc);
+
if (size == 1) {
if (sign)
tcg_gen_qemu_ld8s(dst, addr, mem_index);
@@ -1172,6 +1231,21 @@ void gen_store (DisasContext *dc, TCGv addr, TCGv val,
{
int mem_index = cpu_mmu_index(dc->env);
+ /* If we get a fault on a delayslot we must keep the jmp state in
+ the cpu-state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1)
+ cris_store_direct_jmp(dc);
+
+
+        /* Conditional writes. We only support the kind where X and P are known
+ at translation time. */
+ if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
+ dc->postinc = 0;
+ cris_evaluate_flags(dc);
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
+ return;
+ }
+
/* Remember, operands are flipped. CRIS has reversed order. */
if (size == 1)
tcg_gen_qemu_st8(val, addr, mem_index);
@@ -1179,6 +1253,11 @@ void gen_store (DisasContext *dc, TCGv addr, TCGv val,
tcg_gen_qemu_st16(val, addr, mem_index);
else
tcg_gen_qemu_st32(val, addr, mem_index);
+
+ if (dc->flagx_known && dc->flags_x) {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
+ }
}
static inline void t_gen_sext(TCGv d, TCGv s, int size)
@@ -1352,6 +1431,8 @@ static unsigned int dec_bccq(DisasContext *dc)
tmp = offset;
offset = sign_extend(offset, 8);
+ DIS(fprintf (logfile, "b%s %x\n", cc_name(cond), dc->pc + offset));
+
/* op2 holds the condition-code. */
cris_cc_mask(dc, 0);
cris_prepare_cc_branch (dc, offset, cond);
@@ -1449,7 +1530,6 @@ static unsigned int dec_btstq(DisasContext *dc)
cris_cc_mask(dc, CC_MASK_NZ);
- t_gen_mov_TN_reg(cpu_T[0], dc->op2);
cris_alu(dc, CC_OP_BTST,
cpu_T[0], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
@@ -1463,9 +1543,10 @@ static unsigned int dec_asrq(DisasContext *dc)
DIS(fprintf (logfile, "asrq %u, $r%d\n", dc->op1, dc->op2));
cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_ASR,
+ tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+ cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2],
- cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
}
static unsigned int dec_lslq(DisasContext *dc)
@@ -1475,9 +1556,11 @@ static unsigned int dec_lslq(DisasContext *dc)
cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_LSL,
+ tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+
+ cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2],
- cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
}
static unsigned int dec_lsrq(DisasContext *dc)
@@ -1487,9 +1570,10 @@ static unsigned int dec_lsrq(DisasContext *dc)
cris_cc_mask(dc, CC_MASK_NZ);
- cris_alu(dc, CC_OP_LSR,
+ tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+ cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2],
- cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
return 2;
}
@@ -1883,11 +1967,10 @@ static unsigned int dec_movs_r(DisasContext *dc)
dc->op1, dc->op2));
cris_cc_mask(dc, CC_MASK_NZ);
- t_gen_mov_TN_reg(cpu_T[0], dc->op1);
/* Size can only be qi or hi. */
t_gen_sext(cpu_T[1], cpu_R[dc->op1], size);
cris_alu(dc, CC_OP_MOVE,
- cpu_R[dc->op2], cpu_T[0], cpu_T[1], 4);
+ cpu_R[dc->op2], cpu_R[dc->op1], cpu_T[1], 4);
return 2;
}
@@ -1962,7 +2045,6 @@ static unsigned int dec_setclrf(DisasContext *dc)
flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
| EXTRACT_FIELD(dc->ir, 0, 3);
- DIS(fprintf (logfile, "set=%d flags=%x\n", set, flags));
if (set && flags == 0) {
DIS(fprintf (logfile, "nop\n"));
return 2;
@@ -1975,13 +2057,30 @@ static unsigned int dec_setclrf(DisasContext *dc)
flags));
}
- if (set && (flags & X_FLAG)) {
+ /* User space is not allowed to touch these. Silently ignore. */
+ if (dc->tb_flags & U_FLAG) {
+ flags &= ~(I_FLAG | U_FLAG);
+ }
+
+ if (flags & X_FLAG) {
dc->flagx_known = 1;
- dc->flags_x = X_FLAG;
- } else {
- dc->flagx_known = 0;
+ if (set)
+ dc->flags_x = X_FLAG;
+ else
+ dc->flags_x = 0;
}
+ /* Break the TB if the P flag changes. */
+ if (flags & P_FLAG) {
+ if ((set && !(dc->tb_flags & P_FLAG))
+ || (!set && (dc->tb_flags & P_FLAG))) {
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ dc->is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = 1;
+ }
+ }
+
/* Simply decode the flags. */
cris_evaluate_flags (dc);
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
@@ -1989,11 +2088,11 @@ static unsigned int dec_setclrf(DisasContext *dc)
tcg_gen_movi_tl(cc_op, dc->cc_op);
if (set) {
- if (!dc->user && (flags & U_FLAG)) {
+ if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
/* Enter user mode. */
t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
- dc->is_jmp = DISAS_NEXT;
+ dc->cpustate_changed = 1;
}
tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
}
@@ -2030,7 +2129,7 @@ static unsigned int dec_move_rp(DisasContext *dc)
if (dc->op2 == PR_CCS) {
cris_evaluate_flags(dc);
t_gen_mov_TN_reg(cpu_T[0], dc->op1);
- if (dc->user) {
+ if (dc->tb_flags & U_FLAG) {
/* User space is not allowed to touch all flags. */
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x39f);
tcg_gen_andi_tl(cpu_T[1], cpu_PR[PR_CCS], ~0x39f);
@@ -2051,16 +2150,12 @@ static unsigned int dec_move_pr(DisasContext *dc)
{
DIS(fprintf (logfile, "move $p%u, $r%u\n", dc->op1, dc->op2));
cris_cc_mask(dc, 0);
- /* Support register 0 is hardwired to zero.
- Treat it specially. */
- if (dc->op2 == 0)
- tcg_gen_movi_tl(cpu_T[1], 0);
- else if (dc->op2 == PR_CCS) {
+
+ if (dc->op2 == PR_CCS)
cris_evaluate_flags(dc);
- t_gen_mov_TN_preg(cpu_T[1], dc->op2);
- } else
- t_gen_mov_TN_preg(cpu_T[1], dc->op2);
- cris_alu(dc, CC_OP_MOVE,
+
+ t_gen_mov_TN_preg(cpu_T[1], dc->op2);
+ cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op1], cpu_R[dc->op1], cpu_T[1],
preg_sizes[dc->op2]);
return 2;
@@ -2410,7 +2505,7 @@ static unsigned int dec_move_mp(DisasContext *dc)
cris_cc_mask(dc, 0);
if (dc->op2 == PR_CCS) {
cris_evaluate_flags(dc);
- if (dc->user) {
+ if (dc->tb_flags & U_FLAG) {
/* User space is not allowed to touch all flags. */
tcg_gen_andi_tl(cpu_T[1], cpu_T[1], 0x39f);
tcg_gen_andi_tl(cpu_T[0], cpu_PR[PR_CCS], ~0x39f);
@@ -2561,7 +2656,7 @@ static unsigned int dec_jump_p(DisasContext *dc)
/* rete will often have low bit set to indicate delayslot. */
tcg_gen_andi_tl(env_btarget, cpu_T[0], ~1);
cris_cc_mask(dc, 0);
- cris_prepare_dyn_jmp(dc);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
return 2;
}
@@ -2576,7 +2671,7 @@ static unsigned int dec_jas_r(DisasContext *dc)
abort();
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
- cris_prepare_dyn_jmp(dc);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
return 2;
}
@@ -2589,9 +2684,10 @@ static unsigned int dec_jas_im(DisasContext *dc)
DIS(fprintf (logfile, "jas 0x%x\n", imm));
cris_cc_mask(dc, 0);
/* Store the return address in Pd. */
- tcg_gen_movi_tl(env_btarget, imm);
t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
- cris_prepare_dyn_jmp(dc);
+
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
return 6;
}
@@ -2604,11 +2700,10 @@ static unsigned int dec_jasc_im(DisasContext *dc)
DIS(fprintf (logfile, "jasc 0x%x\n", imm));
cris_cc_mask(dc, 0);
/* Store the return address in Pd. */
- tcg_gen_movi_tl(cpu_T[0], imm);
- tcg_gen_mov_tl(env_btarget, cpu_T[0]);
- tcg_gen_movi_tl(cpu_T[0], dc->pc + 8 + 4);
- t_gen_mov_preg_TN(dc, dc->op2, cpu_T[0]);
- cris_prepare_dyn_jmp(dc);
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
+
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
return 6;
}
@@ -2617,11 +2712,9 @@ static unsigned int dec_jasc_r(DisasContext *dc)
DIS(fprintf (logfile, "jasc_r $r%u, $p%u\n", dc->op1, dc->op2));
cris_cc_mask(dc, 0);
/* Store the return address in Pd. */
- t_gen_mov_TN_reg(cpu_T[0], dc->op1);
- tcg_gen_mov_tl(env_btarget, cpu_T[0]);
- tcg_gen_movi_tl(cpu_T[0], dc->pc + 4 + 4);
- t_gen_mov_preg_TN(dc, dc->op2, cpu_T[0]);
- cris_prepare_dyn_jmp(dc);
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
+ cris_prepare_jmp(dc, JMP_INDIRECT);
return 2;
}
@@ -2651,12 +2744,11 @@ static unsigned int dec_bas_im(DisasContext *dc)
DIS(fprintf (logfile, "bas 0x%x, $p%u\n", dc->pc + simm, dc->op2));
cris_cc_mask(dc, 0);
- /* Stor the return address in Pd. */
- tcg_gen_movi_tl(cpu_T[0], dc->pc + simm);
- tcg_gen_mov_tl(env_btarget, cpu_T[0]);
- tcg_gen_movi_tl(cpu_T[0], dc->pc + 8);
- t_gen_mov_preg_TN(dc, dc->op2, cpu_T[0]);
- cris_prepare_dyn_jmp(dc);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
+
+ dc->jmp_pc = dc->pc + simm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
return 6;
}
@@ -2667,12 +2759,11 @@ static unsigned int dec_basc_im(DisasContext *dc)
DIS(fprintf (logfile, "basc 0x%x, $p%u\n", dc->pc + simm, dc->op2));
cris_cc_mask(dc, 0);
- /* Stor the return address in Pd. */
- tcg_gen_movi_tl(cpu_T[0], dc->pc + simm);
- tcg_gen_mov_tl(env_btarget, cpu_T[0]);
- tcg_gen_movi_tl(cpu_T[0], dc->pc + 12);
- t_gen_mov_preg_TN(dc, dc->op2, cpu_T[0]);
- cris_prepare_dyn_jmp(dc);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
+
+ dc->jmp_pc = dc->pc + simm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
return 6;
}
@@ -2695,12 +2786,13 @@ static unsigned int dec_rfe_etc(DisasContext *dc)
break;
case 5:
/* rfn. */
- BUG();
+ cris_evaluate_flags(dc);
+ tcg_gen_helper_0_0(helper_rfn);
+ dc->is_jmp = DISAS_UPDATE;
break;
case 6:
/* break. */
- tcg_gen_movi_tl(cpu_T[0], dc->pc);
- t_gen_mov_env_TN(pc, cpu_T[0]);
+ tcg_gen_movi_tl(env_pc, dc->pc);
/* Breaks start at 16 in the exception vector. */
t_gen_mov_env_TN(trap_vector,
tcg_const_tl(dc->op1 + 16));
@@ -2884,8 +2976,7 @@ static void check_breakpoint(CPUState *env, DisasContext *dc)
for(j = 0; j < env->nb_breakpoints; j++) {
if (env->breakpoints[j] == dc->pc) {
cris_evaluate_flags (dc);
- tcg_gen_movi_tl(cpu_T[0], dc->pc);
- t_gen_mov_env_TN(pc, cpu_T[0]);
+ tcg_gen_movi_tl(env_pc, dc->pc);
t_gen_raise_exception(EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
}
@@ -2940,6 +3031,7 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
struct DisasContext ctx;
struct DisasContext *dc = &ctx;
uint32_t next_page_start;
+ target_ulong npc;
if (!logfile)
logfile = stderr;
@@ -2968,18 +3060,24 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
dc->cc_size_uptodate = -1;
/* Decode TB flags. */
- dc->user = tb->flags & U_FLAG;
+ dc->tb_flags = tb->flags & (P_FLAG | U_FLAG | X_FLAG);
dc->delayed_branch = !!(tb->flags & 7);
+ if (dc->delayed_branch)
+ dc->jmp = JMP_INDIRECT;
+ else
+ dc->jmp = JMP_NOJMP;
+
+ dc->cpustate_changed = 0;
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile,
- "srch=%d pc=%x %x bt=%x ds=%lld ccs=%x\n"
+ "srch=%d pc=%x %x flg=%llx bt=%x ds=%lld ccs=%x\n"
"pid=%x usp=%x\n"
"%x.%x.%x.%x\n"
"%x.%x.%x.%x\n"
"%x.%x.%x.%x\n"
"%x.%x.%x.%x\n",
- search_pc, dc->pc, dc->ppc,
+ search_pc, dc->pc, dc->ppc, tb->flags,
env->btarget, tb->flags & 7,
env->pregs[PR_CCS],
env->pregs[PR_PID], env->pregs[PR_USP],
@@ -2997,9 +3095,6 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
do
{
check_breakpoint(env, dc);
- if (dc->is_jmp == DISAS_JUMP
- || dc->is_jmp == DISAS_SWI)
- goto done;
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
@@ -3034,13 +3129,20 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
              actually generating any host code, the simulator will just
              loop doing nothing on this program location. */
if (dc->delayed_branch) {
- t_gen_mov_env_TN(dslot, tcg_const_tl(0));
dc->delayed_branch--;
if (dc->delayed_branch == 0)
{
- t_gen_cc_jmp(env_btarget,
- tcg_const_tl(dc->pc));
- dc->is_jmp = DISAS_JUMP;
+ if (tb->flags & 7)
+ t_gen_mov_env_TN(dslot,
+ tcg_const_tl(0));
+ if (dc->jmp == JMP_DIRECT) {
+ dc->is_jmp = DISAS_NEXT;
+ } else {
+ t_gen_cc_jmp(env_btarget,
+ tcg_const_tl(dc->pc));
+ dc->is_jmp = DISAS_JUMP;
+ }
+ break;
}
}
@@ -3051,28 +3153,33 @@ gen_intermediate_code_internal(CPUState *env, TranslationBlock *tb,
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end
&& (dc->pc < next_page_start));
+ npc = dc->pc;
+ if (dc->jmp == JMP_DIRECT && !dc->delayed_branch)
+ npc = dc->jmp_pc;
+
+ /* Force an update if the per-tb cpu state has changed. */
+ if (dc->is_jmp == DISAS_NEXT
+ && (dc->cpustate_changed || !dc->flagx_known
+ || (dc->flags_x != (tb->flags & X_FLAG)))) {
+ dc->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_tl(env_pc, npc);
+ }
/* Broken branch+delayslot sequence. */
if (dc->delayed_branch == 1) {
/* Set env->dslot to the size of the branch insn. */
t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
- }
-
- if (!dc->is_jmp) {
- D(fprintf(logfile, "!jmp pc=%x jmp=%d db=%d\n", dc->pc,
- dc->is_jmp, dc->delayed_branch));
- /* T0 and env_pc should hold the new pc. */
- tcg_gen_movi_tl(cpu_T[0], dc->pc);
- tcg_gen_mov_tl(env_pc, cpu_T[0]);
+ cris_store_direct_jmp(dc);
}
cris_evaluate_flags (dc);
- done:
+
if (__builtin_expect(env->singlestep_enabled, 0)) {
+ tcg_gen_movi_tl(env_pc, npc);
t_gen_raise_exception(EXCP_DEBUG);
} else {
switch(dc->is_jmp) {
case DISAS_NEXT:
- gen_goto_tb(dc, 1, dc->pc);
+ gen_goto_tb(dc, 1, npc);
break;
default:
case DISAS_JUMP:
@@ -3164,12 +3271,20 @@ void cpu_dump_state (CPUState *env, FILE *f,
CPUCRISState *cpu_cris_init (const char *cpu_model)
{
CPUCRISState *env;
+ static int tcg_initialized = 0;
int i;
env = qemu_mallocz(sizeof(CPUCRISState));
if (!env)
return NULL;
+
cpu_exec_init(env);
+ cpu_reset(env);
+
+ if (tcg_initialized)
+ return env;
+
+ tcg_initialized = 1;
cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
#if TARGET_LONG_BITS > HOST_LONG_BITS
@@ -3207,7 +3322,9 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
env_btarget = tcg_global_mem_new(TCG_TYPE_PTR, TCG_AREG0,
offsetof(CPUState, btarget),
"btarget");
-
+ env_btaken = tcg_global_mem_new(TCG_TYPE_PTR, TCG_AREG0,
+ offsetof(CPUState, btaken),
+ "btaken");
for (i = 0; i < 16; i++) {
cpu_R[i] = tcg_global_mem_new(TCG_TYPE_PTR, TCG_AREG0,
offsetof(CPUState, regs[i]),
@@ -3228,6 +3345,7 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
TCG_HELPER(helper_movl_sreg_reg);
TCG_HELPER(helper_movl_reg_sreg);
TCG_HELPER(helper_rfe);
+ TCG_HELPER(helper_rfn);
TCG_HELPER(helper_evaluate_flags_muls);
TCG_HELPER(helper_evaluate_flags_mulu);
@@ -3237,8 +3355,6 @@ CPUCRISState *cpu_cris_init (const char *cpu_model)
TCG_HELPER(helper_evaluate_flags_move_2);
TCG_HELPER(helper_evaluate_flags);
TCG_HELPER(helper_top_evaluate_flags);
-
- cpu_reset(env);
return env;
}
diff --git a/target-i386/helper.c b/target-i386/helper.c
index ff999bc12..3c710bfba 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -45,25 +45,25 @@ static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
/* feature flags taken from "Intel Processor Identification and the CPUID
* Instruction" and AMD's "CPUID Specification". In cases of disagreement
* about feature names, the Linux name is used. */
- const char *feature_name[] = {
+ static const char *feature_name[] = {
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
"fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
- const char *ext_feature_name[] = {
+ static const char *ext_feature_name[] = {
"pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
- const char *ext2_feature_name[] = {
+ static const char *ext2_feature_name[] = {
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mttr", "pge", "mca", "cmov",
"pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
"fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
- const char *ext3_feature_name[] = {
+ static const char *ext3_feature_name[] = {
"lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -135,6 +135,7 @@ typedef struct x86_def_t {
int stepping;
uint32_t features, ext_features, ext2_features, ext3_features;
uint32_t xlevel;
+ char model_id[48];
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
@@ -170,6 +171,7 @@ static x86_def_t x86_defs[] = {
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.ext3_features = CPUID_EXT3_SVM,
.xlevel = 0x8000000A,
+ .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
},
#endif
{
@@ -181,6 +183,7 @@ static x86_def_t x86_defs[] = {
.features = PPRO_FEATURES,
.ext_features = CPUID_EXT_SSE3,
.xlevel = 0,
+ .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
},
{
.name = "486",
@@ -230,6 +233,8 @@ static x86_def_t x86_defs[] = {
.features = PPRO_FEATURES | PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
.ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.xlevel = 0x80000008,
+ /* XXX: put another string ? */
+ .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
},
};
@@ -270,7 +275,6 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
family = strtol(val, &err, 10);
if (!*val || *err || family < 0) {
fprintf(stderr, "bad numerical value %s\n", val);
- x86_cpu_def = 0;
goto error;
}
x86_cpu_def->family = family;
@@ -279,7 +283,6 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
model = strtol(val, &err, 10);
if (!*val || *err || model < 0 || model > 0xf) {
fprintf(stderr, "bad numerical value %s\n", val);
- x86_cpu_def = 0;
goto error;
}
x86_cpu_def->model = model;
@@ -288,18 +291,31 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
stepping = strtol(val, &err, 10);
if (!*val || *err || stepping < 0 || stepping > 0xf) {
fprintf(stderr, "bad numerical value %s\n", val);
- x86_cpu_def = 0;
goto error;
}
x86_cpu_def->stepping = stepping;
+ } else if (!strcmp(featurestr, "vendor")) {
+ if (strlen(val) != 12) {
+ fprintf(stderr, "vendor string must be 12 chars long\n");
+ goto error;
+ }
+ x86_cpu_def->vendor1 = 0;
+ x86_cpu_def->vendor2 = 0;
+ x86_cpu_def->vendor3 = 0;
+ for(i = 0; i < 4; i++) {
+ x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
+ x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
+ x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
+ }
+ } else if (!strcmp(featurestr, "model_id")) {
+ pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
+ val);
} else {
fprintf(stderr, "unrecognized feature %s\n", featurestr);
- x86_cpu_def = 0;
goto error;
}
} else {
fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
- x86_cpu_def = 0;
goto error;
}
featurestr = strtok(NULL, ",");
@@ -352,16 +368,19 @@ static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
env->cpuid_xlevel = def->xlevel;
env->cpuid_ext3_features = def->ext3_features;
{
- const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
+ const char *model_id = def->model_id;
int c, len, i;
+
if (cpu_vendor_string != NULL)
model_id = cpu_vendor_string;
+ if (!model_id)
+ model_id = "";
len = strlen(model_id);
for(i = 0; i < 48; i++) {
if (i >= len)
c = '\0';
else
- c = model_id[i];
+ c = (uint8_t)model_id[i];
env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
}
}
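
A hedged example of the new feature-string keys parsed above (the command-line
form and the qemu64 model name are assumptions; the vendor value must be exactly
12 characters long and model_id is illustrative):

    qemu-system-x86_64 -cpu qemu64,vendor=AuthenticAMD,model_id=DemoCPU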
diff --git a/target-i386/helper.h b/target-i386/helper.h
index 152029914..8df6acc8d 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -60,9 +60,9 @@ DEF_HELPER(void, helper_sysexit, (void))
DEF_HELPER(void, helper_syscall, (int next_eip_addend))
DEF_HELPER(void, helper_sysret, (int dflag))
#endif
-DEF_HELPER(void, helper_hlt, (void))
+DEF_HELPER(void, helper_hlt, (int next_eip_addend))
DEF_HELPER(void, helper_monitor, (target_ulong ptr))
-DEF_HELPER(void, helper_mwait, (void))
+DEF_HELPER(void, helper_mwait, (int next_eip_addend))
DEF_HELPER(void, helper_debug, (void))
DEF_HELPER(void, helper_raise_interrupt, (int intno, int next_eip_addend))
DEF_HELPER(void, helper_raise_exception, (int exception_index))
diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c
index ebeeebdec..0b5fdc04c 100644
--- a/target-i386/op_helper.c
+++ b/target-i386/op_helper.c
@@ -1837,6 +1837,8 @@ void helper_cmpxchg8b(target_ulong a0)
stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
eflags |= CC_Z;
} else {
+ /* always do the store */
+ stq(a0, d);
EDX = (uint32_t)(d >> 32);
EAX = (uint32_t)d;
eflags &= ~CC_Z;
@@ -1850,6 +1852,8 @@ void helper_cmpxchg16b(target_ulong a0)
uint64_t d0, d1;
int eflags;
+ if ((a0 & 0xf) != 0)
+ raise_exception(EXCP0D_GPF);
eflags = cc_table[CC_OP].compute_all();
d0 = ldq(a0);
d1 = ldq(a0 + 8);
@@ -1858,6 +1862,9 @@ void helper_cmpxchg16b(target_ulong a0)
stq(a0 + 8, ECX);
eflags |= CC_Z;
} else {
+ /* always do the store */
+ stq(a0, d0);
+ stq(a0 + 8, d1);
EDX = d1;
EAX = d0;
eflags &= ~CC_Z;
@@ -4547,16 +4554,22 @@ void helper_idivq_EAX(target_ulong t0)
}
#endif
-void helper_hlt(void)
+static void do_hlt(void)
{
- helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
-
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
env->halted = 1;
env->exception_index = EXCP_HLT;
cpu_loop_exit();
}
+void helper_hlt(int next_eip_addend)
+{
+ helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
+ EIP += next_eip_addend;
+
+ do_hlt();
+}
+
void helper_monitor(target_ulong ptr)
{
if ((uint32_t)ECX != 0)
@@ -4565,17 +4578,19 @@ void helper_monitor(target_ulong ptr)
helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}
-void helper_mwait(void)
+void helper_mwait(int next_eip_addend)
{
if ((uint32_t)ECX != 0)
raise_exception(EXCP0D_GPF);
helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
+ EIP += next_eip_addend;
+
/* XXX: not complete but not completely erroneous */
if (env->cpu_index != 0 || env->next_cpu != NULL) {
/* more than one CPU: do not sleep because another CPU may
wake this one */
} else {
- helper_hlt();
+ do_hlt();
}
}
@@ -4767,7 +4782,7 @@ static inline void svm_save_seg(target_phys_addr_t addr,
stl_phys(addr + offsetof(struct vmcb_seg, limit),
sc->limit);
stw_phys(addr + offsetof(struct vmcb_seg, attrib),
- (sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00));
+ ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
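
The cmpxchg8b/cmpxchg16b hunks above add an unconditional store on the
compare-failure path; a small stand-alone sketch of the intended semantics
(plain C, not QEMU code), shown for the 64-bit case:

    #include <stdint.h>

    /* On failure the old memory value is returned in the register pair
       *and* written back, i.e. the (locked) store always happens. */
    static int cmpxchg64_semantics(uint64_t *mem, uint64_t *edx_eax, uint64_t ecx_ebx)
    {
        uint64_t old = *mem;
        if (old == *edx_eax) {
            *mem = ecx_ebx;      /* success: store the new value, ZF set */
            return 1;
        }
        *edx_eax = old;          /* failure: old value goes to EDX:EAX... */
        *mem = old;              /* ...and the store is still performed, ZF clear */
        return 0;
    }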
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 85495a9d1..6f9c25672 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -3788,9 +3788,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
+ case 0x82:
+ if (CODE64(s))
+ goto illegal_op;
case 0x80: /* GRP1 */
case 0x81:
- case 0x82:
case 0x83:
{
int val;
@@ -6418,8 +6420,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_jmp_im(s->pc - s->cs_base);
- tcg_gen_helper_0_0(helper_hlt);
+ gen_jmp_im(pc_start - s->cs_base);
+ tcg_gen_helper_0_1(helper_hlt, tcg_const_i32(s->pc - pc_start));
s->is_jmp = 3;
}
break;
@@ -6517,6 +6519,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
s->cpl != 0)
goto illegal_op;
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
if (s->aflag == 2) {
@@ -6539,8 +6543,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_DYNAMIC;
}
- gen_jmp_im(s->pc - s->cs_base);
- tcg_gen_helper_0_0(helper_mwait);
+ gen_jmp_im(pc_start - s->cs_base);
+ tcg_gen_helper_0_1(helper_mwait, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
default:
diff --git a/target-mips/TODO b/target-mips/TODO
index c58956cff..bb18fa8d8 100644
--- a/target-mips/TODO
+++ b/target-mips/TODO
@@ -30,6 +30,11 @@ General
each ASID change. Using the MMU modes to implement ASIDs hinges on
implementing the global bit efficiently.
- save/restore of the CPU state is not implemented (see machine.c).
+- Improve cpu state handling:
+ Step 1) Collect all the TC state in a single struct, so we need only
+ a single global pointer for the active TC.
+ Step 2) Use only a single TC context as working context, and copy the
+ contexts on TC switch. Likewise for FPU contexts.
MIPS64
------
@@ -37,7 +42,8 @@ MIPS64
"Generic" 4Kc system emulation
------------------------------
-- Doesn't correspond to any real hardware.
+- Doesn't correspond to any real hardware. Should be removed some day;
+  U-Boot is the last remaining user.
PICA 61 system emulation
------------------------
@@ -46,7 +52,7 @@ PICA 61 system emulation
MALTA system emulation
----------------------
- We fake firmware support instead of doing the real thing
-- Real firmware falls over when trying to init RAM, presumably due
- to lacking system controller emulation.
+- Real firmware (YAMON) falls over when trying to init RAM, presumably
+ due to lacking system controller emulation.
- Bonito system controller not implemented
- MSC1 system controller not implemented
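
The "Improve cpu state handling" item added to the TODO above asks for all per-TC
state to live behind a single pointer; a purely illustrative sketch of what step 1
could look like (type and field names are invented, field selection inferred from
cpu.h):

    /* Hypothetical per-TC container (sketch only). */
    typedef struct TCState {
        target_ulong gpr[32];
        target_ulong PC;
        target_ulong HI[MIPS_DSP_ACC];
        target_ulong LO[MIPS_DSP_ACC];
    } TCState;

    /* CPUMIPSState would then hold "TCState tcs[MIPS_TC_MAX];" plus a
       "TCState *active_tc" pointer that is swapped on a TC switch
       (step 2 would instead copy contexts into a single working context,
       and likewise for the FPU state). */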
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index 964a560fa..fdb05ccfd 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -70,11 +70,6 @@ typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
struct CPUMIPSFPUContext {
/* Floating point registers */
fpr_t fpr[32];
-#ifndef USE_HOST_FLOAT_REGS
- fpr_t ft0;
- fpr_t ft1;
- fpr_t ft2;
-#endif
float_status fp_status;
/* fpu implementation/revision register (fir) */
uint32_t fcr0;
@@ -145,9 +140,11 @@ struct CPUMIPSState {
target_ulong gpr[MIPS_SHADOW_SET_MAX][32];
/* Special registers */
target_ulong PC[MIPS_TC_MAX];
-#if TARGET_LONG_BITS > HOST_LONG_BITS
- target_ulong t0;
- target_ulong t1;
+ /* temporary hack for FP globals */
+#ifndef USE_HOST_FLOAT_REGS
+ fpr_t ft0;
+ fpr_t ft1;
+ fpr_t ft2;
#endif
target_ulong HI[MIPS_TC_MAX][MIPS_DSP_ACC];
target_ulong LO[MIPS_TC_MAX][MIPS_DSP_ACC];
diff --git a/target-mips/exec.h b/target-mips/exec.h
index f10a35df0..a7014eea1 100644
--- a/target-mips/exec.h
+++ b/target-mips/exec.h
@@ -10,35 +10,27 @@
register struct CPUMIPSState *env asm(AREG0);
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-#define T0 (env->t0)
-#define T1 (env->t1)
-#else
-register target_ulong T0 asm(AREG1);
-register target_ulong T1 asm(AREG2);
-#endif
-
#if defined (USE_HOST_FLOAT_REGS)
#error "implement me."
#else
-#define FDT0 (env->fpu->ft0.fd)
-#define FDT1 (env->fpu->ft1.fd)
-#define FDT2 (env->fpu->ft2.fd)
-#define FST0 (env->fpu->ft0.fs[FP_ENDIAN_IDX])
-#define FST1 (env->fpu->ft1.fs[FP_ENDIAN_IDX])
-#define FST2 (env->fpu->ft2.fs[FP_ENDIAN_IDX])
-#define FSTH0 (env->fpu->ft0.fs[!FP_ENDIAN_IDX])
-#define FSTH1 (env->fpu->ft1.fs[!FP_ENDIAN_IDX])
-#define FSTH2 (env->fpu->ft2.fs[!FP_ENDIAN_IDX])
-#define DT0 (env->fpu->ft0.d)
-#define DT1 (env->fpu->ft1.d)
-#define DT2 (env->fpu->ft2.d)
-#define WT0 (env->fpu->ft0.w[FP_ENDIAN_IDX])
-#define WT1 (env->fpu->ft1.w[FP_ENDIAN_IDX])
-#define WT2 (env->fpu->ft2.w[FP_ENDIAN_IDX])
-#define WTH0 (env->fpu->ft0.w[!FP_ENDIAN_IDX])
-#define WTH1 (env->fpu->ft1.w[!FP_ENDIAN_IDX])
-#define WTH2 (env->fpu->ft2.w[!FP_ENDIAN_IDX])
+#define FDT0 (env->ft0.fd)
+#define FDT1 (env->ft1.fd)
+#define FDT2 (env->ft2.fd)
+#define FST0 (env->ft0.fs[FP_ENDIAN_IDX])
+#define FST1 (env->ft1.fs[FP_ENDIAN_IDX])
+#define FST2 (env->ft2.fs[FP_ENDIAN_IDX])
+#define FSTH0 (env->ft0.fs[!FP_ENDIAN_IDX])
+#define FSTH1 (env->ft1.fs[!FP_ENDIAN_IDX])
+#define FSTH2 (env->ft2.fs[!FP_ENDIAN_IDX])
+#define DT0 (env->ft0.d)
+#define DT1 (env->ft1.d)
+#define DT2 (env->ft2.d)
+#define WT0 (env->ft0.w[FP_ENDIAN_IDX])
+#define WT1 (env->ft1.w[FP_ENDIAN_IDX])
+#define WT2 (env->ft2.w[FP_ENDIAN_IDX])
+#define WTH0 (env->ft0.w[!FP_ENDIAN_IDX])
+#define WTH1 (env->ft1.w[!FP_ENDIAN_IDX])
+#define WTH2 (env->ft2.w[!FP_ENDIAN_IDX])
#endif
#include "cpu.h"
@@ -48,67 +40,12 @@ register target_ulong T1 asm(AREG2);
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
-#if defined(TARGET_MIPS64)
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-void do_dsll (void);
-void do_dsll32 (void);
-void do_dsra (void);
-void do_dsra32 (void);
-void do_dsrl (void);
-void do_dsrl32 (void);
-void do_drotr (void);
-void do_drotr32 (void);
-void do_dsllv (void);
-void do_dsrav (void);
-void do_dsrlv (void);
-void do_drotrv (void);
-void do_dclo (void);
-void do_dclz (void);
-#endif
-#endif
-
-#if HOST_LONG_BITS < 64
-void do_div (void);
-#endif
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-void do_mult (void);
-void do_multu (void);
-void do_madd (void);
-void do_maddu (void);
-void do_msub (void);
-void do_msubu (void);
-void do_muls (void);
-void do_mulsu (void);
-void do_macc (void);
-void do_macchi (void);
-void do_maccu (void);
-void do_macchiu (void);
-void do_msac (void);
-void do_msachi (void);
-void do_msacu (void);
-void do_msachiu (void);
-void do_mulhi (void);
-void do_mulhiu (void);
-void do_mulshi (void);
-void do_mulshiu (void);
-#endif
-#if defined(TARGET_MIPS64)
-void do_ddiv (void);
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-void do_ddivu (void);
-#endif
-#endif
-void do_mfc0_random(void);
-void do_mfc0_count(void);
-void do_mtc0_entryhi(uint32_t in);
void do_mtc0_status_debug(uint32_t old, uint32_t val);
void do_mtc0_status_irqraise_debug(void);
void dump_fpu(CPUState *env);
void fpu_dump_state(CPUState *env, FILE *f,
int (*fpu_fprintf)(FILE *f, const char *fmt, ...),
int flags);
-void dump_sc (void);
-void do_pmon (int function);
int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int mmu_idx, int is_softmmu);
@@ -133,82 +70,6 @@ void cpu_mips_update_irq (CPUState *env);
void cpu_mips_clock_init (CPUState *env);
void cpu_mips_tlb_flush (CPUState *env, int flush_global);
-void do_cfc1 (int reg);
-void do_ctc1 (int reg);
-
-#define FOP_PROTO(op) \
-void do_float_ ## op ## _s(void); \
-void do_float_ ## op ## _d(void);
-FOP_PROTO(roundl)
-FOP_PROTO(roundw)
-FOP_PROTO(truncl)
-FOP_PROTO(truncw)
-FOP_PROTO(ceill)
-FOP_PROTO(ceilw)
-FOP_PROTO(floorl)
-FOP_PROTO(floorw)
-FOP_PROTO(rsqrt)
-FOP_PROTO(recip)
-#undef FOP_PROTO
-
-#define FOP_PROTO(op) \
-void do_float_ ## op ## _s(void); \
-void do_float_ ## op ## _d(void); \
-void do_float_ ## op ## _ps(void);
-FOP_PROTO(add)
-FOP_PROTO(sub)
-FOP_PROTO(mul)
-FOP_PROTO(div)
-FOP_PROTO(recip1)
-FOP_PROTO(recip2)
-FOP_PROTO(rsqrt1)
-FOP_PROTO(rsqrt2)
-#undef FOP_PROTO
-
-void do_float_cvtd_s(void);
-void do_float_cvtd_w(void);
-void do_float_cvtd_l(void);
-void do_float_cvtl_d(void);
-void do_float_cvtl_s(void);
-void do_float_cvtps_pw(void);
-void do_float_cvtpw_ps(void);
-void do_float_cvts_d(void);
-void do_float_cvts_w(void);
-void do_float_cvts_l(void);
-void do_float_cvts_pl(void);
-void do_float_cvts_pu(void);
-void do_float_cvtw_s(void);
-void do_float_cvtw_d(void);
-
-void do_float_addr_ps(void);
-void do_float_mulr_ps(void);
-
-#define FOP_PROTO(op) \
-void do_cmp_d_ ## op(long cc); \
-void do_cmpabs_d_ ## op(long cc); \
-void do_cmp_s_ ## op(long cc); \
-void do_cmpabs_s_ ## op(long cc); \
-void do_cmp_ps_ ## op(long cc); \
-void do_cmpabs_ps_ ## op(long cc);
-
-FOP_PROTO(f)
-FOP_PROTO(un)
-FOP_PROTO(eq)
-FOP_PROTO(ueq)
-FOP_PROTO(olt)
-FOP_PROTO(ult)
-FOP_PROTO(ole)
-FOP_PROTO(ule)
-FOP_PROTO(sf)
-FOP_PROTO(ngle)
-FOP_PROTO(seq)
-FOP_PROTO(ngl)
-FOP_PROTO(lt)
-FOP_PROTO(nge)
-FOP_PROTO(le)
-FOP_PROTO(ngt)
-#undef FOP_PROTO
-
static always_inline void env_to_regs(void)
{
}
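All of the prototypes dropped from exec.h above belonged to the old dyngen-style calling convention, in which micro-ops and their C helpers exchanged data through CPU-state globals pinned to host registers rather than through function arguments; the declarations that remain are the ones still called directly from ordinary C code. A rough sketch of that convention, assuming the usual AREG0..AREG2 assignments from dyngen-exec.h (the concrete host registers and the exact declarations are host-specific and not shown in this hunk):

    /* Old convention (sketch): env and the temporaries live in fixed host
     * registers, so helpers are "void f(void)" and communicate only
     * through these globals. */
    register struct CPUMIPSState *env asm(AREG0);
    register target_ulong T0 asm(AREG1);
    register target_ulong T1 asm(AREG2);

    void do_mult (void)
    {
        /* implicit inputs: T0, T1; implicit outputs: env->HI, env->LO */
    }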
diff --git a/target-mips/fop_template.c b/target-mips/fop_template.c
deleted file mode 100644
index 2a85a6b4a..000000000
--- a/target-mips/fop_template.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * MIPS emulation micro-operations templates for floating point reg
- * load & store for qemu.
- *
- * Copyright (c) 2006 Marius Groeger
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#if defined(FREG)
-
-#define OP_WLOAD_FREG(treg, tregname, FREG) \
- void glue(glue(op_load_fpr_,tregname), FREG) (void) \
- { \
- treg = env->fpu->fpr[FREG].w[FP_ENDIAN_IDX]; \
- FORCE_RET(); \
- }
-
-#define OP_WSTORE_FREG(treg, tregname, FREG) \
- void glue(glue(op_store_fpr_,tregname), FREG) (void) \
- { \
- env->fpu->fpr[FREG].w[FP_ENDIAN_IDX] = treg; \
- FORCE_RET(); \
- }
-
-/* WT0 = FREG.w: op_load_fpr_WT0_fprFREG */
-OP_WLOAD_FREG(WT0, WT0_fpr, FREG)
-/* FREG.w = WT0: op_store_fpr_WT0_fprFREG */
-OP_WSTORE_FREG(WT0, WT0_fpr, FREG)
-
-OP_WLOAD_FREG(WT1, WT1_fpr, FREG)
-OP_WSTORE_FREG(WT1, WT1_fpr, FREG)
-
-OP_WLOAD_FREG(WT2, WT2_fpr, FREG)
-OP_WSTORE_FREG(WT2, WT2_fpr, FREG)
-
-#define OP_DLOAD_FREG(treg, tregname, FREG) \
- void glue(glue(op_load_fpr_,tregname), FREG) (void) \
- { \
- if (env->hflags & MIPS_HFLAG_F64) \
- treg = env->fpu->fpr[FREG].d; \
- else \
- treg = (uint64_t)(env->fpu->fpr[FREG | 1].w[FP_ENDIAN_IDX]) << 32 | \
- env->fpu->fpr[FREG & ~1].w[FP_ENDIAN_IDX]; \
- FORCE_RET(); \
- }
-
-#define OP_DSTORE_FREG(treg, tregname, FREG) \
- void glue(glue(op_store_fpr_,tregname), FREG) (void) \
- { \
- if (env->hflags & MIPS_HFLAG_F64) \
- env->fpu->fpr[FREG].d = treg; \
- else { \
- env->fpu->fpr[FREG | 1].w[FP_ENDIAN_IDX] = treg >> 32; \
- env->fpu->fpr[FREG & ~1].w[FP_ENDIAN_IDX] = treg; \
- } \
- FORCE_RET(); \
- }
-
-OP_DLOAD_FREG(DT0, DT0_fpr, FREG)
-OP_DSTORE_FREG(DT0, DT0_fpr, FREG)
-
-OP_DLOAD_FREG(DT1, DT1_fpr, FREG)
-OP_DSTORE_FREG(DT1, DT1_fpr, FREG)
-
-OP_DLOAD_FREG(DT2, DT2_fpr, FREG)
-OP_DSTORE_FREG(DT2, DT2_fpr, FREG)
-
-#define OP_PSLOAD_FREG(treg, tregname, FREG) \
- void glue(glue(op_load_fpr_,tregname), FREG) (void) \
- { \
- treg = env->fpu->fpr[FREG].w[!FP_ENDIAN_IDX]; \
- FORCE_RET(); \
- }
-
-#define OP_PSSTORE_FREG(treg, tregname, FREG) \
- void glue(glue(op_store_fpr_,tregname), FREG) (void) \
- { \
- env->fpu->fpr[FREG].w[!FP_ENDIAN_IDX] = treg; \
- FORCE_RET(); \
- }
-
-OP_PSLOAD_FREG(WTH0, WTH0_fpr, FREG)
-OP_PSSTORE_FREG(WTH0, WTH0_fpr, FREG)
-
-OP_PSLOAD_FREG(WTH1, WTH1_fpr, FREG)
-OP_PSSTORE_FREG(WTH1, WTH1_fpr, FREG)
-
-OP_PSLOAD_FREG(WTH2, WTH2_fpr, FREG)
-OP_PSSTORE_FREG(WTH2, WTH2_fpr, FREG)
-
-#endif
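fop_template.c can go because the per-register FP load/store micro-ops it generated are no longer needed once register moves are emitted through TCG. For context, the file was a multiple-inclusion template: op.c defined FREG to each register number in turn and re-included it, stamping out one op_load_fpr_*/op_store_fpr_* pair per register for each view (w, d, and the paired-single high word). A stripped-down sketch of the pattern, with names shortened for illustration (glue() is QEMU's token-pasting helper):

    /* fp_template.h -- compiled once per FREG value */
    #ifdef FREG
    void glue(op_load_fpr_WT0_fpr, FREG) (void)
    {
        WT0 = env->fpu->fpr[FREG].w[FP_ENDIAN_IDX];
    }
    #endif

    /* op.c -- every inclusion instantiates the template for one register */
    #define FREG 0
    #include "fp_template.h"
    #undef FREG
    #define FREG 1
    #include "fp_template.h"
    #undef FREG
    /* ... repeated up to FREG 31 */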
diff --git a/target-mips/helper.h b/target-mips/helper.h
index 98a97c9be..9fb5d7402 100644
--- a/target-mips/helper.h
+++ b/target-mips/helper.h
@@ -1,8 +1,262 @@
-void do_raise_exception_err(int excp, int err);
-void do_raise_exception(int excp);
-void do_interrupt_restart (void);
-
-void do_clo (void);
-void do_clz (void);
-void do_dclo (void);
-void do_dclz (void);
+#ifndef DEF_HELPER
+#define DEF_HELPER(ret, name, params) ret name params;
+#endif
+
+DEF_HELPER(void, do_raise_exception_err, (int excp, int err))
+DEF_HELPER(void, do_raise_exception, (int excp))
+DEF_HELPER(void, do_interrupt_restart, (void))
+
+#ifdef TARGET_MIPS64
+DEF_HELPER(target_ulong, do_ldl, (target_ulong t0, target_ulong t1, int mem_idx))
+DEF_HELPER(target_ulong, do_ldr, (target_ulong t0, target_ulong t1, int mem_idx))
+DEF_HELPER(void, do_sdl, (target_ulong t0, target_ulong t1, int mem_idx))
+DEF_HELPER(void, do_sdr, (target_ulong t0, target_ulong t1, int mem_idx))
+#endif
+DEF_HELPER(target_ulong, do_lwl, (target_ulong t0, target_ulong t1, int mem_idx))
+DEF_HELPER(target_ulong, do_lwr, (target_ulong t0, target_ulong t1, int mem_idx))
+DEF_HELPER(void, do_swl, (target_ulong t0, target_ulong t1, int mem_idx))
+DEF_HELPER(void, do_swr, (target_ulong t0, target_ulong t1, int mem_idx))
+
+DEF_HELPER(target_ulong, do_clo, (target_ulong t0))
+DEF_HELPER(target_ulong, do_clz, (target_ulong t0))
+#ifdef TARGET_MIPS64
+DEF_HELPER(target_ulong, do_dclo, (target_ulong t0))
+DEF_HELPER(target_ulong, do_dclz, (target_ulong t0))
+DEF_HELPER(void, do_dmult, (target_ulong t0, target_ulong t1))
+DEF_HELPER(void, do_dmultu, (target_ulong t0, target_ulong t1))
+#endif
+
+DEF_HELPER(target_ulong, do_muls, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_mulsu, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_macc, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_maccu, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_msac, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_msacu, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_mulhi, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_mulhiu, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_mulshi, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_mulshiu, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_macchi, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_macchiu, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_msachi, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_msachiu, (target_ulong t0, target_ulong t1))
+
+/* CP0 helpers */
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER(target_ulong, do_mfc0_mvpcontrol, (void))
+DEF_HELPER(target_ulong, do_mfc0_mvpconf0, (void))
+DEF_HELPER(target_ulong, do_mfc0_mvpconf1, (void))
+DEF_HELPER(target_ulong, do_mfc0_random, (void))
+DEF_HELPER(target_ulong, do_mfc0_tcstatus, (void))
+DEF_HELPER(target_ulong, do_mftc0_tcstatus, (void))
+DEF_HELPER(target_ulong, do_mfc0_tcbind, (void))
+DEF_HELPER(target_ulong, do_mftc0_tcbind, (void))
+DEF_HELPER(target_ulong, do_mfc0_tcrestart, (void))
+DEF_HELPER(target_ulong, do_mftc0_tcrestart, (void))
+DEF_HELPER(target_ulong, do_mfc0_tchalt, (void))
+DEF_HELPER(target_ulong, do_mftc0_tchalt, (void))
+DEF_HELPER(target_ulong, do_mfc0_tccontext, (void))
+DEF_HELPER(target_ulong, do_mftc0_tccontext, (void))
+DEF_HELPER(target_ulong, do_mfc0_tcschedule, (void))
+DEF_HELPER(target_ulong, do_mftc0_tcschedule, (void))
+DEF_HELPER(target_ulong, do_mfc0_tcschefback, (void))
+DEF_HELPER(target_ulong, do_mftc0_tcschefback, (void))
+DEF_HELPER(target_ulong, do_mfc0_count, (void))
+DEF_HELPER(target_ulong, do_mftc0_entryhi, (void))
+DEF_HELPER(target_ulong, do_mftc0_status, (void))
+DEF_HELPER(target_ulong, do_mfc0_lladdr, (void))
+DEF_HELPER(target_ulong, do_mfc0_watchlo, (uint32_t sel))
+DEF_HELPER(target_ulong, do_mfc0_watchhi, (uint32_t sel))
+DEF_HELPER(target_ulong, do_mfc0_debug, (void))
+DEF_HELPER(target_ulong, do_mftc0_debug, (void))
+#ifdef TARGET_MIPS64
+DEF_HELPER(target_ulong, do_dmfc0_tcrestart, (void))
+DEF_HELPER(target_ulong, do_dmfc0_tchalt, (void))
+DEF_HELPER(target_ulong, do_dmfc0_tccontext, (void))
+DEF_HELPER(target_ulong, do_dmfc0_tcschedule, (void))
+DEF_HELPER(target_ulong, do_dmfc0_tcschefback, (void))
+DEF_HELPER(target_ulong, do_dmfc0_lladdr, (void))
+DEF_HELPER(target_ulong, do_dmfc0_watchlo, (uint32_t sel))
+#endif /* TARGET_MIPS64 */
+
+DEF_HELPER(void, do_mtc0_index, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_mvpcontrol, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_vpecontrol, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_vpeconf0, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_vpeconf1, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_yqmask, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_vpeopt, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_entrylo0, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tcstatus, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tcstatus, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tcbind, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tcbind, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tcrestart, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tcrestart, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tchalt, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tchalt, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tccontext, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tccontext, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tcschedule, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tcschedule, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_tcschefback, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_tcschefback, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_entrylo1, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_context, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_pagemask, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_pagegrain, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_wired, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_srsconf0, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_srsconf1, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_srsconf2, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_srsconf3, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_srsconf4, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_hwrena, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_count, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_entryhi, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_entryhi, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_compare, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_status, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_status, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_intctl, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_srsctl, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_cause, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_ebase, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_config0, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_config2, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_watchlo, (target_ulong t0, uint32_t sel))
+DEF_HELPER(void, do_mtc0_watchhi, (target_ulong t0, uint32_t sel))
+DEF_HELPER(void, do_mtc0_xcontext, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_framemask, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_debug, (target_ulong t0))
+DEF_HELPER(void, do_mttc0_debug, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_performance0, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_taglo, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_datalo, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_taghi, (target_ulong t0))
+DEF_HELPER(void, do_mtc0_datahi, (target_ulong t0))
+#endif /* !CONFIG_USER_ONLY */
+
+/* MIPS MT functions */
+DEF_HELPER(target_ulong, do_mftgpr, (target_ulong t0, uint32_t sel))
+DEF_HELPER(target_ulong, do_mftlo, (target_ulong t0, uint32_t sel))
+DEF_HELPER(target_ulong, do_mfthi, (target_ulong t0, uint32_t sel))
+DEF_HELPER(target_ulong, do_mftacx, (target_ulong t0, uint32_t sel))
+DEF_HELPER(target_ulong, do_mftdsp, (target_ulong t0))
+DEF_HELPER(void, do_mttgpr, (target_ulong t0, uint32_t sel))
+DEF_HELPER(void, do_mttlo, (target_ulong t0, uint32_t sel))
+DEF_HELPER(void, do_mtthi, (target_ulong t0, uint32_t sel))
+DEF_HELPER(void, do_mttacx, (target_ulong t0, uint32_t sel))
+DEF_HELPER(void, do_mttdsp, (target_ulong t0))
+DEF_HELPER(target_ulong, do_dmt, (target_ulong t0))
+DEF_HELPER(target_ulong, do_emt, (target_ulong t0))
+DEF_HELPER(target_ulong, do_dvpe, (target_ulong t0))
+DEF_HELPER(target_ulong, do_evpe, (target_ulong t0))
+DEF_HELPER(void, do_fork, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_yield, (target_ulong t0))
+
+/* CP1 functions */
+DEF_HELPER(target_ulong, do_cfc1, (uint32_t reg))
+DEF_HELPER(void, do_ctc1, (target_ulong t0, uint32_t reg))
+
+DEF_HELPER(void, do_float_cvtd_s, (void))
+DEF_HELPER(void, do_float_cvtd_w, (void))
+DEF_HELPER(void, do_float_cvtd_l, (void))
+DEF_HELPER(void, do_float_cvtl_d, (void))
+DEF_HELPER(void, do_float_cvtl_s, (void))
+DEF_HELPER(void, do_float_cvtps_pw, (void))
+DEF_HELPER(void, do_float_cvtpw_ps, (void))
+DEF_HELPER(void, do_float_cvts_d, (void))
+DEF_HELPER(void, do_float_cvts_w, (void))
+DEF_HELPER(void, do_float_cvts_l, (void))
+DEF_HELPER(void, do_float_cvts_pl, (void))
+DEF_HELPER(void, do_float_cvts_pu, (void))
+DEF_HELPER(void, do_float_cvtw_s, (void))
+DEF_HELPER(void, do_float_cvtw_d, (void))
+
+DEF_HELPER(void, do_float_addr_ps, (void))
+DEF_HELPER(void, do_float_mulr_ps, (void))
+
+#define FOP_PROTO(op) \
+DEF_HELPER(void, do_float_ ## op ## _s, (void)) \
+DEF_HELPER(void, do_float_ ## op ## _d, (void))
+FOP_PROTO(sqrt)
+FOP_PROTO(roundl)
+FOP_PROTO(roundw)
+FOP_PROTO(truncl)
+FOP_PROTO(truncw)
+FOP_PROTO(ceill)
+FOP_PROTO(ceilw)
+FOP_PROTO(floorl)
+FOP_PROTO(floorw)
+FOP_PROTO(rsqrt)
+FOP_PROTO(recip)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER(void, do_float_ ## op ## _s, (void)) \
+DEF_HELPER(void, do_float_ ## op ## _d, (void)) \
+DEF_HELPER(void, do_float_ ## op ## _ps, (void))
+FOP_PROTO(add)
+FOP_PROTO(sub)
+FOP_PROTO(mul)
+FOP_PROTO(div)
+FOP_PROTO(abs)
+FOP_PROTO(chs)
+FOP_PROTO(muladd)
+FOP_PROTO(mulsub)
+FOP_PROTO(nmuladd)
+FOP_PROTO(nmulsub)
+FOP_PROTO(recip1)
+FOP_PROTO(recip2)
+FOP_PROTO(rsqrt1)
+FOP_PROTO(rsqrt2)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER(void, do_cmp_d_ ## op, (long cc)) \
+DEF_HELPER(void, do_cmpabs_d_ ## op, (long cc)) \
+DEF_HELPER(void, do_cmp_s_ ## op, (long cc)) \
+DEF_HELPER(void, do_cmpabs_s_ ## op, (long cc)) \
+DEF_HELPER(void, do_cmp_ps_ ## op, (long cc)) \
+DEF_HELPER(void, do_cmpabs_ps_ ## op, (long cc))
+FOP_PROTO(f)
+FOP_PROTO(un)
+FOP_PROTO(eq)
+FOP_PROTO(ueq)
+FOP_PROTO(olt)
+FOP_PROTO(ult)
+FOP_PROTO(ole)
+FOP_PROTO(ule)
+FOP_PROTO(sf)
+FOP_PROTO(ngle)
+FOP_PROTO(seq)
+FOP_PROTO(ngl)
+FOP_PROTO(lt)
+FOP_PROTO(nge)
+FOP_PROTO(le)
+FOP_PROTO(ngt)
+#undef FOP_PROTO
+
+/* Special functions */
+DEF_HELPER(target_ulong, do_di, (target_ulong t0))
+DEF_HELPER(target_ulong, do_ei, (target_ulong t0))
+DEF_HELPER(void, do_eret, (void))
+DEF_HELPER(void, do_deret, (void))
+DEF_HELPER(target_ulong, do_rdhwr_cpunum, (target_ulong t0))
+DEF_HELPER(target_ulong, do_rdhwr_synci_step, (target_ulong t0))
+DEF_HELPER(target_ulong, do_rdhwr_cc, (target_ulong t0))
+DEF_HELPER(target_ulong, do_rdhwr_ccres, (target_ulong t0))
+DEF_HELPER(void, do_pmon, (int function))
+DEF_HELPER(void, do_wait, (void))
+
+/* Bitfield operations. */
+DEF_HELPER(target_ulong, do_ext, (target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size))
+DEF_HELPER(target_ulong, do_ins, (target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size))
+DEF_HELPER(target_ulong, do_wsbh, (target_ulong t0, target_ulong t1))
+#ifdef TARGET_MIPS64
+DEF_HELPER(target_ulong, do_dext, (target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size))
+DEF_HELPER(target_ulong, do_dins, (target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size))
+DEF_HELPER(target_ulong, do_dsbh, (target_ulong t0, target_ulong t1))
+DEF_HELPER(target_ulong, do_dshd, (target_ulong t0, target_ulong t1))
+#endif
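The rewritten helper.h describes every helper exactly once through DEF_HELPER(ret, name, params). With the fallback definition at the top of the file, a plain #include simply emits ordinary C prototypes; a consumer may instead define DEF_HELPER itself before including the header and reuse the same list for another purpose. A sketch of both uses follows — the table-building variant is a hypothetical example of the idiom, not something this patch adds:

    /* (1) Default expansion: a plain include yields ordinary prototypes,
     *     e.g.  target_ulong do_clo (target_ulong t0);                  */
    #include "helper.h"

    /* (2) Hypothetical reuse: define DEF_HELPER first and the same list
     *     expands into a table of helper entry points and their names.  */
    #undef DEF_HELPER
    #define DEF_HELPER(ret, name, params) { (void *)name, #name },
    static const struct { void *func; const char *name; } helper_table[] = {
    #include "helper.h"
    };
    #undef DEF_HELPER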
diff --git a/target-mips/op.c b/target-mips/op.c
deleted file mode 100644
index f09c2a451..000000000
--- a/target-mips/op.c
+++ /dev/null
@@ -1,2219 +0,0 @@
-/*
- * MIPS emulation micro-operations for qemu.
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- * Copyright (c) 2006 Marius Groeger (FPU operations)
- * Copyright (c) 2007 Thiemo Seufer (64-bit FPU support)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "config.h"
-#include "exec.h"
-#include "host-utils.h"
-
-#ifndef CALL_FROM_TB0
-#define CALL_FROM_TB0(func) func()
-#endif
-#ifndef CALL_FROM_TB1
-#define CALL_FROM_TB1(func, arg0) func(arg0)
-#endif
-#ifndef CALL_FROM_TB1_CONST16
-#define CALL_FROM_TB1_CONST16(func, arg0) CALL_FROM_TB1(func, arg0)
-#endif
-#ifndef CALL_FROM_TB2
-#define CALL_FROM_TB2(func, arg0, arg1) func(arg0, arg1)
-#endif
-#ifndef CALL_FROM_TB2_CONST16
-#define CALL_FROM_TB2_CONST16(func, arg0, arg1) \
- CALL_FROM_TB2(func, arg0, arg1)
-#endif
-#ifndef CALL_FROM_TB3
-#define CALL_FROM_TB3(func, arg0, arg1, arg2) func(arg0, arg1, arg2)
-#endif
-#ifndef CALL_FROM_TB4
-#define CALL_FROM_TB4(func, arg0, arg1, arg2, arg3) \
- func(arg0, arg1, arg2, arg3)
-#endif
-
-#define FREG 0
-#include "fop_template.c"
-#undef FREG
-#define FREG 1
-#include "fop_template.c"
-#undef FREG
-#define FREG 2
-#include "fop_template.c"
-#undef FREG
-#define FREG 3
-#include "fop_template.c"
-#undef FREG
-#define FREG 4
-#include "fop_template.c"
-#undef FREG
-#define FREG 5
-#include "fop_template.c"
-#undef FREG
-#define FREG 6
-#include "fop_template.c"
-#undef FREG
-#define FREG 7
-#include "fop_template.c"
-#undef FREG
-#define FREG 8
-#include "fop_template.c"
-#undef FREG
-#define FREG 9
-#include "fop_template.c"
-#undef FREG
-#define FREG 10
-#include "fop_template.c"
-#undef FREG
-#define FREG 11
-#include "fop_template.c"
-#undef FREG
-#define FREG 12
-#include "fop_template.c"
-#undef FREG
-#define FREG 13
-#include "fop_template.c"
-#undef FREG
-#define FREG 14
-#include "fop_template.c"
-#undef FREG
-#define FREG 15
-#include "fop_template.c"
-#undef FREG
-#define FREG 16
-#include "fop_template.c"
-#undef FREG
-#define FREG 17
-#include "fop_template.c"
-#undef FREG
-#define FREG 18
-#include "fop_template.c"
-#undef FREG
-#define FREG 19
-#include "fop_template.c"
-#undef FREG
-#define FREG 20
-#include "fop_template.c"
-#undef FREG
-#define FREG 21
-#include "fop_template.c"
-#undef FREG
-#define FREG 22
-#include "fop_template.c"
-#undef FREG
-#define FREG 23
-#include "fop_template.c"
-#undef FREG
-#define FREG 24
-#include "fop_template.c"
-#undef FREG
-#define FREG 25
-#include "fop_template.c"
-#undef FREG
-#define FREG 26
-#include "fop_template.c"
-#undef FREG
-#define FREG 27
-#include "fop_template.c"
-#undef FREG
-#define FREG 28
-#include "fop_template.c"
-#undef FREG
-#define FREG 29
-#include "fop_template.c"
-#undef FREG
-#define FREG 30
-#include "fop_template.c"
-#undef FREG
-#define FREG 31
-#include "fop_template.c"
-#undef FREG
-
-/* Load and store */
-#define MEMSUFFIX _raw
-#include "op_mem.c"
-#undef MEMSUFFIX
-#if !defined(CONFIG_USER_ONLY)
-#define MEMSUFFIX _user
-#include "op_mem.c"
-#undef MEMSUFFIX
-
-#define MEMSUFFIX _super
-#include "op_mem.c"
-#undef MEMSUFFIX
-
-#define MEMSUFFIX _kernel
-#include "op_mem.c"
-#undef MEMSUFFIX
-#endif
-
-/* 64 bits arithmetic */
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-void op_mult (void)
-{
- CALL_FROM_TB0(do_mult);
- FORCE_RET();
-}
-
-void op_multu (void)
-{
- CALL_FROM_TB0(do_multu);
- FORCE_RET();
-}
-
-void op_madd (void)
-{
- CALL_FROM_TB0(do_madd);
- FORCE_RET();
-}
-
-void op_maddu (void)
-{
- CALL_FROM_TB0(do_maddu);
- FORCE_RET();
-}
-
-void op_msub (void)
-{
- CALL_FROM_TB0(do_msub);
- FORCE_RET();
-}
-
-void op_msubu (void)
-{
- CALL_FROM_TB0(do_msubu);
- FORCE_RET();
-}
-
-/* Multiplication variants of the vr54xx. */
-void op_muls (void)
-{
- CALL_FROM_TB0(do_muls);
- FORCE_RET();
-}
-
-void op_mulsu (void)
-{
- CALL_FROM_TB0(do_mulsu);
- FORCE_RET();
-}
-
-void op_macc (void)
-{
- CALL_FROM_TB0(do_macc);
- FORCE_RET();
-}
-
-void op_macchi (void)
-{
- CALL_FROM_TB0(do_macchi);
- FORCE_RET();
-}
-
-void op_maccu (void)
-{
- CALL_FROM_TB0(do_maccu);
- FORCE_RET();
-}
-void op_macchiu (void)
-{
- CALL_FROM_TB0(do_macchiu);
- FORCE_RET();
-}
-
-void op_msac (void)
-{
- CALL_FROM_TB0(do_msac);
- FORCE_RET();
-}
-
-void op_msachi (void)
-{
- CALL_FROM_TB0(do_msachi);
- FORCE_RET();
-}
-
-void op_msacu (void)
-{
- CALL_FROM_TB0(do_msacu);
- FORCE_RET();
-}
-
-void op_msachiu (void)
-{
- CALL_FROM_TB0(do_msachiu);
- FORCE_RET();
-}
-
-void op_mulhi (void)
-{
- CALL_FROM_TB0(do_mulhi);
- FORCE_RET();
-}
-
-void op_mulhiu (void)
-{
- CALL_FROM_TB0(do_mulhiu);
- FORCE_RET();
-}
-
-void op_mulshi (void)
-{
- CALL_FROM_TB0(do_mulshi);
- FORCE_RET();
-}
-
-void op_mulshiu (void)
-{
- CALL_FROM_TB0(do_mulshiu);
- FORCE_RET();
-}
-
-#else /* TARGET_LONG_BITS > HOST_LONG_BITS */
-
-static always_inline uint64_t get_HILO (void)
-{
- return ((uint64_t)env->HI[env->current_tc][0] << 32) |
- ((uint64_t)(uint32_t)env->LO[env->current_tc][0]);
-}
-
-static always_inline void set_HILO (uint64_t HILO)
-{
- env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
- env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
-}
-
-static always_inline void set_HIT0_LO (uint64_t HILO)
-{
- env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
- T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
-}
-
-static always_inline void set_HI_LOT0 (uint64_t HILO)
-{
- T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
- env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
-}
-
-void op_mult (void)
-{
- set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
- FORCE_RET();
-}
-
-void op_multu (void)
-{
- set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
- FORCE_RET();
-}
-
-void op_madd (void)
-{
- int64_t tmp;
-
- tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
- set_HILO((int64_t)get_HILO() + tmp);
- FORCE_RET();
-}
-
-void op_maddu (void)
-{
- uint64_t tmp;
-
- tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
- set_HILO(get_HILO() + tmp);
- FORCE_RET();
-}
-
-void op_msub (void)
-{
- int64_t tmp;
-
- tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
- set_HILO((int64_t)get_HILO() - tmp);
- FORCE_RET();
-}
-
-void op_msubu (void)
-{
- uint64_t tmp;
-
- tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
- set_HILO(get_HILO() - tmp);
- FORCE_RET();
-}
-
-/* Multiplication variants of the vr54xx. */
-void op_muls (void)
-{
- set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
- FORCE_RET();
-}
-
-void op_mulsu (void)
-{
- set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
- FORCE_RET();
-}
-
-void op_macc (void)
-{
- set_HI_LOT0(get_HILO() + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
- FORCE_RET();
-}
-
-void op_macchi (void)
-{
- set_HIT0_LO(get_HILO() + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
- FORCE_RET();
-}
-
-void op_maccu (void)
-{
- set_HI_LOT0(get_HILO() + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
- FORCE_RET();
-}
-
-void op_macchiu (void)
-{
- set_HIT0_LO(get_HILO() + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
- FORCE_RET();
-}
-
-void op_msac (void)
-{
- set_HI_LOT0(get_HILO() - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
- FORCE_RET();
-}
-
-void op_msachi (void)
-{
- set_HIT0_LO(get_HILO() - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
- FORCE_RET();
-}
-
-void op_msacu (void)
-{
- set_HI_LOT0(get_HILO() - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
- FORCE_RET();
-}
-
-void op_msachiu (void)
-{
- set_HIT0_LO(get_HILO() - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
- FORCE_RET();
-}
-
-void op_mulhi (void)
-{
- set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
- FORCE_RET();
-}
-
-void op_mulhiu (void)
-{
- set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
- FORCE_RET();
-}
-
-void op_mulshi (void)
-{
- set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
- FORCE_RET();
-}
-
-void op_mulshiu (void)
-{
- set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
- FORCE_RET();
-}
-
-#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
-
-#if defined(TARGET_MIPS64)
-void op_dmult (void)
-{
- CALL_FROM_TB4(muls64, &(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), T0, T1);
- FORCE_RET();
-}
-
-void op_dmultu (void)
-{
- CALL_FROM_TB4(mulu64, &(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), T0, T1);
- FORCE_RET();
-}
-#endif
-
-/* CP0 functions */
-void op_mfc0_mvpcontrol (void)
-{
- T0 = env->mvp->CP0_MVPControl;
- FORCE_RET();
-}
-
-void op_mfc0_mvpconf0 (void)
-{
- T0 = env->mvp->CP0_MVPConf0;
- FORCE_RET();
-}
-
-void op_mfc0_mvpconf1 (void)
-{
- T0 = env->mvp->CP0_MVPConf1;
- FORCE_RET();
-}
-
-void op_mfc0_random (void)
-{
- CALL_FROM_TB0(do_mfc0_random);
- FORCE_RET();
-}
-
-void op_mfc0_tcstatus (void)
-{
- T0 = env->CP0_TCStatus[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tcstatus(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->CP0_TCStatus[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_tcbind (void)
-{
- T0 = env->CP0_TCBind[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tcbind(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->CP0_TCBind[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_tcrestart (void)
-{
- T0 = env->PC[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tcrestart(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->PC[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_tchalt (void)
-{
- T0 = env->CP0_TCHalt[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tchalt(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->CP0_TCHalt[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_tccontext (void)
-{
- T0 = env->CP0_TCContext[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tccontext(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->CP0_TCContext[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_tcschedule (void)
-{
- T0 = env->CP0_TCSchedule[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tcschedule(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->CP0_TCSchedule[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_tcschefback (void)
-{
- T0 = env->CP0_TCScheFBack[env->current_tc];
- FORCE_RET();
-}
-
-void op_mftc0_tcschefback(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->CP0_TCScheFBack[other_tc];
- FORCE_RET();
-}
-
-void op_mfc0_count (void)
-{
- CALL_FROM_TB0(do_mfc0_count);
- FORCE_RET();
-}
-
-void op_mftc0_entryhi(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = (env->CP0_EntryHi & ~0xff) | (env->CP0_TCStatus[other_tc] & 0xff);
- FORCE_RET();
-}
-
-void op_mftc0_status(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- uint32_t tcstatus = env->CP0_TCStatus[other_tc];
-
- T0 = env->CP0_Status & ~0xf1000018;
- T0 |= tcstatus & (0xf << CP0TCSt_TCU0);
- T0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
- T0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
- FORCE_RET();
-}
-
-void op_mfc0_lladdr (void)
-{
- T0 = (int32_t)env->CP0_LLAddr >> 4;
- FORCE_RET();
-}
-
-void op_mfc0_watchlo (void)
-{
- T0 = (int32_t)env->CP0_WatchLo[PARAM1];
- FORCE_RET();
-}
-
-void op_mfc0_watchhi (void)
-{
- T0 = env->CP0_WatchHi[PARAM1];
- FORCE_RET();
-}
-
-void op_mfc0_debug (void)
-{
- T0 = env->CP0_Debug;
- if (env->hflags & MIPS_HFLAG_DM)
- T0 |= 1 << CP0DB_DM;
- FORCE_RET();
-}
-
-void op_mftc0_debug(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- /* XXX: Might be wrong, check with EJTAG spec. */
- T0 = (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
- (env->CP0_Debug_tcstatus[other_tc] &
- ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
- FORCE_RET();
-}
-
-void op_mtc0_index (void)
-{
- int num = 1;
- unsigned int tmp = env->tlb->nb_tlb;
-
- do {
- tmp >>= 1;
- num <<= 1;
- } while (tmp);
- env->CP0_Index = (env->CP0_Index & 0x80000000) | (T0 & (num - 1));
- FORCE_RET();
-}
-
-void op_mtc0_mvpcontrol (void)
-{
- uint32_t mask = 0;
- uint32_t newval;
-
- if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
- mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
- (1 << CP0MVPCo_EVP);
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
- mask |= (1 << CP0MVPCo_STLB);
- newval = (env->mvp->CP0_MVPControl & ~mask) | (T0 & mask);
-
- // TODO: Enable/disable shared TLB, enable/disable VPEs.
-
- env->mvp->CP0_MVPControl = newval;
- FORCE_RET();
-}
-
-void op_mtc0_vpecontrol (void)
-{
- uint32_t mask;
- uint32_t newval;
-
- mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
- (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
- newval = (env->CP0_VPEControl & ~mask) | (T0 & mask);
-
- /* Yield scheduler intercept not implemented. */
- /* Gating storage scheduler intercept not implemented. */
-
- // TODO: Enable/disable TCs.
-
- env->CP0_VPEControl = newval;
- FORCE_RET();
-}
-
-void op_mtc0_vpeconf0 (void)
-{
- uint32_t mask = 0;
- uint32_t newval;
-
- if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
- if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
- mask |= (0xff << CP0VPEC0_XTC);
- mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
- }
- newval = (env->CP0_VPEConf0 & ~mask) | (T0 & mask);
-
- // TODO: TC exclusive handling due to ERL/EXL.
-
- env->CP0_VPEConf0 = newval;
- FORCE_RET();
-}
-
-void op_mtc0_vpeconf1 (void)
-{
- uint32_t mask = 0;
- uint32_t newval;
-
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
- mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
- (0xff << CP0VPEC1_NCP1);
- newval = (env->CP0_VPEConf1 & ~mask) | (T0 & mask);
-
- /* UDI not implemented. */
- /* CP2 not implemented. */
-
- // TODO: Handle FPU (CP1) binding.
-
- env->CP0_VPEConf1 = newval;
- FORCE_RET();
-}
-
-void op_mtc0_yqmask (void)
-{
- /* Yield qualifier inputs not implemented. */
- env->CP0_YQMask = 0x00000000;
- FORCE_RET();
-}
-
-void op_mtc0_vpeschedule (void)
-{
- env->CP0_VPESchedule = T0;
- FORCE_RET();
-}
-
-void op_mtc0_vpeschefback (void)
-{
- env->CP0_VPEScheFBack = T0;
- FORCE_RET();
-}
-
-void op_mtc0_vpeopt (void)
-{
- env->CP0_VPEOpt = T0 & 0x0000ffff;
- FORCE_RET();
-}
-
-void op_mtc0_entrylo0 (void)
-{
- /* Large physaddr (PABITS) not implemented */
- /* 1k pages not implemented */
- env->CP0_EntryLo0 = T0 & 0x3FFFFFFF;
- FORCE_RET();
-}
-
-void op_mtc0_tcstatus (void)
-{
- uint32_t mask = env->CP0_TCStatus_rw_bitmask;
- uint32_t newval;
-
- newval = (env->CP0_TCStatus[env->current_tc] & ~mask) | (T0 & mask);
-
- // TODO: Sync with CP0_Status.
-
- env->CP0_TCStatus[env->current_tc] = newval;
- FORCE_RET();
-}
-
-void op_mttc0_tcstatus (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- // TODO: Sync with CP0_Status.
-
- env->CP0_TCStatus[other_tc] = T0;
- FORCE_RET();
-}
-
-void op_mtc0_tcbind (void)
-{
- uint32_t mask = (1 << CP0TCBd_TBE);
- uint32_t newval;
-
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
- mask |= (1 << CP0TCBd_CurVPE);
- newval = (env->CP0_TCBind[env->current_tc] & ~mask) | (T0 & mask);
- env->CP0_TCBind[env->current_tc] = newval;
- FORCE_RET();
-}
-
-void op_mttc0_tcbind (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- uint32_t mask = (1 << CP0TCBd_TBE);
- uint32_t newval;
-
- if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
- mask |= (1 << CP0TCBd_CurVPE);
- newval = (env->CP0_TCBind[other_tc] & ~mask) | (T0 & mask);
- env->CP0_TCBind[other_tc] = newval;
- FORCE_RET();
-}
-
-void op_mtc0_tcrestart (void)
-{
- env->PC[env->current_tc] = T0;
- env->CP0_TCStatus[env->current_tc] &= ~(1 << CP0TCSt_TDS);
- env->CP0_LLAddr = 0ULL;
- /* MIPS16 not implemented. */
- FORCE_RET();
-}
-
-void op_mttc0_tcrestart (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- env->PC[other_tc] = T0;
- env->CP0_TCStatus[other_tc] &= ~(1 << CP0TCSt_TDS);
- env->CP0_LLAddr = 0ULL;
- /* MIPS16 not implemented. */
- FORCE_RET();
-}
-
-void op_mtc0_tchalt (void)
-{
- env->CP0_TCHalt[env->current_tc] = T0 & 0x1;
-
- // TODO: Halt TC / Restart (if allocated+active) TC.
-
- FORCE_RET();
-}
-
-void op_mttc0_tchalt (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- // TODO: Halt TC / Restart (if allocated+active) TC.
-
- env->CP0_TCHalt[other_tc] = T0;
- FORCE_RET();
-}
-
-void op_mtc0_tccontext (void)
-{
- env->CP0_TCContext[env->current_tc] = T0;
- FORCE_RET();
-}
-
-void op_mttc0_tccontext (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- env->CP0_TCContext[other_tc] = T0;
- FORCE_RET();
-}
-
-void op_mtc0_tcschedule (void)
-{
- env->CP0_TCSchedule[env->current_tc] = T0;
- FORCE_RET();
-}
-
-void op_mttc0_tcschedule (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- env->CP0_TCSchedule[other_tc] = T0;
- FORCE_RET();
-}
-
-void op_mtc0_tcschefback (void)
-{
- env->CP0_TCScheFBack[env->current_tc] = T0;
- FORCE_RET();
-}
-
-void op_mttc0_tcschefback (void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- env->CP0_TCScheFBack[other_tc] = T0;
- FORCE_RET();
-}
-
-void op_mtc0_entrylo1 (void)
-{
- /* Large physaddr (PABITS) not implemented */
- /* 1k pages not implemented */
- env->CP0_EntryLo1 = T0 & 0x3FFFFFFF;
- FORCE_RET();
-}
-
-void op_mtc0_context (void)
-{
- env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (T0 & ~0x007FFFFF);
- FORCE_RET();
-}
-
-void op_mtc0_pagemask (void)
-{
- /* 1k pages not implemented */
- env->CP0_PageMask = T0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
- FORCE_RET();
-}
-
-void op_mtc0_pagegrain (void)
-{
- /* SmartMIPS not implemented */
- /* Large physaddr (PABITS) not implemented */
- /* 1k pages not implemented */
- env->CP0_PageGrain = 0;
- FORCE_RET();
-}
-
-void op_mtc0_wired (void)
-{
- env->CP0_Wired = T0 % env->tlb->nb_tlb;
- FORCE_RET();
-}
-
-void op_mtc0_srsconf0 (void)
-{
- env->CP0_SRSConf0 |= T0 & env->CP0_SRSConf0_rw_bitmask;
- FORCE_RET();
-}
-
-void op_mtc0_srsconf1 (void)
-{
- env->CP0_SRSConf1 |= T0 & env->CP0_SRSConf1_rw_bitmask;
- FORCE_RET();
-}
-
-void op_mtc0_srsconf2 (void)
-{
- env->CP0_SRSConf2 |= T0 & env->CP0_SRSConf2_rw_bitmask;
- FORCE_RET();
-}
-
-void op_mtc0_srsconf3 (void)
-{
- env->CP0_SRSConf3 |= T0 & env->CP0_SRSConf3_rw_bitmask;
- FORCE_RET();
-}
-
-void op_mtc0_srsconf4 (void)
-{
- env->CP0_SRSConf4 |= T0 & env->CP0_SRSConf4_rw_bitmask;
- FORCE_RET();
-}
-
-void op_mtc0_hwrena (void)
-{
- env->CP0_HWREna = T0 & 0x0000000F;
- FORCE_RET();
-}
-
-void op_mtc0_count (void)
-{
- CALL_FROM_TB2(cpu_mips_store_count, env, T0);
- FORCE_RET();
-}
-
-void op_mtc0_entryhi (void)
-{
- target_ulong old, val;
-
- /* 1k pages not implemented */
- val = T0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
-#if defined(TARGET_MIPS64)
- val &= env->SEGMask;
-#endif
- old = env->CP0_EntryHi;
- env->CP0_EntryHi = val;
- if (env->CP0_Config3 & (1 << CP0C3_MT)) {
- uint32_t tcst = env->CP0_TCStatus[env->current_tc] & ~0xff;
- env->CP0_TCStatus[env->current_tc] = tcst | (val & 0xff);
- }
- /* If the ASID changes, flush qemu's TLB. */
- if ((old & 0xFF) != (val & 0xFF))
- CALL_FROM_TB2(cpu_mips_tlb_flush, env, 1);
- FORCE_RET();
-}
-
-void op_mttc0_entryhi(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (T0 & ~0xff);
- env->CP0_TCStatus[other_tc] = (env->CP0_TCStatus[other_tc] & ~0xff) | (T0 & 0xff);
- FORCE_RET();
-}
-
-void op_mtc0_compare (void)
-{
- CALL_FROM_TB2(cpu_mips_store_compare, env, T0);
- FORCE_RET();
-}
-
-void op_mtc0_status (void)
-{
- uint32_t val, old;
- uint32_t mask = env->CP0_Status_rw_bitmask;
-
- val = T0 & mask;
- old = env->CP0_Status;
- env->CP0_Status = (env->CP0_Status & ~mask) | val;
- CALL_FROM_TB1(compute_hflags, env);
- if (loglevel & CPU_LOG_EXEC)
- CALL_FROM_TB2(do_mtc0_status_debug, old, val);
- CALL_FROM_TB1(cpu_mips_update_irq, env);
- FORCE_RET();
-}
-
-void op_mttc0_status(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
- uint32_t tcstatus = env->CP0_TCStatus[other_tc];
-
- env->CP0_Status = T0 & ~0xf1000018;
- tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (T0 & (0xf << CP0St_CU0));
- tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((T0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
- tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((T0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
- env->CP0_TCStatus[other_tc] = tcstatus;
- FORCE_RET();
-}
-
-void op_mtc0_intctl (void)
-{
- /* vectored interrupts not implemented, no performance counters. */
- env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (T0 & 0x000002e0);
- FORCE_RET();
-}
-
-void op_mtc0_srsctl (void)
-{
- uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
- env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (T0 & mask);
- FORCE_RET();
-}
-
-void op_mtc0_srsmap (void)
-{
- env->CP0_SRSMap = T0;
- FORCE_RET();
-}
-
-void op_mtc0_cause (void)
-{
- uint32_t mask = 0x00C00300;
- uint32_t old = env->CP0_Cause;
-
- if (env->insn_flags & ISA_MIPS32R2)
- mask |= 1 << CP0Ca_DC;
-
- env->CP0_Cause = (env->CP0_Cause & ~mask) | (T0 & mask);
-
- if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
- if (env->CP0_Cause & (1 << CP0Ca_DC))
- CALL_FROM_TB1(cpu_mips_stop_count, env);
- else
- CALL_FROM_TB1(cpu_mips_start_count, env);
- }
-
-    /* Handle the software interrupt as a hardware one, as they
-       are very similar */
- if (T0 & CP0Ca_IP_mask) {
- CALL_FROM_TB1(cpu_mips_update_irq, env);
- }
- FORCE_RET();
-}
-
-void op_mtc0_epc (void)
-{
- env->CP0_EPC = T0;
- FORCE_RET();
-}
-
-void op_mtc0_ebase (void)
-{
- /* vectored interrupts not implemented */
- /* Multi-CPU not implemented */
- env->CP0_EBase = 0x80000000 | (T0 & 0x3FFFF000);
- FORCE_RET();
-}
-
-void op_mtc0_config0 (void)
-{
- env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (T0 & 0x00000007);
- FORCE_RET();
-}
-
-void op_mtc0_config2 (void)
-{
- /* tertiary/secondary caches not implemented */
- env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
- FORCE_RET();
-}
-
-void op_mtc0_watchlo (void)
-{
- /* Watch exceptions for instructions, data loads, data stores
- not implemented. */
- env->CP0_WatchLo[PARAM1] = (T0 & ~0x7);
- FORCE_RET();
-}
-
-void op_mtc0_watchhi (void)
-{
- env->CP0_WatchHi[PARAM1] = (T0 & 0x40FF0FF8);
- env->CP0_WatchHi[PARAM1] &= ~(env->CP0_WatchHi[PARAM1] & T0 & 0x7);
- FORCE_RET();
-}
-
-void op_mtc0_xcontext (void)
-{
- target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
- env->CP0_XContext = (env->CP0_XContext & mask) | (T0 & ~mask);
- FORCE_RET();
-}
-
-void op_mtc0_framemask (void)
-{
- env->CP0_Framemask = T0; /* XXX */
- FORCE_RET();
-}
-
-void op_mtc0_debug (void)
-{
- env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (T0 & 0x13300120);
- if (T0 & (1 << CP0DB_DM))
- env->hflags |= MIPS_HFLAG_DM;
- else
- env->hflags &= ~MIPS_HFLAG_DM;
- FORCE_RET();
-}
-
-void op_mttc0_debug(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- /* XXX: Might be wrong, check with EJTAG spec. */
- env->CP0_Debug_tcstatus[other_tc] = T0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
- env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
- (T0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
- FORCE_RET();
-}
-
-void op_mtc0_depc (void)
-{
- env->CP0_DEPC = T0;
- FORCE_RET();
-}
-
-void op_mtc0_performance0 (void)
-{
- env->CP0_Performance0 = T0 & 0x000007ff;
- FORCE_RET();
-}
-
-void op_mtc0_taglo (void)
-{
- env->CP0_TagLo = T0 & 0xFFFFFCF6;
- FORCE_RET();
-}
-
-void op_mtc0_datalo (void)
-{
- env->CP0_DataLo = T0; /* XXX */
- FORCE_RET();
-}
-
-void op_mtc0_taghi (void)
-{
- env->CP0_TagHi = T0; /* XXX */
- FORCE_RET();
-}
-
-void op_mtc0_datahi (void)
-{
- env->CP0_DataHi = T0; /* XXX */
- FORCE_RET();
-}
-
-void op_mtc0_errorepc (void)
-{
- env->CP0_ErrorEPC = T0;
- FORCE_RET();
-}
-
-void op_mtc0_desave (void)
-{
- env->CP0_DESAVE = T0;
- FORCE_RET();
-}
-
-#if defined(TARGET_MIPS64)
-void op_dmfc0_tcrestart (void)
-{
- T0 = env->PC[env->current_tc];
- FORCE_RET();
-}
-
-void op_dmfc0_tchalt (void)
-{
- T0 = env->CP0_TCHalt[env->current_tc];
- FORCE_RET();
-}
-
-void op_dmfc0_tccontext (void)
-{
- T0 = env->CP0_TCContext[env->current_tc];
- FORCE_RET();
-}
-
-void op_dmfc0_tcschedule (void)
-{
- T0 = env->CP0_TCSchedule[env->current_tc];
- FORCE_RET();
-}
-
-void op_dmfc0_tcschefback (void)
-{
- T0 = env->CP0_TCScheFBack[env->current_tc];
- FORCE_RET();
-}
-
-void op_dmfc0_lladdr (void)
-{
- T0 = env->CP0_LLAddr >> 4;
- FORCE_RET();
-}
-
-void op_dmfc0_watchlo (void)
-{
- T0 = env->CP0_WatchLo[PARAM1];
- FORCE_RET();
-}
-#endif /* TARGET_MIPS64 */
-
-/* MIPS MT functions */
-void op_mftgpr(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->gpr[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mftlo(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->LO[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mfthi(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->HI[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mftacx(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->ACX[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mftdsp(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->DSPControl[other_tc];
- FORCE_RET();
-}
-
-void op_mttgpr(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->gpr[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mttlo(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->LO[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mtthi(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->HI[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mttacx(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->ACX[other_tc][PARAM1];
- FORCE_RET();
-}
-
-void op_mttdsp(void)
-{
- int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
-
- T0 = env->DSPControl[other_tc];
- FORCE_RET();
-}
-
-
-void op_dmt(void)
-{
- // TODO
- T0 = 0;
- // rt = T0
- FORCE_RET();
-}
-
-void op_emt(void)
-{
- // TODO
- T0 = 0;
- // rt = T0
- FORCE_RET();
-}
-
-void op_dvpe(void)
-{
- // TODO
- T0 = 0;
- // rt = T0
- FORCE_RET();
-}
-
-void op_evpe(void)
-{
- // TODO
- T0 = 0;
- // rt = T0
- FORCE_RET();
-}
-
-void op_fork(void)
-{
- // T0 = rt, T1 = rs
- T0 = 0;
- // TODO: store to TC register
- FORCE_RET();
-}
-
-void op_yield(void)
-{
- if (T0 < 0) {
- /* No scheduling policy implemented. */
- if (T0 != -2) {
- if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
- env->CP0_TCStatus[env->current_tc] & (1 << CP0TCSt_DT)) {
- env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
- env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
- CALL_FROM_TB1(do_raise_exception, EXCP_THREAD);
- }
- }
- } else if (T0 == 0) {
- if (0 /* TODO: TC underflow */) {
- env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
- CALL_FROM_TB1(do_raise_exception, EXCP_THREAD);
- } else {
- // TODO: Deallocate TC
- }
- } else if (T0 > 0) {
- /* Yield qualifier inputs not implemented. */
- env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
- env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
- CALL_FROM_TB1(do_raise_exception, EXCP_THREAD);
- }
- T0 = env->CP0_YQMask;
- FORCE_RET();
-}
-
-/* CP1 functions */
-#if 0
-# define DEBUG_FPU_STATE() CALL_FROM_TB1(dump_fpu, env)
-#else
-# define DEBUG_FPU_STATE() do { } while(0)
-#endif
-
-void op_cfc1 (void)
-{
- CALL_FROM_TB1(do_cfc1, PARAM1);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_ctc1 (void)
-{
- CALL_FROM_TB1(do_ctc1, PARAM1);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_mfc1 (void)
-{
- T0 = (int32_t)WT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_mtc1 (void)
-{
- WT0 = T0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_dmfc1 (void)
-{
- T0 = DT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_dmtc1 (void)
-{
- DT0 = T0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_mfhc1 (void)
-{
- T0 = (int32_t)WTH0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_mthc1 (void)
-{
- WTH0 = T0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-/* Float support.
-   Single precision routines have an "s" suffix, double precision a
-   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
-   paired single lower "pl", paired single upper "pu". */
-
-#define FLOAT_OP(name, p) void OPPROTO op_float_##name##_##p(void)
-
-FLOAT_OP(cvtd, s)
-{
- CALL_FROM_TB0(do_float_cvtd_s);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtd, w)
-{
- CALL_FROM_TB0(do_float_cvtd_w);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtd, l)
-{
- CALL_FROM_TB0(do_float_cvtd_l);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtl, d)
-{
- CALL_FROM_TB0(do_float_cvtl_d);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtl, s)
-{
- CALL_FROM_TB0(do_float_cvtl_s);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtps, s)
-{
- WT2 = WT0;
- WTH2 = WT1;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtps, pw)
-{
- CALL_FROM_TB0(do_float_cvtps_pw);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtpw, ps)
-{
- CALL_FROM_TB0(do_float_cvtpw_ps);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvts, d)
-{
- CALL_FROM_TB0(do_float_cvts_d);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvts, w)
-{
- CALL_FROM_TB0(do_float_cvts_w);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvts, l)
-{
- CALL_FROM_TB0(do_float_cvts_l);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvts, pl)
-{
- CALL_FROM_TB0(do_float_cvts_pl);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvts, pu)
-{
- CALL_FROM_TB0(do_float_cvts_pu);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtw, s)
-{
- CALL_FROM_TB0(do_float_cvtw_s);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(cvtw, d)
-{
- CALL_FROM_TB0(do_float_cvtw_d);
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-FLOAT_OP(pll, ps)
-{
- DT2 = ((uint64_t)WT0 << 32) | WT1;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(plu, ps)
-{
- DT2 = ((uint64_t)WT0 << 32) | WTH1;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(pul, ps)
-{
- DT2 = ((uint64_t)WTH0 << 32) | WT1;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(puu, ps)
-{
- DT2 = ((uint64_t)WTH0 << 32) | WTH1;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-#define FLOAT_ROUNDOP(op, ttype, stype) \
-FLOAT_OP(op ## ttype, stype) \
-{ \
- CALL_FROM_TB0(do_float_ ## op ## ttype ## _ ## stype); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-
-FLOAT_ROUNDOP(round, l, d)
-FLOAT_ROUNDOP(round, l, s)
-FLOAT_ROUNDOP(round, w, d)
-FLOAT_ROUNDOP(round, w, s)
-
-FLOAT_ROUNDOP(trunc, l, d)
-FLOAT_ROUNDOP(trunc, l, s)
-FLOAT_ROUNDOP(trunc, w, d)
-FLOAT_ROUNDOP(trunc, w, s)
-
-FLOAT_ROUNDOP(ceil, l, d)
-FLOAT_ROUNDOP(ceil, l, s)
-FLOAT_ROUNDOP(ceil, w, d)
-FLOAT_ROUNDOP(ceil, w, s)
-
-FLOAT_ROUNDOP(floor, l, d)
-FLOAT_ROUNDOP(floor, l, s)
-FLOAT_ROUNDOP(floor, w, d)
-FLOAT_ROUNDOP(floor, w, s)
-#undef FLOAT_ROUNDOP
-
-FLOAT_OP(movf, d)
-{
- if (!(env->fpu->fcr31 & PARAM1))
- DT2 = DT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movf, s)
-{
- if (!(env->fpu->fcr31 & PARAM1))
- WT2 = WT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movf, ps)
-{
- unsigned int mask = GET_FP_COND (env->fpu) >> PARAM1;
- if (!(mask & 1))
- WT2 = WT0;
- if (!(mask & 2))
- WTH2 = WTH0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movt, d)
-{
- if (env->fpu->fcr31 & PARAM1)
- DT2 = DT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movt, s)
-{
- if (env->fpu->fcr31 & PARAM1)
- WT2 = WT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movt, ps)
-{
- unsigned int mask = GET_FP_COND (env->fpu) >> PARAM1;
- if (mask & 1)
- WT2 = WT0;
- if (mask & 2)
- WTH2 = WTH0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movz, d)
-{
- if (!T0)
- DT2 = DT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movz, s)
-{
- if (!T0)
- WT2 = WT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movz, ps)
-{
- if (!T0) {
- WT2 = WT0;
- WTH2 = WTH0;
- }
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movn, d)
-{
- if (T0)
- DT2 = DT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movn, s)
-{
- if (T0)
- WT2 = WT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(movn, ps)
-{
- if (T0) {
- WT2 = WT0;
- WTH2 = WTH0;
- }
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-/* operations calling helpers, for s, d and ps */
-#define FLOAT_HOP(name) \
-FLOAT_OP(name, d) \
-{ \
- CALL_FROM_TB0(do_float_ ## name ## _d); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name, s) \
-{ \
- CALL_FROM_TB0(do_float_ ## name ## _s); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name, ps) \
-{ \
- CALL_FROM_TB0(do_float_ ## name ## _ps); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_HOP(add)
-FLOAT_HOP(sub)
-FLOAT_HOP(mul)
-FLOAT_HOP(div)
-FLOAT_HOP(recip2)
-FLOAT_HOP(rsqrt2)
-FLOAT_HOP(rsqrt1)
-FLOAT_HOP(recip1)
-#undef FLOAT_HOP
-
-/* operations calling helpers, for s and d */
-#define FLOAT_HOP(name) \
-FLOAT_OP(name, d) \
-{ \
- CALL_FROM_TB0(do_float_ ## name ## _d); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name, s) \
-{ \
- CALL_FROM_TB0(do_float_ ## name ## _s); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_HOP(rsqrt)
-FLOAT_HOP(recip)
-#undef FLOAT_HOP
-
-/* operations calling helpers, for ps */
-#define FLOAT_HOP(name) \
-FLOAT_OP(name, ps) \
-{ \
- CALL_FROM_TB0(do_float_ ## name ## _ps); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_HOP(addr)
-FLOAT_HOP(mulr)
-#undef FLOAT_HOP
-
-/* ternary operations */
-#define FLOAT_TERNOP(name1, name2) \
-FLOAT_OP(name1 ## name2, d) \
-{ \
- FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \
- FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name1 ## name2, s) \
-{ \
- FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
- FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name1 ## name2, ps) \
-{ \
- FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
- FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
- FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
- FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_TERNOP(mul, add)
-FLOAT_TERNOP(mul, sub)
-#undef FLOAT_TERNOP
-
-/* negated ternary operations */
-#define FLOAT_NTERNOP(name1, name2) \
-FLOAT_OP(n ## name1 ## name2, d) \
-{ \
- FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \
- FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \
- FDT2 = float64_chs(FDT2); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(n ## name1 ## name2, s) \
-{ \
- FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
- FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
- FST2 = float32_chs(FST2); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(n ## name1 ## name2, ps) \
-{ \
- FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
- FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
- FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
- FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
- FST2 = float32_chs(FST2); \
- FSTH2 = float32_chs(FSTH2); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_NTERNOP(mul, add)
-FLOAT_NTERNOP(mul, sub)
-#undef FLOAT_NTERNOP
-
-/* unary operations, modifying fp status */
-#define FLOAT_UNOP(name) \
-FLOAT_OP(name, d) \
-{ \
- FDT2 = float64_ ## name(FDT0, &env->fpu->fp_status); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name, s) \
-{ \
- FST2 = float32_ ## name(FST0, &env->fpu->fp_status); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_UNOP(sqrt)
-#undef FLOAT_UNOP
-
-/* unary operations, not modifying fp status */
-#define FLOAT_UNOP(name) \
-FLOAT_OP(name, d) \
-{ \
- FDT2 = float64_ ## name(FDT0); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name, s) \
-{ \
- FST2 = float32_ ## name(FST0); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-FLOAT_OP(name, ps) \
-{ \
- FST2 = float32_ ## name(FST0); \
- FSTH2 = float32_ ## name(FSTH0); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-FLOAT_UNOP(abs)
-FLOAT_UNOP(chs)
-#undef FLOAT_UNOP
-
-FLOAT_OP(mov, d)
-{
- FDT2 = FDT0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(mov, s)
-{
- FST2 = FST0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(mov, ps)
-{
- FST2 = FST0;
- FSTH2 = FSTH0;
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-FLOAT_OP(alnv, ps)
-{
- switch (T0 & 0x7) {
- case 0:
- FST2 = FST0;
- FSTH2 = FSTH0;
- break;
- case 4:
-#ifdef TARGET_WORDS_BIGENDIAN
- FSTH2 = FST0;
- FST2 = FSTH1;
-#else
- FSTH2 = FST1;
- FST2 = FSTH0;
-#endif
- break;
- default: /* unpredictable */
- break;
- }
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-#ifdef CONFIG_SOFTFLOAT
-#define clear_invalid() do { \
- int flags = get_float_exception_flags(&env->fpu->fp_status); \
- flags &= ~float_flag_invalid; \
- set_float_exception_flags(flags, &env->fpu->fp_status); \
-} while(0)
-#else
-#define clear_invalid() do { } while(0)
-#endif
-
-extern void dump_fpu_s(CPUState *env);
-
-#define CMP_OP(fmt, op) \
-void OPPROTO op_cmp ## _ ## fmt ## _ ## op(void) \
-{ \
- CALL_FROM_TB1(do_cmp ## _ ## fmt ## _ ## op, PARAM1); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-} \
-void OPPROTO op_cmpabs ## _ ## fmt ## _ ## op(void) \
-{ \
- CALL_FROM_TB1(do_cmpabs ## _ ## fmt ## _ ## op, PARAM1); \
- DEBUG_FPU_STATE(); \
- FORCE_RET(); \
-}
-#define CMP_OPS(op) \
-CMP_OP(d, op) \
-CMP_OP(s, op) \
-CMP_OP(ps, op)
-
-CMP_OPS(f)
-CMP_OPS(un)
-CMP_OPS(eq)
-CMP_OPS(ueq)
-CMP_OPS(olt)
-CMP_OPS(ult)
-CMP_OPS(ole)
-CMP_OPS(ule)
-CMP_OPS(sf)
-CMP_OPS(ngle)
-CMP_OPS(seq)
-CMP_OPS(ngl)
-CMP_OPS(lt)
-CMP_OPS(nge)
-CMP_OPS(le)
-CMP_OPS(ngt)
-#undef CMP_OPS
-#undef CMP_OP
-
-void op_bc1f (void)
-{
- T0 = !!(~GET_FP_COND(env->fpu) & (0x1 << PARAM1));
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-void op_bc1any2f (void)
-{
- T0 = !!(~GET_FP_COND(env->fpu) & (0x3 << PARAM1));
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-void op_bc1any4f (void)
-{
- T0 = !!(~GET_FP_COND(env->fpu) & (0xf << PARAM1));
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_bc1t (void)
-{
- T0 = !!(GET_FP_COND(env->fpu) & (0x1 << PARAM1));
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-void op_bc1any2t (void)
-{
- T0 = !!(GET_FP_COND(env->fpu) & (0x3 << PARAM1));
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-void op_bc1any4t (void)
-{
- T0 = !!(GET_FP_COND(env->fpu) & (0xf << PARAM1));
- DEBUG_FPU_STATE();
- FORCE_RET();
-}
-
-void op_tlbwi (void)
-{
- CALL_FROM_TB0(env->tlb->do_tlbwi);
- FORCE_RET();
-}
-
-void op_tlbwr (void)
-{
- CALL_FROM_TB0(env->tlb->do_tlbwr);
- FORCE_RET();
-}
-
-void op_tlbp (void)
-{
- CALL_FROM_TB0(env->tlb->do_tlbp);
- FORCE_RET();
-}
-
-void op_tlbr (void)
-{
- CALL_FROM_TB0(env->tlb->do_tlbr);
- FORCE_RET();
-}
-
-/* Specials */
-#if defined (CONFIG_USER_ONLY)
-void op_tls_value (void)
-{
- T0 = env->tls_value;
-}
-#endif
-
-void op_pmon (void)
-{
- CALL_FROM_TB1(do_pmon, PARAM1);
- FORCE_RET();
-}
-
-void op_di (void)
-{
- T0 = env->CP0_Status;
- env->CP0_Status = T0 & ~(1 << CP0St_IE);
- CALL_FROM_TB1(cpu_mips_update_irq, env);
- FORCE_RET();
-}
-
-void op_ei (void)
-{
- T0 = env->CP0_Status;
- env->CP0_Status = T0 | (1 << CP0St_IE);
- CALL_FROM_TB1(cpu_mips_update_irq, env);
- FORCE_RET();
-}
-
-void op_trap (void)
-{
- if (T0) {
- CALL_FROM_TB1(do_raise_exception, EXCP_TRAP);
- }
- FORCE_RET();
-}
-
-void op_debug (void)
-{
- CALL_FROM_TB1(do_raise_exception, EXCP_DEBUG);
- FORCE_RET();
-}
-
-void debug_pre_eret (void);
-void debug_post_eret (void);
-void op_eret (void)
-{
- if (loglevel & CPU_LOG_EXEC)
- CALL_FROM_TB0(debug_pre_eret);
- if (env->CP0_Status & (1 << CP0St_ERL)) {
- env->PC[env->current_tc] = env->CP0_ErrorEPC;
- env->CP0_Status &= ~(1 << CP0St_ERL);
- } else {
- env->PC[env->current_tc] = env->CP0_EPC;
- env->CP0_Status &= ~(1 << CP0St_EXL);
- }
- CALL_FROM_TB1(compute_hflags, env);
- if (loglevel & CPU_LOG_EXEC)
- CALL_FROM_TB0(debug_post_eret);
- env->CP0_LLAddr = 1;
- FORCE_RET();
-}
-
-void op_deret (void)
-{
- if (loglevel & CPU_LOG_EXEC)
- CALL_FROM_TB0(debug_pre_eret);
- env->PC[env->current_tc] = env->CP0_DEPC;
- env->hflags &= MIPS_HFLAG_DM;
- CALL_FROM_TB1(compute_hflags, env);
- if (loglevel & CPU_LOG_EXEC)
- CALL_FROM_TB0(debug_post_eret);
- env->CP0_LLAddr = 1;
- FORCE_RET();
-}
-
-void op_rdhwr_cpunum(void)
-{
- if ((env->hflags & MIPS_HFLAG_CP0) ||
- (env->CP0_HWREna & (1 << 0)))
- T0 = env->CP0_EBase & 0x3ff;
- else
- CALL_FROM_TB1(do_raise_exception, EXCP_RI);
- FORCE_RET();
-}
-
-void op_rdhwr_synci_step(void)
-{
- if ((env->hflags & MIPS_HFLAG_CP0) ||
- (env->CP0_HWREna & (1 << 1)))
- T0 = env->SYNCI_Step;
- else
- CALL_FROM_TB1(do_raise_exception, EXCP_RI);
- FORCE_RET();
-}
-
-void op_rdhwr_cc(void)
-{
- if ((env->hflags & MIPS_HFLAG_CP0) ||
- (env->CP0_HWREna & (1 << 2)))
- T0 = env->CP0_Count;
- else
- CALL_FROM_TB1(do_raise_exception, EXCP_RI);
- FORCE_RET();
-}
-
-void op_rdhwr_ccres(void)
-{
- if ((env->hflags & MIPS_HFLAG_CP0) ||
- (env->CP0_HWREna & (1 << 3)))
- T0 = env->CCRes;
- else
- CALL_FROM_TB1(do_raise_exception, EXCP_RI);
- FORCE_RET();
-}
-
-void op_save_state (void)
-{
- env->hflags = PARAM1;
- FORCE_RET();
-}
-
-void op_wait (void)
-{
- env->halted = 1;
- CALL_FROM_TB1(do_raise_exception, EXCP_HLT);
- FORCE_RET();
-}
-
-/* Bitfield operations. */
-void op_ext(void)
-{
- unsigned int pos = PARAM1;
- unsigned int size = PARAM2;
-
- T0 = (int32_t)((T1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0));
- FORCE_RET();
-}
-
-void op_ins(void)
-{
- unsigned int pos = PARAM1;
- unsigned int size = PARAM2;
- target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos;
-
- T0 = (int32_t)((T0 & ~mask) | ((T1 << pos) & mask));
- FORCE_RET();
-}
-
-void op_wsbh(void)
-{
- T0 = (int32_t)(((T1 << 8) & ~0x00FF00FF) | ((T1 >> 8) & 0x00FF00FF));
- FORCE_RET();
-}
-
-#if defined(TARGET_MIPS64)
-void op_dext(void)
-{
- unsigned int pos = PARAM1;
- unsigned int size = PARAM2;
-
- T0 = (T1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
- FORCE_RET();
-}
-
-void op_dins(void)
-{
- unsigned int pos = PARAM1;
- unsigned int size = PARAM2;
- target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;
-
- T0 = (T0 & ~mask) | ((T1 << pos) & mask);
- FORCE_RET();
-}
-
-void op_dsbh(void)
-{
- T0 = ((T1 << 8) & ~0x00FF00FF00FF00FFULL) | ((T1 >> 8) & 0x00FF00FF00FF00FFULL);
- FORCE_RET();
-}
-
-void op_dshd(void)
-{
- T1 = ((T1 << 16) & ~0x0000FFFF0000FFFFULL) | ((T1 >> 16) & 0x0000FFFF0000FFFFULL);
- T0 = (T1 << 32) | (T1 >> 32);
- FORCE_RET();
-}
-#endif
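
The bc1f/bc1t family removed above reduces to one pattern: build a contiguous mask of 1, 2 or 4 FP condition-code bits starting at the cc index carried in PARAM1 and test it against the packed condition field (complemented for the "false" variants). A minimal standalone sketch of that test; the helper names and demo values below are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Any of "count" (1, 2 or 4) FP condition bits set, starting at bit "cc"?
   The mask (1 << count) - 1 yields the 0x1 / 0x3 / 0xf constants used by
   op_bc1t / op_bc1any2t / op_bc1any4t above. */
static int fp_cond_any_set(uint32_t fp_cond, unsigned cc, unsigned count)
{
    uint32_t mask = ((1u << count) - 1) << cc;
    return (fp_cond & mask) != 0;
}

/* The bc1f variants test the complemented condition field instead. */
static int fp_cond_any_clear(uint32_t fp_cond, unsigned cc, unsigned count)
{
    uint32_t mask = ((1u << count) - 1) << cc;
    return (~fp_cond & mask) != 0;
}

int main(void)
{
    uint32_t fp_cond = 0x5;    /* hypothetical packed condition codes */
    printf("%d %d\n", fp_cond_any_set(fp_cond, 0, 2),
           fp_cond_any_clear(fp_cond, 0, 4));
    return 0;
}
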
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 2020e9efe..eae5b7489 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -33,7 +33,6 @@ void do_raise_exception_err (uint32_t exception, int error_code)
#endif
env->exception_index = exception;
env->error_code = error_code;
- T0 = 0;
cpu_loop_exit();
}
@@ -65,248 +64,539 @@ void do_restore_state (void *pc_ptr)
}
}
-void do_clo (void)
+target_ulong do_clo (target_ulong t0)
{
- T0 = clo32(T0);
+ return clo32(t0);
}
-void do_clz (void)
+target_ulong do_clz (target_ulong t0)
{
- T0 = clz32(T0);
+ return clz32(t0);
}
#if defined(TARGET_MIPS64)
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-/* Those might call libgcc functions. */
-void do_dsll (void)
+target_ulong do_dclo (target_ulong t0)
{
- T0 = T0 << T1;
+ return clo64(t0);
}
-void do_dsll32 (void)
+target_ulong do_dclz (target_ulong t0)
{
- T0 = T0 << (T1 + 32);
+ return clz64(t0);
}
+#endif /* TARGET_MIPS64 */
-void do_dsra (void)
+/* 64 bits arithmetic for 32 bits hosts */
+static always_inline uint64_t get_HILO (void)
{
- T0 = (int64_t)T0 >> T1;
+ return ((uint64_t)(env->HI[env->current_tc][0]) << 32) | (uint32_t)env->LO[env->current_tc][0];
}
-void do_dsra32 (void)
+static always_inline void set_HILO (uint64_t HILO)
{
- T0 = (int64_t)T0 >> (T1 + 32);
+ env->LO[env->current_tc][0] = (int32_t)HILO;
+ env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}
-void do_dsrl (void)
+static always_inline void set_HIT0_LO (target_ulong t0, uint64_t HILO)
{
- T0 = T0 >> T1;
+ env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
+ t0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}
-void do_dsrl32 (void)
+static always_inline void set_HI_LOT0 (target_ulong t0, uint64_t HILO)
{
- T0 = T0 >> (T1 + 32);
+ t0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
+ env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
}
-void do_drotr (void)
+#if TARGET_LONG_BITS > HOST_LONG_BITS
+void do_madd (target_ulong t0, target_ulong t1)
{
- target_ulong tmp;
+ int64_t tmp;
- if (T1) {
- tmp = T0 << (0x40 - T1);
- T0 = (T0 >> T1) | tmp;
- }
+ tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
+ set_HILO((int64_t)get_HILO() + tmp);
}
-void do_drotr32 (void)
+void do_maddu (target_ulong t0, target_ulong t1)
{
- target_ulong tmp;
-
- tmp = T0 << (0x40 - (32 + T1));
- T0 = (T0 >> (32 + T1)) | tmp;
-}
+ uint64_t tmp;
-void do_dsllv (void)
-{
- T0 = T1 << (T0 & 0x3F);
+ tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
+ set_HILO(get_HILO() + tmp);
}
-void do_dsrav (void)
+void do_msub (target_ulong t0, target_ulong t1)
{
- T0 = (int64_t)T1 >> (T0 & 0x3F);
-}
+ int64_t tmp;
-void do_dsrlv (void)
-{
- T0 = T1 >> (T0 & 0x3F);
+ tmp = ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
+ set_HILO((int64_t)get_HILO() - tmp);
}
-void do_drotrv (void)
+void do_msubu (target_ulong t0, target_ulong t1)
{
- target_ulong tmp;
+ uint64_t tmp;
- T0 &= 0x3F;
- if (T0) {
- tmp = T1 << (0x40 - T0);
- T0 = (T1 >> T0) | tmp;
- } else
- T0 = T1;
+ tmp = ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
+ set_HILO(get_HILO() - tmp);
}
-
#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
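
get_HILO/set_HILO above exist so that the multiply-accumulate helpers can run on hosts whose long is narrower than target_ulong: HI and LO of the current thread context are folded into a single uint64_t, accumulated with ordinary 64-bit host arithmetic, and split back. A self-contained sketch of the same bookkeeping (function names are mine):

#include <stdint.h>

/* Fold a HI/LO pair into one 64-bit accumulator and back. */
static uint64_t pack_hilo(uint32_t hi, uint32_t lo)
{
    return ((uint64_t)hi << 32) | lo;
}

static void unpack_hilo(uint64_t hilo, uint32_t *hi, uint32_t *lo)
{
    *lo = (uint32_t)hilo;
    *hi = (uint32_t)(hilo >> 32);
}

/* madd: add a signed 32x32->64 product into HI:LO, as do_madd does above. */
static void madd32(uint32_t *hi, uint32_t *lo, int32_t rs, int32_t rt)
{
    uint64_t acc = pack_hilo(*hi, *lo);
    acc = (uint64_t)((int64_t)acc + (int64_t)rs * rt);
    unpack_hilo(acc, hi, lo);
}
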
-void do_dclo (void)
+/* Multiplication variants of the vr54xx. */
+target_ulong do_muls (target_ulong t0, target_ulong t1)
{
- T0 = clo64(T0);
-}
+ set_HI_LOT0(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
-void do_dclz (void)
-{
- T0 = clz64(T0);
+ return t0;
}
-#endif /* TARGET_MIPS64 */
-
-/* 64 bits arithmetic for 32 bits hosts */
-#if TARGET_LONG_BITS > HOST_LONG_BITS
-static always_inline uint64_t get_HILO (void)
+target_ulong do_mulsu (target_ulong t0, target_ulong t1)
{
- return (env->HI[env->current_tc][0] << 32) | (uint32_t)env->LO[env->current_tc][0];
-}
+ set_HI_LOT0(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
-static always_inline void set_HILO (uint64_t HILO)
-{
- env->LO[env->current_tc][0] = (int32_t)HILO;
- env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
+ return t0;
}
-static always_inline void set_HIT0_LO (uint64_t HILO)
+target_ulong do_macc (target_ulong t0, target_ulong t1)
{
- env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
- T0 = env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
+ set_HI_LOT0(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
+
+ return t0;
}
-static always_inline void set_HI_LOT0 (uint64_t HILO)
+target_ulong do_macchi (target_ulong t0, target_ulong t1)
{
- T0 = env->LO[env->current_tc][0] = (int32_t)(HILO & 0xFFFFFFFF);
- env->HI[env->current_tc][0] = (int32_t)(HILO >> 32);
+ set_HIT0_LO(t0, ((int64_t)get_HILO()) + ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
+
+ return t0;
}
-void do_mult (void)
+target_ulong do_maccu (target_ulong t0, target_ulong t1)
{
- set_HILO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
+ set_HI_LOT0(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
+
+ return t0;
}
-void do_multu (void)
+target_ulong do_macchiu (target_ulong t0, target_ulong t1)
{
- set_HILO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
+ set_HIT0_LO(t0, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
+
+ return t0;
}
-void do_madd (void)
+target_ulong do_msac (target_ulong t0, target_ulong t1)
{
- int64_t tmp;
+ set_HI_LOT0(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
- tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
- set_HILO((int64_t)get_HILO() + tmp);
+ return t0;
}
-void do_maddu (void)
+target_ulong do_msachi (target_ulong t0, target_ulong t1)
{
- uint64_t tmp;
+ set_HIT0_LO(t0, ((int64_t)get_HILO()) - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
- tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
- set_HILO(get_HILO() + tmp);
+ return t0;
}
-void do_msub (void)
+target_ulong do_msacu (target_ulong t0, target_ulong t1)
{
- int64_t tmp;
+ set_HI_LOT0(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
- tmp = ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
- set_HILO((int64_t)get_HILO() - tmp);
+ return t0;
}
-void do_msubu (void)
+target_ulong do_msachiu (target_ulong t0, target_ulong t1)
{
- uint64_t tmp;
+ set_HIT0_LO(t0, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
- tmp = ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
- set_HILO(get_HILO() - tmp);
+ return t0;
}
-/* Multiplication variants of the vr54xx. */
-void do_muls (void)
+target_ulong do_mulhi (target_ulong t0, target_ulong t1)
{
- set_HI_LOT0(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
+ set_HIT0_LO(t0, (int64_t)(int32_t)t0 * (int64_t)(int32_t)t1);
+
+ return t0;
}
-void do_mulsu (void)
+target_ulong do_mulhiu (target_ulong t0, target_ulong t1)
{
- set_HI_LOT0(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
+ set_HIT0_LO(t0, (uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1);
+
+ return t0;
}
-void do_macc (void)
+target_ulong do_mulshi (target_ulong t0, target_ulong t1)
{
- set_HI_LOT0(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
+ set_HIT0_LO(t0, 0 - ((int64_t)(int32_t)t0 * (int64_t)(int32_t)t1));
+
+ return t0;
}
-void do_macchi (void)
+target_ulong do_mulshiu (target_ulong t0, target_ulong t1)
{
- set_HIT0_LO(((int64_t)get_HILO()) + ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
+ set_HIT0_LO(t0, 0 - ((uint64_t)(uint32_t)t0 * (uint64_t)(uint32_t)t1));
+
+ return t0;
}
-void do_maccu (void)
+#ifdef TARGET_MIPS64
+void do_dmult (target_ulong t0, target_ulong t1)
{
- set_HI_LOT0(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
+ muls64(&(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), t0, t1);
}
-void do_macchiu (void)
+void do_dmultu (target_ulong t0, target_ulong t1)
{
- set_HIT0_LO(((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
+ mulu64(&(env->LO[env->current_tc][0]), &(env->HI[env->current_tc][0]), t0, t1);
}
+#endif
-void do_msac (void)
+#ifdef TARGET_WORDS_BIGENDIAN
+#define GET_LMASK(v) ((v) & 3)
+#define GET_OFFSET(addr, offset) (addr + (offset))
+#else
+#define GET_LMASK(v) (((v) & 3) ^ 3)
+#define GET_OFFSET(addr, offset) (addr - (offset))
+#endif
+
+target_ulong do_lwl(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HI_LOT0(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
+ target_ulong tmp;
+
+#ifdef CONFIG_USER_ONLY
+#define ldfun ldub_raw
+#else
+ int (*ldfun)(target_ulong);
+
+ switch (mem_idx)
+ {
+ case 0: ldfun = ldub_kernel; break;
+ case 1: ldfun = ldub_super; break;
+ default:
+ case 2: ldfun = ldub_user; break;
+ }
+#endif
+ tmp = ldfun(t0);
+ t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
+
+ if (GET_LMASK(t0) <= 2) {
+ tmp = ldfun(GET_OFFSET(t0, 1));
+ t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
+ }
+
+ if (GET_LMASK(t0) <= 1) {
+ tmp = ldfun(GET_OFFSET(t0, 2));
+ t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
+ }
+
+ if (GET_LMASK(t0) == 0) {
+ tmp = ldfun(GET_OFFSET(t0, 3));
+ t1 = (t1 & 0xFFFFFF00) | tmp;
+ }
+ return (int32_t)t1;
}
-void do_msachi (void)
+target_ulong do_lwr(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HIT0_LO(((int64_t)get_HILO()) - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
+ target_ulong tmp;
+
+#ifdef CONFIG_USER_ONLY
+#define ldfun ldub_raw
+#else
+ int (*ldfun)(target_ulong);
+
+ switch (mem_idx)
+ {
+ case 0: ldfun = ldub_kernel; break;
+ case 1: ldfun = ldub_super; break;
+ default:
+ case 2: ldfun = ldub_user; break;
+ }
+#endif
+ tmp = ldfun(t0);
+ t1 = (t1 & 0xFFFFFF00) | tmp;
+
+ if (GET_LMASK(t0) >= 1) {
+ tmp = ldfun(GET_OFFSET(t0, -1));
+ t1 = (t1 & 0xFFFF00FF) | (tmp << 8);
+ }
+
+ if (GET_LMASK(t0) >= 2) {
+ tmp = ldfun(GET_OFFSET(t0, -2));
+ t1 = (t1 & 0xFF00FFFF) | (tmp << 16);
+ }
+
+ if (GET_LMASK(t0) == 3) {
+ tmp = ldfun(GET_OFFSET(t0, -3));
+ t1 = (t1 & 0x00FFFFFF) | (tmp << 24);
+ }
+ return (int32_t)t1;
}
-void do_msacu (void)
+void do_swl(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HI_LOT0(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
+#ifdef CONFIG_USER_ONLY
+#define stfun stb_raw
+#else
+ void (*stfun)(target_ulong, int);
+
+ switch (mem_idx)
+ {
+ case 0: stfun = stb_kernel; break;
+ case 1: stfun = stb_super; break;
+ default:
+ case 2: stfun = stb_user; break;
+ }
+#endif
+ stfun(t0, (uint8_t)(t1 >> 24));
+
+ if (GET_LMASK(t0) <= 2)
+ stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 16));
+
+ if (GET_LMASK(t0) <= 1)
+ stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 8));
+
+ if (GET_LMASK(t0) == 0)
+ stfun(GET_OFFSET(t0, 3), (uint8_t)t1);
}
-void do_msachiu (void)
+void do_swr(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HIT0_LO(((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
+#ifdef CONFIG_USER_ONLY
+#define stfun stb_raw
+#else
+ void (*stfun)(target_ulong, int);
+
+ switch (mem_idx)
+ {
+ case 0: stfun = stb_kernel; break;
+ case 1: stfun = stb_super; break;
+ default:
+ case 2: stfun = stb_user; break;
+ }
+#endif
+ stfun(t0, (uint8_t)t1);
+
+ if (GET_LMASK(t0) >= 1)
+ stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));
+
+ if (GET_LMASK(t0) >= 2)
+ stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));
+
+ if (GET_LMASK(t0) == 3)
+ stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
}
-void do_mulhi (void)
+#if defined(TARGET_MIPS64)
+/* "half" load and stores. We must do the memory access inline,
+ or fault handling won't work. */
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define GET_LMASK64(v) ((v) & 7)
+#else
+#define GET_LMASK64(v) (((v) & 7) ^ 7)
+#endif
+
+target_ulong do_ldl(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HIT0_LO((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1);
+ uint64_t tmp;
+
+#ifdef CONFIG_USER_ONLY
+#define ldfun ldub_raw
+#else
+ int (*ldfun)(target_ulong);
+
+ switch (mem_idx)
+ {
+ case 0: ldfun = ldub_kernel; break;
+ case 1: ldfun = ldub_super; break;
+ default:
+ case 2: ldfun = ldub_user; break;
+ }
+#endif
+ tmp = ldfun(t0);
+ t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
+
+ if (GET_LMASK64(t0) <= 6) {
+ tmp = ldfun(GET_OFFSET(t0, 1));
+ t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
+ }
+
+ if (GET_LMASK64(t0) <= 5) {
+ tmp = ldfun(GET_OFFSET(t0, 2));
+ t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
+ }
+
+ if (GET_LMASK64(t0) <= 4) {
+ tmp = ldfun(GET_OFFSET(t0, 3));
+ t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
+ }
+
+ if (GET_LMASK64(t0) <= 3) {
+ tmp = ldfun(GET_OFFSET(t0, 4));
+ t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
+ }
+
+ if (GET_LMASK64(t0) <= 2) {
+ tmp = ldfun(GET_OFFSET(t0, 5));
+ t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
+ }
+
+ if (GET_LMASK64(t0) <= 1) {
+ tmp = ldfun(GET_OFFSET(t0, 6));
+ t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
+ }
+
+ if (GET_LMASK64(t0) == 0) {
+ tmp = ldfun(GET_OFFSET(t0, 7));
+ t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
+ }
+
+ return t1;
}
-void do_mulhiu (void)
+target_ulong do_ldr(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HIT0_LO((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1);
+ uint64_t tmp;
+
+#ifdef CONFIG_USER_ONLY
+#define ldfun ldub_raw
+#else
+ int (*ldfun)(target_ulong);
+
+ switch (mem_idx)
+ {
+ case 0: ldfun = ldub_kernel; break;
+ case 1: ldfun = ldub_super; break;
+ default:
+ case 2: ldfun = ldub_user; break;
+ }
+#endif
+ tmp = ldfun(t0);
+ t1 = (t1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
+
+ if (GET_LMASK64(t0) >= 1) {
+ tmp = ldfun(GET_OFFSET(t0, -1));
+ t1 = (t1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
+ }
+
+ if (GET_LMASK64(t0) >= 2) {
+ tmp = ldfun(GET_OFFSET(t0, -2));
+ t1 = (t1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
+ }
+
+ if (GET_LMASK64(t0) >= 3) {
+ tmp = ldfun(GET_OFFSET(t0, -3));
+ t1 = (t1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
+ }
+
+ if (GET_LMASK64(t0) >= 4) {
+ tmp = ldfun(GET_OFFSET(t0, -4));
+ t1 = (t1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
+ }
+
+ if (GET_LMASK64(t0) >= 5) {
+ tmp = ldfun(GET_OFFSET(t0, -5));
+ t1 = (t1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
+ }
+
+ if (GET_LMASK64(t0) >= 6) {
+ tmp = ldfun(GET_OFFSET(t0, -6));
+ t1 = (t1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
+ }
+
+ if (GET_LMASK64(t0) == 7) {
+ tmp = ldfun(GET_OFFSET(t0, -7));
+ t1 = (t1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
+ }
+
+ return t1;
}
-void do_mulshi (void)
+void do_sdl(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HIT0_LO(0 - ((int64_t)(int32_t)T0 * (int64_t)(int32_t)T1));
+#ifdef CONFIG_USER_ONLY
+#define stfun stb_raw
+#else
+ void (*stfun)(target_ulong, int);
+
+ switch (mem_idx)
+ {
+ case 0: stfun = stb_kernel; break;
+ case 1: stfun = stb_super; break;
+ default:
+ case 2: stfun = stb_user; break;
+ }
+#endif
+ stfun(t0, (uint8_t)(t1 >> 56));
+
+ if (GET_LMASK64(t0) <= 6)
+ stfun(GET_OFFSET(t0, 1), (uint8_t)(t1 >> 48));
+
+ if (GET_LMASK64(t0) <= 5)
+ stfun(GET_OFFSET(t0, 2), (uint8_t)(t1 >> 40));
+
+ if (GET_LMASK64(t0) <= 4)
+ stfun(GET_OFFSET(t0, 3), (uint8_t)(t1 >> 32));
+
+ if (GET_LMASK64(t0) <= 3)
+ stfun(GET_OFFSET(t0, 4), (uint8_t)(t1 >> 24));
+
+ if (GET_LMASK64(t0) <= 2)
+ stfun(GET_OFFSET(t0, 5), (uint8_t)(t1 >> 16));
+
+ if (GET_LMASK64(t0) <= 1)
+ stfun(GET_OFFSET(t0, 6), (uint8_t)(t1 >> 8));
+
+ if (GET_LMASK64(t0) <= 0)
+ stfun(GET_OFFSET(t0, 7), (uint8_t)t1);
}
-void do_mulshiu (void)
+void do_sdr(target_ulong t0, target_ulong t1, int mem_idx)
{
- set_HIT0_LO(0 - ((uint64_t)(uint32_t)T0 * (uint64_t)(uint32_t)T1));
+#ifdef CONFIG_USER_ONLY
+#define stfun stb_raw
+#else
+ void (*stfun)(target_ulong, int);
+
+ switch (mem_idx)
+ {
+ case 0: stfun = stb_kernel; break;
+ case 1: stfun = stb_super; break;
+ default:
+ case 2: stfun = stb_user; break;
+ }
+#endif
+ stfun(t0, (uint8_t)t1);
+
+ if (GET_LMASK64(t0) >= 1)
+ stfun(GET_OFFSET(t0, -1), (uint8_t)(t1 >> 8));
+
+ if (GET_LMASK64(t0) >= 2)
+ stfun(GET_OFFSET(t0, -2), (uint8_t)(t1 >> 16));
+
+ if (GET_LMASK64(t0) >= 3)
+ stfun(GET_OFFSET(t0, -3), (uint8_t)(t1 >> 24));
+
+ if (GET_LMASK64(t0) >= 4)
+ stfun(GET_OFFSET(t0, -4), (uint8_t)(t1 >> 32));
+
+ if (GET_LMASK64(t0) >= 5)
+ stfun(GET_OFFSET(t0, -5), (uint8_t)(t1 >> 40));
+
+ if (GET_LMASK64(t0) >= 6)
+ stfun(GET_OFFSET(t0, -6), (uint8_t)(t1 >> 48));
+
+ if (GET_LMASK64(t0) == 7)
+ stfun(GET_OFFSET(t0, -7), (uint8_t)(t1 >> 56));
}
-#endif /* TARGET_LONG_BITS > HOST_LONG_BITS */
+#endif /* TARGET_MIPS64 */
-#if defined(CONFIG_USER_ONLY)
+#ifdef CONFIG_USER_ONLY
void do_mfc0_random (void)
{
cpu_abort(env, "mfc0 random\n");
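
do_lwl/do_lwr above (and their 64-bit ldl/ldr counterparts) implement the MIPS unaligned loads by reading the word one byte at a time and merging each byte into the destination register, so that a fault on any byte is delivered precisely. A standalone model of the big-endian LWL merge; the function name and the sample memory contents are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Big-endian LWL: starting at "addr", pull 4 - (addr & 3) bytes from memory
   into the most-significant end of rt, leaving the low bytes of rt alone.
   This mirrors the GET_LMASK(t0) <= n chain in do_lwl above. */
static uint32_t lwl_be(const uint8_t *mem, uint32_t addr, uint32_t rt)
{
    unsigned lmask = addr & 3;
    for (unsigned i = 0; lmask + i <= 3; i++) {
        unsigned shift = 24 - 8 * i;
        rt = (rt & ~(0xffu << shift)) | ((uint32_t)mem[addr + i] << shift);
    }
    return rt;
}

int main(void)
{
    uint8_t mem[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
    printf("%08x\n", lwl_be(mem, 2, 0xAABBCCDD));   /* 3344ccdd */
    return 0;
}
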
@@ -360,14 +650,665 @@ void cpu_mips_tlb_flush (CPUState *env, int flush_global)
#else
/* CP0 helpers */
-void do_mfc0_random (void)
+target_ulong do_mfc0_mvpcontrol (void)
{
- T0 = (int32_t)cpu_mips_get_random(env);
+ return env->mvp->CP0_MVPControl;
}
-void do_mfc0_count (void)
+target_ulong do_mfc0_mvpconf0 (void)
+{
+ return env->mvp->CP0_MVPConf0;
+}
+
+target_ulong do_mfc0_mvpconf1 (void)
+{
+ return env->mvp->CP0_MVPConf1;
+}
+
+target_ulong do_mfc0_random (void)
+{
+ return (int32_t)cpu_mips_get_random(env);
+}
+
+target_ulong do_mfc0_tcstatus (void)
+{
+ return env->CP0_TCStatus[env->current_tc];
+}
+
+target_ulong do_mftc0_tcstatus(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->CP0_TCStatus[other_tc];
+}
+
+target_ulong do_mfc0_tcbind (void)
+{
+ return env->CP0_TCBind[env->current_tc];
+}
+
+target_ulong do_mftc0_tcbind(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->CP0_TCBind[other_tc];
+}
+
+target_ulong do_mfc0_tcrestart (void)
+{
+ return env->PC[env->current_tc];
+}
+
+target_ulong do_mftc0_tcrestart(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->PC[other_tc];
+}
+
+target_ulong do_mfc0_tchalt (void)
+{
+ return env->CP0_TCHalt[env->current_tc];
+}
+
+target_ulong do_mftc0_tchalt(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->CP0_TCHalt[other_tc];
+}
+
+target_ulong do_mfc0_tccontext (void)
+{
+ return env->CP0_TCContext[env->current_tc];
+}
+
+target_ulong do_mftc0_tccontext(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->CP0_TCContext[other_tc];
+}
+
+target_ulong do_mfc0_tcschedule (void)
+{
+ return env->CP0_TCSchedule[env->current_tc];
+}
+
+target_ulong do_mftc0_tcschedule(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->CP0_TCSchedule[other_tc];
+}
+
+target_ulong do_mfc0_tcschefback (void)
+{
+ return env->CP0_TCScheFBack[env->current_tc];
+}
+
+target_ulong do_mftc0_tcschefback(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->CP0_TCScheFBack[other_tc];
+}
+
+target_ulong do_mfc0_count (void)
+{
+ return (int32_t)cpu_mips_get_count(env);
+}
+
+target_ulong do_mftc0_entryhi(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return (env->CP0_EntryHi & ~0xff) | (env->CP0_TCStatus[other_tc] & 0xff);
+}
+
+target_ulong do_mftc0_status(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t tcstatus = env->CP0_TCStatus[other_tc];
+ target_ulong t0;
+
+ t0 = env->CP0_Status & ~0xf1000018;
+ t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
+ t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
+ t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
+
+ return t0;
+}
+
+target_ulong do_mfc0_lladdr (void)
+{
+ return (int32_t)env->CP0_LLAddr >> 4;
+}
+
+target_ulong do_mfc0_watchlo (uint32_t sel)
+{
+ return (int32_t)env->CP0_WatchLo[sel];
+}
+
+target_ulong do_mfc0_watchhi (uint32_t sel)
+{
+ return env->CP0_WatchHi[sel];
+}
+
+target_ulong do_mfc0_debug (void)
+{
+ target_ulong t0 = env->CP0_Debug;
+ if (env->hflags & MIPS_HFLAG_DM)
+ t0 |= 1 << CP0DB_DM;
+
+ return t0;
+}
+
+target_ulong do_mftc0_debug(void)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (env->CP0_Debug_tcstatus[other_tc] &
+ ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong do_dmfc0_tcrestart (void)
+{
+ return env->PC[env->current_tc];
+}
+
+target_ulong do_dmfc0_tchalt (void)
+{
+ return env->CP0_TCHalt[env->current_tc];
+}
+
+target_ulong do_dmfc0_tccontext (void)
+{
+ return env->CP0_TCContext[env->current_tc];
+}
+
+target_ulong do_dmfc0_tcschedule (void)
+{
+ return env->CP0_TCSchedule[env->current_tc];
+}
+
+target_ulong do_dmfc0_tcschefback (void)
+{
+ return env->CP0_TCScheFBack[env->current_tc];
+}
+
+target_ulong do_dmfc0_lladdr (void)
+{
+ return env->CP0_LLAddr >> 4;
+}
+
+target_ulong do_dmfc0_watchlo (uint32_t sel)
+{
+ return env->CP0_WatchLo[sel];
+}
+#endif /* TARGET_MIPS64 */
+
+void do_mtc0_index (target_ulong t0)
+{
+ int num = 1;
+ unsigned int tmp = env->tlb->nb_tlb;
+
+ do {
+ tmp >>= 1;
+ num <<= 1;
+ } while (tmp);
+ env->CP0_Index = (env->CP0_Index & 0x80000000) | (t0 & (num - 1));
+}
+
+void do_mtc0_mvpcontrol (target_ulong t0)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
+ mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
+ (1 << CP0MVPCo_EVP);
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (1 << CP0MVPCo_STLB);
+ newval = (env->mvp->CP0_MVPControl & ~mask) | (t0 & mask);
+
+ // TODO: Enable/disable shared TLB, enable/disable VPEs.
+
+ env->mvp->CP0_MVPControl = newval;
+}
+
+void do_mtc0_vpecontrol (target_ulong t0)
+{
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (env->CP0_VPEControl & ~mask) | (t0 & mask);
+
+ /* Yield scheduler intercept not implemented. */
+ /* Gating storage scheduler intercept not implemented. */
+
+ // TODO: Enable/disable TCs.
+
+ env->CP0_VPEControl = newval;
+}
+
+void do_mtc0_vpeconf0 (target_ulong t0)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
+ mask |= (0xff << CP0VPEC0_XTC);
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ }
+ newval = (env->CP0_VPEConf0 & ~mask) | (t0 & mask);
+
+ // TODO: TC exclusive handling due to ERL/EXL.
+
+ env->CP0_VPEConf0 = newval;
+}
+
+void do_mtc0_vpeconf1 (target_ulong t0)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
+ (0xff << CP0VPEC1_NCP1);
+ newval = (env->CP0_VPEConf1 & ~mask) | (t0 & mask);
+
+ /* UDI not implemented. */
+ /* CP2 not implemented. */
+
+ // TODO: Handle FPU (CP1) binding.
+
+ env->CP0_VPEConf1 = newval;
+}
+
+void do_mtc0_yqmask (target_ulong t0)
+{
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_YQMask = 0x00000000;
+}
+
+void do_mtc0_vpeopt (target_ulong t0)
{
- T0 = (int32_t)cpu_mips_get_count(env);
+ env->CP0_VPEOpt = t0 & 0x0000ffff;
+}
+
+void do_mtc0_entrylo0 (target_ulong t0)
+{
+ /* Large physaddr (PABITS) not implemented */
+ /* 1k pages not implemented */
+ env->CP0_EntryLo0 = t0 & 0x3FFFFFFF;
+}
+
+void do_mtc0_tcstatus (target_ulong t0)
+{
+ uint32_t mask = env->CP0_TCStatus_rw_bitmask;
+ uint32_t newval;
+
+ newval = (env->CP0_TCStatus[env->current_tc] & ~mask) | (t0 & mask);
+
+ // TODO: Sync with CP0_Status.
+
+ env->CP0_TCStatus[env->current_tc] = newval;
+}
+
+void do_mttc0_tcstatus (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ // TODO: Sync with CP0_Status.
+
+ env->CP0_TCStatus[other_tc] = t0;
+}
+
+void do_mtc0_tcbind (target_ulong t0)
+{
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (1 << CP0TCBd_CurVPE);
+ newval = (env->CP0_TCBind[env->current_tc] & ~mask) | (t0 & mask);
+ env->CP0_TCBind[env->current_tc] = newval;
+}
+
+void do_mttc0_tcbind (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (1 << CP0TCBd_CurVPE);
+ newval = (env->CP0_TCBind[other_tc] & ~mask) | (t0 & mask);
+ env->CP0_TCBind[other_tc] = newval;
+}
+
+void do_mtc0_tcrestart (target_ulong t0)
+{
+ env->PC[env->current_tc] = t0;
+ env->CP0_TCStatus[env->current_tc] &= ~(1 << CP0TCSt_TDS);
+ env->CP0_LLAddr = 0ULL;
+ /* MIPS16 not implemented. */
+}
+
+void do_mttc0_tcrestart (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->PC[other_tc] = t0;
+ env->CP0_TCStatus[other_tc] &= ~(1 << CP0TCSt_TDS);
+ env->CP0_LLAddr = 0ULL;
+ /* MIPS16 not implemented. */
+}
+
+void do_mtc0_tchalt (target_ulong t0)
+{
+ env->CP0_TCHalt[env->current_tc] = t0 & 0x1;
+
+ // TODO: Halt TC / Restart (if allocated+active) TC.
+}
+
+void do_mttc0_tchalt (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ // TODO: Halt TC / Restart (if allocated+active) TC.
+
+ env->CP0_TCHalt[other_tc] = t0;
+}
+
+void do_mtc0_tccontext (target_ulong t0)
+{
+ env->CP0_TCContext[env->current_tc] = t0;
+}
+
+void do_mttc0_tccontext (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->CP0_TCContext[other_tc] = t0;
+}
+
+void do_mtc0_tcschedule (target_ulong t0)
+{
+ env->CP0_TCSchedule[env->current_tc] = t0;
+}
+
+void do_mttc0_tcschedule (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->CP0_TCSchedule[other_tc] = t0;
+}
+
+void do_mtc0_tcschefback (target_ulong t0)
+{
+ env->CP0_TCScheFBack[env->current_tc] = t0;
+}
+
+void do_mttc0_tcschefback (target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->CP0_TCScheFBack[other_tc] = t0;
+}
+
+void do_mtc0_entrylo1 (target_ulong t0)
+{
+ /* Large physaddr (PABITS) not implemented */
+ /* 1k pages not implemented */
+ env->CP0_EntryLo1 = t0 & 0x3FFFFFFF;
+}
+
+void do_mtc0_context (target_ulong t0)
+{
+ env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (t0 & ~0x007FFFFF);
+}
+
+void do_mtc0_pagemask (target_ulong t0)
+{
+ /* 1k pages not implemented */
+ env->CP0_PageMask = t0 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
+}
+
+void do_mtc0_pagegrain (target_ulong t0)
+{
+ /* SmartMIPS not implemented */
+ /* Large physaddr (PABITS) not implemented */
+ /* 1k pages not implemented */
+ env->CP0_PageGrain = 0;
+}
+
+void do_mtc0_wired (target_ulong t0)
+{
+ env->CP0_Wired = t0 % env->tlb->nb_tlb;
+}
+
+void do_mtc0_srsconf0 (target_ulong t0)
+{
+ env->CP0_SRSConf0 |= t0 & env->CP0_SRSConf0_rw_bitmask;
+}
+
+void do_mtc0_srsconf1 (target_ulong t0)
+{
+ env->CP0_SRSConf1 |= t0 & env->CP0_SRSConf1_rw_bitmask;
+}
+
+void do_mtc0_srsconf2 (target_ulong t0)
+{
+ env->CP0_SRSConf2 |= t0 & env->CP0_SRSConf2_rw_bitmask;
+}
+
+void do_mtc0_srsconf3 (target_ulong t0)
+{
+ env->CP0_SRSConf3 |= t0 & env->CP0_SRSConf3_rw_bitmask;
+}
+
+void do_mtc0_srsconf4 (target_ulong t0)
+{
+ env->CP0_SRSConf4 |= t0 & env->CP0_SRSConf4_rw_bitmask;
+}
+
+void do_mtc0_hwrena (target_ulong t0)
+{
+ env->CP0_HWREna = t0 & 0x0000000F;
+}
+
+void do_mtc0_count (target_ulong t0)
+{
+ cpu_mips_store_count(env, t0);
+}
+
+void do_mtc0_entryhi (target_ulong t0)
+{
+ target_ulong old, val;
+
+ /* 1k pages not implemented */
+ val = t0 & ((TARGET_PAGE_MASK << 1) | 0xFF);
+#if defined(TARGET_MIPS64)
+ val &= env->SEGMask;
+#endif
+ old = env->CP0_EntryHi;
+ env->CP0_EntryHi = val;
+ if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+ uint32_t tcst = env->CP0_TCStatus[env->current_tc] & ~0xff;
+ env->CP0_TCStatus[env->current_tc] = tcst | (val & 0xff);
+ }
+ /* If the ASID changes, flush qemu's TLB. */
+ if ((old & 0xFF) != (val & 0xFF))
+ cpu_mips_tlb_flush(env, 1);
+}
+
+void do_mttc0_entryhi(target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (t0 & ~0xff);
+ env->CP0_TCStatus[other_tc] = (env->CP0_TCStatus[other_tc] & ~0xff) | (t0 & 0xff);
+}
+
+void do_mtc0_compare (target_ulong t0)
+{
+ cpu_mips_store_compare(env, t0);
+}
+
+void do_mtc0_status (target_ulong t0)
+{
+ uint32_t val, old;
+ uint32_t mask = env->CP0_Status_rw_bitmask;
+
+ val = t0 & mask;
+ old = env->CP0_Status;
+ env->CP0_Status = (env->CP0_Status & ~mask) | val;
+ compute_hflags(env);
+ if (loglevel & CPU_LOG_EXEC)
+ do_mtc0_status_debug(old, val);
+ cpu_mips_update_irq(env);
+}
+
+void do_mttc0_status(target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t tcstatus = env->CP0_TCStatus[other_tc];
+
+ env->CP0_Status = t0 & ~0xf1000018;
+ tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (t0 & (0xf << CP0St_CU0));
+ tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((t0 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
+ tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((t0 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
+ env->CP0_TCStatus[other_tc] = tcstatus;
+}
+
+void do_mtc0_intctl (target_ulong t0)
+{
+ /* vectored interrupts not implemented, no performance counters. */
+ env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (t0 & 0x000002e0);
+}
+
+void do_mtc0_srsctl (target_ulong t0)
+{
+ uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
+ env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (t0 & mask);
+}
+
+void do_mtc0_cause (target_ulong t0)
+{
+ uint32_t mask = 0x00C00300;
+ uint32_t old = env->CP0_Cause;
+
+ if (env->insn_flags & ISA_MIPS32R2)
+ mask |= 1 << CP0Ca_DC;
+
+ env->CP0_Cause = (env->CP0_Cause & ~mask) | (t0 & mask);
+
+ if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
+ if (env->CP0_Cause & (1 << CP0Ca_DC))
+ cpu_mips_stop_count(env);
+ else
+ cpu_mips_start_count(env);
+ }
+
+ /* Handle the software interrupt as a hardware one, as they
+ are very similar */
+ if (t0 & CP0Ca_IP_mask) {
+ cpu_mips_update_irq(env);
+ }
+}
+
+void do_mtc0_ebase (target_ulong t0)
+{
+ /* vectored interrupts not implemented */
+ /* Multi-CPU not implemented */
+ env->CP0_EBase = 0x80000000 | (t0 & 0x3FFFF000);
+}
+
+void do_mtc0_config0 (target_ulong t0)
+{
+ env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (t0 & 0x00000007);
+}
+
+void do_mtc0_config2 (target_ulong t0)
+{
+ /* tertiary/secondary caches not implemented */
+ env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
+}
+
+void do_mtc0_watchlo (target_ulong t0, uint32_t sel)
+{
+ /* Watch exceptions for instructions, data loads, data stores
+ not implemented. */
+ env->CP0_WatchLo[sel] = (t0 & ~0x7);
+}
+
+void do_mtc0_watchhi (target_ulong t0, uint32_t sel)
+{
+ env->CP0_WatchHi[sel] = (t0 & 0x40FF0FF8);
+ env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & t0 & 0x7);
+}
+
+void do_mtc0_xcontext (target_ulong t0)
+{
+ target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
+ env->CP0_XContext = (env->CP0_XContext & mask) | (t0 & ~mask);
+}
+
+void do_mtc0_framemask (target_ulong t0)
+{
+ env->CP0_Framemask = t0; /* XXX */
+}
+
+void do_mtc0_debug (target_ulong t0)
+{
+ env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (t0 & 0x13300120);
+ if (t0 & (1 << CP0DB_DM))
+ env->hflags |= MIPS_HFLAG_DM;
+ else
+ env->hflags &= ~MIPS_HFLAG_DM;
+}
+
+void do_mttc0_debug(target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ env->CP0_Debug_tcstatus[other_tc] = t0 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
+ env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (t0 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+void do_mtc0_performance0 (target_ulong t0)
+{
+ env->CP0_Performance0 = t0 & 0x000007ff;
+}
+
+void do_mtc0_taglo (target_ulong t0)
+{
+ env->CP0_TagLo = t0 & 0xFFFFFCF6;
+}
+
+void do_mtc0_datalo (target_ulong t0)
+{
+ env->CP0_DataLo = t0; /* XXX */
+}
+
+void do_mtc0_taghi (target_ulong t0)
+{
+ env->CP0_TagHi = t0; /* XXX */
+}
+
+void do_mtc0_datahi (target_ulong t0)
+{
+ env->CP0_DataHi = t0; /* XXX */
}
void do_mtc0_status_debug(uint32_t old, uint32_t val)
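
Nearly every do_mtc0_* helper above is the same masked read-modify-write: bits covered by the register's writable mask come from the incoming value, everything else is preserved. A one-line model of that pattern; rw_bitmask stands in for fields such as CP0_Status_rw_bitmask or the masks built inline above:

#include <stdint.h>

/* Masked CP0 update: writable bits from "val", the rest kept from "old". */
static uint32_t masked_update(uint32_t old, uint32_t val, uint32_t rw_bitmask)
{
    return (old & ~rw_bitmask) | (val & rw_bitmask);
}

For example, do_mtc0_vpecontrol builds its mask from the YSI, GSI, TE and TargTC fields and then applies exactly this merge.
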
@@ -388,7 +1329,152 @@ void do_mtc0_status_irqraise_debug(void)
{
fprintf(logfile, "Raise pending IRQs\n");
}
+#endif /* !CONFIG_USER_ONLY */
+
+/* MIPS MT functions */
+target_ulong do_mftgpr(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->gpr[other_tc][sel];
+}
+target_ulong do_mftlo(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->LO[other_tc][sel];
+}
+
+target_ulong do_mfthi(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->HI[other_tc][sel];
+}
+
+target_ulong do_mftacx(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->ACX[other_tc][sel];
+}
+
+target_ulong do_mftdsp(target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ return env->DSPControl[other_tc];
+}
+
+void do_mttgpr(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->gpr[other_tc][sel] = t0;
+}
+
+void do_mttlo(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->LO[other_tc][sel] = t0;
+}
+
+void do_mtthi(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->HI[other_tc][sel] = t0;
+}
+
+void do_mttacx(target_ulong t0, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->ACX[other_tc][sel] = t0;
+}
+
+void do_mttdsp(target_ulong t0)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+
+ env->DSPControl[other_tc] = t0;
+}
+
+/* MIPS MT functions */
+target_ulong do_dmt(target_ulong t0)
+{
+ // TODO
+ t0 = 0;
+ // rt = t0
+
+ return t0;
+}
+
+target_ulong do_emt(target_ulong t0)
+{
+ // TODO
+ t0 = 0;
+ // rt = t0
+
+ return t0;
+}
+
+target_ulong do_dvpe(target_ulong t0)
+{
+ // TODO
+ t0 = 0;
+ // rt = t0
+
+ return t0;
+}
+
+target_ulong do_evpe(target_ulong t0)
+{
+ // TODO
+ t0 = 0;
+ // rt = t0
+
+ return t0;
+}
+
+void do_fork(target_ulong t0, target_ulong t1)
+{
+ // t0 = rt, t1 = rs
+ t0 = 0;
+ // TODO: store to TC register
+}
+
+target_ulong do_yield(target_ulong t0)
+{
+ if ((target_long)t0 < 0) {
+ /* No scheduling policy implemented. */
+ if (t0 != -2) {
+ if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
+ env->CP0_TCStatus[env->current_tc] & (1 << CP0TCSt_DT)) {
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
+ do_raise_exception(EXCP_THREAD);
+ }
+ }
+ } else if (t0 == 0) {
+ if (0 /* TODO: TC underflow */) {
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ do_raise_exception(EXCP_THREAD);
+ } else {
+ // TODO: Deallocate TC
+ }
+ } else if (t0 > 0) {
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
+ do_raise_exception(EXCP_THREAD);
+ }
+ return env->CP0_YQMask;
+}
+
+/* CP1 functions */
void fpu_handle_exception(void)
{
#ifdef CONFIG_SOFTFLOAT
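
The MT cross-TC moves above (mftc0/mttc0, mftgpr/mttgpr, and friends) all start by selecting the target thread context from the TargTC field of VPEControl; with TargTC in the low bits of that register, the masking shown above is the whole extraction. A generic sketch, assuming the usual (shift, width) description of the field:

#include <stdint.h>

/* Pick the thread context addressed by VPEControl.TargTC. */
static unsigned targ_tc(uint32_t vpecontrol, unsigned shift, unsigned width)
{
    return (vpecontrol >> shift) & ((1u << width) - 1);
}
/* With shift == CP0VPECo_TargTC and width == 8 this matches
   env->CP0_VPEControl & (0xff << CP0VPECo_TargTC) when the field
   starts at bit 0. */
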
@@ -426,6 +1512,7 @@ void fpu_handle_exception(void)
#endif
}
+#ifndef CONFIG_USER_ONLY
/* TLB management */
void cpu_mips_tlb_flush (CPUState *env, int flush_global)
{
@@ -550,18 +1637,23 @@ void r4k_do_tlbr (void)
#endif /* !CONFIG_USER_ONLY */
-void dump_ldst (const unsigned char *func)
+/* Specials */
+target_ulong do_di (target_ulong t0)
{
- if (loglevel)
- fprintf(logfile, "%s => " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__, T0, T1);
+ t0 = env->CP0_Status;
+ env->CP0_Status = t0 & ~(1 << CP0St_IE);
+ cpu_mips_update_irq(env);
+
+ return t0;
}
-void dump_sc (void)
+target_ulong do_ei (target_ulong t0)
{
- if (loglevel) {
- fprintf(logfile, "%s " TARGET_FMT_lx " at " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", __func__,
- T1, T0, env->CP0_LLAddr);
- }
+ t0 = env->CP0_Status;
+ env->CP0_Status = t0 | (1 << CP0St_IE);
+ cpu_mips_update_irq(env);
+
+ return t0;
}
void debug_pre_eret (void)
@@ -591,6 +1683,122 @@ void debug_post_eret (void)
}
}
+void do_eret (void)
+{
+ if (loglevel & CPU_LOG_EXEC)
+ debug_pre_eret();
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ env->PC[env->current_tc] = env->CP0_ErrorEPC;
+ env->CP0_Status &= ~(1 << CP0St_ERL);
+ } else {
+ env->PC[env->current_tc] = env->CP0_EPC;
+ env->CP0_Status &= ~(1 << CP0St_EXL);
+ }
+ compute_hflags(env);
+ if (loglevel & CPU_LOG_EXEC)
+ debug_post_eret();
+ env->CP0_LLAddr = 1;
+}
+
+void do_deret (void)
+{
+ if (loglevel & CPU_LOG_EXEC)
+ debug_pre_eret();
+ env->PC[env->current_tc] = env->CP0_DEPC;
+ env->hflags &= MIPS_HFLAG_DM;
+ compute_hflags(env);
+ if (loglevel & CPU_LOG_EXEC)
+ debug_post_eret();
+ env->CP0_LLAddr = 1;
+}
+
+target_ulong do_rdhwr_cpunum(target_ulong t0)
+{
+ if ((env->hflags & MIPS_HFLAG_CP0) ||
+ (env->CP0_HWREna & (1 << 0)))
+ t0 = env->CP0_EBase & 0x3ff;
+ else
+ do_raise_exception(EXCP_RI);
+
+ return t0;
+}
+
+target_ulong do_rdhwr_synci_step(target_ulong t0)
+{
+ if ((env->hflags & MIPS_HFLAG_CP0) ||
+ (env->CP0_HWREna & (1 << 1)))
+ t0 = env->SYNCI_Step;
+ else
+ do_raise_exception(EXCP_RI);
+
+ return t0;
+}
+
+target_ulong do_rdhwr_cc(target_ulong t0)
+{
+ if ((env->hflags & MIPS_HFLAG_CP0) ||
+ (env->CP0_HWREna & (1 << 2)))
+ t0 = env->CP0_Count;
+ else
+ do_raise_exception(EXCP_RI);
+
+ return t0;
+}
+
+target_ulong do_rdhwr_ccres(target_ulong t0)
+{
+ if ((env->hflags & MIPS_HFLAG_CP0) ||
+ (env->CP0_HWREna & (1 << 3)))
+ t0 = env->CCRes;
+ else
+ do_raise_exception(EXCP_RI);
+
+ return t0;
+}
+
+/* Bitfield operations. */
+target_ulong do_ext(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
+{
+ return (int32_t)((t1 >> pos) & ((size < 32) ? ((1 << size) - 1) : ~0));
+}
+
+target_ulong do_ins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
+{
+ target_ulong mask = ((size < 32) ? ((1 << size) - 1) : ~0) << pos;
+
+ return (int32_t)((t0 & ~mask) | ((t1 << pos) & mask));
+}
+
+target_ulong do_wsbh(target_ulong t0, target_ulong t1)
+{
+ return (int32_t)(((t1 << 8) & ~0x00FF00FF) | ((t1 >> 8) & 0x00FF00FF));
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong do_dext(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
+{
+ return (t1 >> pos) & ((size < 64) ? ((1ULL << size) - 1) : ~0ULL);
+}
+
+target_ulong do_dins(target_ulong t0, target_ulong t1, uint32_t pos, uint32_t size)
+{
+ target_ulong mask = ((size < 64) ? ((1ULL << size) - 1) : ~0ULL) << pos;
+
+ return (t0 & ~mask) | ((t1 << pos) & mask);
+}
+
+target_ulong do_dsbh(target_ulong t0, target_ulong t1)
+{
+ return ((t1 << 8) & ~0x00FF00FF00FF00FFULL) | ((t1 >> 8) & 0x00FF00FF00FF00FFULL);
+}
+
+target_ulong do_dshd(target_ulong t0, target_ulong t1)
+{
+ t1 = ((t1 << 16) & ~0x0000FFFF0000FFFFULL) | ((t1 >> 16) & 0x0000FFFF0000FFFFULL);
+ return (t1 << 32) | (t1 >> 32);
+}
+#endif
+
void do_pmon (int function)
{
function /= 2;
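
do_ext and do_ins above are the MIPS32R2 bitfield instructions: EXT pulls size bits at position pos out of rs, INS splices the low size bits of rs into rt at pos, keeping the surrounding bits of rt. A standalone model (names and demo values are mine):

#include <stdint.h>
#include <stdio.h>

static uint32_t mips_ext(uint32_t rs, unsigned pos, unsigned size)
{
    uint32_t mask = (size < 32) ? ((1u << size) - 1) : ~0u;
    return (rs >> pos) & mask;
}

static uint32_t mips_ins(uint32_t rt, uint32_t rs, unsigned pos, unsigned size)
{
    uint32_t mask = ((size < 32) ? ((1u << size) - 1) : ~0u) << pos;
    return (rt & ~mask) | ((rs << pos) & mask);
}

int main(void)
{
    printf("%x\n", mips_ext(0x12345678, 8, 8));        /* 56 */
    printf("%x\n", mips_ins(0xffffffff, 0xab, 8, 8));  /* ffffabff */
    return 0;
}
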
@@ -617,6 +1825,12 @@ void do_pmon (int function)
}
}
+void do_wait (void)
+{
+ env->halted = 1;
+ do_raise_exception(EXCP_HLT);
+}
+
#if !defined(CONFIG_USER_ONLY)
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
@@ -679,7 +1893,7 @@ void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
else
do_raise_exception(EXCP_DBE);
}
-#endif
+#endif /* !CONFIG_USER_ONLY */
/* Complex FPU operations which may need stack space. */
@@ -703,51 +1917,55 @@ unsigned int ieee_rm[] = {
#define RESTORE_ROUNDING_MODE \
set_float_rounding_mode(ieee_rm[env->fpu->fcr31 & 3], &env->fpu->fp_status)
-void do_cfc1 (int reg)
+target_ulong do_cfc1 (uint32_t reg)
{
+ target_ulong t0;
+
switch (reg) {
case 0:
- T0 = (int32_t)env->fpu->fcr0;
+ t0 = (int32_t)env->fpu->fcr0;
break;
case 25:
- T0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
+ t0 = ((env->fpu->fcr31 >> 24) & 0xfe) | ((env->fpu->fcr31 >> 23) & 0x1);
break;
case 26:
- T0 = env->fpu->fcr31 & 0x0003f07c;
+ t0 = env->fpu->fcr31 & 0x0003f07c;
break;
case 28:
- T0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
+ t0 = (env->fpu->fcr31 & 0x00000f83) | ((env->fpu->fcr31 >> 22) & 0x4);
break;
default:
- T0 = (int32_t)env->fpu->fcr31;
+ t0 = (int32_t)env->fpu->fcr31;
break;
}
+
+ return t0;
}
-void do_ctc1 (int reg)
+void do_ctc1 (target_ulong t0, uint32_t reg)
{
switch(reg) {
case 25:
- if (T0 & 0xffffff00)
+ if (t0 & 0xffffff00)
return;
- env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((T0 & 0xfe) << 24) |
- ((T0 & 0x1) << 23);
+ env->fpu->fcr31 = (env->fpu->fcr31 & 0x017fffff) | ((t0 & 0xfe) << 24) |
+ ((t0 & 0x1) << 23);
break;
case 26:
- if (T0 & 0x007c0000)
+ if (t0 & 0x007c0000)
return;
- env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (T0 & 0x0003f07c);
+ env->fpu->fcr31 = (env->fpu->fcr31 & 0xfffc0f83) | (t0 & 0x0003f07c);
break;
case 28:
- if (T0 & 0x007c0000)
+ if (t0 & 0x007c0000)
return;
- env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (T0 & 0x00000f83) |
- ((T0 & 0x4) << 22);
+ env->fpu->fcr31 = (env->fpu->fcr31 & 0xfefff07c) | (t0 & 0x00000f83) |
+ ((t0 & 0x4) << 22);
break;
case 31:
- if (T0 & 0x007c0000)
+ if (t0 & 0x007c0000)
return;
- env->fpu->fcr31 = T0;
+ env->fpu->fcr31 = t0;
break;
default:
return;
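
The reg == 25 case of do_cfc1 above reassembles the FCCR view of the FPU control state: in FCSR the condition-code bits sit at bit 23 (cc0) and bits 31..25 (cc7..cc1), while FCCR presents all eight contiguously in bits 7..0. A sketch of just that repacking:

#include <stdint.h>

/* FCCR (coprocessor 1 control register 25) from FCSR/fcr31. */
static uint32_t fccr_from_fcsr(uint32_t fcsr)
{
    return ((fcsr >> 24) & 0xfe)    /* cc7..cc1 -> bits 7..1 */
         | ((fcsr >> 23) & 0x1);    /* cc0      -> bit 0     */
}
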
@@ -788,8 +2006,26 @@ static always_inline void update_fcr31(void)
UPDATE_FP_FLAGS(env->fpu->fcr31, tmp);
}
+/* Float support.
+ Single precision routines have an "s" suffix, double precision a
+ "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
+ paired single lower "pl", paired single upper "pu". */
+
#define FLOAT_OP(name, p) void do_float_##name##_##p(void)
+/* unary operations, modifying fp status */
+#define FLOAT_UNOP(name) \
+FLOAT_OP(name, d) \
+{ \
+ FDT2 = float64_ ## name(FDT0, &env->fpu->fp_status); \
+} \
+FLOAT_OP(name, s) \
+{ \
+ FST2 = float32_ ## name(FST0, &env->fpu->fp_status); \
+}
+FLOAT_UNOP(sqrt)
+#undef FLOAT_UNOP
+
FLOAT_OP(cvtd, s)
{
set_float_exception_flags(0, &env->fpu->fp_status);
@@ -1028,6 +2264,25 @@ FLOAT_OP(floorw, s)
WT2 = FLOAT_SNAN32;
}
+/* unary operations, not modifying fp status */
+#define FLOAT_UNOP(name) \
+FLOAT_OP(name, d) \
+{ \
+ FDT2 = float64_ ## name(FDT0); \
+} \
+FLOAT_OP(name, s) \
+{ \
+ FST2 = float32_ ## name(FST0); \
+} \
+FLOAT_OP(name, ps) \
+{ \
+ FST2 = float32_ ## name(FST0); \
+ FSTH2 = float32_ ## name(FSTH0); \
+}
+FLOAT_UNOP(abs)
+FLOAT_UNOP(chs)
+#undef FLOAT_UNOP
+
/* MIPS specific unary operations */
FLOAT_OP(recip, d)
{
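
The second FLOAT_UNOP block above is for abs and chs (negate), which the code treats as pure sign-bit operations: they take no fp_status argument and never update the exception flags. At the encoding level that is just a mask or an XOR of the sign bit, e.g. for single precision:

#include <stdint.h>

/* Sign-bit-only operations on a raw IEEE-754 single-precision encoding. */
static uint32_t f32_abs_bits(uint32_t f) { return f & 0x7fffffffu; }
static uint32_t f32_chs_bits(uint32_t f) { return f ^ 0x80000000u; }
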
@@ -1136,6 +2391,56 @@ FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
+/* ternary operations */
+#define FLOAT_TERNOP(name1, name2) \
+FLOAT_OP(name1 ## name2, d) \
+{ \
+ FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \
+ FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \
+} \
+FLOAT_OP(name1 ## name2, s) \
+{ \
+ FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
+ FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
+} \
+FLOAT_OP(name1 ## name2, ps) \
+{ \
+ FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
+ FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
+ FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
+ FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
+}
+FLOAT_TERNOP(mul, add)
+FLOAT_TERNOP(mul, sub)
+#undef FLOAT_TERNOP
+
+/* negated ternary operations */
+#define FLOAT_NTERNOP(name1, name2) \
+FLOAT_OP(n ## name1 ## name2, d) \
+{ \
+ FDT0 = float64_ ## name1 (FDT0, FDT1, &env->fpu->fp_status); \
+ FDT2 = float64_ ## name2 (FDT0, FDT2, &env->fpu->fp_status); \
+ FDT2 = float64_chs(FDT2); \
+} \
+FLOAT_OP(n ## name1 ## name2, s) \
+{ \
+ FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
+ FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
+ FST2 = float32_chs(FST2); \
+} \
+FLOAT_OP(n ## name1 ## name2, ps) \
+{ \
+ FST0 = float32_ ## name1 (FST0, FST1, &env->fpu->fp_status); \
+ FSTH0 = float32_ ## name1 (FSTH0, FSTH1, &env->fpu->fp_status); \
+ FST2 = float32_ ## name2 (FST0, FST2, &env->fpu->fp_status); \
+ FSTH2 = float32_ ## name2 (FSTH0, FSTH2, &env->fpu->fp_status); \
+ FST2 = float32_chs(FST2); \
+ FSTH2 = float32_chs(FSTH2); \
+}
+FLOAT_NTERNOP(mul, add)
+FLOAT_NTERNOP(mul, sub)
+#undef FLOAT_NTERNOP
+
/* MIPS specific binary operations */
FLOAT_OP(recip2, d)
{
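
FLOAT_TERNOP above implements MADD.fmt/MSUB.fmt as two separately rounded softfloat calls (a multiply whose result replaces the first operand, then an add into the destination), and FLOAT_NTERNOP negates the final result; there is no fused multiply-add here. A plain-C model of that ordering (names are mine):

#include <stdio.h>

/* Two-step multiply-add: the product is rounded before the add, exactly as
   the macro-generated do_float_muladd_* helpers above do it. */
static double madd_two_step(double a, double b, double c)
{
    double prod = a * b;   /* rounded once here... */
    return prod + c;       /* ...and rounded again here */
}

static double nmadd_two_step(double a, double b, double c)
{
    return -madd_two_step(a, b, c);
}

int main(void)
{
    printf("%g\n", madd_two_step(2.0, 3.0, 1.0));   /* 7 */
    printf("%g\n", nmadd_two_step(2.0, 3.0, 1.0));  /* -7 */
    return 0;
}
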
diff --git a/target-mips/op_mem.c b/target-mips/op_mem.c
deleted file mode 100644
index 4b889a88d..000000000
--- a/target-mips/op_mem.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * MIPS emulation memory micro-operations for qemu.
- *
- * Copyright (c) 2004-2005 Jocelyn Mayer
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* "half" load and stores. We must do the memory access inline,
- or fault handling won't work. */
-
-#ifdef TARGET_WORDS_BIGENDIAN
-#define GET_LMASK(v) ((v) & 3)
-#define GET_OFFSET(addr, offset) (addr + (offset))
-#else
-#define GET_LMASK(v) (((v) & 3) ^ 3)
-#define GET_OFFSET(addr, offset) (addr - (offset))
-#endif
-
-void glue(op_lwl, MEMSUFFIX) (void)
-{
- target_ulong tmp;
-
- tmp = glue(ldub, MEMSUFFIX)(T0);
- T1 = (T1 & 0x00FFFFFF) | (tmp << 24);
-
- if (GET_LMASK(T0) <= 2) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 1));
- T1 = (T1 & 0xFF00FFFF) | (tmp << 16);
- }
-
- if (GET_LMASK(T0) <= 1) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 2));
- T1 = (T1 & 0xFFFF00FF) | (tmp << 8);
- }
-
- if (GET_LMASK(T0) == 0) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 3));
- T1 = (T1 & 0xFFFFFF00) | tmp;
- }
- T1 = (int32_t)T1;
- FORCE_RET();
-}
-
-void glue(op_lwr, MEMSUFFIX) (void)
-{
- target_ulong tmp;
-
- tmp = glue(ldub, MEMSUFFIX)(T0);
- T1 = (T1 & 0xFFFFFF00) | tmp;
-
- if (GET_LMASK(T0) >= 1) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -1));
- T1 = (T1 & 0xFFFF00FF) | (tmp << 8);
- }
-
- if (GET_LMASK(T0) >= 2) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -2));
- T1 = (T1 & 0xFF00FFFF) | (tmp << 16);
- }
-
- if (GET_LMASK(T0) == 3) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -3));
- T1 = (T1 & 0x00FFFFFF) | (tmp << 24);
- }
- T1 = (int32_t)T1;
- FORCE_RET();
-}
-
-void glue(op_swl, MEMSUFFIX) (void)
-{
- glue(stb, MEMSUFFIX)(T0, (uint8_t)(T1 >> 24));
-
- if (GET_LMASK(T0) <= 2)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 1), (uint8_t)(T1 >> 16));
-
- if (GET_LMASK(T0) <= 1)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 2), (uint8_t)(T1 >> 8));
-
- if (GET_LMASK(T0) == 0)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 3), (uint8_t)T1);
-
- FORCE_RET();
-}
-
-void glue(op_swr, MEMSUFFIX) (void)
-{
- glue(stb, MEMSUFFIX)(T0, (uint8_t)T1);
-
- if (GET_LMASK(T0) >= 1)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -1), (uint8_t)(T1 >> 8));
-
- if (GET_LMASK(T0) >= 2)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -2), (uint8_t)(T1 >> 16));
-
- if (GET_LMASK(T0) == 3)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -3), (uint8_t)(T1 >> 24));
-
- FORCE_RET();
-}
-
-#if defined(TARGET_MIPS64)
-/* "half" load and stores. We must do the memory access inline,
- or fault handling won't work. */
-
-#ifdef TARGET_WORDS_BIGENDIAN
-#define GET_LMASK64(v) ((v) & 7)
-#else
-#define GET_LMASK64(v) (((v) & 7) ^ 7)
-#endif
-
-void glue(op_ldl, MEMSUFFIX) (void)
-{
- uint64_t tmp;
-
- tmp = glue(ldub, MEMSUFFIX)(T0);
- T1 = (T1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
-
- if (GET_LMASK64(T0) <= 6) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 1));
- T1 = (T1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
- }
-
- if (GET_LMASK64(T0) <= 5) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 2));
- T1 = (T1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
- }
-
- if (GET_LMASK64(T0) <= 4) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 3));
- T1 = (T1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
- }
-
- if (GET_LMASK64(T0) <= 3) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 4));
- T1 = (T1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
- }
-
- if (GET_LMASK64(T0) <= 2) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 5));
- T1 = (T1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
- }
-
- if (GET_LMASK64(T0) <= 1) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 6));
- T1 = (T1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
- }
-
- if (GET_LMASK64(T0) == 0) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, 7));
- T1 = (T1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
- }
-
- FORCE_RET();
-}
-
-void glue(op_ldr, MEMSUFFIX) (void)
-{
- uint64_t tmp;
-
- tmp = glue(ldub, MEMSUFFIX)(T0);
- T1 = (T1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
-
- if (GET_LMASK64(T0) >= 1) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -1));
- T1 = (T1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
- }
-
- if (GET_LMASK64(T0) >= 2) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -2));
- T1 = (T1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
- }
-
- if (GET_LMASK64(T0) >= 3) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -3));
- T1 = (T1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
- }
-
- if (GET_LMASK64(T0) >= 4) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -4));
- T1 = (T1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
- }
-
- if (GET_LMASK64(T0) >= 5) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -5));
- T1 = (T1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
- }
-
- if (GET_LMASK64(T0) >= 6) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -6));
- T1 = (T1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
- }
-
- if (GET_LMASK64(T0) == 7) {
- tmp = glue(ldub, MEMSUFFIX)(GET_OFFSET(T0, -7));
- T1 = (T1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
- }
-
- FORCE_RET();
-}
-
-void glue(op_sdl, MEMSUFFIX) (void)
-{
- glue(stb, MEMSUFFIX)(T0, (uint8_t)(T1 >> 56));
-
- if (GET_LMASK64(T0) <= 6)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 1), (uint8_t)(T1 >> 48));
-
- if (GET_LMASK64(T0) <= 5)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 2), (uint8_t)(T1 >> 40));
-
- if (GET_LMASK64(T0) <= 4)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 3), (uint8_t)(T1 >> 32));
-
- if (GET_LMASK64(T0) <= 3)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 4), (uint8_t)(T1 >> 24));
-
- if (GET_LMASK64(T0) <= 2)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 5), (uint8_t)(T1 >> 16));
-
- if (GET_LMASK64(T0) <= 1)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 6), (uint8_t)(T1 >> 8));
-
- if (GET_LMASK64(T0) <= 0)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, 7), (uint8_t)T1);
-
- FORCE_RET();
-}
-
-void glue(op_sdr, MEMSUFFIX) (void)
-{
- glue(stb, MEMSUFFIX)(T0, (uint8_t)T1);
-
- if (GET_LMASK64(T0) >= 1)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -1), (uint8_t)(T1 >> 8));
-
- if (GET_LMASK64(T0) >= 2)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -2), (uint8_t)(T1 >> 16));
-
- if (GET_LMASK64(T0) >= 3)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -3), (uint8_t)(T1 >> 24));
-
- if (GET_LMASK64(T0) >= 4)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -4), (uint8_t)(T1 >> 32));
-
- if (GET_LMASK64(T0) >= 5)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -5), (uint8_t)(T1 >> 40));
-
- if (GET_LMASK64(T0) >= 6)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -6), (uint8_t)(T1 >> 48));
-
- if (GET_LMASK64(T0) == 7)
- glue(stb, MEMSUFFIX)(GET_OFFSET(T0, -7), (uint8_t)(T1 >> 56));
-
- FORCE_RET();
-}
-#endif /* TARGET_MIPS64 */
-
-void glue(op_lwc1, MEMSUFFIX) (void)
-{
- WT0 = glue(ldl, MEMSUFFIX)(T0);
- FORCE_RET();
-}
-void glue(op_swc1, MEMSUFFIX) (void)
-{
- glue(stl, MEMSUFFIX)(T0, WT0);
- FORCE_RET();
-}
-void glue(op_ldc1, MEMSUFFIX) (void)
-{
- DT0 = glue(ldq, MEMSUFFIX)(T0);
- FORCE_RET();
-}
-void glue(op_sdc1, MEMSUFFIX) (void)
-{
- glue(stq, MEMSUFFIX)(T0, DT0);
- FORCE_RET();
-}
-void glue(op_luxc1, MEMSUFFIX) (void)
-{
- DT0 = glue(ldq, MEMSUFFIX)(T0 & ~0x7);
- FORCE_RET();
-}
-void glue(op_suxc1, MEMSUFFIX) (void)
-{
- glue(stq, MEMSUFFIX)(T0 & ~0x7, DT0);
- FORCE_RET();
-}
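
Both the removed op_mem.c and its replacement in op_helper.c rely on the same GET_LMASK trick: the byte-within-word index is used directly on big-endian targets and XORed with 3 on little-endian ones (7 for the 64-bit variants), so a single chain of <= / >= tests covers both layouts. A small demonstration of the mapping; the helper name is mine:

#include <stdint.h>
#include <stdio.h>

/* GET_LMASK for 32-bit accesses: identity on big endian, XOR 3 otherwise.
   An LWL at "addr" touches 4 - lmask bytes on either layout. */
static unsigned lmask(uint32_t addr, int big_endian)
{
    unsigned idx = addr & 3;
    return big_endian ? idx : idx ^ 3;
}

int main(void)
{
    for (uint32_t a = 0; a < 4; a++)
        printf("addr&3=%u  BE: lmask=%u bytes=%u   LE: lmask=%u bytes=%u\n",
               a, lmask(a, 1), 4 - lmask(a, 1), lmask(a, 0), 4 - lmask(a, 0));
    return 0;
}
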
diff --git a/target-mips/translate.c b/target-mips/translate.c
index f0dc14d82..41a27b4e2 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -423,24 +423,87 @@ enum {
};
/* global register indices */
-static TCGv cpu_env, current_tc_gprs, current_tc_hi, cpu_T[2];
+static TCGv cpu_env, current_tc_gprs, current_tc_hi, current_fpu;
-static inline void tcg_gen_helper_0_1i(void *func, TCGv arg)
+/* FPU TNs, global for now. */
+static TCGv fpu32_T[3], fpu64_T[3], fpu32h_T[3];
+
+static inline void tcg_gen_helper_0_i(void *func, TCGv arg)
{
- TCGv t = tcg_const_i32(arg);
+ TCGv tmp = tcg_const_i32(arg);
- tcg_gen_helper_0_1(func, t);
- tcg_temp_free(t);
+ tcg_gen_helper_0_1(func, tmp);
+ tcg_temp_free(tmp);
}
-static inline void tcg_gen_helper_0_2ii(void *func, TCGv arg1, TCGv arg2)
+static inline void tcg_gen_helper_0_ii(void *func, TCGv arg1, TCGv arg2)
{
- TCGv t1 = tcg_const_i32(arg1);
- TCGv t2 = tcg_const_i32(arg2);
+ TCGv tmp1 = tcg_const_i32(arg1);
+ TCGv tmp2 = tcg_const_i32(arg2);
- tcg_gen_helper_0_2(func, t1, t2);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
+ tcg_gen_helper_0_2(func, tmp1, tmp2);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
+}
+
+static inline void tcg_gen_helper_0_1i(void *func, TCGv arg1, TCGv arg2)
+{
+ TCGv tmp = tcg_const_i32(arg2);
+
+ tcg_gen_helper_0_2(func, arg1, tmp);
+ tcg_temp_free(tmp);
+}
+
+static inline void tcg_gen_helper_0_2i(void *func, TCGv arg1, TCGv arg2, TCGv arg3)
+{
+ TCGv tmp = tcg_const_i32(arg3);
+
+ tcg_gen_helper_0_3(func, arg1, arg2, tmp);
+ tcg_temp_free(tmp);
+}
+
+static inline void tcg_gen_helper_0_2ii(void *func, TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4)
+{
+ TCGv tmp1 = tcg_const_i32(arg3);
+ TCGv tmp2 = tcg_const_i32(arg4);
+
+ tcg_gen_helper_0_4(func, arg1, arg2, tmp1, tmp2);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
+}
+
+static inline void tcg_gen_helper_1_i(void *func, TCGv ret, TCGv arg)
+{
+ TCGv tmp = tcg_const_i32(arg);
+
+ tcg_gen_helper_1_1(func, ret, tmp);
+ tcg_temp_free(tmp);
+}
+
+static inline void tcg_gen_helper_1_1i(void *func, TCGv ret, TCGv arg1, TCGv arg2)
+{
+ TCGv tmp = tcg_const_i32(arg2);
+
+ tcg_gen_helper_1_2(func, ret, arg1, tmp);
+ tcg_temp_free(tmp);
+}
+
+static inline void tcg_gen_helper_1_2i(void *func, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3)
+{
+ TCGv tmp = tcg_const_i32(arg3);
+
+ tcg_gen_helper_1_3(func, ret, arg1, arg2, tmp);
+ tcg_temp_free(tmp);
+}
+
+static inline void tcg_gen_helper_1_2ii(void *func, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, TCGv arg4)
+{
+ TCGv tmp1 = tcg_const_i32(arg3);
+ TCGv tmp2 = tcg_const_i32(arg4);
+
+ tcg_gen_helper_1_4(func, ret, arg1, arg2, tmp1, tmp2);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
}
typedef struct DisasContext {
@@ -537,116 +600,140 @@ static inline void gen_store_HI (TCGv t, int reg)
}
/* Moves to/from shadow registers. */
-static inline void gen_load_srsgpr (TCGv t, int reg)
+static inline void gen_load_srsgpr (int from, int to)
{
- if (reg == 0)
- tcg_gen_movi_tl(t, 0);
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_TL);
+
+ if (from == 0)
+ tcg_gen_movi_tl(r_tmp1, 0);
else {
- TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
- tcg_gen_ld_i32(r_tmp, cpu_env, offsetof(CPUState, CP0_SRSCtl));
- tcg_gen_shri_i32(r_tmp, r_tmp, CP0SRSCtl_PSS);
- tcg_gen_andi_i32(r_tmp, r_tmp, 0xf);
- tcg_gen_muli_i32(r_tmp, r_tmp, sizeof(target_ulong) * 32);
- tcg_gen_add_i32(r_tmp, cpu_env, r_tmp);
+ tcg_gen_ld_i32(r_tmp2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
+ tcg_gen_shri_i32(r_tmp2, r_tmp2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(r_tmp2, r_tmp2, 0xf);
+ tcg_gen_muli_i32(r_tmp2, r_tmp2, sizeof(target_ulong) * 32);
+ tcg_gen_add_i32(r_tmp2, cpu_env, r_tmp2);
- tcg_gen_ld_tl(t, r_tmp, sizeof(target_ulong) * reg);
- tcg_temp_free(r_tmp);
+ tcg_gen_ld_tl(r_tmp1, r_tmp2, sizeof(target_ulong) * from);
+ tcg_temp_free(r_tmp2);
}
+ gen_store_gpr(r_tmp1, to);
+ tcg_temp_free(r_tmp1);
}
-static inline void gen_store_srsgpr (TCGv t, int reg)
+static inline void gen_store_srsgpr (int from, int to)
{
- if (reg != 0) {
- TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
-
- tcg_gen_ld_i32(r_tmp, cpu_env, offsetof(CPUState, CP0_SRSCtl));
- tcg_gen_shri_i32(r_tmp, r_tmp, CP0SRSCtl_PSS);
- tcg_gen_andi_i32(r_tmp, r_tmp, 0xf);
- tcg_gen_muli_i32(r_tmp, r_tmp, sizeof(target_ulong) * 32);
- tcg_gen_add_i32(r_tmp, cpu_env, r_tmp);
-
- tcg_gen_st_tl(t, r_tmp, sizeof(target_ulong) * reg);
- tcg_temp_free(r_tmp);
+ if (to != 0) {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_TL);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
+
+ gen_load_gpr(r_tmp1, from);
+ tcg_gen_ld_i32(r_tmp2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
+ tcg_gen_shri_i32(r_tmp2, r_tmp2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(r_tmp2, r_tmp2, 0xf);
+ tcg_gen_muli_i32(r_tmp2, r_tmp2, sizeof(target_ulong) * 32);
+ tcg_gen_add_i32(r_tmp2, cpu_env, r_tmp2);
+
+ tcg_gen_st_tl(r_tmp1, r_tmp2, sizeof(target_ulong) * to);
+ tcg_temp_free(r_tmp1);
+ tcg_temp_free(r_tmp2);
}
}
/* Floating point register moves. */
-#define FGEN32(func, NAME) \
-static GenOpFunc *NAME ## _table [32] = { \
-NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \
-NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \
-NAME ## 8, NAME ## 9, NAME ## 10, NAME ## 11, \
-NAME ## 12, NAME ## 13, NAME ## 14, NAME ## 15, \
-NAME ## 16, NAME ## 17, NAME ## 18, NAME ## 19, \
-NAME ## 20, NAME ## 21, NAME ## 22, NAME ## 23, \
-NAME ## 24, NAME ## 25, NAME ## 26, NAME ## 27, \
-NAME ## 28, NAME ## 29, NAME ## 30, NAME ## 31, \
-}; \
-static always_inline void func(int n) \
-{ \
- NAME ## _table[n](); \
+static inline void gen_load_fpr32 (TCGv t, int reg)
+{
+ tcg_gen_ld_i32(t, current_fpu, 8 * reg + 4 * FP_ENDIAN_IDX);
}
-FGEN32(gen_op_load_fpr_WT0, gen_op_load_fpr_WT0_fpr);
-FGEN32(gen_op_store_fpr_WT0, gen_op_store_fpr_WT0_fpr);
-
-FGEN32(gen_op_load_fpr_WT1, gen_op_load_fpr_WT1_fpr);
-FGEN32(gen_op_store_fpr_WT1, gen_op_store_fpr_WT1_fpr);
-
-FGEN32(gen_op_load_fpr_WT2, gen_op_load_fpr_WT2_fpr);
-FGEN32(gen_op_store_fpr_WT2, gen_op_store_fpr_WT2_fpr);
-
-FGEN32(gen_op_load_fpr_DT0, gen_op_load_fpr_DT0_fpr);
-FGEN32(gen_op_store_fpr_DT0, gen_op_store_fpr_DT0_fpr);
-
-FGEN32(gen_op_load_fpr_DT1, gen_op_load_fpr_DT1_fpr);
-FGEN32(gen_op_store_fpr_DT1, gen_op_store_fpr_DT1_fpr);
+static inline void gen_store_fpr32 (TCGv t, int reg)
+{
+ tcg_gen_st_i32(t, current_fpu, 8 * reg + 4 * FP_ENDIAN_IDX);
+}
-FGEN32(gen_op_load_fpr_DT2, gen_op_load_fpr_DT2_fpr);
-FGEN32(gen_op_store_fpr_DT2, gen_op_store_fpr_DT2_fpr);
+static inline void gen_load_fpr64 (DisasContext *ctx, TCGv t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_ld_i64(t, current_fpu, 8 * reg);
+ } else {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ld_i32(r_tmp1, current_fpu, 8 * (reg | 1) + 4 * FP_ENDIAN_IDX);
+ tcg_gen_extu_i32_i64(t, r_tmp1);
+ tcg_gen_shli_i64(t, t, 32);
+ tcg_gen_ld_i32(r_tmp1, current_fpu, 8 * (reg & ~1) + 4 * FP_ENDIAN_IDX);
+ tcg_gen_extu_i32_i64(r_tmp2, r_tmp1);
+ tcg_gen_or_i64(t, t, r_tmp2);
+ tcg_temp_free(r_tmp1);
+ tcg_temp_free(r_tmp2);
+ }
+}
-FGEN32(gen_op_load_fpr_WTH0, gen_op_load_fpr_WTH0_fpr);
-FGEN32(gen_op_store_fpr_WTH0, gen_op_store_fpr_WTH0_fpr);
+static inline void gen_store_fpr64 (DisasContext *ctx, TCGv t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_st_i64(t, current_fpu, 8 * reg);
+ } else {
+ TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
-FGEN32(gen_op_load_fpr_WTH1, gen_op_load_fpr_WTH1_fpr);
-FGEN32(gen_op_store_fpr_WTH1, gen_op_store_fpr_WTH1_fpr);
+ tcg_gen_trunc_i64_i32(r_tmp, t);
+ tcg_gen_st_i32(r_tmp, current_fpu, 8 * (reg & ~1) + 4 * FP_ENDIAN_IDX);
+ tcg_gen_shri_i64(t, t, 32);
+ tcg_gen_trunc_i64_i32(r_tmp, t);
+ tcg_gen_st_i32(r_tmp, current_fpu, 8 * (reg | 1) + 4 * FP_ENDIAN_IDX);
+ tcg_temp_free(r_tmp);
+ }
+}
-FGEN32(gen_op_load_fpr_WTH2, gen_op_load_fpr_WTH2_fpr);
-FGEN32(gen_op_store_fpr_WTH2, gen_op_store_fpr_WTH2_fpr);
+static inline void gen_load_fpr32h (TCGv t, int reg)
+{
+ tcg_gen_ld_i32(t, current_fpu, 8 * reg + 4 * !FP_ENDIAN_IDX);
+}
-#define GEN_LOAD_FREG_FTN(FTn, Fn) \
-do { \
- glue(gen_op_load_fpr_, FTn)(Fn); \
-} while (0)
+static inline void gen_store_fpr32h (TCGv t, int reg)
+{
+ tcg_gen_st_i32(t, current_fpu, 8 * reg + 4 * !FP_ENDIAN_IDX);
+}
-#define GEN_STORE_FTN_FREG(Fn, FTn) \
-do { \
- glue(gen_op_store_fpr_, FTn)(Fn); \
-} while (0)
+static inline void get_fp_cond (TCGv t)
+{
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
+
+ tcg_gen_ld_i32(r_tmp1, current_fpu, offsetof(CPUMIPSFPUContext, fcr31));
+ tcg_gen_shri_i32(r_tmp2, r_tmp1, 24);
+ tcg_gen_andi_i32(r_tmp2, r_tmp2, 0xfe);
+ tcg_gen_shri_i32(r_tmp1, r_tmp1, 23);
+ tcg_gen_andi_i32(r_tmp1, r_tmp1, 0x1);
+ tcg_gen_or_i32(t, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp1);
+ tcg_temp_free(r_tmp2);
+}
-#define FOP_CONDS(type, fmt) \
-static GenOpFunc1 * gen_op_cmp ## type ## _ ## fmt ## _table[16] = { \
- gen_op_cmp ## type ## _ ## fmt ## _f, \
- gen_op_cmp ## type ## _ ## fmt ## _un, \
- gen_op_cmp ## type ## _ ## fmt ## _eq, \
- gen_op_cmp ## type ## _ ## fmt ## _ueq, \
- gen_op_cmp ## type ## _ ## fmt ## _olt, \
- gen_op_cmp ## type ## _ ## fmt ## _ult, \
- gen_op_cmp ## type ## _ ## fmt ## _ole, \
- gen_op_cmp ## type ## _ ## fmt ## _ule, \
- gen_op_cmp ## type ## _ ## fmt ## _sf, \
- gen_op_cmp ## type ## _ ## fmt ## _ngle, \
- gen_op_cmp ## type ## _ ## fmt ## _seq, \
- gen_op_cmp ## type ## _ ## fmt ## _ngl, \
- gen_op_cmp ## type ## _ ## fmt ## _lt, \
- gen_op_cmp ## type ## _ ## fmt ## _nge, \
- gen_op_cmp ## type ## _ ## fmt ## _le, \
- gen_op_cmp ## type ## _ ## fmt ## _ngt, \
-}; \
-static always_inline void gen_cmp ## type ## _ ## fmt(int n, long cc) \
-{ \
- gen_op_cmp ## type ## _ ## fmt ## _table[n](cc); \
+#define FOP_CONDS(type, fmt) \
+static GenOpFunc1 * fcmp ## type ## _ ## fmt ## _table[16] = { \
+ do_cmp ## type ## _ ## fmt ## _f, \
+ do_cmp ## type ## _ ## fmt ## _un, \
+ do_cmp ## type ## _ ## fmt ## _eq, \
+ do_cmp ## type ## _ ## fmt ## _ueq, \
+ do_cmp ## type ## _ ## fmt ## _olt, \
+ do_cmp ## type ## _ ## fmt ## _ult, \
+ do_cmp ## type ## _ ## fmt ## _ole, \
+ do_cmp ## type ## _ ## fmt ## _ule, \
+ do_cmp ## type ## _ ## fmt ## _sf, \
+ do_cmp ## type ## _ ## fmt ## _ngle, \
+ do_cmp ## type ## _ ## fmt ## _seq, \
+ do_cmp ## type ## _ ## fmt ## _ngl, \
+ do_cmp ## type ## _ ## fmt ## _lt, \
+ do_cmp ## type ## _ ## fmt ## _nge, \
+ do_cmp ## type ## _ ## fmt ## _le, \
+ do_cmp ## type ## _ ## fmt ## _ngt, \
+}; \
+static inline void gen_cmp ## type ## _ ## fmt(int n, long cc) \
+{ \
+ tcg_gen_helper_0_i(fcmp ## type ## _ ## fmt ## _table[n], cc); \
}
FOP_CONDS(, d)
@@ -655,19 +742,20 @@ FOP_CONDS(, s)
FOP_CONDS(abs, s)
FOP_CONDS(, ps)
FOP_CONDS(abs, ps)
+#undef FOP_CONDS
/* Tests */
#define OP_COND(name, cond) \
-void glue(gen_op_, name) (void) \
+void glue(gen_op_, name) (TCGv t0, TCGv t1) \
{ \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
- tcg_gen_brcond_tl(cond, cpu_T[0], cpu_T[1], l1); \
- tcg_gen_movi_tl(cpu_T[0], 0); \
+ tcg_gen_brcond_tl(cond, t0, t1, l1); \
+ tcg_gen_movi_tl(t0, 0); \
tcg_gen_br(l2); \
gen_set_label(l1); \
- tcg_gen_movi_tl(cpu_T[0], 1); \
+ tcg_gen_movi_tl(t0, 1); \
gen_set_label(l2); \
}
OP_COND(eq, TCG_COND_EQ);
@@ -679,16 +767,16 @@ OP_COND(ltu, TCG_COND_LTU);
#undef OP_COND
#define OP_CONDI(name, cond) \
-void glue(gen_op_, name) (target_ulong val) \
+void glue(gen_op_, name) (TCGv t, target_ulong val) \
{ \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
- tcg_gen_brcondi_tl(cond, cpu_T[0], val, l1); \
- tcg_gen_movi_tl(cpu_T[0], 0); \
+ tcg_gen_brcondi_tl(cond, t, val, l1); \
+ tcg_gen_movi_tl(t, 0); \
tcg_gen_br(l2); \
gen_set_label(l1); \
- tcg_gen_movi_tl(cpu_T[0], 1); \
+ tcg_gen_movi_tl(t, 1); \
gen_set_label(l2); \
}
OP_CONDI(lti, TCG_COND_LT);
@@ -696,16 +784,16 @@ OP_CONDI(ltiu, TCG_COND_LTU);
#undef OP_CONDI
#define OP_CONDZ(name, cond) \
-void glue(gen_op_, name) (void) \
+void glue(gen_op_, name) (TCGv t) \
{ \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
\
- tcg_gen_brcondi_tl(cond, cpu_T[0], 0, l1); \
- tcg_gen_movi_tl(cpu_T[0], 0); \
+ tcg_gen_brcondi_tl(cond, t, 0, l1); \
+ tcg_gen_movi_tl(t, 0); \
tcg_gen_br(l2); \
gen_set_label(l1); \
- tcg_gen_movi_tl(cpu_T[0], 1); \
+ tcg_gen_movi_tl(t, 1); \
gen_set_label(l2); \
}
OP_CONDZ(gez, TCG_COND_GE);
@@ -783,7 +871,11 @@ static always_inline void save_cpu_state (DisasContext *ctx, int do_save_pc)
ctx->saved_pc = ctx->pc;
}
if (ctx->hflags != ctx->saved_hflags) {
- gen_op_save_state(ctx->hflags);
+ TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
+
+ tcg_gen_movi_i32(r_tmp, ctx->hflags);
+ tcg_gen_st_i32(r_tmp, cpu_env, offsetof(CPUState, hflags));
+ tcg_temp_free(r_tmp);
ctx->saved_hflags = ctx->hflags;
switch (ctx->hflags & MIPS_HFLAG_BMASK) {
case MIPS_HFLAG_BR:
@@ -815,7 +907,7 @@ static always_inline void
generate_exception_err (DisasContext *ctx, int excp, int err)
{
save_cpu_state(ctx, 1);
- tcg_gen_helper_0_2ii(do_raise_exception_err, excp, err);
+ tcg_gen_helper_0_ii(do_raise_exception_err, excp, err);
tcg_gen_helper_0_0(do_interrupt_restart);
tcg_gen_exit_tb(0);
}
@@ -824,15 +916,15 @@ static always_inline void
generate_exception (DisasContext *ctx, int excp)
{
save_cpu_state(ctx, 1);
- tcg_gen_helper_0_1i(do_raise_exception, excp);
+ tcg_gen_helper_0_i(do_raise_exception, excp);
tcg_gen_helper_0_0(do_interrupt_restart);
tcg_gen_exit_tb(0);
}
/* Addresses computation */
-static inline void gen_op_addr_add (void)
+static inline void gen_op_addr_add (TCGv t0, TCGv t1)
{
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_add_tl(t0, t0, t1);
#if defined(TARGET_MIPS64)
/* For compatibility with 32-bit code, data reference in user mode
@@ -840,22 +932,16 @@ static inline void gen_op_addr_add (void)
See the MIPS64 PRA manual, section 4.10. */
{
int l1 = gen_new_label();
-
- {
- TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
-
- tcg_gen_ld_i32(r_tmp, cpu_env, offsetof(CPUState, hflags));
- tcg_gen_andi_i32(r_tmp, r_tmp, MIPS_HFLAG_KSU);
- tcg_gen_brcondi_i32(TCG_COND_NE, r_tmp, MIPS_HFLAG_UM, l1);
- }
- {
- TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
-
- tcg_gen_ld_i32(r_tmp, cpu_env, offsetof(CPUState, CP0_Status));
- tcg_gen_andi_i32(r_tmp, r_tmp, (1 << CP0St_UX));
- tcg_gen_brcondi_i32(TCG_COND_NE, r_tmp, 0, l1);
- }
- tcg_gen_ext32s_i64(cpu_T[0], cpu_T[0]);
+ TCGv r_tmp = tcg_temp_local_new(TCG_TYPE_I32);
+
+ tcg_gen_ld_i32(r_tmp, cpu_env, offsetof(CPUState, hflags));
+ tcg_gen_andi_i32(r_tmp, r_tmp, MIPS_HFLAG_KSU);
+ tcg_gen_brcondi_i32(TCG_COND_NE, r_tmp, MIPS_HFLAG_UM, l1);
+ tcg_gen_ld_i32(r_tmp, cpu_env, offsetof(CPUState, CP0_Status));
+ tcg_gen_andi_i32(r_tmp, r_tmp, (1 << CP0St_UX));
+ tcg_gen_brcondi_i32(TCG_COND_NE, r_tmp, 0, l1);
+ tcg_temp_free(r_tmp);
+ tcg_gen_ext32s_i64(t0, t0);
gen_set_label(l1);
}
#endif
@@ -926,47 +1012,10 @@ static always_inline void check_mips_64(DisasContext *ctx)
}
/* load/store instructions. */
-#if defined(CONFIG_USER_ONLY)
-#define op_ldst(name) gen_op_##name##_raw()
-#define OP_LD_TABLE(width)
-#define OP_ST_TABLE(width)
-#else
-#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
-#define OP_LD_TABLE(width) \
-static GenOpFunc *gen_op_l##width[] = { \
- &gen_op_l##width##_kernel, \
- &gen_op_l##width##_super, \
- &gen_op_l##width##_user, \
-}
-#define OP_ST_TABLE(width) \
-static GenOpFunc *gen_op_s##width[] = { \
- &gen_op_s##width##_kernel, \
- &gen_op_s##width##_super, \
- &gen_op_s##width##_user, \
-}
-#endif
-
-#if defined(TARGET_MIPS64)
-OP_LD_TABLE(dl);
-OP_LD_TABLE(dr);
-OP_ST_TABLE(dl);
-OP_ST_TABLE(dr);
-#endif
-OP_LD_TABLE(wl);
-OP_LD_TABLE(wr);
-OP_ST_TABLE(wl);
-OP_ST_TABLE(wr);
-OP_LD_TABLE(wc1);
-OP_ST_TABLE(wc1);
-OP_LD_TABLE(dc1);
-OP_ST_TABLE(dc1);
-OP_LD_TABLE(uxc1);
-OP_ST_TABLE(uxc1);
-
#define OP_LD(insn,fname) \
-void inline op_ldst_##insn(DisasContext *ctx) \
+void inline op_ldst_##insn(TCGv t0, DisasContext *ctx) \
{ \
- tcg_gen_qemu_##fname(cpu_T[0], cpu_T[0], ctx->mem_idx); \
+ tcg_gen_qemu_##fname(t0, t0, ctx->mem_idx); \
}
OP_LD(lb,ld8s);
OP_LD(lbu,ld8u);
@@ -980,9 +1029,9 @@ OP_LD(ld,ld64);
#undef OP_LD
#define OP_ST(insn,fname) \
-void inline op_ldst_##insn(DisasContext *ctx) \
+void inline op_ldst_##insn(TCGv t0, TCGv t1, DisasContext *ctx) \
{ \
- tcg_gen_qemu_##fname(cpu_T[1], cpu_T[0], ctx->mem_idx); \
+ tcg_gen_qemu_##fname(t1, t0, ctx->mem_idx); \
}
OP_ST(sb,st8);
OP_ST(sh,st16);
@@ -993,11 +1042,11 @@ OP_ST(sd,st64);
#undef OP_ST
#define OP_LD_ATOMIC(insn,fname) \
-void inline op_ldst_##insn(DisasContext *ctx) \
+void inline op_ldst_##insn(TCGv t0, TCGv t1, DisasContext *ctx) \
{ \
- tcg_gen_mov_tl(cpu_T[1], cpu_T[0]); \
- tcg_gen_qemu_##fname(cpu_T[0], cpu_T[0], ctx->mem_idx); \
- tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUState, CP0_LLAddr)); \
+ tcg_gen_mov_tl(t1, t0); \
+ tcg_gen_qemu_##fname(t0, t0, ctx->mem_idx); \
+ tcg_gen_st_tl(t1, cpu_env, offsetof(CPUState, CP0_LLAddr)); \
}
OP_LD_ATOMIC(ll,ld32s);
#if defined(TARGET_MIPS64)
@@ -1006,26 +1055,26 @@ OP_LD_ATOMIC(lld,ld64);
#undef OP_LD_ATOMIC
#define OP_ST_ATOMIC(insn,fname,almask) \
-void inline op_ldst_##insn(DisasContext *ctx) \
+void inline op_ldst_##insn(TCGv t0, TCGv t1, DisasContext *ctx) \
{ \
TCGv r_tmp = tcg_temp_local_new(TCG_TYPE_TL); \
int l1 = gen_new_label(); \
int l2 = gen_new_label(); \
int l3 = gen_new_label(); \
\
- tcg_gen_andi_tl(r_tmp, cpu_T[0], almask); \
+ tcg_gen_andi_tl(r_tmp, t0, almask); \
tcg_gen_brcondi_tl(TCG_COND_EQ, r_tmp, 0, l1); \
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_BadVAddr)); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_BadVAddr)); \
generate_exception(ctx, EXCP_AdES); \
gen_set_label(l1); \
tcg_gen_ld_tl(r_tmp, cpu_env, offsetof(CPUState, CP0_LLAddr)); \
- tcg_gen_brcond_tl(TCG_COND_NE, cpu_T[0], r_tmp, l2); \
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, r_tmp, l2); \
tcg_temp_free(r_tmp); \
- tcg_gen_qemu_##fname(cpu_T[1], cpu_T[0], ctx->mem_idx); \
- tcg_gen_movi_tl(cpu_T[0], 1); \
+ tcg_gen_qemu_##fname(t1, t0, ctx->mem_idx); \
+ tcg_gen_movi_tl(t0, 1); \
tcg_gen_br(l3); \
gen_set_label(l2); \
- tcg_gen_movi_tl(cpu_T[0], 0); \
+ tcg_gen_movi_tl(t0, 0); \
gen_set_label(l3); \
}
OP_ST_ATOMIC(sc,st32,0x3);
@@ -1034,175 +1083,168 @@ OP_ST_ATOMIC(scd,st64,0x7);
#endif
#undef OP_ST_ATOMIC
-void inline op_ldst_lwc1(DisasContext *ctx)
-{
- op_ldst(lwc1);
-}
-
-void inline op_ldst_ldc1(DisasContext *ctx)
-{
- op_ldst(ldc1);
-}
-
-void inline op_ldst_swc1(DisasContext *ctx)
-{
- op_ldst(swc1);
-}
-
-void inline op_ldst_sdc1(DisasContext *ctx)
-{
- op_ldst(sdc1);
-}
-
/* Load and store */
static void gen_ldst (DisasContext *ctx, uint32_t opc, int rt,
int base, int16_t offset)
{
const char *opn = "ldst";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
if (base == 0) {
- tcg_gen_movi_tl(cpu_T[0], offset);
+ tcg_gen_movi_tl(t0, offset);
} else if (offset == 0) {
- gen_load_gpr(cpu_T[0], base);
+ gen_load_gpr(t0, base);
} else {
- gen_load_gpr(cpu_T[0], base);
- tcg_gen_movi_tl(cpu_T[1], offset);
- gen_op_addr_add();
+ gen_load_gpr(t0, base);
+ tcg_gen_movi_tl(t1, offset);
+ gen_op_addr_add(t0, t1);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_LWU:
- op_ldst_lwu(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lwu(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "lwu";
break;
case OPC_LD:
- op_ldst_ld(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_ld(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "ld";
break;
case OPC_LLD:
- op_ldst_lld(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lld(t0, t1, ctx);
+ gen_store_gpr(t0, rt);
opn = "lld";
break;
case OPC_SD:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst_sd(ctx);
+ gen_load_gpr(t1, rt);
+ op_ldst_sd(t0, t1, ctx);
opn = "sd";
break;
case OPC_SCD:
save_cpu_state(ctx, 1);
- gen_load_gpr(cpu_T[1], rt);
- op_ldst_scd(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ gen_load_gpr(t1, rt);
+ op_ldst_scd(t0, t1, ctx);
+ gen_store_gpr(t0, rt);
opn = "scd";
break;
case OPC_LDL:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(ldl);
- gen_store_gpr(cpu_T[1], rt);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2i(do_ldl, t1, t0, t1, ctx->mem_idx);
+ gen_store_gpr(t1, rt);
opn = "ldl";
break;
case OPC_SDL:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(sdl);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_0_2i(do_sdl, t0, t1, ctx->mem_idx);
opn = "sdl";
break;
case OPC_LDR:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(ldr);
- gen_store_gpr(cpu_T[1], rt);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2i(do_ldr, t1, t0, t1, ctx->mem_idx);
+ gen_store_gpr(t1, rt);
opn = "ldr";
break;
case OPC_SDR:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(sdr);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_0_2i(do_sdr, t0, t1, ctx->mem_idx);
opn = "sdr";
break;
#endif
case OPC_LW:
- op_ldst_lw(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lw(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "lw";
break;
case OPC_SW:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst_sw(ctx);
+ gen_load_gpr(t1, rt);
+ op_ldst_sw(t0, t1, ctx);
opn = "sw";
break;
case OPC_LH:
- op_ldst_lh(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lh(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "lh";
break;
case OPC_SH:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst_sh(ctx);
+ gen_load_gpr(t1, rt);
+ op_ldst_sh(t0, t1, ctx);
opn = "sh";
break;
case OPC_LHU:
- op_ldst_lhu(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lhu(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "lhu";
break;
case OPC_LB:
- op_ldst_lb(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lb(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "lb";
break;
case OPC_SB:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst_sb(ctx);
+ gen_load_gpr(t1, rt);
+ op_ldst_sb(t0, t1, ctx);
opn = "sb";
break;
case OPC_LBU:
- op_ldst_lbu(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_lbu(t0, ctx);
+ gen_store_gpr(t0, rt);
opn = "lbu";
break;
case OPC_LWL:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(lwl);
- gen_store_gpr(cpu_T[1], rt);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2i(do_lwl, t1, t0, t1, ctx->mem_idx);
+ gen_store_gpr(t1, rt);
opn = "lwl";
break;
case OPC_SWL:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(swl);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_0_2i(do_swl, t0, t1, ctx->mem_idx);
opn = "swl";
break;
case OPC_LWR:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(lwr);
- gen_store_gpr(cpu_T[1], rt);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2i(do_lwr, t1, t0, t1, ctx->mem_idx);
+ gen_store_gpr(t1, rt);
opn = "lwr";
break;
case OPC_SWR:
- gen_load_gpr(cpu_T[1], rt);
- op_ldst(swr);
+ save_cpu_state(ctx, 1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_0_2i(do_swr, t0, t1, ctx->mem_idx);
opn = "swr";
break;
case OPC_LL:
- op_ldst_ll(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ op_ldst_ll(t0, t1, ctx);
+ gen_store_gpr(t0, rt);
opn = "ll";
break;
case OPC_SC:
save_cpu_state(ctx, 1);
- gen_load_gpr(cpu_T[1], rt);
- op_ldst_sc(ctx);
- gen_store_gpr(cpu_T[0], rt);
+ gen_load_gpr(t1, rt);
+ op_ldst_sc(t0, t1, ctx);
+ gen_store_gpr(t0, rt);
opn = "sc";
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
MIPS_DEBUG("%s %s, %d(%s)", opn, regnames[rt], offset, regnames[base]);
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* Load and store */
@@ -1210,45 +1252,51 @@ static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
int base, int16_t offset)
{
const char *opn = "flt_ldst";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
if (base == 0) {
- tcg_gen_movi_tl(cpu_T[0], offset);
+ tcg_gen_movi_tl(t0, offset);
} else if (offset == 0) {
- gen_load_gpr(cpu_T[0], base);
+ gen_load_gpr(t0, base);
} else {
- gen_load_gpr(cpu_T[0], base);
- tcg_gen_movi_tl(cpu_T[1], offset);
- gen_op_addr_add();
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, base);
+ tcg_gen_movi_tl(t1, offset);
+ gen_op_addr_add(t0, t1);
+ tcg_temp_free(t1);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
case OPC_LWC1:
- op_ldst_lwc1(ctx);
- GEN_STORE_FTN_FREG(ft, WT0);
+ tcg_gen_qemu_ld32s(fpu32_T[0], t0, ctx->mem_idx);
+ gen_store_fpr32(fpu32_T[0], ft);
opn = "lwc1";
break;
case OPC_SWC1:
- GEN_LOAD_FREG_FTN(WT0, ft);
- op_ldst_swc1(ctx);
+ gen_load_fpr32(fpu32_T[0], ft);
+ tcg_gen_qemu_st32(fpu32_T[0], t0, ctx->mem_idx);
opn = "swc1";
break;
case OPC_LDC1:
- op_ldst_ldc1(ctx);
- GEN_STORE_FTN_FREG(ft, DT0);
+ tcg_gen_qemu_ld64(fpu64_T[0], t0, ctx->mem_idx);
+ gen_store_fpr64(ctx, fpu64_T[0], ft);
opn = "ldc1";
break;
case OPC_SDC1:
- GEN_LOAD_FREG_FTN(DT0, ft);
- op_ldst_sdc1(ctx);
+ gen_load_fpr64(ctx, fpu64_T[0], ft);
+ tcg_gen_qemu_st64(fpu64_T[0], t0, ctx->mem_idx);
opn = "sdc1";
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
MIPS_DEBUG("%s %s, %d(%s)", opn, fregnames[ft], offset, regnames[base]);
+ out:
+ tcg_temp_free(t0);
}
/* Arithmetic with immediate operand */
@@ -1257,12 +1305,13 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
{
target_ulong uimm;
const char *opn = "imm arith";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) {
/* If no destination, treat it as a NOP.
For addi, we must generate the overflow exception when needed. */
MIPS_DEBUG("NOP");
- return;
+ goto out;
}
uimm = (uint16_t)imm;
switch (opc) {
@@ -1275,15 +1324,14 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
case OPC_SLTI:
case OPC_SLTIU:
uimm = (target_long)imm; /* Sign extend to 32/64 bits */
- tcg_gen_movi_tl(cpu_T[1], uimm);
/* Fall through. */
case OPC_ANDI:
case OPC_ORI:
case OPC_XORI:
- gen_load_gpr(cpu_T[0], rs);
+ gen_load_gpr(t0, rs);
break;
case OPC_LUI:
- tcg_gen_movi_tl(cpu_T[0], imm << 16);
+ tcg_gen_movi_tl(t0, imm << 16);
break;
case OPC_SLL:
case OPC_SRA:
@@ -1297,7 +1345,7 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
case OPC_DSRL32:
#endif
uimm &= 0x1f;
- gen_load_gpr(cpu_T[0], rs);
+ gen_load_gpr(t0, rs);
break;
}
switch (opc) {
@@ -1308,12 +1356,12 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
save_cpu_state(ctx, 1);
- tcg_gen_ext32s_tl(r_tmp1, cpu_T[0]);
- tcg_gen_addi_tl(cpu_T[0], r_tmp1, uimm);
+ tcg_gen_ext32s_tl(r_tmp1, t0);
+ tcg_gen_addi_tl(t0, r_tmp1, uimm);
tcg_gen_xori_tl(r_tmp1, r_tmp1, uimm);
tcg_gen_xori_tl(r_tmp1, r_tmp1, -1);
- tcg_gen_xori_tl(r_tmp2, cpu_T[0], uimm);
+ tcg_gen_xori_tl(r_tmp2, t0, uimm);
tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
tcg_temp_free(r_tmp2);
tcg_gen_shri_tl(r_tmp1, r_tmp1, 31);
@@ -1323,14 +1371,14 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
}
opn = "addi";
break;
case OPC_ADDIU:
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], uimm);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_addi_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "addiu";
break;
#if defined(TARGET_MIPS64)
@@ -1341,12 +1389,12 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
save_cpu_state(ctx, 1);
- tcg_gen_mov_tl(r_tmp1, cpu_T[0]);
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_mov_tl(r_tmp1, t0);
+ tcg_gen_addi_tl(t0, t0, uimm);
tcg_gen_xori_tl(r_tmp1, r_tmp1, uimm);
tcg_gen_xori_tl(r_tmp1, r_tmp1, -1);
- tcg_gen_xori_tl(r_tmp2, cpu_T[0], uimm);
+ tcg_gen_xori_tl(r_tmp2, t0, uimm);
tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
tcg_temp_free(r_tmp2);
tcg_gen_shri_tl(r_tmp1, r_tmp1, 63);
@@ -1359,51 +1407,51 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
opn = "daddi";
break;
case OPC_DADDIU:
- tcg_gen_addi_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_addi_tl(t0, t0, uimm);
opn = "daddiu";
break;
#endif
case OPC_SLTI:
- gen_op_lti(uimm);
+ gen_op_lti(t0, uimm);
opn = "slti";
break;
case OPC_SLTIU:
- gen_op_ltiu(uimm);
+ gen_op_ltiu(t0, uimm);
opn = "sltiu";
break;
case OPC_ANDI:
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_andi_tl(t0, t0, uimm);
opn = "andi";
break;
case OPC_ORI:
- tcg_gen_ori_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_ori_tl(t0, t0, uimm);
opn = "ori";
break;
case OPC_XORI:
- tcg_gen_xori_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_xori_tl(t0, t0, uimm);
opn = "xori";
break;
case OPC_LUI:
opn = "lui";
break;
case OPC_SLL:
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], uimm);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shli_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "sll";
break;
case OPC_SRA:
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], uimm);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_sari_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "sra";
break;
case OPC_SRL:
switch ((ctx->opcode >> 21) & 0x1f) {
case 0:
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "srl";
break;
case 1:
@@ -1413,21 +1461,21 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
- tcg_gen_trunc_tl_i32(r_tmp1, cpu_T[0]);
+ tcg_gen_trunc_tl_i32(r_tmp1, t0);
tcg_gen_movi_i32(r_tmp2, 0x20);
tcg_gen_subi_i32(r_tmp2, r_tmp2, uimm);
tcg_gen_shl_i32(r_tmp2, r_tmp1, r_tmp2);
tcg_gen_shri_i32(r_tmp1, r_tmp1, uimm);
tcg_gen_or_i32(r_tmp1, r_tmp1, r_tmp2);
- tcg_gen_ext_i32_tl(cpu_T[0], r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
tcg_temp_free(r_tmp1);
tcg_temp_free(r_tmp2);
}
opn = "rotr";
} else {
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "srl";
}
break;
@@ -1439,17 +1487,17 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
break;
#if defined(TARGET_MIPS64)
case OPC_DSLL:
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_shli_tl(t0, t0, uimm);
opn = "dsll";
break;
case OPC_DSRA:
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_sari_tl(t0, t0, uimm);
opn = "dsra";
break;
case OPC_DSRL:
switch ((ctx->opcode >> 21) & 0x1f) {
case 0:
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_shri_tl(t0, t0, uimm);
opn = "dsrl";
break;
case 1:
@@ -1460,14 +1508,14 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
tcg_gen_movi_tl(r_tmp1, 0x40);
tcg_gen_subi_tl(r_tmp1, r_tmp1, uimm);
- tcg_gen_shl_tl(r_tmp1, cpu_T[0], r_tmp1);
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], r_tmp1);
+ tcg_gen_shl_tl(r_tmp1, t0, r_tmp1);
+ tcg_gen_shri_tl(t0, t0, uimm);
+ tcg_gen_or_tl(t0, t0, r_tmp1);
tcg_temp_free(r_tmp1);
}
opn = "drotr";
} else {
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm);
+ tcg_gen_shri_tl(t0, t0, uimm);
opn = "dsrl";
}
break;
@@ -1478,17 +1526,17 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
}
break;
case OPC_DSLL32:
- tcg_gen_shli_tl(cpu_T[0], cpu_T[0], uimm + 32);
+ tcg_gen_shli_tl(t0, t0, uimm + 32);
opn = "dsll32";
break;
case OPC_DSRA32:
- tcg_gen_sari_tl(cpu_T[0], cpu_T[0], uimm + 32);
+ tcg_gen_sari_tl(t0, t0, uimm + 32);
opn = "dsra32";
break;
case OPC_DSRL32:
switch ((ctx->opcode >> 21) & 0x1f) {
case 0:
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm + 32);
+ tcg_gen_shri_tl(t0, t0, uimm + 32);
opn = "dsrl32";
break;
case 1:
@@ -1501,14 +1549,14 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
tcg_gen_movi_tl(r_tmp2, 32);
tcg_gen_addi_tl(r_tmp2, r_tmp2, uimm);
tcg_gen_sub_tl(r_tmp1, r_tmp1, r_tmp2);
- tcg_gen_shl_tl(r_tmp1, cpu_T[0], r_tmp1);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[0], r_tmp2);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], r_tmp1);
+ tcg_gen_shl_tl(r_tmp1, t0, r_tmp1);
+ tcg_gen_shr_tl(t0, t0, r_tmp2);
+ tcg_gen_or_tl(t0, t0, r_tmp1);
tcg_temp_free(r_tmp1);
tcg_temp_free(r_tmp2);
opn = "drotr32";
} else {
- tcg_gen_shri_tl(cpu_T[0], cpu_T[0], uimm + 32);
+ tcg_gen_shri_tl(t0, t0, uimm + 32);
opn = "dsrl32";
}
break;
@@ -1522,10 +1570,12 @@ static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
- gen_store_gpr(cpu_T[0], rt);
+ gen_store_gpr(t0, rt);
MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm);
+ out:
+ tcg_temp_free(t0);
}
/* Arithmetic */
@@ -1533,22 +1583,24 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char *opn = "arith";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB
&& opc != OPC_DADD && opc != OPC_DSUB) {
/* If no destination, treat it as a NOP.
For add & sub, we must generate the overflow exception when needed. */
MIPS_DEBUG("NOP");
- return;
+ goto out;
}
- gen_load_gpr(cpu_T[0], rs);
+ gen_load_gpr(t0, rs);
/* Special-case the conventional move operation. */
if (rt == 0 && (opc == OPC_ADDU || opc == OPC_DADDU
|| opc == OPC_SUBU || opc == OPC_DSUBU)) {
- gen_store_gpr(cpu_T[0], rd);
- return;
+ gen_store_gpr(t0, rd);
+ goto out;
}
- gen_load_gpr(cpu_T[1], rt);
+ gen_load_gpr(t1, rt);
switch (opc) {
case OPC_ADD:
{
@@ -1557,13 +1609,13 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
save_cpu_state(ctx, 1);
- tcg_gen_ext32s_tl(r_tmp1, cpu_T[0]);
- tcg_gen_ext32s_tl(r_tmp2, cpu_T[1]);
- tcg_gen_add_tl(cpu_T[0], r_tmp1, r_tmp2);
+ tcg_gen_ext32s_tl(r_tmp1, t0);
+ tcg_gen_ext32s_tl(r_tmp2, t1);
+ tcg_gen_add_tl(t0, r_tmp1, r_tmp2);
- tcg_gen_xor_tl(r_tmp1, r_tmp1, cpu_T[1]);
+ tcg_gen_xor_tl(r_tmp1, r_tmp1, t1);
tcg_gen_xori_tl(r_tmp1, r_tmp1, -1);
- tcg_gen_xor_tl(r_tmp2, cpu_T[0], cpu_T[1]);
+ tcg_gen_xor_tl(r_tmp2, t0, t1);
tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
tcg_temp_free(r_tmp2);
tcg_gen_shri_tl(r_tmp1, r_tmp1, 31);
@@ -1573,15 +1625,15 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
}
opn = "add";
break;
case OPC_ADDU:
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "addu";
break;
case OPC_SUB:
@@ -1591,12 +1643,12 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
save_cpu_state(ctx, 1);
- tcg_gen_ext32s_tl(r_tmp1, cpu_T[0]);
- tcg_gen_ext32s_tl(r_tmp2, cpu_T[1]);
- tcg_gen_sub_tl(cpu_T[0], r_tmp1, r_tmp2);
+ tcg_gen_ext32s_tl(r_tmp1, t0);
+ tcg_gen_ext32s_tl(r_tmp2, t1);
+ tcg_gen_sub_tl(t0, r_tmp1, r_tmp2);
- tcg_gen_xor_tl(r_tmp2, r_tmp1, cpu_T[1]);
- tcg_gen_xor_tl(r_tmp1, r_tmp1, cpu_T[0]);
+ tcg_gen_xor_tl(r_tmp2, r_tmp1, t1);
+ tcg_gen_xor_tl(r_tmp1, r_tmp1, t0);
tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
tcg_temp_free(r_tmp2);
tcg_gen_shri_tl(r_tmp1, r_tmp1, 31);
@@ -1606,15 +1658,15 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
generate_exception(ctx, EXCP_OVERFLOW);
gen_set_label(l1);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
}
opn = "sub";
break;
case OPC_SUBU:
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_sub_tl(t0, t0, t1);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "subu";
break;
#if defined(TARGET_MIPS64)
@@ -1625,12 +1677,12 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
save_cpu_state(ctx, 1);
- tcg_gen_mov_tl(r_tmp1, cpu_T[0]);
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(r_tmp1, t0);
+ tcg_gen_add_tl(t0, t0, t1);
- tcg_gen_xor_tl(r_tmp1, r_tmp1, cpu_T[1]);
+ tcg_gen_xor_tl(r_tmp1, r_tmp1, t1);
tcg_gen_xori_tl(r_tmp1, r_tmp1, -1);
- tcg_gen_xor_tl(r_tmp2, cpu_T[0], cpu_T[1]);
+ tcg_gen_xor_tl(r_tmp2, t0, t1);
tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
tcg_temp_free(r_tmp2);
tcg_gen_shri_tl(r_tmp1, r_tmp1, 63);
@@ -1643,7 +1695,7 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
opn = "dadd";
break;
case OPC_DADDU:
- tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_add_tl(t0, t0, t1);
opn = "daddu";
break;
case OPC_DSUB:
@@ -1653,11 +1705,11 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
save_cpu_state(ctx, 1);
- tcg_gen_mov_tl(r_tmp1, cpu_T[0]);
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(r_tmp1, t0);
+ tcg_gen_sub_tl(t0, t0, t1);
- tcg_gen_xor_tl(r_tmp2, r_tmp1, cpu_T[1]);
- tcg_gen_xor_tl(r_tmp1, r_tmp1, cpu_T[0]);
+ tcg_gen_xor_tl(r_tmp2, r_tmp1, t1);
+ tcg_gen_xor_tl(r_tmp1, r_tmp1, t0);
tcg_gen_and_tl(r_tmp1, r_tmp1, r_tmp2);
tcg_temp_free(r_tmp2);
tcg_gen_shri_tl(r_tmp1, r_tmp1, 63);
@@ -1670,48 +1722,48 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
opn = "dsub";
break;
case OPC_DSUBU:
- tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_sub_tl(t0, t0, t1);
opn = "dsubu";
break;
#endif
case OPC_SLT:
- gen_op_lt();
+ gen_op_lt(t0, t1);
opn = "slt";
break;
case OPC_SLTU:
- gen_op_ltu();
+ gen_op_ltu(t0, t1);
opn = "sltu";
break;
case OPC_AND:
- tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_and_tl(t0, t0, t1);
opn = "and";
break;
case OPC_NOR:
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_not_tl(t0, t0);
opn = "nor";
break;
case OPC_OR:
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_or_tl(t0, t0, t1);
opn = "or";
break;
case OPC_XOR:
- tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+ tcg_gen_xor_tl(t0, t0, t1);
opn = "xor";
break;
case OPC_MUL:
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_mul_tl(t0, t0, t1);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "mul";
break;
case OPC_MOVN:
{
int l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, l1);
- gen_store_gpr(cpu_T[0], rd);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ gen_store_gpr(t0, rd);
gen_set_label(l1);
}
opn = "movn";
@@ -1720,34 +1772,34 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
{
int l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_NE, cpu_T[1], 0, l1);
- gen_store_gpr(cpu_T[0], rd);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ gen_store_gpr(t0, rd);
gen_set_label(l1);
}
opn = "movz";
goto print;
case OPC_SLLV:
- tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x1f);
- tcg_gen_shl_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shl_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "sllv";
break;
case OPC_SRAV:
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x1f);
- tcg_gen_sar_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_sar_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "srav";
break;
case OPC_SRLV:
switch ((ctx->opcode >> 6) & 0x1f) {
case 0:
- tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x1f);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "srlv";
break;
case 1:
@@ -1756,35 +1808,35 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
int l2 = gen_new_label();
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x1f);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[0], 0, l1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
{
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I32);
- tcg_gen_trunc_tl_i32(r_tmp1, cpu_T[0]);
- tcg_gen_trunc_tl_i32(r_tmp2, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(r_tmp1, t0);
+ tcg_gen_trunc_tl_i32(r_tmp2, t1);
tcg_gen_movi_i32(r_tmp3, 0x20);
tcg_gen_sub_i32(r_tmp3, r_tmp3, r_tmp1);
tcg_gen_shl_i32(r_tmp3, r_tmp2, r_tmp3);
tcg_gen_shr_i32(r_tmp1, r_tmp2, r_tmp1);
tcg_gen_or_i32(r_tmp1, r_tmp1, r_tmp3);
- tcg_gen_ext_i32_tl(cpu_T[0], r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
tcg_temp_free(r_tmp1);
tcg_temp_free(r_tmp2);
tcg_temp_free(r_tmp3);
tcg_gen_br(l2);
}
gen_set_label(l1);
- tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(t0, t1);
gen_set_label(l2);
opn = "rotrv";
} else {
- tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x1f);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(t0, t0);
opn = "srlv";
}
break;
@@ -1796,20 +1848,20 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
break;
#if defined(TARGET_MIPS64)
case OPC_DSLLV:
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x3f);
- tcg_gen_shl_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shl_tl(t0, t1, t0);
opn = "dsllv";
break;
case OPC_DSRAV:
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x3f);
- tcg_gen_sar_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_sar_tl(t0, t1, t0);
opn = "dsrav";
break;
case OPC_DSRLV:
switch ((ctx->opcode >> 6) & 0x1f) {
case 0:
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x3f);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shr_tl(t0, t1, t0);
opn = "dsrlv";
break;
case 1:
@@ -1818,26 +1870,26 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
int l1 = gen_new_label();
int l2 = gen_new_label();
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x3f);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[0], 0, l1);
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
{
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_TL);
tcg_gen_movi_tl(r_tmp1, 0x40);
- tcg_gen_sub_tl(r_tmp1, r_tmp1, cpu_T[0]);
- tcg_gen_shl_tl(r_tmp1, cpu_T[1], r_tmp1);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
- tcg_gen_or_tl(cpu_T[0], cpu_T[0], r_tmp1);
+ tcg_gen_sub_tl(r_tmp1, r_tmp1, t0);
+ tcg_gen_shl_tl(r_tmp1, t1, r_tmp1);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_or_tl(t0, t0, r_tmp1);
tcg_temp_free(r_tmp1);
tcg_gen_br(l2);
}
gen_set_label(l1);
- tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(t0, t1);
gen_set_label(l2);
opn = "drotrv";
} else {
- tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0x3f);
- tcg_gen_shr_tl(cpu_T[0], cpu_T[1], cpu_T[0]);
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shr_tl(t0, t1, t0);
opn = "dsrlv";
}
break;
@@ -1851,85 +1903,93 @@ static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
- gen_store_gpr(cpu_T[0], rd);
+ gen_store_gpr(t0, rd);
print:
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* Arithmetic on HI/LO registers */
static void gen_HILO (DisasContext *ctx, uint32_t opc, int reg)
{
const char *opn = "hilo";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) {
/* Treat as NOP. */
MIPS_DEBUG("NOP");
- return;
+ goto out;
}
switch (opc) {
case OPC_MFHI:
- gen_load_HI(cpu_T[0], 0);
- gen_store_gpr(cpu_T[0], reg);
+ gen_load_HI(t0, 0);
+ gen_store_gpr(t0, reg);
opn = "mfhi";
break;
case OPC_MFLO:
- gen_load_LO(cpu_T[0], 0);
- gen_store_gpr(cpu_T[0], reg);
+ gen_load_LO(t0, 0);
+ gen_store_gpr(t0, reg);
opn = "mflo";
break;
case OPC_MTHI:
- gen_load_gpr(cpu_T[0], reg);
- gen_store_HI(cpu_T[0], 0);
+ gen_load_gpr(t0, reg);
+ gen_store_HI(t0, 0);
opn = "mthi";
break;
case OPC_MTLO:
- gen_load_gpr(cpu_T[0], reg);
- gen_store_LO(cpu_T[0], 0);
+ gen_load_gpr(t0, reg);
+ gen_store_LO(t0, 0);
opn = "mtlo";
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
MIPS_DEBUG("%s %s", opn, regnames[reg]);
+ out:
+ tcg_temp_free(t0);
}
static void gen_muldiv (DisasContext *ctx, uint32_t opc,
int rs, int rt)
{
const char *opn = "mul/div";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
- gen_load_gpr(cpu_T[0], rs);
- gen_load_gpr(cpu_T[1], rt);
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
switch (opc) {
case OPC_DIV:
{
int l1 = gen_new_label();
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, l1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
{
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_ext_tl_i64(r_tmp1, cpu_T[0]);
- tcg_gen_ext_tl_i64(r_tmp2, cpu_T[1]);
+ tcg_gen_ext_tl_i64(r_tmp1, t0);
+ tcg_gen_ext_tl_i64(r_tmp2, t1);
tcg_gen_div_i64(r_tmp3, r_tmp1, r_tmp2);
tcg_gen_rem_i64(r_tmp2, r_tmp1, r_tmp2);
- tcg_gen_trunc_i64_tl(cpu_T[0], r_tmp3);
- tcg_gen_trunc_i64_tl(cpu_T[1], r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp3);
+ tcg_gen_trunc_i64_tl(t1, r_tmp2);
tcg_temp_free(r_tmp1);
tcg_temp_free(r_tmp2);
tcg_temp_free(r_tmp3);
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- gen_store_LO(cpu_T[0], 0);
- gen_store_HI(cpu_T[1], 0);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
}
gen_set_label(l1);
}
@@ -1939,35 +1999,71 @@ static void gen_muldiv (DisasContext *ctx, uint32_t opc,
{
int l1 = gen_new_label();
- tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, l1);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
{
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I32);
- tcg_gen_trunc_tl_i32(r_tmp1, cpu_T[0]);
- tcg_gen_trunc_tl_i32(r_tmp2, cpu_T[1]);
+ tcg_gen_trunc_tl_i32(r_tmp1, t0);
+ tcg_gen_trunc_tl_i32(r_tmp2, t1);
tcg_gen_divu_i32(r_tmp3, r_tmp1, r_tmp2);
tcg_gen_remu_i32(r_tmp1, r_tmp1, r_tmp2);
- tcg_gen_ext_i32_tl(cpu_T[0], r_tmp3);
- tcg_gen_ext_i32_tl(cpu_T[1], r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp3);
+ tcg_gen_ext_i32_tl(t1, r_tmp1);
tcg_temp_free(r_tmp1);
tcg_temp_free(r_tmp2);
tcg_temp_free(r_tmp3);
- gen_store_LO(cpu_T[0], 0);
- gen_store_HI(cpu_T[1], 0);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
}
gen_set_label(l1);
}
opn = "divu";
break;
case OPC_MULT:
- gen_op_mult();
+ {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_ext_tl_i64(r_tmp1, t0);
+ tcg_gen_ext_tl_i64(r_tmp2, t1);
+ tcg_gen_mul_i64(r_tmp1, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp1);
+ tcg_gen_shri_i64(r_tmp1, r_tmp1, 32);
+ tcg_gen_trunc_i64_tl(t1, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
+ }
opn = "mult";
break;
case OPC_MULTU:
- gen_op_multu();
+ {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(r_tmp1, t0);
+ tcg_gen_extu_tl_i64(r_tmp2, t1);
+ tcg_gen_mul_i64(r_tmp1, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp1);
+ tcg_gen_shri_i64(r_tmp1, r_tmp1, 32);
+ tcg_gen_trunc_i64_tl(t1, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
+ }
opn = "multu";
break;
#if defined(TARGET_MIPS64)
@@ -1975,16 +2071,16 @@ static void gen_muldiv (DisasContext *ctx, uint32_t opc,
{
int l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
{
int l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_NE, cpu_T[0], -1LL << 63, l2);
- tcg_gen_brcondi_tl(TCG_COND_NE, cpu_T[1], -1LL, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
{
- tcg_gen_movi_tl(cpu_T[1], 0);
- gen_store_LO(cpu_T[0], 0);
- gen_store_HI(cpu_T[1], 0);
+ tcg_gen_movi_tl(t1, 0);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
tcg_gen_br(l1);
}
gen_set_label(l2);
@@ -1992,8 +2088,8 @@ static void gen_muldiv (DisasContext *ctx, uint32_t opc,
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_div_i64(r_tmp1, cpu_T[0], cpu_T[1]);
- tcg_gen_rem_i64(r_tmp2, cpu_T[0], cpu_T[1]);
+ tcg_gen_div_i64(r_tmp1, t0, t1);
+ tcg_gen_rem_i64(r_tmp2, t0, t1);
gen_store_LO(r_tmp1, 0);
gen_store_HI(r_tmp2, 0);
tcg_temp_free(r_tmp1);
@@ -2008,13 +2104,13 @@ static void gen_muldiv (DisasContext *ctx, uint32_t opc,
{
int l1 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
{
TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
- tcg_gen_divu_i64(r_tmp1, cpu_T[0], cpu_T[1]);
- tcg_gen_remu_i64(r_tmp2, cpu_T[0], cpu_T[1]);
+ tcg_gen_divu_i64(r_tmp1, t0, t1);
+ tcg_gen_remu_i64(r_tmp2, t0, t1);
gen_store_LO(r_tmp1, 0);
gen_store_HI(r_tmp2, 0);
tcg_temp_free(r_tmp1);
tcg_temp_free(r_tmp2);
opn = "ddivu";
break;
case OPC_DMULT:
- gen_op_dmult();
+ tcg_gen_helper_0_2(do_dmult, t0, t1);
opn = "dmult";
break;
case OPC_DMULTU:
- gen_op_dmultu();
+ tcg_gen_helper_0_2(do_dmultu, t0, t1);
opn = "dmultu";
break;
#endif
case OPC_MADD:
- gen_op_madd();
+ {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_ext_tl_i64(r_tmp1, t0);
+ tcg_gen_ext_tl_i64(r_tmp2, t1);
+ tcg_gen_mul_i64(r_tmp1, r_tmp1, r_tmp2);
+ gen_load_LO(t0, 0);
+ gen_load_HI(t1, 0);
+ tcg_gen_extu_tl_i64(r_tmp2, t0);
+ tcg_gen_extu_tl_i64(r_tmp3, t1);
+ tcg_gen_shli_i64(r_tmp3, r_tmp3, 32);
+ tcg_gen_or_i64(r_tmp2, r_tmp2, r_tmp3);
+ tcg_temp_free(r_tmp3);
+ tcg_gen_add_i64(r_tmp1, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp1);
+ tcg_gen_shri_i64(r_tmp1, r_tmp1, 32);
+ tcg_gen_trunc_i64_tl(t1, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
+ }
opn = "madd";
break;
case OPC_MADDU:
- gen_op_maddu();
+ {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(r_tmp1, t0);
+ tcg_gen_extu_tl_i64(r_tmp2, t1);
+ tcg_gen_mul_i64(r_tmp1, r_tmp1, r_tmp2);
+ gen_load_LO(t0, 0);
+ gen_load_HI(t1, 0);
+ tcg_gen_extu_tl_i64(r_tmp2, t0);
+ tcg_gen_extu_tl_i64(r_tmp3, t1);
+ tcg_gen_shli_i64(r_tmp3, r_tmp3, 32);
+ tcg_gen_or_i64(r_tmp2, r_tmp2, r_tmp3);
+ tcg_temp_free(r_tmp3);
+ tcg_gen_add_i64(r_tmp1, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp1);
+ tcg_gen_shri_i64(r_tmp1, r_tmp1, 32);
+ tcg_gen_trunc_i64_tl(t1, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
+ }
opn = "maddu";
break;
case OPC_MSUB:
- gen_op_msub();
+ {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_ext_tl_i64(r_tmp1, t0);
+ tcg_gen_ext_tl_i64(r_tmp2, t1);
+ tcg_gen_mul_i64(r_tmp1, r_tmp1, r_tmp2);
+ gen_load_LO(t0, 0);
+ gen_load_HI(t1, 0);
+ tcg_gen_extu_tl_i64(r_tmp2, t0);
+ tcg_gen_extu_tl_i64(r_tmp3, t1);
+ tcg_gen_shli_i64(r_tmp3, r_tmp3, 32);
+ tcg_gen_or_i64(r_tmp2, r_tmp2, r_tmp3);
+ tcg_temp_free(r_tmp3);
+ tcg_gen_sub_i64(r_tmp1, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp1);
+ tcg_gen_shri_i64(r_tmp1, r_tmp1, 32);
+ tcg_gen_trunc_i64_tl(t1, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
+ }
opn = "msub";
break;
case OPC_MSUBU:
- gen_op_msubu();
+ {
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I64);
+ TCGv r_tmp3 = tcg_temp_new(TCG_TYPE_I64);
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(r_tmp1, t0);
+ tcg_gen_extu_tl_i64(r_tmp2, t1);
+ tcg_gen_mul_i64(r_tmp1, r_tmp1, r_tmp2);
+ gen_load_LO(t0, 0);
+ gen_load_HI(t1, 0);
+ tcg_gen_extu_tl_i64(r_tmp2, t0);
+ tcg_gen_extu_tl_i64(r_tmp3, t1);
+ tcg_gen_shli_i64(r_tmp3, r_tmp3, 32);
+ tcg_gen_or_i64(r_tmp2, r_tmp2, r_tmp3);
+ tcg_temp_free(r_tmp3);
+ tcg_gen_sub_i64(r_tmp1, r_tmp1, r_tmp2);
+ tcg_temp_free(r_tmp2);
+ tcg_gen_trunc_i64_tl(t0, r_tmp1);
+ tcg_gen_shri_i64(r_tmp1, r_tmp1, 32);
+ tcg_gen_trunc_i64_tl(t1, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ gen_store_LO(t0, 0);
+ gen_store_HI(t1, 0);
+ }
opn = "msubu";
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
MIPS_DEBUG("%s %s %s", opn, regnames[rs], regnames[rt]);
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char *opn = "mul vr54xx";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
- gen_load_gpr(cpu_T[0], rs);
- gen_load_gpr(cpu_T[1], rt);
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
switch (opc) {
case OPC_VR54XX_MULS:
- gen_op_muls();
+ tcg_gen_helper_1_2(do_muls, t0, t0, t1);
opn = "muls";
break;
case OPC_VR54XX_MULSU:
- gen_op_mulsu();
+ tcg_gen_helper_1_2(do_mulsu, t0, t0, t1);
opn = "mulsu";
break;
case OPC_VR54XX_MACC:
- gen_op_macc();
+ tcg_gen_helper_1_2(do_macc, t0, t0, t1);
opn = "macc";
break;
case OPC_VR54XX_MACCU:
- gen_op_maccu();
+ tcg_gen_helper_1_2(do_maccu, t0, t0, t1);
opn = "maccu";
break;
case OPC_VR54XX_MSAC:
- gen_op_msac();
+ tcg_gen_helper_1_2(do_msac, t0, t0, t1);
opn = "msac";
break;
case OPC_VR54XX_MSACU:
- gen_op_msacu();
+ tcg_gen_helper_1_2(do_msacu, t0, t0, t1);
opn = "msacu";
break;
case OPC_VR54XX_MULHI:
- gen_op_mulhi();
+ tcg_gen_helper_1_2(do_mulhi, t0, t0, t1);
opn = "mulhi";
break;
case OPC_VR54XX_MULHIU:
- gen_op_mulhiu();
+ tcg_gen_helper_1_2(do_mulhiu, t0, t0, t1);
opn = "mulhiu";
break;
case OPC_VR54XX_MULSHI:
- gen_op_mulshi();
+ tcg_gen_helper_1_2(do_mulshi, t0, t0, t1);
opn = "mulshi";
break;
case OPC_VR54XX_MULSHIU:
- gen_op_mulshiu();
+ tcg_gen_helper_1_2(do_mulshiu, t0, t0, t1);
opn = "mulshiu";
break;
case OPC_VR54XX_MACCHI:
- gen_op_macchi();
+ tcg_gen_helper_1_2(do_macchi, t0, t0, t1);
opn = "macchi";
break;
case OPC_VR54XX_MACCHIU:
- gen_op_macchiu();
+ tcg_gen_helper_1_2(do_macchiu, t0, t0, t1);
opn = "macchiu";
break;
case OPC_VR54XX_MSACHI:
- gen_op_msachi();
+ tcg_gen_helper_1_2(do_msachi, t0, t0, t1);
opn = "msachi";
break;
case OPC_VR54XX_MSACHIU:
- gen_op_msachiu();
+ tcg_gen_helper_1_2(do_msachiu, t0, t0, t1);
opn = "msachiu";
break;
default:
MIPS_INVAL("mul vr54xx");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
- gen_store_gpr(cpu_T[0], rd);
+ gen_store_gpr(t0, rd);
MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]);
+
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
static void gen_cl (DisasContext *ctx, uint32_t opc,
int rd, int rs)
{
const char *opn = "CLx";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
if (rd == 0) {
/* Treat as NOP. */
MIPS_DEBUG("NOP");
- return;
+ goto out;
}
- gen_load_gpr(cpu_T[0], rs);
+ gen_load_gpr(t0, rs);
switch (opc) {
case OPC_CLO:
- tcg_gen_helper_0_0(do_clo);
+ tcg_gen_helper_1_1(do_clo, t0, t0);
opn = "clo";
break;
case OPC_CLZ:
- tcg_gen_helper_0_0(do_clz);
+ tcg_gen_helper_1_1(do_clz, t0, t0);
opn = "clz";
break;
#if defined(TARGET_MIPS64)
case OPC_DCLO:
- tcg_gen_helper_0_0(do_dclo);
+ tcg_gen_helper_1_1(do_dclo, t0, t0);
opn = "dclo";
break;
case OPC_DCLZ:
- tcg_gen_helper_0_0(do_dclz);
+ tcg_gen_helper_1_1(do_dclz, t0, t0);
opn = "dclz";
break;
#endif
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
- gen_store_gpr(cpu_T[0], rd);
+ gen_store_gpr(t0, rd);
MIPS_DEBUG("%s %s, %s", opn, regnames[rd], regnames[rs]);
+
+ out:
+ tcg_temp_free(t0);
}
/* Traps */
@@ -2174,6 +2392,8 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
int rs, int rt, int16_t imm)
{
int cond;
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
cond = 0;
/* Load needed operands */
@@ -2186,8 +2406,8 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
case OPC_TNE:
/* Compare two registers */
if (rs != rt) {
- gen_load_gpr(cpu_T[0], rs);
- gen_load_gpr(cpu_T[1], rt);
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
cond = 1;
}
break;
@@ -2199,8 +2419,8 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
case OPC_TNEI:
/* Compare register to immediate */
if (rs != 0 || imm != 0) {
- gen_load_gpr(cpu_T[0], rs);
- tcg_gen_movi_tl(cpu_T[1], (int32_t)imm);
+ gen_load_gpr(t0, rs);
+ tcg_gen_movi_tl(t1, (int32_t)imm);
cond = 1;
}
break;
@@ -2214,7 +2434,7 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
case OPC_TGEU: /* rs >= rs unsigned */
case OPC_TGEIU: /* r0 >= 0 unsigned */
/* Always trap */
- tcg_gen_movi_tl(cpu_T[0], 1);
+ tcg_gen_movi_tl(t0, 1);
break;
case OPC_TLT: /* rs < rs */
case OPC_TLTI: /* r0 < 0 */
@@ -2223,47 +2443,56 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
case OPC_TNE: /* rs != rs */
case OPC_TNEI: /* r0 != 0 */
/* Never trap: treat as NOP. */
- return;
+ goto out;
default:
MIPS_INVAL("trap");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
} else {
switch (opc) {
case OPC_TEQ:
case OPC_TEQI:
- gen_op_eq();
+ gen_op_eq(t0, t1);
break;
case OPC_TGE:
case OPC_TGEI:
- gen_op_ge();
+ gen_op_ge(t0, t1);
break;
case OPC_TGEU:
case OPC_TGEIU:
- gen_op_geu();
+ gen_op_geu(t0, t1);
break;
case OPC_TLT:
case OPC_TLTI:
- gen_op_lt();
+ gen_op_lt(t0, t1);
break;
case OPC_TLTU:
case OPC_TLTIU:
- gen_op_ltu();
+ gen_op_ltu(t0, t1);
break;
case OPC_TNE:
case OPC_TNEI:
- gen_op_ne();
+ gen_op_ne(t0, t1);
break;
default:
MIPS_INVAL("trap");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
}
save_cpu_state(ctx, 1);
- gen_op_trap();
+ {
+ int l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_helper_0_i(do_raise_exception, EXCP_TRAP);
+ gen_set_label(l1);
+ }
ctx->bstate = BS_STOP;
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
static always_inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
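The trap hunk above also relies on the comparison generators (gen_op_eq and friends) now taking their operands explicitly instead of reading cpu_T[]. Their implementation sits elsewhere in this file; a plausible brcond-based shape, assuming the integer-label TCG API used throughout this tree, would be:

    /* Hypothetical sketch: leave 1 in t0 if t0 == t1, else 0. */
    static inline void gen_op_eq (TCGv t0, TCGv t1)
    {
        int l1 = gen_new_label();
        int l2 = gen_new_label();

        tcg_gen_brcond_tl(TCG_COND_EQ, t0, t1, l1);
        tcg_gen_movi_tl(t0, 0);
        tcg_gen_br(l2);
        gen_set_label(l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l2);
    }

With the condition held in a TCG value, gen_trap can then branch around the do_raise_exception call instead of invoking an unconditional gen_op_trap() micro-op, as the hunk shows.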
@@ -2287,6 +2516,8 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
target_ulong btarget = -1;
int blink = 0;
int bcond = 0;
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
if (ctx->hflags & MIPS_HFLAG_BMASK) {
#ifdef MIPS_DEBUG_DISAS
@@ -2297,7 +2528,7 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
}
#endif
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
/* Load needed operands */
@@ -2308,8 +2539,8 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
case OPC_BNEL:
/* Compare two registers */
if (rs != rt) {
- gen_load_gpr(cpu_T[0], rs);
- gen_load_gpr(cpu_T[1], rt);
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
bcond = 1;
}
btarget = ctx->pc + 4 + offset;
@@ -2328,7 +2559,7 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
case OPC_BLTZL:
/* Compare to zero */
if (rs != 0) {
- gen_load_gpr(cpu_T[0], rs);
+ gen_load_gpr(t0, rs);
bcond = 1;
}
btarget = ctx->pc + 4 + offset;
@@ -2346,14 +2577,14 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
others are reserved. */
MIPS_INVAL("jump hint");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
gen_save_breg_target(rs);
break;
default:
MIPS_INVAL("branch/jump");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
if (bcond == 0) {
/* No condition to be computed */
@@ -2380,26 +2611,26 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
case OPC_BLTZ: /* 0 < 0 */
/* Treat as NOP. */
MIPS_DEBUG("bnever (NOP)");
- return;
+ goto out;
case OPC_BLTZAL: /* 0 < 0 */
- tcg_gen_movi_tl(cpu_T[0], ctx->pc + 8);
- gen_store_gpr(cpu_T[0], 31);
+ tcg_gen_movi_tl(t0, ctx->pc + 8);
+ gen_store_gpr(t0, 31);
MIPS_DEBUG("bnever and link");
- return;
+ goto out;
case OPC_BLTZALL: /* 0 < 0 likely */
- tcg_gen_movi_tl(cpu_T[0], ctx->pc + 8);
- gen_store_gpr(cpu_T[0], 31);
+ tcg_gen_movi_tl(t0, ctx->pc + 8);
+ gen_store_gpr(t0, 31);
/* Skip the instruction in the delay slot */
MIPS_DEBUG("bnever, link and skip");
ctx->pc += 4;
- return;
+ goto out;
case OPC_BNEL: /* rx != rx likely */
case OPC_BGTZL: /* 0 > 0 likely */
case OPC_BLTZL: /* 0 < 0 likely */
/* Skip the instruction in the delay slot */
MIPS_DEBUG("bnever and skip");
ctx->pc += 4;
- return;
+ goto out;
case OPC_J:
ctx->hflags |= MIPS_HFLAG_B;
MIPS_DEBUG("j " TARGET_FMT_lx, btarget);
@@ -2421,92 +2652,92 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
default:
MIPS_INVAL("branch/jump");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
} else {
switch (opc) {
case OPC_BEQ:
- gen_op_eq();
+ gen_op_eq(t0, t1);
MIPS_DEBUG("beq %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btarget);
goto not_likely;
case OPC_BEQL:
- gen_op_eq();
+ gen_op_eq(t0, t1);
MIPS_DEBUG("beql %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btarget);
goto likely;
case OPC_BNE:
- gen_op_ne();
+ gen_op_ne(t0, t1);
MIPS_DEBUG("bne %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btarget);
goto not_likely;
case OPC_BNEL:
- gen_op_ne();
+ gen_op_ne(t0, t1);
MIPS_DEBUG("bnel %s, %s, " TARGET_FMT_lx,
regnames[rs], regnames[rt], btarget);
goto likely;
case OPC_BGEZ:
- gen_op_gez();
+ gen_op_gez(t0);
MIPS_DEBUG("bgez %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto not_likely;
case OPC_BGEZL:
- gen_op_gez();
+ gen_op_gez(t0);
MIPS_DEBUG("bgezl %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto likely;
case OPC_BGEZAL:
- gen_op_gez();
+ gen_op_gez(t0);
MIPS_DEBUG("bgezal %s, " TARGET_FMT_lx, regnames[rs], btarget);
blink = 31;
goto not_likely;
case OPC_BGEZALL:
- gen_op_gez();
+ gen_op_gez(t0);
blink = 31;
MIPS_DEBUG("bgezall %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto likely;
case OPC_BGTZ:
- gen_op_gtz();
+ gen_op_gtz(t0);
MIPS_DEBUG("bgtz %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto not_likely;
case OPC_BGTZL:
- gen_op_gtz();
+ gen_op_gtz(t0);
MIPS_DEBUG("bgtzl %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto likely;
case OPC_BLEZ:
- gen_op_lez();
+ gen_op_lez(t0);
MIPS_DEBUG("blez %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto not_likely;
case OPC_BLEZL:
- gen_op_lez();
+ gen_op_lez(t0);
MIPS_DEBUG("blezl %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto likely;
case OPC_BLTZ:
- gen_op_ltz();
+ gen_op_ltz(t0);
MIPS_DEBUG("bltz %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto not_likely;
case OPC_BLTZL:
- gen_op_ltz();
+ gen_op_ltz(t0);
MIPS_DEBUG("bltzl %s, " TARGET_FMT_lx, regnames[rs], btarget);
goto likely;
case OPC_BLTZAL:
- gen_op_ltz();
+ gen_op_ltz(t0);
blink = 31;
MIPS_DEBUG("bltzal %s, " TARGET_FMT_lx, regnames[rs], btarget);
not_likely:
ctx->hflags |= MIPS_HFLAG_BC;
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, bcond));
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, bcond));
break;
case OPC_BLTZALL:
- gen_op_ltz();
+ gen_op_ltz(t0);
blink = 31;
MIPS_DEBUG("bltzall %s, " TARGET_FMT_lx, regnames[rs], btarget);
likely:
ctx->hflags |= MIPS_HFLAG_BL;
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, bcond));
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, bcond));
break;
default:
MIPS_INVAL("conditional branch/jump");
generate_exception(ctx, EXCP_RI);
- return;
+ goto out;
}
}
MIPS_DEBUG("enter ds: link %d cond %02x target " TARGET_FMT_lx,
@@ -2514,75 +2745,87 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
ctx->btarget = btarget;
if (blink > 0) {
- tcg_gen_movi_tl(cpu_T[0], ctx->pc + 8);
- gen_store_gpr(cpu_T[0], blink);
+ tcg_gen_movi_tl(t0, ctx->pc + 8);
+ gen_store_gpr(t0, blink);
}
+
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* special3 bitfield operations */
static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt,
int rs, int lsb, int msb)
{
- gen_load_gpr(cpu_T[1], rs);
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t1, rs);
switch (opc) {
case OPC_EXT:
if (lsb + msb > 31)
goto fail;
- gen_op_ext(lsb, msb + 1);
+ tcg_gen_helper_1_2ii(do_ext, t0, t0, t1, lsb, msb + 1);
break;
#if defined(TARGET_MIPS64)
case OPC_DEXTM:
if (lsb + msb > 63)
goto fail;
- gen_op_dext(lsb, msb + 1 + 32);
+ tcg_gen_helper_1_2ii(do_dext, t0, t0, t1, lsb, msb + 1 + 32);
break;
case OPC_DEXTU:
if (lsb + msb > 63)
goto fail;
- gen_op_dext(lsb + 32, msb + 1);
+ tcg_gen_helper_1_2ii(do_dext, t0, t0, t1, lsb + 32, msb + 1);
break;
case OPC_DEXT:
if (lsb + msb > 63)
goto fail;
- gen_op_dext(lsb, msb + 1);
+ tcg_gen_helper_1_2ii(do_dext, t0, t0, t1, lsb, msb + 1);
break;
#endif
case OPC_INS:
if (lsb > msb)
goto fail;
- gen_load_gpr(cpu_T[0], rt);
- gen_op_ins(lsb, msb - lsb + 1);
+ gen_load_gpr(t0, rt);
+ tcg_gen_helper_1_2ii(do_ins, t0, t0, t1, lsb, msb - lsb + 1);
break;
#if defined(TARGET_MIPS64)
case OPC_DINSM:
if (lsb > msb)
goto fail;
- gen_load_gpr(cpu_T[0], rt);
- gen_op_dins(lsb, msb - lsb + 1 + 32);
+ gen_load_gpr(t0, rt);
+ tcg_gen_helper_1_2ii(do_dins, t0, t0, t1, lsb, msb - lsb + 1 + 32);
break;
case OPC_DINSU:
if (lsb > msb)
goto fail;
- gen_load_gpr(cpu_T[0], rt);
- gen_op_dins(lsb + 32, msb - lsb + 1);
+ gen_load_gpr(t0, rt);
+ tcg_gen_helper_1_2ii(do_dins, t0, t0, t1, lsb + 32, msb - lsb + 1);
break;
case OPC_DINS:
if (lsb > msb)
goto fail;
- gen_load_gpr(cpu_T[0], rt);
- gen_op_dins(lsb, msb - lsb + 1);
+ gen_load_gpr(t0, rt);
+ tcg_gen_helper_1_2ii(do_dins, t0, t0, t1, lsb, msb - lsb + 1);
break;
#endif
default:
fail:
MIPS_INVAL("bitops");
generate_exception(ctx, EXCP_RI);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
return;
}
- gen_store_gpr(cpu_T[0], rt);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* CP0 (MMU and control) */
+#ifndef CONFIG_USER_ONLY
static inline void gen_mfc0_load32 (TCGv t, target_ulong off)
{
TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
@@ -2594,14 +2837,26 @@ static inline void gen_mfc0_load32 (TCGv t, target_ulong off)
static inline void gen_mfc0_load64 (TCGv t, target_ulong off)
{
- TCGv r_tmp = tcg_temp_new(TCG_TYPE_I64);
+ tcg_gen_ld_tl(t, cpu_env, off);
+ tcg_gen_ext32s_tl(t, t);
+}
+
+static inline void gen_mtc0_store32 (TCGv t, target_ulong off)
+{
+ TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
- tcg_gen_ld_i64(r_tmp, cpu_env, off);
- tcg_gen_trunc_i64_tl(t, r_tmp);
+ tcg_gen_trunc_tl_i32(r_tmp, t);
+ tcg_gen_st_i32(r_tmp, cpu_env, off);
tcg_temp_free(r_tmp);
}
-static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
+static inline void gen_mtc0_store64 (TCGv t, target_ulong off)
+{
+ tcg_gen_ext32s_tl(t, t);
+ tcg_gen_st_tl(t, cpu_env, off);
+}
+
+static void gen_mfc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
{
const char *rn = "invalid";
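The gen_mtc0_store32/gen_mtc0_store64 helpers introduced just above are what let simple CP0 writes drop their dedicated micro-ops later in this patch (gen_op_mtc0_epc, gen_op_mtc0_errorepc, gen_op_mtc0_desave, ...). A register with no side effects is now written straight from the TCG value, with 64-bit architectural registers kept sign-extended for MTC0; for example (usage sketch matching the hunks below):

    /* 32-bit CP0 register: truncate the TL value and store it as i32. */
    gen_mtc0_store32(t0, offsetof(CPUState, CP0_DESAVE));

    /* 64-bit CP0 register: sign-extend to 32 bits, then store the TL value. */
    gen_mtc0_store64(t0, offsetof(CPUState, CP0_EPC));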
@@ -2612,22 +2867,22 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 0:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Index));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Index));
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_mvpcontrol();
+ tcg_gen_helper_1_0(do_mfc0_mvpcontrol, t0);
rn = "MVPControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_mvpconf0();
+ tcg_gen_helper_1_0(do_mfc0_mvpconf0, t0);
rn = "MVPConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_mvpconf1();
+ tcg_gen_helper_1_0(do_mfc0_mvpconf1, t0);
rn = "MVPConf1";
break;
default:
@@ -2637,42 +2892,42 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 1:
switch (sel) {
case 0:
- gen_op_mfc0_random();
+ tcg_gen_helper_1_0(do_mfc0_random, t0);
rn = "Random";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEControl));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEControl));
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEConf0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf0));
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEConf1));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf1));
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(cpu_T[0], offsetof(CPUState, CP0_YQMask));
+ gen_mfc0_load64(t0, offsetof(CPUState, CP0_YQMask));
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(cpu_T[0], offsetof(CPUState, CP0_VPESchedule));
+ gen_mfc0_load64(t0, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(cpu_T[0], offsetof(CPUState, CP0_VPEScheFBack));
+ gen_mfc0_load64(t0, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEOpt));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEOpt));
rn = "VPEOpt";
break;
default:
@@ -2682,43 +2937,43 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EntryLo0));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo0));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcstatus();
+ tcg_gen_helper_1_0(do_mfc0_tcstatus, t0);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcbind();
+ tcg_gen_helper_1_0(do_mfc0_tcbind, t0);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcrestart();
+ tcg_gen_helper_1_0(do_mfc0_tcrestart, t0);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tchalt();
+ tcg_gen_helper_1_0(do_mfc0_tchalt, t0);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tccontext();
+ tcg_gen_helper_1_0(do_mfc0_tccontext, t0);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcschedule();
+ tcg_gen_helper_1_0(do_mfc0_tcschedule, t0);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcschefback();
+ tcg_gen_helper_1_0(do_mfc0_tcschefback, t0);
rn = "TCScheFBack";
break;
default:
@@ -2728,8 +2983,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 3:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EntryLo1));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo1));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "EntryLo1";
break;
default:
@@ -2739,12 +2994,12 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 4:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_Context));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_Context));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "Context";
break;
case 1:
-// gen_op_mfc0_contextconfig(); /* SmartMIPS ASE */
+// tcg_gen_helper_1_0(do_mfc0_contextconfig, t0); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
@@ -2754,12 +3009,12 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 5:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_PageMask));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageMask));
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_PageGrain));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageGrain));
rn = "PageGrain";
break;
default:
@@ -2769,32 +3024,32 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 6:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Wired));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Wired));
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf0));
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf1));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf1));
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf2));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf2));
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf3));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf3));
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf4));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf4));
rn = "SRSConf4";
break;
default:
@@ -2805,7 +3060,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_HWREna));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_HWREna));
rn = "HWREna";
break;
default:
@@ -2815,8 +3070,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 8:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_BadVAddr));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_BadVAddr));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "BadVAddr";
break;
default:
@@ -2826,7 +3081,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 9:
switch (sel) {
case 0:
- gen_op_mfc0_count();
+ tcg_gen_helper_1_0(do_mfc0_count, t0);
rn = "Count";
break;
/* 6,7 are implementation dependent */
@@ -2837,8 +3092,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 10:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EntryHi));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryHi));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "EntryHi";
break;
default:
@@ -2848,7 +3103,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 11:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Compare));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Compare));
rn = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -2859,22 +3114,22 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 12:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Status));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Status));
rn = "Status";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_IntCtl));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_IntCtl));
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSCtl));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSCtl));
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSMap));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSMap));
rn = "SRSMap";
break;
default:
@@ -2884,7 +3139,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 13:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Cause));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Cause));
rn = "Cause";
break;
default:
@@ -2894,8 +3149,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 14:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EPC));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "EPC";
break;
default:
@@ -2905,12 +3160,12 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 15:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_PRid));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_PRid));
rn = "PRid";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_EBase));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_EBase));
rn = "EBase";
break;
default:
@@ -2920,29 +3175,29 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 16:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config0));
rn = "Config";
break;
case 1:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config1));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config1));
rn = "Config1";
break;
case 2:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config2));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config2));
rn = "Config2";
break;
case 3:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config3));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config3));
rn = "Config3";
break;
/* 4,5 are reserved */
/* 6,7 are implementation dependent */
case 6:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config6));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config6));
rn = "Config6";
break;
case 7:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config7));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config7));
rn = "Config7";
break;
default:
@@ -2952,7 +3207,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 17:
switch (sel) {
case 0:
- gen_op_mfc0_lladdr();
+ tcg_gen_helper_1_0(do_mfc0_lladdr, t0);
rn = "LLAddr";
break;
default:
@@ -2962,7 +3217,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 18:
switch (sel) {
case 0 ... 7:
- gen_op_mfc0_watchlo(sel);
+ tcg_gen_helper_1_i(do_mfc0_watchlo, t0, sel);
rn = "WatchLo";
break;
default:
@@ -2972,7 +3227,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 19:
switch (sel) {
case 0 ...7:
- gen_op_mfc0_watchhi(sel);
+ tcg_gen_helper_1_i(do_mfc0_watchhi, t0, sel);
rn = "WatchHi";
break;
default:
@@ -2984,8 +3239,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 0:
#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_XContext));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_XContext));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "XContext";
break;
#endif
@@ -2997,7 +3252,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Framemask));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Framemask));
rn = "Framemask";
break;
default:
@@ -3011,23 +3266,23 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 23:
switch (sel) {
case 0:
- gen_op_mfc0_debug(); /* EJTAG support */
+ tcg_gen_helper_1_0(do_mfc0_debug, t0); /* EJTAG support */
rn = "Debug";
break;
case 1:
-// gen_op_mfc0_tracecontrol(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_mfc0_tracecontrol, t0); /* PDtrace support */
rn = "TraceControl";
// break;
case 2:
-// gen_op_mfc0_tracecontrol2(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_mfc0_tracecontrol2, t0); /* PDtrace support */
rn = "TraceControl2";
// break;
case 3:
-// gen_op_mfc0_usertracedata(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_mfc0_usertracedata, t0); /* PDtrace support */
rn = "UserTraceData";
// break;
case 4:
-// gen_op_mfc0_debug(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_mfc0_tracebpc, t0); /* PDtrace support */
rn = "TraceBPC";
// break;
default:
@@ -3038,8 +3293,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_DEPC));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "DEPC";
break;
default:
@@ -3049,35 +3304,35 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 25:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Performance0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Performance0));
rn = "Performance0";
break;
case 1:
-// gen_op_mfc0_performance1();
+// tcg_gen_helper_1_0(do_mfc0_performance1, t0);
rn = "Performance1";
// break;
case 2:
-// gen_op_mfc0_performance2();
+// tcg_gen_helper_1_0(do_mfc0_performance2, t0);
rn = "Performance2";
// break;
case 3:
-// gen_op_mfc0_performance3();
+// tcg_gen_helper_1_0(do_mfc0_performance3, t0);
rn = "Performance3";
// break;
case 4:
-// gen_op_mfc0_performance4();
+// tcg_gen_helper_1_0(do_mfc0_performance4, t0);
rn = "Performance4";
// break;
case 5:
-// gen_op_mfc0_performance5();
+// tcg_gen_helper_1_0(do_mfc0_performance5, t0);
rn = "Performance5";
// break;
case 6:
-// gen_op_mfc0_performance6();
+// tcg_gen_helper_1_0(do_mfc0_performance6, t0);
rn = "Performance6";
// break;
case 7:
-// gen_op_mfc0_performance7();
+// tcg_gen_helper_1_0(do_mfc0_performance7, t0);
rn = "Performance7";
// break;
default:
@@ -3103,14 +3358,14 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_TagLo));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagLo));
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_DataLo));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataLo));
rn = "DataLo";
break;
default:
@@ -3123,14 +3378,14 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_TagHi));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagHi));
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_DataHi));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataHi));
rn = "DataHi";
break;
default:
@@ -3140,8 +3395,8 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 30:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_ErrorEPC));
- tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_ext32s_tl(t0, t0);
rn = "ErrorEPC";
break;
default:
@@ -3152,7 +3407,7 @@ static void gen_mfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
/* EJTAG support */
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_DESAVE));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -3180,7 +3435,7 @@ die:
generate_exception(ctx, EXCP_RI);
}
-static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
+static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
{
const char *rn = "invalid";
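With the extra TCGv parameter, gen_mfc0/gen_mtc0 (and their 64-bit counterparts below) no longer touch cpu_T[0] themselves; the CP0 dispatch code is expected to own the temporary. A plausible caller shape, assumed here since the MFC0/MTC0 dispatch itself is outside this excerpt:

    TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);

    switch (opc) {
    case OPC_MFC0:
        gen_mfc0(env, ctx, t0, rd, ctx->opcode & 0x7);
        gen_store_gpr(t0, rt);
        break;
    case OPC_MTC0:
        gen_load_gpr(t0, rt);
        gen_mtc0(env, ctx, t0, rd, ctx->opcode & 0x7);
        break;
    }
    tcg_temp_free(t0);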
@@ -3191,12 +3446,12 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 0:
switch (sel) {
case 0:
- gen_op_mtc0_index();
+ tcg_gen_helper_0_1(do_mtc0_index, t0);
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_mvpcontrol();
+ tcg_gen_helper_0_1(do_mtc0_mvpcontrol, t0);
rn = "MVPControl";
break;
case 2:
@@ -3221,37 +3476,37 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpecontrol();
+ tcg_gen_helper_0_1(do_mtc0_vpecontrol, t0);
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeconf0();
+ tcg_gen_helper_0_1(do_mtc0_vpeconf0, t0);
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeconf1();
+ tcg_gen_helper_0_1(do_mtc0_vpeconf1, t0);
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_yqmask();
+ tcg_gen_helper_0_1(do_mtc0_yqmask, t0);
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeschedule();
+ gen_mtc0_store64(t0, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeschefback();
+ gen_mtc0_store64(t0, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeopt();
+ tcg_gen_helper_0_1(do_mtc0_vpeopt, t0);
rn = "VPEOpt";
break;
default:
@@ -3261,42 +3516,42 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
switch (sel) {
case 0:
- gen_op_mtc0_entrylo0();
+ tcg_gen_helper_0_1(do_mtc0_entrylo0, t0);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcstatus();
+ tcg_gen_helper_0_1(do_mtc0_tcstatus, t0);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcbind();
+ tcg_gen_helper_0_1(do_mtc0_tcbind, t0);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcrestart();
+ tcg_gen_helper_0_1(do_mtc0_tcrestart, t0);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tchalt();
+ tcg_gen_helper_0_1(do_mtc0_tchalt, t0);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tccontext();
+ tcg_gen_helper_0_1(do_mtc0_tccontext, t0);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcschedule();
+ tcg_gen_helper_0_1(do_mtc0_tcschedule, t0);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcschefback();
+ tcg_gen_helper_0_1(do_mtc0_tcschefback, t0);
rn = "TCScheFBack";
break;
default:
@@ -3306,7 +3561,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 3:
switch (sel) {
case 0:
- gen_op_mtc0_entrylo1();
+ tcg_gen_helper_0_1(do_mtc0_entrylo1, t0);
rn = "EntryLo1";
break;
default:
@@ -3316,11 +3571,11 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 4:
switch (sel) {
case 0:
- gen_op_mtc0_context();
+ tcg_gen_helper_0_1(do_mtc0_context, t0);
rn = "Context";
break;
case 1:
-// gen_op_mtc0_contextconfig(); /* SmartMIPS ASE */
+// tcg_gen_helper_0_1(do_mtc0_contextconfig, t0); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
@@ -3330,12 +3585,12 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 5:
switch (sel) {
case 0:
- gen_op_mtc0_pagemask();
+ tcg_gen_helper_0_1(do_mtc0_pagemask, t0);
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_pagegrain();
+ tcg_gen_helper_0_1(do_mtc0_pagegrain, t0);
rn = "PageGrain";
break;
default:
@@ -3345,32 +3600,32 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 6:
switch (sel) {
case 0:
- gen_op_mtc0_wired();
+ tcg_gen_helper_0_1(do_mtc0_wired, t0);
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf0();
+ tcg_gen_helper_0_1(do_mtc0_srsconf0, t0);
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf1();
+ tcg_gen_helper_0_1(do_mtc0_srsconf1, t0);
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf2();
+ tcg_gen_helper_0_1(do_mtc0_srsconf2, t0);
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf3();
+ tcg_gen_helper_0_1(do_mtc0_srsconf3, t0);
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf4();
+ tcg_gen_helper_0_1(do_mtc0_srsconf4, t0);
rn = "SRSConf4";
break;
default:
@@ -3381,7 +3636,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_hwrena();
+ tcg_gen_helper_0_1(do_mtc0_hwrena, t0);
rn = "HWREna";
break;
default:
@@ -3395,7 +3650,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 9:
switch (sel) {
case 0:
- gen_op_mtc0_count();
+ tcg_gen_helper_0_1(do_mtc0_count, t0);
rn = "Count";
break;
/* 6,7 are implementation dependent */
@@ -3408,7 +3663,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 10:
switch (sel) {
case 0:
- gen_op_mtc0_entryhi();
+ tcg_gen_helper_0_1(do_mtc0_entryhi, t0);
rn = "EntryHi";
break;
default:
@@ -3418,7 +3673,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 11:
switch (sel) {
case 0:
- gen_op_mtc0_compare();
+ tcg_gen_helper_0_1(do_mtc0_compare, t0);
rn = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -3431,7 +3686,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 12:
switch (sel) {
case 0:
- gen_op_mtc0_status();
+ tcg_gen_helper_0_1(do_mtc0_status, t0);
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
@@ -3439,21 +3694,21 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_intctl();
+ tcg_gen_helper_0_1(do_mtc0_intctl, t0);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsctl();
+ tcg_gen_helper_0_1(do_mtc0_srsctl, t0);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsmap();
+ gen_mtc0_store32(t0, offsetof(CPUState, CP0_SRSMap));
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSMap";
@@ -3465,7 +3720,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 13:
switch (sel) {
case 0:
- gen_op_mtc0_cause();
+ tcg_gen_helper_0_1(do_mtc0_cause, t0);
rn = "Cause";
break;
default:
@@ -3477,7 +3732,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 14:
switch (sel) {
case 0:
- gen_op_mtc0_epc();
+ gen_mtc0_store64(t0, offsetof(CPUState, CP0_EPC));
rn = "EPC";
break;
default:
@@ -3492,7 +3747,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_ebase();
+ tcg_gen_helper_0_1(do_mtc0_ebase, t0);
rn = "EBase";
break;
default:
@@ -3502,7 +3757,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 16:
switch (sel) {
case 0:
- gen_op_mtc0_config0();
+ tcg_gen_helper_0_1(do_mtc0_config0, t0);
rn = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
@@ -3512,7 +3767,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
rn = "Config1";
break;
case 2:
- gen_op_mtc0_config2();
+ tcg_gen_helper_0_1(do_mtc0_config2, t0);
rn = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
@@ -3549,7 +3804,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 18:
switch (sel) {
case 0 ... 7:
- gen_op_mtc0_watchlo(sel);
+ tcg_gen_helper_0_1i(do_mtc0_watchlo, t0, sel);
rn = "WatchLo";
break;
default:
@@ -3559,7 +3814,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 19:
switch (sel) {
case 0 ... 7:
- gen_op_mtc0_watchhi(sel);
+ tcg_gen_helper_0_1i(do_mtc0_watchhi, t0, sel);
rn = "WatchHi";
break;
default:
@@ -3571,7 +3826,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 0:
#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
- gen_op_mtc0_xcontext();
+ tcg_gen_helper_0_1(do_mtc0_xcontext, t0);
rn = "XContext";
break;
#endif
@@ -3583,7 +3838,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_op_mtc0_framemask();
+ tcg_gen_helper_0_1(do_mtc0_framemask, t0);
rn = "Framemask";
break;
default:
@@ -3597,20 +3852,20 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 23:
switch (sel) {
case 0:
- gen_op_mtc0_debug(); /* EJTAG support */
+ tcg_gen_helper_0_1(do_mtc0_debug, t0); /* EJTAG support */
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
rn = "Debug";
break;
case 1:
-// gen_op_mtc0_tracecontrol(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_tracecontrol, t0); /* PDtrace support */
rn = "TraceControl";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
// break;
case 2:
-// gen_op_mtc0_tracecontrol2(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_tracecontrol2, t0); /* PDtrace support */
rn = "TraceControl2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
@@ -3618,13 +3873,13 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 3:
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
-// gen_op_mtc0_usertracedata(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_usertracedata, t0); /* PDtrace support */
rn = "UserTraceData";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
// break;
case 4:
-// gen_op_mtc0_debug(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_tracebpc, t0); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceBPC";
@@ -3636,7 +3891,8 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 24:
switch (sel) {
case 0:
- gen_op_mtc0_depc(); /* EJTAG support */
+ /* EJTAG support */
+ gen_mtc0_store64(t0, offsetof(CPUState, CP0_DEPC));
rn = "DEPC";
break;
default:
@@ -3646,35 +3902,35 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 25:
switch (sel) {
case 0:
- gen_op_mtc0_performance0();
+ tcg_gen_helper_0_1(do_mtc0_performance0, t0);
rn = "Performance0";
break;
case 1:
-// gen_op_mtc0_performance1();
+// tcg_gen_helper_0_1(do_mtc0_performance1, t0);
rn = "Performance1";
// break;
case 2:
-// gen_op_mtc0_performance2();
+// tcg_gen_helper_0_1(do_mtc0_performance2, t0);
rn = "Performance2";
// break;
case 3:
-// gen_op_mtc0_performance3();
+// tcg_gen_helper_0_1(do_mtc0_performance3, t0);
rn = "Performance3";
// break;
case 4:
-// gen_op_mtc0_performance4();
+// tcg_gen_helper_0_1(do_mtc0_performance4, t0);
rn = "Performance4";
// break;
case 5:
-// gen_op_mtc0_performance5();
+// tcg_gen_helper_0_1(do_mtc0_performance5, t0);
rn = "Performance5";
// break;
case 6:
-// gen_op_mtc0_performance6();
+// tcg_gen_helper_0_1(do_mtc0_performance6, t0);
rn = "Performance6";
// break;
case 7:
-// gen_op_mtc0_performance7();
+// tcg_gen_helper_0_1(do_mtc0_performance7, t0);
rn = "Performance7";
// break;
default:
@@ -3701,14 +3957,14 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_op_mtc0_taglo();
+ tcg_gen_helper_0_1(do_mtc0_taglo, t0);
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_op_mtc0_datalo();
+ tcg_gen_helper_0_1(do_mtc0_datalo, t0);
rn = "DataLo";
break;
default:
@@ -3721,14 +3977,14 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_op_mtc0_taghi();
+ tcg_gen_helper_0_1(do_mtc0_taghi, t0);
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_op_mtc0_datahi();
+ tcg_gen_helper_0_1(do_mtc0_datahi, t0);
rn = "DataHi";
break;
default:
@@ -3739,7 +3995,7 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 30:
switch (sel) {
case 0:
- gen_op_mtc0_errorepc();
+ gen_mtc0_store64(t0, offsetof(CPUState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
@@ -3749,7 +4005,8 @@ static void gen_mtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 31:
switch (sel) {
case 0:
- gen_op_mtc0_desave(); /* EJTAG support */
+ /* EJTAG support */
+ gen_mtc0_store32(t0, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -3780,7 +4037,7 @@ die:
}
#if defined(TARGET_MIPS64)
-static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
+static void gen_dmfc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
{
const char *rn = "invalid";
@@ -3791,22 +4048,22 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 0:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Index));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Index));
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_mvpcontrol();
+ tcg_gen_helper_1_0(do_mfc0_mvpcontrol, t0);
rn = "MVPControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_mvpconf0();
+ tcg_gen_helper_1_0(do_mfc0_mvpconf0, t0);
rn = "MVPConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_mvpconf1();
+ tcg_gen_helper_1_0(do_mfc0_mvpconf1, t0);
rn = "MVPConf1";
break;
default:
@@ -3816,42 +4073,42 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 1:
switch (sel) {
case 0:
- gen_op_mfc0_random();
+ tcg_gen_helper_1_0(do_mfc0_random, t0);
rn = "Random";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEControl));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEControl));
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEConf0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf0));
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEConf1));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEConf1));
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_YQMask));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_YQMask));
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_VPESchedule));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_VPEOpt));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_VPEOpt));
rn = "VPEOpt";
break;
default:
@@ -3861,42 +4118,42 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EntryLo0));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo0));
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcstatus();
+ tcg_gen_helper_1_0(do_mfc0_tcstatus, t0);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mfc0_tcbind();
+ tcg_gen_helper_1_0(do_mfc0_tcbind, t0);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_dmfc0_tcrestart();
+ tcg_gen_helper_1_0(do_dmfc0_tcrestart, t0);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_op_dmfc0_tchalt();
+ tcg_gen_helper_1_0(do_dmfc0_tchalt, t0);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_op_dmfc0_tccontext();
+ tcg_gen_helper_1_0(do_dmfc0_tccontext, t0);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_op_dmfc0_tcschedule();
+ tcg_gen_helper_1_0(do_dmfc0_tcschedule, t0);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_op_dmfc0_tcschefback();
+ tcg_gen_helper_1_0(do_dmfc0_tcschefback, t0);
rn = "TCScheFBack";
break;
default:
@@ -3906,7 +4163,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 3:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EntryLo1));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryLo1));
rn = "EntryLo1";
break;
default:
@@ -3916,11 +4173,11 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 4:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_Context));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_Context));
rn = "Context";
break;
case 1:
-// gen_op_dmfc0_contextconfig(); /* SmartMIPS ASE */
+// tcg_gen_helper_1_0(do_dmfc0_contextconfig, t0); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
@@ -3930,12 +4187,12 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 5:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_PageMask));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageMask));
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_PageGrain));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_PageGrain));
rn = "PageGrain";
break;
default:
@@ -3945,32 +4202,32 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 6:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Wired));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Wired));
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf0));
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf1));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf1));
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf2));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf2));
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf3));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf3));
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSConf4));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSConf4));
rn = "SRSConf4";
break;
default:
@@ -3981,7 +4238,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_HWREna));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_HWREna));
rn = "HWREna";
break;
default:
@@ -3991,7 +4248,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 8:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_BadVAddr));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_BadVAddr));
rn = "BadVAddr";
break;
default:
@@ -4001,7 +4258,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 9:
switch (sel) {
case 0:
- gen_op_mfc0_count();
+ tcg_gen_helper_1_0(do_mfc0_count, t0);
rn = "Count";
break;
/* 6,7 are implementation dependent */
@@ -4012,7 +4269,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 10:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EntryHi));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EntryHi));
rn = "EntryHi";
break;
default:
@@ -4022,7 +4279,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 11:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Compare));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Compare));
rn = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -4033,22 +4290,22 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 12:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Status));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Status));
rn = "Status";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_IntCtl));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_IntCtl));
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSCtl));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSCtl));
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_SRSMap));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_SRSMap));
rn = "SRSMap";
break;
default:
@@ -4058,7 +4315,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 13:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Cause));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Cause));
rn = "Cause";
break;
default:
@@ -4068,7 +4325,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 14:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_EPC));
rn = "EPC";
break;
default:
@@ -4078,12 +4335,12 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 15:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_PRid));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_PRid));
rn = "PRid";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_EBase));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_EBase));
rn = "EBase";
break;
default:
@@ -4093,28 +4350,28 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 16:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config0));
rn = "Config";
break;
case 1:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config1));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config1));
rn = "Config1";
break;
case 2:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config2));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config2));
rn = "Config2";
break;
case 3:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config3));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config3));
rn = "Config3";
break;
/* 6,7 are implementation dependent */
case 6:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config6));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config6));
rn = "Config6";
break;
case 7:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Config7));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Config7));
rn = "Config7";
break;
default:
@@ -4124,7 +4381,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 17:
switch (sel) {
case 0:
- gen_op_dmfc0_lladdr();
+ tcg_gen_helper_1_0(do_dmfc0_lladdr, t0);
rn = "LLAddr";
break;
default:
@@ -4134,7 +4391,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 18:
switch (sel) {
case 0 ... 7:
- gen_op_dmfc0_watchlo(sel);
+ tcg_gen_helper_1_i(do_dmfc0_watchlo, t0, sel);
rn = "WatchLo";
break;
default:
@@ -4144,7 +4401,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 19:
switch (sel) {
case 0 ... 7:
- gen_op_mfc0_watchhi(sel);
+ tcg_gen_helper_1_i(do_mfc0_watchhi, t0, sel);
rn = "WatchHi";
break;
default:
@@ -4155,7 +4412,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS3);
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_XContext));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_XContext));
rn = "XContext";
break;
default:
@@ -4166,7 +4423,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Framemask));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Framemask));
rn = "Framemask";
break;
default:
@@ -4180,23 +4437,23 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 23:
switch (sel) {
case 0:
- gen_op_mfc0_debug(); /* EJTAG support */
+ tcg_gen_helper_1_0(do_mfc0_debug, t0); /* EJTAG support */
rn = "Debug";
break;
case 1:
-// gen_op_dmfc0_tracecontrol(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_dmfc0_tracecontrol, t0); /* PDtrace support */
rn = "TraceControl";
// break;
case 2:
-// gen_op_dmfc0_tracecontrol2(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_dmfc0_tracecontrol2, t0); /* PDtrace support */
rn = "TraceControl2";
// break;
case 3:
-// gen_op_dmfc0_usertracedata(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_dmfc0_usertracedata, t0); /* PDtrace support */
rn = "UserTraceData";
// break;
case 4:
-// gen_op_dmfc0_debug(); /* PDtrace support */
+// tcg_gen_helper_1_0(do_dmfc0_tracebpc, t0); /* PDtrace support */
rn = "TraceBPC";
// break;
default:
@@ -4207,7 +4464,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_DEPC));
rn = "DEPC";
break;
default:
@@ -4217,35 +4474,35 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 25:
switch (sel) {
case 0:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_Performance0));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_Performance0));
rn = "Performance0";
break;
case 1:
-// gen_op_dmfc0_performance1();
+// tcg_gen_helper_1_0(do_dmfc0_performance1, t0);
rn = "Performance1";
// break;
case 2:
-// gen_op_dmfc0_performance2();
+// tcg_gen_helper_1_0(do_dmfc0_performance2, t0);
rn = "Performance2";
// break;
case 3:
-// gen_op_dmfc0_performance3();
+// tcg_gen_helper_1_0(do_dmfc0_performance3, t0);
rn = "Performance3";
// break;
case 4:
-// gen_op_dmfc0_performance4();
+// tcg_gen_helper_1_0(do_dmfc0_performance4, t0);
rn = "Performance4";
// break;
case 5:
-// gen_op_dmfc0_performance5();
+// tcg_gen_helper_1_0(do_dmfc0_performance5, t0);
rn = "Performance5";
// break;
case 6:
-// gen_op_dmfc0_performance6();
+// tcg_gen_helper_1_0(do_dmfc0_performance6, t0);
rn = "Performance6";
// break;
case 7:
-// gen_op_dmfc0_performance7();
+// tcg_gen_helper_1_0(do_dmfc0_performance7, t0);
rn = "Performance7";
// break;
default:
@@ -4271,14 +4528,14 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_TagLo));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagLo));
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_DataLo));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataLo));
rn = "DataLo";
break;
default:
@@ -4291,14 +4548,14 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_TagHi));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_TagHi));
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_DataHi));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_DataHi));
rn = "DataHi";
break;
default:
@@ -4308,7 +4565,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 30:
switch (sel) {
case 0:
- tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
@@ -4319,7 +4576,7 @@ static void gen_dmfc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
/* EJTAG support */
- gen_mfc0_load32(cpu_T[0], offsetof(CPUState, CP0_DESAVE));
+ gen_mfc0_load32(t0, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -4347,7 +4604,7 @@ die:
generate_exception(ctx, EXCP_RI);
}
-static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
+static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv t0, int reg, int sel)
{
const char *rn = "invalid";
@@ -4358,12 +4615,12 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 0:
switch (sel) {
case 0:
- gen_op_mtc0_index();
+ tcg_gen_helper_0_1(do_mtc0_index, t0);
rn = "Index";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_mvpcontrol();
+ tcg_gen_helper_0_1(do_mtc0_mvpcontrol, t0);
rn = "MVPControl";
break;
case 2:
@@ -4388,37 +4645,37 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpecontrol();
+ tcg_gen_helper_0_1(do_mtc0_vpecontrol, t0);
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeconf0();
+ tcg_gen_helper_0_1(do_mtc0_vpeconf0, t0);
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeconf1();
+ tcg_gen_helper_0_1(do_mtc0_vpeconf1, t0);
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_yqmask();
+ tcg_gen_helper_0_1(do_mtc0_yqmask, t0);
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeschedule();
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeschefback();
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_vpeopt();
+ tcg_gen_helper_0_1(do_mtc0_vpeopt, t0);
rn = "VPEOpt";
break;
default:
@@ -4428,42 +4685,42 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
switch (sel) {
case 0:
- gen_op_mtc0_entrylo0();
+ tcg_gen_helper_0_1(do_mtc0_entrylo0, t0);
rn = "EntryLo0";
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcstatus();
+ tcg_gen_helper_0_1(do_mtc0_tcstatus, t0);
rn = "TCStatus";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcbind();
+ tcg_gen_helper_0_1(do_mtc0_tcbind, t0);
rn = "TCBind";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcrestart();
+ tcg_gen_helper_0_1(do_mtc0_tcrestart, t0);
rn = "TCRestart";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tchalt();
+ tcg_gen_helper_0_1(do_mtc0_tchalt, t0);
rn = "TCHalt";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tccontext();
+ tcg_gen_helper_0_1(do_mtc0_tccontext, t0);
rn = "TCContext";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcschedule();
+ tcg_gen_helper_0_1(do_mtc0_tcschedule, t0);
rn = "TCSchedule";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_op_mtc0_tcschefback();
+ tcg_gen_helper_0_1(do_mtc0_tcschefback, t0);
rn = "TCScheFBack";
break;
default:
@@ -4473,7 +4730,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 3:
switch (sel) {
case 0:
- gen_op_mtc0_entrylo1();
+ tcg_gen_helper_0_1(do_mtc0_entrylo1, t0);
rn = "EntryLo1";
break;
default:
@@ -4483,11 +4740,11 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 4:
switch (sel) {
case 0:
- gen_op_mtc0_context();
+ tcg_gen_helper_0_1(do_mtc0_context, t0);
rn = "Context";
break;
case 1:
-// gen_op_mtc0_contextconfig(); /* SmartMIPS ASE */
+// tcg_gen_helper_0_1(do_mtc0_contextconfig, t0); /* SmartMIPS ASE */
rn = "ContextConfig";
// break;
default:
@@ -4497,12 +4754,12 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 5:
switch (sel) {
case 0:
- gen_op_mtc0_pagemask();
+ tcg_gen_helper_0_1(do_mtc0_pagemask, t0);
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_pagegrain();
+ tcg_gen_helper_0_1(do_mtc0_pagegrain, t0);
rn = "PageGrain";
break;
default:
@@ -4512,32 +4769,32 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 6:
switch (sel) {
case 0:
- gen_op_mtc0_wired();
+ tcg_gen_helper_0_1(do_mtc0_wired, t0);
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf0();
+ tcg_gen_helper_0_1(do_mtc0_srsconf0, t0);
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf1();
+ tcg_gen_helper_0_1(do_mtc0_srsconf1, t0);
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf2();
+ tcg_gen_helper_0_1(do_mtc0_srsconf2, t0);
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf3();
+ tcg_gen_helper_0_1(do_mtc0_srsconf3, t0);
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsconf4();
+ tcg_gen_helper_0_1(do_mtc0_srsconf4, t0);
rn = "SRSConf4";
break;
default:
@@ -4548,7 +4805,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_hwrena();
+ tcg_gen_helper_0_1(do_mtc0_hwrena, t0);
rn = "HWREna";
break;
default:
@@ -4562,7 +4819,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 9:
switch (sel) {
case 0:
- gen_op_mtc0_count();
+ tcg_gen_helper_0_1(do_mtc0_count, t0);
rn = "Count";
break;
/* 6,7 are implementation dependent */
@@ -4575,7 +4832,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 10:
switch (sel) {
case 0:
- gen_op_mtc0_entryhi();
+ tcg_gen_helper_0_1(do_mtc0_entryhi, t0);
rn = "EntryHi";
break;
default:
@@ -4585,7 +4842,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 11:
switch (sel) {
case 0:
- gen_op_mtc0_compare();
+ tcg_gen_helper_0_1(do_mtc0_compare, t0);
rn = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -4598,7 +4855,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 12:
switch (sel) {
case 0:
- gen_op_mtc0_status();
+ tcg_gen_helper_0_1(do_mtc0_status, t0);
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
@@ -4606,21 +4863,21 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_intctl();
+ tcg_gen_helper_0_1(do_mtc0_intctl, t0);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsctl();
+ tcg_gen_helper_0_1(do_mtc0_srsctl, t0);
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_srsmap();
+ gen_mtc0_store32(t0, offsetof(CPUState, CP0_SRSMap));
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSMap";
@@ -4632,7 +4889,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 13:
switch (sel) {
case 0:
- gen_op_mtc0_cause();
+ tcg_gen_helper_0_1(do_mtc0_cause, t0);
rn = "Cause";
break;
default:
@@ -4644,7 +4901,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 14:
switch (sel) {
case 0:
- gen_op_mtc0_epc();
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_EPC));
rn = "EPC";
break;
default:
@@ -4659,7 +4916,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_op_mtc0_ebase();
+ tcg_gen_helper_0_1(do_mtc0_ebase, t0);
rn = "EBase";
break;
default:
@@ -4669,7 +4926,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 16:
switch (sel) {
case 0:
- gen_op_mtc0_config0();
+ tcg_gen_helper_0_1(do_mtc0_config0, t0);
rn = "Config";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
@@ -4679,7 +4936,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
rn = "Config1";
break;
case 2:
- gen_op_mtc0_config2();
+ tcg_gen_helper_0_1(do_mtc0_config2, t0);
rn = "Config2";
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
@@ -4707,7 +4964,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 18:
switch (sel) {
case 0 ... 7:
- gen_op_mtc0_watchlo(sel);
+ tcg_gen_helper_0_1i(do_mtc0_watchlo, t0, sel);
rn = "WatchLo";
break;
default:
@@ -4717,7 +4974,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 19:
switch (sel) {
case 0 ... 7:
- gen_op_mtc0_watchhi(sel);
+ tcg_gen_helper_0_1i(do_mtc0_watchhi, t0, sel);
rn = "WatchHi";
break;
default:
@@ -4728,7 +4985,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS3);
- gen_op_mtc0_xcontext();
+ tcg_gen_helper_0_1(do_mtc0_xcontext, t0);
rn = "XContext";
break;
default:
@@ -4739,7 +4996,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_op_mtc0_framemask();
+ tcg_gen_helper_0_1(do_mtc0_framemask, t0);
rn = "Framemask";
break;
default:
@@ -4753,32 +5010,32 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 23:
switch (sel) {
case 0:
- gen_op_mtc0_debug(); /* EJTAG support */
+ tcg_gen_helper_0_1(do_mtc0_debug, t0); /* EJTAG support */
/* BS_STOP isn't good enough here, hflags may have changed. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
rn = "Debug";
break;
case 1:
-// gen_op_mtc0_tracecontrol(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_tracecontrol, t0); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceControl";
// break;
case 2:
-// gen_op_mtc0_tracecontrol2(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_tracecontrol2, t0); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceControl2";
// break;
case 3:
-// gen_op_mtc0_usertracedata(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_usertracedata, t0); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "UserTraceData";
// break;
case 4:
-// gen_op_mtc0_debug(); /* PDtrace support */
+// tcg_gen_helper_0_1(do_mtc0_tracebpc, t0); /* PDtrace support */
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "TraceBPC";
@@ -4790,7 +5047,8 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 24:
switch (sel) {
case 0:
- gen_op_mtc0_depc(); /* EJTAG support */
+ /* EJTAG support */
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_DEPC));
rn = "DEPC";
break;
default:
@@ -4800,35 +5058,35 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 25:
switch (sel) {
case 0:
- gen_op_mtc0_performance0();
+ tcg_gen_helper_0_1(do_mtc0_performance0, t0);
rn = "Performance0";
break;
case 1:
-// gen_op_mtc0_performance1();
+// tcg_gen_helper_0_1(do_mtc0_performance1, t0);
rn = "Performance1";
// break;
case 2:
-// gen_op_mtc0_performance2();
+// tcg_gen_helper_0_1(do_mtc0_performance2, t0);
rn = "Performance2";
// break;
case 3:
-// gen_op_mtc0_performance3();
+// tcg_gen_helper_0_1(do_mtc0_performance3, t0);
rn = "Performance3";
// break;
case 4:
-// gen_op_mtc0_performance4();
+// tcg_gen_helper_0_1(do_mtc0_performance4, t0);
rn = "Performance4";
// break;
case 5:
-// gen_op_mtc0_performance5();
+// tcg_gen_helper_0_1(do_mtc0_performance5, t0);
rn = "Performance5";
// break;
case 6:
-// gen_op_mtc0_performance6();
+// tcg_gen_helper_0_1(do_mtc0_performance6, t0);
rn = "Performance6";
// break;
case 7:
-// gen_op_mtc0_performance7();
+// tcg_gen_helper_0_1(do_mtc0_performance7, t0);
rn = "Performance7";
// break;
default:
@@ -4855,14 +5113,14 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_op_mtc0_taglo();
+ tcg_gen_helper_0_1(do_mtc0_taglo, t0);
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_op_mtc0_datalo();
+ tcg_gen_helper_0_1(do_mtc0_datalo, t0);
rn = "DataLo";
break;
default:
@@ -4875,14 +5133,14 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 2:
case 4:
case 6:
- gen_op_mtc0_taghi();
+ tcg_gen_helper_0_1(do_mtc0_taghi, t0);
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_op_mtc0_datahi();
+ tcg_gen_helper_0_1(do_mtc0_datahi, t0);
rn = "DataHi";
break;
default:
@@ -4893,7 +5151,7 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 30:
switch (sel) {
case 0:
- gen_op_mtc0_errorepc();
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
@@ -4903,7 +5161,8 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
case 31:
switch (sel) {
case 0:
- gen_op_mtc0_desave(); /* EJTAG support */
+ /* EJTAG support */
+ gen_mtc0_store32(t0, offsetof(CPUState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -4921,9 +5180,11 @@ static void gen_dmtc0 (CPUState *env, DisasContext *ctx, int reg, int sel)
rn, reg, sel);
}
#endif
+ tcg_temp_free(t0);
return;
die:
+ tcg_temp_free(t0);
#if defined MIPS_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "dmtc0 %s (reg %d sel %d)\n",
@@ -4934,125 +5195,126 @@ die:
}
#endif /* TARGET_MIPS64 */
-static void gen_mftr(CPUState *env, DisasContext *ctx, int rt,
+static void gen_mftr(CPUState *env, DisasContext *ctx, int rt, int rd,
int u, int sel, int h)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
((env->CP0_TCBind[other_tc] & (0xf << CP0TCBd_CurVPE)) !=
(env->CP0_TCBind[env->current_tc] & (0xf << CP0TCBd_CurVPE))))
- tcg_gen_movi_tl(cpu_T[0], -1);
+ tcg_gen_movi_tl(t0, -1);
else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) >
(env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC)))
- tcg_gen_movi_tl(cpu_T[0], -1);
+ tcg_gen_movi_tl(t0, -1);
else if (u == 0) {
switch (rt) {
case 2:
switch (sel) {
case 1:
- gen_op_mftc0_tcstatus();
+ tcg_gen_helper_1_1(do_mftc0_tcstatus, t0, t0);
break;
case 2:
- gen_op_mftc0_tcbind();
+ tcg_gen_helper_1_1(do_mftc0_tcbind, t0, t0);
break;
case 3:
- gen_op_mftc0_tcrestart();
+ tcg_gen_helper_1_1(do_mftc0_tcrestart, t0, t0);
break;
case 4:
- gen_op_mftc0_tchalt();
+ tcg_gen_helper_1_1(do_mftc0_tchalt, t0, t0);
break;
case 5:
- gen_op_mftc0_tccontext();
+ tcg_gen_helper_1_1(do_mftc0_tccontext, t0, t0);
break;
case 6:
- gen_op_mftc0_tcschedule();
+ tcg_gen_helper_1_1(do_mftc0_tcschedule, t0, t0);
break;
case 7:
- gen_op_mftc0_tcschefback();
+ tcg_gen_helper_1_1(do_mftc0_tcschefback, t0, t0);
break;
default:
- gen_mfc0(env, ctx, rt, sel);
+ gen_mfc0(env, ctx, t0, rt, sel);
break;
}
break;
case 10:
switch (sel) {
case 0:
- gen_op_mftc0_entryhi();
+ tcg_gen_helper_1_1(do_mftc0_entryhi, t0, t0);
break;
default:
- gen_mfc0(env, ctx, rt, sel);
+ gen_mfc0(env, ctx, t0, rt, sel);
break;
}
case 12:
switch (sel) {
case 0:
- gen_op_mftc0_status();
+ tcg_gen_helper_1_1(do_mftc0_status, t0, t0);
break;
default:
- gen_mfc0(env, ctx, rt, sel);
+ gen_mfc0(env, ctx, t0, rt, sel);
break;
}
case 23:
switch (sel) {
case 0:
- gen_op_mftc0_debug();
+ tcg_gen_helper_1_1(do_mftc0_debug, t0, t0);
break;
default:
- gen_mfc0(env, ctx, rt, sel);
+ gen_mfc0(env, ctx, t0, rt, sel);
break;
}
break;
default:
- gen_mfc0(env, ctx, rt, sel);
+ gen_mfc0(env, ctx, t0, rt, sel);
}
} else switch (sel) {
/* GPR registers. */
case 0:
- gen_op_mftgpr(rt);
+ tcg_gen_helper_1_1i(do_mftgpr, t0, t0, rt);
break;
/* Auxiliary CPU registers */
case 1:
switch (rt) {
case 0:
- gen_op_mftlo(0);
+ tcg_gen_helper_1_1i(do_mftlo, t0, t0, 0);
break;
case 1:
- gen_op_mfthi(0);
+ tcg_gen_helper_1_1i(do_mfthi, t0, t0, 0);
break;
case 2:
- gen_op_mftacx(0);
+ tcg_gen_helper_1_1i(do_mftacx, t0, t0, 0);
break;
case 4:
- gen_op_mftlo(1);
+ tcg_gen_helper_1_1i(do_mftlo, t0, t0, 1);
break;
case 5:
- gen_op_mfthi(1);
+ tcg_gen_helper_1_1i(do_mfthi, t0, t0, 1);
break;
case 6:
- gen_op_mftacx(1);
+ tcg_gen_helper_1_1i(do_mftacx, t0, t0, 1);
break;
case 8:
- gen_op_mftlo(2);
+ tcg_gen_helper_1_1i(do_mftlo, t0, t0, 2);
break;
case 9:
- gen_op_mfthi(2);
+ tcg_gen_helper_1_1i(do_mfthi, t0, t0, 2);
break;
case 10:
- gen_op_mftacx(2);
+ tcg_gen_helper_1_1i(do_mftacx, t0, t0, 2);
break;
case 12:
- gen_op_mftlo(3);
+ tcg_gen_helper_1_1i(do_mftlo, t0, t0, 3);
break;
case 13:
- gen_op_mfthi(3);
+ tcg_gen_helper_1_1i(do_mfthi, t0, t0, 3);
break;
case 14:
- gen_op_mftacx(3);
+ tcg_gen_helper_1_1i(do_mftacx, t0, t0, 3);
break;
case 16:
- gen_op_mftdsp();
+ tcg_gen_helper_1_1(do_mftdsp, t0, t0);
break;
default:
goto die;
@@ -5062,16 +5324,16 @@ static void gen_mftr(CPUState *env, DisasContext *ctx, int rt,
case 2:
/* XXX: For now we support only a single FPU context. */
if (h == 0) {
- GEN_LOAD_FREG_FTN(WT0, rt);
- gen_op_mfc1();
+ gen_load_fpr32(fpu32_T[0], rt);
+ tcg_gen_ext_i32_tl(t0, fpu32_T[0]);
} else {
- GEN_LOAD_FREG_FTN(WTH0, rt);
- gen_op_mfhc1();
+ gen_load_fpr32h(fpu32h_T[0], rt);
+ tcg_gen_ext_i32_tl(t0, fpu32h_T[0]);
}
break;
case 3:
/* XXX: For now we support only a single FPU context. */
- gen_op_cfc1(rt);
+ tcg_gen_helper_1_1i(do_cfc1, t0, t0, rt);
break;
/* COP2: Not implemented. */
case 4:
@@ -5086,9 +5348,12 @@ static void gen_mftr(CPUState *env, DisasContext *ctx, int rt,
rt, u, sel, h);
}
#endif
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
return;
die:
+ tcg_temp_free(t0);
#if defined MIPS_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "mftr (reg %d u %d sel %d h %d)\n",
@@ -5098,11 +5363,13 @@ die:
generate_exception(ctx, EXCP_RI);
}
-static void gen_mttr(CPUState *env, DisasContext *ctx, int rd,
+static void gen_mttr(CPUState *env, DisasContext *ctx, int rd, int rt,
int u, int sel, int h)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ gen_load_gpr(t0, rt);
if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
((env->CP0_TCBind[other_tc] & (0xf << CP0TCBd_CurVPE)) !=
(env->CP0_TCBind[env->current_tc] & (0xf << CP0TCBd_CurVPE))))
@@ -5115,108 +5382,108 @@ static void gen_mttr(CPUState *env, DisasContext *ctx, int rd,
case 2:
switch (sel) {
case 1:
- gen_op_mttc0_tcstatus();
+ tcg_gen_helper_0_1(do_mttc0_tcstatus, t0);
break;
case 2:
- gen_op_mttc0_tcbind();
+ tcg_gen_helper_0_1(do_mttc0_tcbind, t0);
break;
case 3:
- gen_op_mttc0_tcrestart();
+ tcg_gen_helper_0_1(do_mttc0_tcrestart, t0);
break;
case 4:
- gen_op_mttc0_tchalt();
+ tcg_gen_helper_0_1(do_mttc0_tchalt, t0);
break;
case 5:
- gen_op_mttc0_tccontext();
+ tcg_gen_helper_0_1(do_mttc0_tccontext, t0);
break;
case 6:
- gen_op_mttc0_tcschedule();
+ tcg_gen_helper_0_1(do_mttc0_tcschedule, t0);
break;
case 7:
- gen_op_mttc0_tcschefback();
+ tcg_gen_helper_0_1(do_mttc0_tcschefback, t0);
break;
default:
- gen_mtc0(env, ctx, rd, sel);
+ gen_mtc0(env, ctx, t0, rd, sel);
break;
}
break;
case 10:
switch (sel) {
case 0:
- gen_op_mttc0_entryhi();
+ tcg_gen_helper_0_1(do_mttc0_entryhi, t0);
break;
default:
- gen_mtc0(env, ctx, rd, sel);
+ gen_mtc0(env, ctx, t0, rd, sel);
break;
}
case 12:
switch (sel) {
case 0:
- gen_op_mttc0_status();
+ tcg_gen_helper_0_1(do_mttc0_status, t0);
break;
default:
- gen_mtc0(env, ctx, rd, sel);
+ gen_mtc0(env, ctx, t0, rd, sel);
break;
}
case 23:
switch (sel) {
case 0:
- gen_op_mttc0_debug();
+ tcg_gen_helper_0_1(do_mttc0_debug, t0);
break;
default:
- gen_mtc0(env, ctx, rd, sel);
+ gen_mtc0(env, ctx, t0, rd, sel);
break;
}
break;
default:
- gen_mtc0(env, ctx, rd, sel);
+ gen_mtc0(env, ctx, t0, rd, sel);
}
} else switch (sel) {
/* GPR registers. */
case 0:
- gen_op_mttgpr(rd);
+ tcg_gen_helper_0_1i(do_mttgpr, t0, rd);
break;
/* Auxiliary CPU registers */
case 1:
switch (rd) {
case 0:
- gen_op_mttlo(0);
+ tcg_gen_helper_0_1i(do_mttlo, t0, 0);
break;
case 1:
- gen_op_mtthi(0);
+ tcg_gen_helper_0_1i(do_mtthi, t0, 0);
break;
case 2:
- gen_op_mttacx(0);
+ tcg_gen_helper_0_1i(do_mttacx, t0, 0);
break;
case 4:
- gen_op_mttlo(1);
+ tcg_gen_helper_0_1i(do_mttlo, t0, 1);
break;
case 5:
- gen_op_mtthi(1);
+ tcg_gen_helper_0_1i(do_mtthi, t0, 1);
break;
case 6:
- gen_op_mttacx(1);
+ tcg_gen_helper_0_1i(do_mttacx, t0, 1);
break;
case 8:
- gen_op_mttlo(2);
+ tcg_gen_helper_0_1i(do_mttlo, t0, 2);
break;
case 9:
- gen_op_mtthi(2);
+ tcg_gen_helper_0_1i(do_mtthi, t0, 2);
break;
case 10:
- gen_op_mttacx(2);
+ tcg_gen_helper_0_1i(do_mttacx, t0, 2);
break;
case 12:
- gen_op_mttlo(3);
+ tcg_gen_helper_0_1i(do_mttlo, t0, 3);
break;
case 13:
- gen_op_mtthi(3);
+ tcg_gen_helper_0_1i(do_mtthi, t0, 3);
break;
case 14:
- gen_op_mttacx(3);
+ tcg_gen_helper_0_1i(do_mttacx, t0, 3);
break;
case 16:
- gen_op_mttdsp();
+ tcg_gen_helper_0_1(do_mttdsp, t0);
break;
default:
goto die;
@@ -5226,16 +5493,16 @@ static void gen_mttr(CPUState *env, DisasContext *ctx, int rd,
case 2:
/* XXX: For now we support only a single FPU context. */
if (h == 0) {
- gen_op_mtc1();
- GEN_STORE_FTN_FREG(rd, WT0);
+ tcg_gen_trunc_tl_i32(fpu32_T[0], t0);
+ gen_store_fpr32(fpu32_T[0], rd);
} else {
- gen_op_mthc1();
- GEN_STORE_FTN_FREG(rd, WTH0);
+ tcg_gen_trunc_tl_i32(fpu32h_T[0], t0);
+ gen_store_fpr32h(fpu32h_T[0], rd);
}
break;
case 3:
/* XXX: For now we support only a single FPU context. */
- gen_op_ctc1(rd);
+ tcg_gen_helper_0_1i(do_ctc1, t0, rd);
break;
/* COP2: Not implemented. */
case 4:
@@ -5250,9 +5517,11 @@ static void gen_mttr(CPUState *env, DisasContext *ctx, int rd,
rd, u, sel, h);
}
#endif
+ tcg_temp_free(t0);
return;
die:
+ tcg_temp_free(t0);
#if defined MIPS_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "mttr (reg %d u %d sel %d h %d)\n",
@@ -5272,14 +5541,24 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
/* Treat as NOP. */
return;
}
- gen_mfc0(env, ctx, rd, ctx->opcode & 0x7);
- gen_store_gpr(cpu_T[0], rt);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_mfc0(env, ctx, t0, rd, ctx->opcode & 0x7);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
opn = "mfc0";
break;
case OPC_MTC0:
- gen_load_gpr(cpu_T[0], rt);
- save_cpu_state(ctx, 1);
- gen_mtc0(env, ctx, rd, ctx->opcode & 0x7);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, rt);
+ save_cpu_state(ctx, 1);
+ gen_mtc0(env, ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
opn = "mtc0";
break;
#if defined(TARGET_MIPS64)
@@ -5289,15 +5568,25 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
/* Treat as NOP. */
return;
}
- gen_dmfc0(env, ctx, rd, ctx->opcode & 0x7);
- gen_store_gpr(cpu_T[0], rt);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_dmfc0(env, ctx, t0, rd, ctx->opcode & 0x7);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
opn = "dmfc0";
break;
case OPC_DMTC0:
check_insn(env, ctx, ISA_MIPS3);
- gen_load_gpr(cpu_T[0], rt);
- save_cpu_state(ctx, 1);
- gen_dmtc0(env, ctx, rd, ctx->opcode & 0x7);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, rt);
+ save_cpu_state(ctx, 1);
+ gen_dmtc0(env, ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
opn = "dmtc0";
break;
#endif
@@ -5307,15 +5596,13 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
/* Treat as NOP. */
return;
}
- gen_mftr(env, ctx, rt, (ctx->opcode >> 5) & 1,
+ gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1,
ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
- gen_store_gpr(cpu_T[0], rd);
opn = "mftr";
break;
case OPC_MTTR:
check_insn(env, ctx, ASE_MT);
- gen_load_gpr(cpu_T[0], rt);
- gen_mttr(env, ctx, rd, (ctx->opcode >> 5) & 1,
+ gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1,
ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
opn = "mttr";
break;
@@ -5323,31 +5610,31 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
opn = "tlbwi";
if (!env->tlb->do_tlbwi)
goto die;
- gen_op_tlbwi();
+ tcg_gen_helper_0_0(env->tlb->do_tlbwi);
break;
case OPC_TLBWR:
opn = "tlbwr";
if (!env->tlb->do_tlbwr)
goto die;
- gen_op_tlbwr();
+ tcg_gen_helper_0_0(env->tlb->do_tlbwr);
break;
case OPC_TLBP:
opn = "tlbp";
if (!env->tlb->do_tlbp)
goto die;
- gen_op_tlbp();
+ tcg_gen_helper_0_0(env->tlb->do_tlbp);
break;
case OPC_TLBR:
opn = "tlbr";
if (!env->tlb->do_tlbr)
goto die;
- gen_op_tlbr();
+ tcg_gen_helper_0_0(env->tlb->do_tlbr);
break;
case OPC_ERET:
opn = "eret";
check_insn(env, ctx, ISA_MIPS2);
save_cpu_state(ctx, 1);
- gen_op_eret();
+ tcg_gen_helper_0_0(do_eret);
ctx->bstate = BS_EXCP;
break;
case OPC_DERET:
@@ -5358,7 +5645,7 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
generate_exception(ctx, EXCP_RI);
} else {
save_cpu_state(ctx, 1);
- gen_op_deret();
+ tcg_gen_helper_0_0(do_deret);
ctx->bstate = BS_EXCP;
}
break;
@@ -5369,7 +5656,7 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
ctx->pc += 4;
save_cpu_state(ctx, 1);
ctx->pc -= 4;
- gen_op_wait();
+ tcg_gen_helper_0_0(do_wait);
ctx->bstate = BS_EXCP;
break;
default:
@@ -5380,6 +5667,7 @@ static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int
}
MIPS_DEBUG("%s %s %d", opn, regnames[rt], rd);
}
+#endif /* !CONFIG_USER_ONLY */
/* CP1 Branches (before delay slot) */
static void gen_compute_branch1 (CPUState *env, DisasContext *ctx, uint32_t op,
@@ -5387,6 +5675,8 @@ static void gen_compute_branch1 (CPUState *env, DisasContext *ctx, uint32_t op,
{
target_ulong btarget;
const char *opn = "cp1 cond branch";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
if (cc != 0)
check_insn(env, ctx, ISA_MIPS4 | ISA_MIPS32);
@@ -5395,51 +5685,187 @@ static void gen_compute_branch1 (CPUState *env, DisasContext *ctx, uint32_t op,
switch (op) {
case OPC_BC1F:
- gen_op_bc1f(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_not_tl(t0, t0);
+ tcg_gen_movi_tl(t1, 0x1 << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1f";
goto not_likely;
case OPC_BC1FL:
- gen_op_bc1f(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_not_tl(t0, t0);
+ tcg_gen_movi_tl(t1, 0x1 << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1fl";
goto likely;
case OPC_BC1T:
- gen_op_bc1t(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_movi_tl(t1, 0x1 << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1t";
goto not_likely;
case OPC_BC1TL:
- gen_op_bc1t(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_movi_tl(t1, 0x1 << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1tl";
likely:
ctx->hflags |= MIPS_HFLAG_BL;
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, bcond));
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, bcond));
break;
case OPC_BC1FANY2:
- gen_op_bc1any2f(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_not_tl(t0, t0);
+ tcg_gen_movi_tl(t1, 0x3 << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1any2f";
goto not_likely;
case OPC_BC1TANY2:
- gen_op_bc1any2t(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_movi_tl(t1, 0x3 << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1any2t";
goto not_likely;
case OPC_BC1FANY4:
- gen_op_bc1any4f(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_not_tl(t0, t0);
+ tcg_gen_movi_tl(t1, 0xf << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1any4f";
goto not_likely;
case OPC_BC1TANY4:
- gen_op_bc1any4t(cc);
+ {
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_ext_i32_tl(t0, r_tmp1);
+ tcg_temp_free(r_tmp1);
+ tcg_gen_movi_tl(t1, 0xf << cc);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(t0, 1);
+ gen_set_label(l2);
+ }
opn = "bc1any4t";
not_likely:
ctx->hflags |= MIPS_HFLAG_BC;
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, bcond));
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, bcond));
break;
default:
MIPS_INVAL(opn);
generate_exception (ctx, EXCP_RI);
- return;
+ goto out;
}
MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn,
ctx->hflags, btarget);
ctx->btarget = btarget;
+
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
/* Coprocessor 1 (FPU) */
@@ -5449,60 +5875,64 @@ static void gen_compute_branch1 (CPUState *env, DisasContext *ctx, uint32_t op,
static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs)
{
const char *opn = "cp1 move";
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
switch (opc) {
case OPC_MFC1:
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_mfc1();
- gen_store_gpr(cpu_T[0], rt);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_ext_i32_tl(t0, fpu32_T[0]);
+ gen_store_gpr(t0, rt);
opn = "mfc1";
break;
case OPC_MTC1:
- gen_load_gpr(cpu_T[0], rt);
- gen_op_mtc1();
- GEN_STORE_FTN_FREG(fs, WT0);
+ gen_load_gpr(t0, rt);
+ tcg_gen_trunc_tl_i32(fpu32_T[0], t0);
+ gen_store_fpr32(fpu32_T[0], fs);
opn = "mtc1";
break;
case OPC_CFC1:
- gen_op_cfc1(fs);
- gen_store_gpr(cpu_T[0], rt);
+ tcg_gen_helper_1_i(do_cfc1, t0, fs);
+ gen_store_gpr(t0, rt);
opn = "cfc1";
break;
case OPC_CTC1:
- gen_load_gpr(cpu_T[0], rt);
- gen_op_ctc1(fs);
+ gen_load_gpr(t0, rt);
+ tcg_gen_helper_0_1i(do_ctc1, t0, fs);
opn = "ctc1";
break;
case OPC_DMFC1:
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_dmfc1();
- gen_store_gpr(cpu_T[0], rt);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_mov_tl(t0, fpu64_T[0]);
+ gen_store_gpr(t0, rt);
opn = "dmfc1";
break;
case OPC_DMTC1:
- gen_load_gpr(cpu_T[0], rt);
- gen_op_dmtc1();
- GEN_STORE_FTN_FREG(fs, DT0);
+ gen_load_gpr(t0, rt);
+ tcg_gen_mov_tl(fpu64_T[0], t0);
+ gen_store_fpr64(ctx, fpu64_T[0], fs);
opn = "dmtc1";
break;
case OPC_MFHC1:
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_mfhc1();
- gen_store_gpr(cpu_T[0], rt);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_ext_i32_tl(t0, fpu32h_T[0]);
+ gen_store_gpr(t0, rt);
opn = "mfhc1";
break;
case OPC_MTHC1:
- gen_load_gpr(cpu_T[0], rt);
- gen_op_mthc1();
- GEN_STORE_FTN_FREG(fs, WTH0);
+ gen_load_gpr(t0, rt);
+ tcg_gen_trunc_tl_i32(fpu32h_T[0], t0);
+ gen_store_fpr32h(fpu32h_T[0], fs);
opn = "mthc1";
break;
default:
MIPS_INVAL(opn);
generate_exception (ctx, EXCP_RI);
- return;
+ goto out;
}
MIPS_DEBUG("%s %s %s", opn, regnames[rt], fregnames[fs]);
+
+ out:
+ tcg_temp_free(t0);
}
static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf)
@@ -5510,6 +5940,8 @@ static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf)
int l1 = gen_new_label();
uint32_t ccbit;
TCGCond cond;
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
if (cc)
ccbit = 1 << (24 + cc);
@@ -5520,41 +5952,104 @@ static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf)
else
cond = TCG_COND_NE;
- gen_load_gpr(cpu_T[0], rd);
- gen_load_gpr(cpu_T[1], rs);
+ gen_load_gpr(t0, rd);
+ gen_load_gpr(t1, rs);
{
TCGv r_ptr = tcg_temp_new(TCG_TYPE_PTR);
- TCGv r_tmp = tcg_temp_new(TCG_TYPE_I32);
+ TCGv r_tmp = tcg_temp_local_new(TCG_TYPE_I32);
tcg_gen_ld_ptr(r_ptr, cpu_env, offsetof(CPUState, fpu));
tcg_gen_ld_i32(r_tmp, r_ptr, offsetof(CPUMIPSFPUContext, fcr31));
tcg_temp_free(r_ptr);
tcg_gen_andi_i32(r_tmp, r_tmp, ccbit);
tcg_gen_brcondi_i32(cond, r_tmp, 0, l1);
+ tcg_temp_free(r_tmp);
}
- tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
+ tcg_gen_mov_tl(t0, t1);
+ tcg_temp_free(t1);
+
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_movcf_s (int cc, int tf)
+{
+ uint32_t ccbit;
+ int cond;
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+ int l1 = gen_new_label();
+
+ if (cc)
+ ccbit = 1 << (24 + cc);
+ else
+ ccbit = 1 << 23;
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ tcg_gen_ld_i32(r_tmp1, current_fpu, offsetof(CPUMIPSFPUContext, fcr31));
+ tcg_gen_andi_i32(r_tmp1, r_tmp1, ccbit);
+ tcg_gen_brcondi_i32(cond, r_tmp1, 0, l1);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ gen_set_label(l1);
+ tcg_temp_free(r_tmp1);
+}
+static inline void gen_movcf_d (int cc, int tf)
+{
+ uint32_t ccbit;
+ int cond;
+ TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);
+ int l1 = gen_new_label();
+
+ if (cc)
+ ccbit = 1 << (24 + cc);
+ else
+ ccbit = 1 << 23;
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ tcg_gen_ld_i32(r_tmp1, current_fpu, offsetof(CPUMIPSFPUContext, fcr31));
+ tcg_gen_andi_i32(r_tmp1, r_tmp1, ccbit);
+ tcg_gen_brcondi_i32(cond, r_tmp1, 0, l1);
+ tcg_gen_mov_i64(fpu64_T[2], fpu64_T[0]);
gen_set_label(l1);
- gen_store_gpr(cpu_T[0], rd);
+ tcg_temp_free(r_tmp1);
}
-#define GEN_MOVCF(fmt) \
-static void glue(gen_movcf_, fmt) (DisasContext *ctx, int cc, int tf) \
-{ \
- uint32_t ccbit; \
- \
- if (cc) { \
- ccbit = 1 << (24 + cc); \
- } else \
- ccbit = 1 << 23; \
- if (!tf) \
- glue(gen_op_float_movf_, fmt)(ccbit); \
- else \
- glue(gen_op_float_movt_, fmt)(ccbit); \
+static inline void gen_movcf_ps (int cc, int tf)
+{
+ int cond;
+ TCGv r_tmp1 = tcg_temp_local_new(TCG_TYPE_I32);
+ TCGv r_tmp2 = tcg_temp_local_new(TCG_TYPE_I32);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ get_fp_cond(r_tmp1);
+ tcg_gen_shri_i32(r_tmp1, r_tmp1, cc);
+ tcg_gen_andi_i32(r_tmp2, r_tmp1, 0x1);
+ tcg_gen_brcondi_i32(cond, r_tmp2, 0, l1);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ gen_set_label(l1);
+ tcg_gen_andi_i32(r_tmp2, r_tmp1, 0x2);
+ tcg_gen_brcondi_i32(cond, r_tmp2, 0, l2);
+ tcg_gen_mov_i32(fpu32h_T[2], fpu32h_T[0]);
+ gen_set_label(l2);
+ tcg_temp_free(r_tmp1);
+ tcg_temp_free(r_tmp2);
}
-GEN_MOVCF(d);
-GEN_MOVCF(s);
-#undef GEN_MOVCF
+
static void gen_farith (DisasContext *ctx, uint32_t op1,
int ft, int fs, int fd, int cc)
@@ -5601,207 +6096,224 @@ static void gen_farith (DisasContext *ctx, uint32_t op1,
switch (ctx->opcode & FOP(0x3f, 0x1f)) {
case FOP(0, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- gen_op_float_add_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ tcg_gen_helper_0_0(do_float_add_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "add.s";
optype = BINOP;
break;
case FOP(1, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- gen_op_float_sub_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ tcg_gen_helper_0_0(do_float_sub_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "sub.s";
optype = BINOP;
break;
case FOP(2, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- gen_op_float_mul_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ tcg_gen_helper_0_0(do_float_mul_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "mul.s";
optype = BINOP;
break;
case FOP(3, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- gen_op_float_div_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ tcg_gen_helper_0_0(do_float_div_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "div.s";
optype = BINOP;
break;
case FOP(4, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_sqrt_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_sqrt_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "sqrt.s";
break;
case FOP(5, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_abs_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_abs_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "abs.s";
break;
case FOP(6, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_mov_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_store_fpr32(fpu32_T[0], fd);
opn = "mov.s";
break;
case FOP(7, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_chs_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_chs_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "neg.s";
break;
case FOP(8, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_roundl_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_roundl_s);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "round.l.s";
break;
case FOP(9, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_truncl_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_truncl_s);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "trunc.l.s";
break;
case FOP(10, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_ceill_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_ceill_s);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "ceil.l.s";
break;
case FOP(11, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_floorl_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_floorl_s);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "floor.l.s";
break;
case FOP(12, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_roundw_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_roundw_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "round.w.s";
break;
case FOP(13, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_truncw_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_truncw_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "trunc.w.s";
break;
case FOP(14, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_ceilw_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_ceilw_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "ceil.w.s";
break;
case FOP(15, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_floorw_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_floorw_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "floor.w.s";
break;
case FOP(17, 16):
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- gen_movcf_s(ctx, (ft >> 2) & 0x7, ft & 0x1);
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ gen_movcf_s((ft >> 2) & 0x7, ft & 0x1);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "movcf.s";
break;
case FOP(18, 16):
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- gen_op_float_movz_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ {
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, ft);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ gen_set_label(l1);
+ }
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "movz.s";
break;
case FOP(19, 16):
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- gen_op_float_movn_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ {
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, ft);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ gen_set_label(l1);
+ }
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "movn.s";
break;
case FOP(21, 16):
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_recip_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_recip_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "recip.s";
break;
case FOP(22, 16):
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_rsqrt_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_rsqrt_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "rsqrt.s";
break;
case FOP(28, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- gen_op_float_recip2_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ tcg_gen_helper_0_0(do_float_recip2_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "recip2.s";
break;
case FOP(29, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_recip1_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_recip1_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "recip1.s";
break;
case FOP(30, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_rsqrt1_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_rsqrt1_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "rsqrt1.s";
break;
case FOP(31, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT2, ft);
- gen_op_float_rsqrt2_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], ft);
+ tcg_gen_helper_0_0(do_float_rsqrt2_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "rsqrt2.s";
break;
case FOP(33, 16):
check_cp1_registers(ctx, fd);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_cvtd_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtd_s);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "cvt.d.s";
break;
case FOP(36, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_cvtw_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtw_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.w.s";
break;
case FOP(37, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_cvtl_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtl_s);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "cvt.l.s";
break;
case FOP(38, 16):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT1, fs);
- GEN_LOAD_FREG_FTN(WT0, ft);
- gen_op_float_cvtps_s();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ tcg_gen_extu_i32_i64(fpu64_T[0], fpu32_T[0]);
+ tcg_gen_extu_i32_i64(fpu64_T[1], fpu32_T[1]);
+ tcg_gen_shli_i64(fpu64_T[1], fpu64_T[1], 32);
+ tcg_gen_or_i64(fpu64_T[2], fpu64_T[0], fpu64_T[1]);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "cvt.ps.s";
break;
case FOP(48, 16):
@@ -5820,8 +6332,8 @@ static void gen_farith (DisasContext *ctx, uint32_t op1,
case FOP(61, 16):
case FOP(62, 16):
case FOP(63, 16):
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
if (ctx->opcode & (1 << 6)) {
check_cop1x(ctx);
gen_cmpabs_s(func-48, cc);
@@ -5833,190 +6345,204 @@ static void gen_farith (DisasContext *ctx, uint32_t op1,
break;
case FOP(0, 17):
check_cp1_registers(ctx, fs | ft | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- gen_op_float_add_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ tcg_gen_helper_0_0(do_float_add_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "add.d";
optype = BINOP;
break;
case FOP(1, 17):
check_cp1_registers(ctx, fs | ft | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- gen_op_float_sub_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ tcg_gen_helper_0_0(do_float_sub_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "sub.d";
optype = BINOP;
break;
case FOP(2, 17):
check_cp1_registers(ctx, fs | ft | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- gen_op_float_mul_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ tcg_gen_helper_0_0(do_float_mul_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "mul.d";
optype = BINOP;
break;
case FOP(3, 17):
check_cp1_registers(ctx, fs | ft | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- gen_op_float_div_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ tcg_gen_helper_0_0(do_float_div_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "div.d";
optype = BINOP;
break;
case FOP(4, 17):
check_cp1_registers(ctx, fs | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_sqrt_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_sqrt_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "sqrt.d";
break;
case FOP(5, 17):
check_cp1_registers(ctx, fs | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_abs_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_abs_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "abs.d";
break;
case FOP(6, 17):
check_cp1_registers(ctx, fs | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_mov_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_store_fpr64(ctx, fpu64_T[0], fd);
opn = "mov.d";
break;
case FOP(7, 17):
check_cp1_registers(ctx, fs | fd);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_chs_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_chs_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "neg.d";
break;
case FOP(8, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_roundl_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_roundl_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "round.l.d";
break;
case FOP(9, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_truncl_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_truncl_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "trunc.l.d";
break;
case FOP(10, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_ceill_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_ceill_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "ceil.l.d";
break;
case FOP(11, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_floorl_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_floorl_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "floor.l.d";
break;
case FOP(12, 17):
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_roundw_d();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_roundw_d);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "round.w.d";
break;
case FOP(13, 17):
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_truncw_d();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_truncw_d);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "trunc.w.d";
break;
case FOP(14, 17):
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_ceilw_d();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_ceilw_d);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "ceil.w.d";
break;
case FOP(15, 17):
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_floorw_d();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_floorw_d);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "floor.w.d";
break;
case FOP(17, 17):
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT2, fd);
- gen_movcf_d(ctx, (ft >> 2) & 0x7, ft & 0x1);
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[2], fd);
+ gen_movcf_d((ft >> 2) & 0x7, ft & 0x1);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "movcf.d";
break;
case FOP(18, 17):
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT2, fd);
- gen_op_float_movz_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[2], fd);
+ {
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, ft);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i64(fpu64_T[2], fpu64_T[0]);
+ gen_set_label(l1);
+ }
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "movz.d";
break;
case FOP(19, 17):
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT2, fd);
- gen_op_float_movn_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[2], fd);
+ {
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, ft);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i64(fpu64_T[2], fpu64_T[0]);
+ gen_set_label(l1);
+ }
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "movn.d";
break;
case FOP(21, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_recip_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_recip_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "recip.d";
break;
case FOP(22, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_rsqrt_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_rsqrt_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "rsqrt.d";
break;
case FOP(28, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT2, ft);
- gen_op_float_recip2_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[2], ft);
+ tcg_gen_helper_0_0(do_float_recip2_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "recip2.d";
break;
case FOP(29, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_recip1_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_recip1_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "recip1.d";
break;
case FOP(30, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_rsqrt1_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_rsqrt1_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "rsqrt1.d";
break;
case FOP(31, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT2, ft);
- gen_op_float_rsqrt2_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[2], ft);
+ tcg_gen_helper_0_0(do_float_rsqrt2_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "rsqrt2.d";
break;
case FOP(48, 17):
@@ -6035,8 +6561,8 @@ static void gen_farith (DisasContext *ctx, uint32_t op1,
case FOP(61, 17):
case FOP(62, 17):
case FOP(63, 17):
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
if (ctx->opcode & (1 << 6)) {
check_cop1x(ctx);
check_cp1_registers(ctx, fs | ft);
@@ -6050,275 +6576,288 @@ static void gen_farith (DisasContext *ctx, uint32_t op1,
break;
case FOP(32, 17):
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_cvts_d();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvts_d);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.s.d";
break;
case FOP(36, 17):
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_cvtw_d();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtw_d);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.w.d";
break;
case FOP(37, 17):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_cvtl_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtl_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "cvt.l.d";
break;
case FOP(32, 20):
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_cvts_w();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvts_w);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.s.w";
break;
case FOP(33, 20):
check_cp1_registers(ctx, fd);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_cvtd_w();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtd_w);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "cvt.d.w";
break;
case FOP(32, 21):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_cvts_l();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvts_l);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.s.l";
break;
case FOP(33, 21):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- gen_op_float_cvtd_l();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtd_l);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "cvt.d.l";
break;
case FOP(38, 20):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_cvtps_pw();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtps_pw);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "cvt.ps.pw";
break;
case FOP(0, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- gen_op_float_add_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ tcg_gen_helper_0_0(do_float_add_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "add.ps";
break;
case FOP(1, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- gen_op_float_sub_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ tcg_gen_helper_0_0(do_float_sub_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "sub.ps";
break;
case FOP(2, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- gen_op_float_mul_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ tcg_gen_helper_0_0(do_float_mul_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "mul.ps";
break;
case FOP(5, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_abs_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_abs_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "abs.ps";
break;
case FOP(6, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_mov_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_store_fpr32(fpu32_T[0], fd);
+ gen_store_fpr32h(fpu32h_T[0], fd);
opn = "mov.ps";
break;
case FOP(7, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_chs_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_chs_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "neg.ps";
break;
case FOP(17, 22):
check_cp1_64bitmode(ctx);
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- GEN_LOAD_FREG_FTN(WTH2, fd);
- if (ft & 0x1)
- gen_op_float_movt_ps ((ft >> 2) & 0x7);
- else
- gen_op_float_movf_ps ((ft >> 2) & 0x7);
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ gen_load_fpr32h(fpu32h_T[2], fd);
+ gen_movcf_ps((ft >> 2) & 0x7, ft & 0x1);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "movcf.ps";
break;
case FOP(18, 22):
check_cp1_64bitmode(ctx);
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- GEN_LOAD_FREG_FTN(WTH2, fd);
- gen_op_float_movz_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ gen_load_fpr32h(fpu32h_T[2], fd);
+ {
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, ft);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ tcg_gen_mov_i32(fpu32h_T[2], fpu32h_T[0]);
+ gen_set_label(l1);
+ }
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "movz.ps";
break;
case FOP(19, 22):
check_cp1_64bitmode(ctx);
- gen_load_gpr(cpu_T[0], ft);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- GEN_LOAD_FREG_FTN(WTH2, fd);
- gen_op_float_movn_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ gen_load_fpr32h(fpu32h_T[2], fd);
+ {
+ int l1 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, ft);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ tcg_gen_mov_i32(fpu32h_T[2], fpu32h_T[0]);
+ gen_set_label(l1);
+ }
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "movn.ps";
break;
case FOP(24, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, ft);
- GEN_LOAD_FREG_FTN(WTH0, ft);
- GEN_LOAD_FREG_FTN(WT1, fs);
- GEN_LOAD_FREG_FTN(WTH1, fs);
- gen_op_float_addr_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], ft);
+ gen_load_fpr32h(fpu32h_T[0], ft);
+ gen_load_fpr32(fpu32_T[1], fs);
+ gen_load_fpr32h(fpu32h_T[1], fs);
+ tcg_gen_helper_0_0(do_float_addr_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "addr.ps";
break;
case FOP(26, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, ft);
- GEN_LOAD_FREG_FTN(WTH0, ft);
- GEN_LOAD_FREG_FTN(WT1, fs);
- GEN_LOAD_FREG_FTN(WTH1, fs);
- gen_op_float_mulr_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], ft);
+ gen_load_fpr32h(fpu32h_T[0], ft);
+ gen_load_fpr32(fpu32_T[1], fs);
+ gen_load_fpr32h(fpu32h_T[1], fs);
+ tcg_gen_helper_0_0(do_float_mulr_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "mulr.ps";
break;
case FOP(28, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT2, fd);
- GEN_LOAD_FREG_FTN(WTH2, fd);
- gen_op_float_recip2_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], fd);
+ gen_load_fpr32h(fpu32h_T[2], fd);
+ tcg_gen_helper_0_0(do_float_recip2_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "recip2.ps";
break;
case FOP(29, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_recip1_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_recip1_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "recip1.ps";
break;
case FOP(30, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_rsqrt1_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_rsqrt1_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "rsqrt1.ps";
break;
case FOP(31, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT2, ft);
- GEN_LOAD_FREG_FTN(WTH2, ft);
- gen_op_float_rsqrt2_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[2], ft);
+ gen_load_fpr32h(fpu32h_T[2], ft);
+ tcg_gen_helper_0_0(do_float_rsqrt2_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "rsqrt2.ps";
break;
case FOP(32, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_cvts_pu();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvts_pu);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.s.pu";
break;
case FOP(36, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- gen_op_float_cvtpw_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvtpw_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "cvt.pw.ps";
break;
case FOP(40, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- gen_op_float_cvts_pl();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_helper_0_0(do_float_cvts_pl);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "cvt.s.pl";
break;
case FOP(44, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- gen_op_float_pll_ps();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_store_fpr32h(fpu32_T[0], fd);
+ gen_store_fpr32(fpu32_T[1], fd);
opn = "pll.ps";
break;
case FOP(45, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- gen_op_float_plu_ps();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ gen_store_fpr32(fpu32h_T[1], fd);
+ gen_store_fpr32h(fpu32_T[0], fd);
opn = "plu.ps";
break;
case FOP(46, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- gen_op_float_pul_ps();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_store_fpr32(fpu32_T[1], fd);
+ gen_store_fpr32h(fpu32h_T[0], fd);
opn = "pul.ps";
break;
case FOP(47, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- gen_op_float_puu_ps();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ gen_store_fpr32(fpu32h_T[1], fd);
+ gen_store_fpr32h(fpu32h_T[0], fd);
opn = "puu.ps";
break;
case FOP(48, 22):
@@ -6338,10 +6877,10 @@ static void gen_farith (DisasContext *ctx, uint32_t op1,
case FOP(62, 22):
case FOP(63, 22):
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
if (ctx->opcode & (1 << 6)) {
gen_cmpabs_ps(func-48, cc);
opn = condnames_abs[func-48];
@@ -6374,65 +6913,73 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
{
const char *opn = "extended float load/store";
int store = 0;
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
if (base == 0) {
- gen_load_gpr(cpu_T[0], index);
+ gen_load_gpr(t0, index);
} else if (index == 0) {
- gen_load_gpr(cpu_T[0], base);
+ gen_load_gpr(t0, base);
} else {
- gen_load_gpr(cpu_T[0], base);
- gen_load_gpr(cpu_T[1], index);
- gen_op_addr_add();
+ gen_load_gpr(t0, base);
+ gen_load_gpr(t1, index);
+ gen_op_addr_add(t0, t1);
}
/* Don't do NOP if destination is zero: we must perform the actual
memory access. */
switch (opc) {
case OPC_LWXC1:
check_cop1x(ctx);
- op_ldst_lwc1(ctx);
- GEN_STORE_FTN_FREG(fd, WT0);
+ tcg_gen_qemu_ld32s(fpu32_T[0], t0, ctx->mem_idx);
+ gen_store_fpr32(fpu32_T[0], fd);
opn = "lwxc1";
break;
case OPC_LDXC1:
check_cop1x(ctx);
check_cp1_registers(ctx, fd);
- op_ldst_ldc1(ctx);
- GEN_STORE_FTN_FREG(fd, DT0);
+ tcg_gen_qemu_ld64(fpu64_T[0], t0, ctx->mem_idx);
+ gen_store_fpr64(ctx, fpu64_T[0], fd);
opn = "ldxc1";
break;
case OPC_LUXC1:
check_cp1_64bitmode(ctx);
- op_ldst(luxc1);
- GEN_STORE_FTN_FREG(fd, DT0);
+ tcg_gen_andi_tl(t0, t0, ~0x7);
+ tcg_gen_qemu_ld64(fpu64_T[0], t0, ctx->mem_idx);
+ gen_store_fpr64(ctx, fpu64_T[0], fd);
opn = "luxc1";
break;
case OPC_SWXC1:
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- op_ldst_swc1(ctx);
+ gen_load_fpr32(fpu32_T[0], fs);
+ tcg_gen_qemu_st32(fpu32_T[0], t0, ctx->mem_idx);
opn = "swxc1";
store = 1;
break;
case OPC_SDXC1:
check_cop1x(ctx);
check_cp1_registers(ctx, fs);
- GEN_LOAD_FREG_FTN(DT0, fs);
- op_ldst_sdc1(ctx);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_qemu_st64(fpu64_T[0], t0, ctx->mem_idx);
opn = "sdxc1";
store = 1;
break;
case OPC_SUXC1:
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(DT0, fs);
- op_ldst(suxc1);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ tcg_gen_andi_tl(t0, t0, ~0x7);
+ tcg_gen_qemu_st64(fpu64_T[0], t0, ctx->mem_idx);
opn = "suxc1";
store = 1;
break;
default:
MIPS_INVAL(opn);
generate_exception(ctx, EXCP_RI);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
return;
}
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
MIPS_DEBUG("%s %s, %s(%s)", opn, fregnames[store ? fs : fd],
regnames[index], regnames[base]);
}
@@ -6445,139 +6992,164 @@ static void gen_flt3_arith (DisasContext *ctx, uint32_t opc,
switch (opc) {
case OPC_ALNV_PS:
check_cp1_64bitmode(ctx);
- gen_load_gpr(cpu_T[0], fr);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- gen_op_float_alnv_ps();
- GEN_STORE_FTN_FREG(fd, DT2);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ int l1 = gen_new_label();
+ int l2 = gen_new_label();
+
+ gen_load_gpr(t0, fr);
+ tcg_gen_andi_tl(t0, t0, 0x7);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32_T[0]);
+ tcg_gen_mov_i32(fpu32h_T[2], fpu32h_T[0]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
+ tcg_temp_free(t0);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_mov_i32(fpu32h_T[2], fpu32_T[0]);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32h_T[1]);
+#else
+ tcg_gen_mov_i32(fpu32h_T[2], fpu32_T[1]);
+ tcg_gen_mov_i32(fpu32_T[2], fpu32h_T[0]);
+#endif
+ gen_set_label(l2);
+ }
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "alnv.ps";
break;
case OPC_MADD_S:
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- gen_op_float_muladd_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ tcg_gen_helper_0_0(do_float_muladd_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "madd.s";
break;
case OPC_MADD_D:
check_cop1x(ctx);
check_cp1_registers(ctx, fd | fs | ft | fr);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- GEN_LOAD_FREG_FTN(DT2, fr);
- gen_op_float_muladd_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ gen_load_fpr64(ctx, fpu64_T[2], fr);
+ tcg_gen_helper_0_0(do_float_muladd_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "madd.d";
break;
case OPC_MADD_PS:
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- GEN_LOAD_FREG_FTN(WTH2, fr);
- gen_op_float_muladd_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ gen_load_fpr32h(fpu32h_T[2], fr);
+ tcg_gen_helper_0_0(do_float_muladd_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "madd.ps";
break;
case OPC_MSUB_S:
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- gen_op_float_mulsub_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ tcg_gen_helper_0_0(do_float_mulsub_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "msub.s";
break;
case OPC_MSUB_D:
check_cop1x(ctx);
check_cp1_registers(ctx, fd | fs | ft | fr);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- GEN_LOAD_FREG_FTN(DT2, fr);
- gen_op_float_mulsub_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ gen_load_fpr64(ctx, fpu64_T[2], fr);
+ tcg_gen_helper_0_0(do_float_mulsub_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "msub.d";
break;
case OPC_MSUB_PS:
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- GEN_LOAD_FREG_FTN(WTH2, fr);
- gen_op_float_mulsub_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ gen_load_fpr32h(fpu32h_T[2], fr);
+ tcg_gen_helper_0_0(do_float_mulsub_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "msub.ps";
break;
case OPC_NMADD_S:
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- gen_op_float_nmuladd_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ tcg_gen_helper_0_0(do_float_nmuladd_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "nmadd.s";
break;
case OPC_NMADD_D:
check_cop1x(ctx);
check_cp1_registers(ctx, fd | fs | ft | fr);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- GEN_LOAD_FREG_FTN(DT2, fr);
- gen_op_float_nmuladd_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ gen_load_fpr64(ctx, fpu64_T[2], fr);
+ tcg_gen_helper_0_0(do_float_nmuladd_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "nmadd.d";
break;
case OPC_NMADD_PS:
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- GEN_LOAD_FREG_FTN(WTH2, fr);
- gen_op_float_nmuladd_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ gen_load_fpr32h(fpu32h_T[2], fr);
+ tcg_gen_helper_0_0(do_float_nmuladd_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "nmadd.ps";
break;
case OPC_NMSUB_S:
check_cop1x(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- gen_op_float_nmulsub_s();
- GEN_STORE_FTN_FREG(fd, WT2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ tcg_gen_helper_0_0(do_float_nmulsub_s);
+ gen_store_fpr32(fpu32_T[2], fd);
opn = "nmsub.s";
break;
case OPC_NMSUB_D:
check_cop1x(ctx);
check_cp1_registers(ctx, fd | fs | ft | fr);
- GEN_LOAD_FREG_FTN(DT0, fs);
- GEN_LOAD_FREG_FTN(DT1, ft);
- GEN_LOAD_FREG_FTN(DT2, fr);
- gen_op_float_nmulsub_d();
- GEN_STORE_FTN_FREG(fd, DT2);
+ gen_load_fpr64(ctx, fpu64_T[0], fs);
+ gen_load_fpr64(ctx, fpu64_T[1], ft);
+ gen_load_fpr64(ctx, fpu64_T[2], fr);
+ tcg_gen_helper_0_0(do_float_nmulsub_d);
+ gen_store_fpr64(ctx, fpu64_T[2], fd);
opn = "nmsub.d";
break;
case OPC_NMSUB_PS:
check_cp1_64bitmode(ctx);
- GEN_LOAD_FREG_FTN(WT0, fs);
- GEN_LOAD_FREG_FTN(WTH0, fs);
- GEN_LOAD_FREG_FTN(WT1, ft);
- GEN_LOAD_FREG_FTN(WTH1, ft);
- GEN_LOAD_FREG_FTN(WT2, fr);
- GEN_LOAD_FREG_FTN(WTH2, fr);
- gen_op_float_nmulsub_ps();
- GEN_STORE_FTN_FREG(fd, WT2);
- GEN_STORE_FTN_FREG(fd, WTH2);
+ gen_load_fpr32(fpu32_T[0], fs);
+ gen_load_fpr32h(fpu32h_T[0], fs);
+ gen_load_fpr32(fpu32_T[1], ft);
+ gen_load_fpr32h(fpu32h_T[1], ft);
+ gen_load_fpr32(fpu32_T[2], fr);
+ gen_load_fpr32h(fpu32h_T[2], fr);
+ tcg_gen_helper_0_0(do_float_nmulsub_ps);
+ gen_store_fpr32(fpu32_T[2], fd);
+ gen_store_fpr32h(fpu32h_T[2], fd);
opn = "nmsub.ps";
break;
default:
@@ -6622,7 +7194,13 @@ static void decode_opc (CPUState *env, DisasContext *ctx)
tcg_gen_ld_tl(r_tmp, cpu_env, offsetof(CPUState, bcond));
tcg_gen_brcondi_tl(TCG_COND_NE, r_tmp, 0, l1);
tcg_temp_free(r_tmp);
- gen_op_save_state(ctx->hflags & ~MIPS_HFLAG_BMASK);
+ {
+ TCGv r_tmp2 = tcg_temp_new(TCG_TYPE_I32);
+
+ tcg_gen_movi_i32(r_tmp2, ctx->hflags & ~MIPS_HFLAG_BMASK);
+ tcg_gen_st_i32(r_tmp2, cpu_env, offsetof(CPUState, hflags));
+ tcg_temp_free(r_tmp2);
+ }
gen_goto_tb(ctx, 1, ctx->pc + 4);
gen_set_label(l1);
}
@@ -6676,7 +7254,7 @@ static void decode_opc (CPUState *env, DisasContext *ctx)
MIPS_INVAL("PMON / selsl");
generate_exception(ctx, EXCP_RI);
#else
- gen_op_pmon(sa);
+ tcg_gen_helper_0_i(do_pmon, sa);
#endif
break;
case OPC_SYSCALL:
@@ -6781,78 +7359,106 @@ static void decode_opc (CPUState *env, DisasContext *ctx)
}
break;
case OPC_SPECIAL3:
- op1 = MASK_SPECIAL3(ctx->opcode);
- switch (op1) {
- case OPC_EXT:
- case OPC_INS:
- check_insn(env, ctx, ISA_MIPS32R2);
- gen_bitops(ctx, op1, rt, rs, sa, rd);
- break;
- case OPC_BSHFL:
- check_insn(env, ctx, ISA_MIPS32R2);
- op2 = MASK_BSHFL(ctx->opcode);
- switch (op2) {
- case OPC_WSBH:
- gen_load_gpr(cpu_T[1], rt);
- gen_op_wsbh();
- break;
- case OPC_SEB:
- gen_load_gpr(cpu_T[1], rt);
- tcg_gen_ext8s_tl(cpu_T[0], cpu_T[1]);
- break;
- case OPC_SEH:
- gen_load_gpr(cpu_T[1], rt);
- tcg_gen_ext16s_tl(cpu_T[0], cpu_T[1]);
- break;
- default: /* Invalid */
- MIPS_INVAL("bshfl");
- generate_exception(ctx, EXCP_RI);
- break;
+ op1 = MASK_SPECIAL3(ctx->opcode);
+ switch (op1) {
+ case OPC_EXT:
+ case OPC_INS:
+ check_insn(env, ctx, ISA_MIPS32R2);
+ gen_bitops(ctx, op1, rt, rs, sa, rd);
+ break;
+ case OPC_BSHFL:
+ check_insn(env, ctx, ISA_MIPS32R2);
+ op2 = MASK_BSHFL(ctx->opcode);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ switch (op2) {
+ case OPC_WSBH:
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2(do_wsbh, t0, t0, t1);
+ gen_store_gpr(t0, rd);
+ break;
+ case OPC_SEB:
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext8s_tl(t0, t1);
+ gen_store_gpr(t0, rd);
+ break;
+ case OPC_SEH:
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext16s_tl(t0, t1);
+ gen_store_gpr(t0, rd);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("bshfl");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
- gen_store_gpr(cpu_T[0], rd);
break;
case OPC_RDHWR:
check_insn(env, ctx, ISA_MIPS32R2);
- switch (rd) {
- case 0:
- save_cpu_state(ctx, 1);
- gen_op_rdhwr_cpunum();
- break;
- case 1:
- save_cpu_state(ctx, 1);
- gen_op_rdhwr_synci_step();
- break;
- case 2:
- save_cpu_state(ctx, 1);
- gen_op_rdhwr_cc();
- break;
- case 3:
- save_cpu_state(ctx, 1);
- gen_op_rdhwr_ccres();
- break;
- case 29:
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ switch (rd) {
+ case 0:
+ save_cpu_state(ctx, 1);
+ tcg_gen_helper_1_1(do_rdhwr_cpunum, t0, t0);
+ break;
+ case 1:
+ save_cpu_state(ctx, 1);
+ tcg_gen_helper_1_1(do_rdhwr_synci_step, t0, t0);
+ break;
+ case 2:
+ save_cpu_state(ctx, 1);
+ tcg_gen_helper_1_1(do_rdhwr_cc, t0, t0);
+ break;
+ case 3:
+ save_cpu_state(ctx, 1);
+ tcg_gen_helper_1_1(do_rdhwr_ccres, t0, t0);
+ break;
+ case 29:
#if defined (CONFIG_USER_ONLY)
- gen_op_tls_value();
- break;
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, tls_value));
+ break;
+#else
+ /* XXX: Some CPUs implement this in hardware. Not supported yet. */
#endif
- default: /* Invalid */
- MIPS_INVAL("rdhwr");
- generate_exception(ctx, EXCP_RI);
- break;
+ default: /* Invalid */
+ MIPS_INVAL("rdhwr");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
}
- gen_store_gpr(cpu_T[0], rt);
break;
case OPC_FORK:
check_insn(env, ctx, ASE_MT);
- gen_load_gpr(cpu_T[0], rt);
- gen_load_gpr(cpu_T[1], rs);
- gen_op_fork();
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+ tcg_gen_helper_0_2(do_fork, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
break;
case OPC_YIELD:
check_insn(env, ctx, ASE_MT);
- gen_load_gpr(cpu_T[0], rs);
- gen_op_yield();
- gen_store_gpr(cpu_T[0], rd);
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ gen_load_gpr(t0, rs);
+ tcg_gen_helper_1_1(do_yield, t0, t0);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
break;
#if defined(TARGET_MIPS64)
case OPC_DEXTM ... OPC_DEXT:
@@ -6865,21 +7471,28 @@ static void decode_opc (CPUState *env, DisasContext *ctx)
check_insn(env, ctx, ISA_MIPS64R2);
check_mips_64(ctx);
op2 = MASK_DBSHFL(ctx->opcode);
- switch (op2) {
- case OPC_DSBH:
- gen_load_gpr(cpu_T[1], rt);
- gen_op_dsbh();
- break;
- case OPC_DSHD:
- gen_load_gpr(cpu_T[1], rt);
- gen_op_dshd();
- break;
- default: /* Invalid */
- MIPS_INVAL("dbshfl");
- generate_exception(ctx, EXCP_RI);
- break;
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+ TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ switch (op2) {
+ case OPC_DSBH:
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2(do_dsbh, t0, t0, t1);
+ break;
+ case OPC_DSHD:
+ gen_load_gpr(t1, rt);
+ tcg_gen_helper_1_2(do_dshd, t0, t0, t1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("dbshfl");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
}
- gen_store_gpr(cpu_T[0], rd);
break;
#endif
default: /* Invalid */
@@ -6921,60 +7534,67 @@ static void decode_opc (CPUState *env, DisasContext *ctx)
case OPC_DMFC0:
case OPC_DMTC0:
#endif
+#ifndef CONFIG_USER_ONLY
gen_cp0(env, ctx, op1, rt, rd);
+#endif
break;
case OPC_C0_FIRST ... OPC_C0_LAST:
+#ifndef CONFIG_USER_ONLY
gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd);
+#endif
break;
case OPC_MFMC0:
op2 = MASK_MFMC0(ctx->opcode);
- switch (op2) {
- case OPC_DMT:
- check_insn(env, ctx, ASE_MT);
- gen_op_dmt();
- break;
- case OPC_EMT:
- check_insn(env, ctx, ASE_MT);
- gen_op_emt();
- break;
- case OPC_DVPE:
- check_insn(env, ctx, ASE_MT);
- gen_op_dvpe();
- break;
- case OPC_EVPE:
- check_insn(env, ctx, ASE_MT);
- gen_op_evpe();
- break;
- case OPC_DI:
- check_insn(env, ctx, ISA_MIPS32R2);
- save_cpu_state(ctx, 1);
- gen_op_di();
- /* Stop translation as we may have switched the execution mode */
- ctx->bstate = BS_STOP;
- break;
- case OPC_EI:
- check_insn(env, ctx, ISA_MIPS32R2);
- save_cpu_state(ctx, 1);
- gen_op_ei();
- /* Stop translation as we may have switched the execution mode */
- ctx->bstate = BS_STOP;
- break;
- default: /* Invalid */
- MIPS_INVAL("mfmc0");
- generate_exception(ctx, EXCP_RI);
- break;
+ {
+ TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
+
+ switch (op2) {
+ case OPC_DMT:
+ check_insn(env, ctx, ASE_MT);
+ tcg_gen_helper_1_1(do_dmt, t0, t0);
+ break;
+ case OPC_EMT:
+ check_insn(env, ctx, ASE_MT);
+ tcg_gen_helper_1_1(do_emt, t0, t0);
+ break;
+ case OPC_DVPE:
+ check_insn(env, ctx, ASE_MT);
+ tcg_gen_helper_1_1(do_dvpe, t0, t0);
+ break;
+ case OPC_EVPE:
+ check_insn(env, ctx, ASE_MT);
+ tcg_gen_helper_1_1(do_evpe, t0, t0);
+ break;
+ case OPC_DI:
+ check_insn(env, ctx, ISA_MIPS32R2);
+ save_cpu_state(ctx, 1);
+ tcg_gen_helper_1_1(do_di, t0, t0);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case OPC_EI:
+ check_insn(env, ctx, ISA_MIPS32R2);
+ save_cpu_state(ctx, 1);
+ tcg_gen_helper_1_1(do_ei, t0, t0);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("mfmc0");
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
}
- gen_store_gpr(cpu_T[0], rt);
break;
case OPC_RDPGPR:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_load_srsgpr(cpu_T[0], rt);
- gen_store_gpr(cpu_T[0], rd);
+ gen_load_srsgpr(rt, rd);
break;
case OPC_WRPGPR:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_load_gpr(cpu_T[0], rt);
- gen_store_srsgpr(cpu_T[0], rd);
+ gen_store_srsgpr(rt, rd);
break;
default:
MIPS_INVAL("cp0");
@@ -7212,7 +7832,8 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
fprintf (logfile, "search pc %d\n", search_pc);
pc_start = tb->pc;
- gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ /* Leave some spare opc slots for branch handling. */
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE - 16;
ctx.pc = pc_start;
ctx.saved_pc = -1;
ctx.tb = tb;
@@ -7237,13 +7858,13 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
fprintf(logfile, "\ntb %p idx %d hflags %04x\n",
tb, ctx.mem_idx, ctx.hflags);
#endif
- while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
+ while (ctx.bstate == BS_NONE) {
if (env->nb_breakpoints > 0) {
for(j = 0; j < env->nb_breakpoints; j++) {
if (env->breakpoints[j] == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
- gen_op_debug();
+ tcg_gen_helper_0_i(do_raise_exception, EXCP_DEBUG);
/* Include the breakpoint location or the tb won't
* be flushed when it must be. */
ctx.pc += 4;
@@ -7273,13 +7894,16 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
break;
+ if (gen_opc_ptr >= gen_opc_end)
+ break;
+
#if defined (MIPS_SINGLE_STEP)
break;
#endif
}
if (env->singlestep_enabled) {
save_cpu_state(&ctx, ctx.bstate == BS_NONE);
- gen_op_debug();
+ tcg_gen_helper_0_i(do_raise_exception, EXCP_DEBUG);
} else {
switch (ctx.bstate) {
case BS_STOP:
@@ -7364,9 +7991,9 @@ void fpu_dump_state(CPUState *env, FILE *f,
fpu_fprintf(f, "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%08x(0x%02x)\n",
env->fpu->fcr0, env->fpu->fcr31, is_fpu64, env->fpu->fp_status,
get_float_exception_flags(&env->fpu->fp_status));
- fpu_fprintf(f, "FT0: "); printfpr(&env->fpu->ft0);
- fpu_fprintf(f, "FT1: "); printfpr(&env->fpu->ft1);
- fpu_fprintf(f, "FT2: "); printfpr(&env->fpu->ft2);
+ fpu_fprintf(f, "FT0: "); printfpr(&env->ft0);
+ fpu_fprintf(f, "FT1: "); printfpr(&env->ft1);
+ fpu_fprintf(f, "FT2: "); printfpr(&env->ft2);
for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) {
fpu_fprintf(f, "%3s: ", fregnames[i]);
printfpr(&env->fpu->fpr[i]);
@@ -7466,15 +8093,25 @@ static void mips_tcg_init(void)
TCG_AREG0,
offsetof(CPUState, current_tc_hi),
"current_tc_hi");
-#if TARGET_LONG_BITS > HOST_LONG_BITS
- cpu_T[0] = tcg_global_mem_new(TCG_TYPE_TL,
- TCG_AREG0, offsetof(CPUState, t0), "T0");
- cpu_T[1] = tcg_global_mem_new(TCG_TYPE_TL,
- TCG_AREG0, offsetof(CPUState, t1), "T1");
-#else
- cpu_T[0] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG1, "T0");
- cpu_T[1] = tcg_global_reg_new(TCG_TYPE_TL, TCG_AREG2, "T1");
-#endif
+ current_fpu = tcg_global_mem_new(TCG_TYPE_PTR,
+ TCG_AREG0,
+ offsetof(CPUState, fpu),
+ "current_fpu");
+
+ /* register helpers */
+#undef DEF_HELPER
+#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
+#include "helper.h"
+
+ fpu32_T[0] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, ft0.w[FP_ENDIAN_IDX]), "WT0");
+ fpu32_T[1] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, ft1.w[FP_ENDIAN_IDX]), "WT1");
+ fpu32_T[2] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, ft2.w[FP_ENDIAN_IDX]), "WT2");
+ fpu64_T[0] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, offsetof(CPUState, ft0.d), "DT0");
+ fpu64_T[1] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, offsetof(CPUState, ft1.d), "DT1");
+ fpu64_T[2] = tcg_global_mem_new(TCG_TYPE_I64, TCG_AREG0, offsetof(CPUState, ft2.d), "DT2");
+ fpu32h_T[0] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, ft0.w[!FP_ENDIAN_IDX]), "WTH0");
+ fpu32h_T[1] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, ft1.w[!FP_ENDIAN_IDX]), "WTH1");
+ fpu32h_T[2] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0, offsetof(CPUState, ft2.w[!FP_ENDIAN_IDX]), "WTH2");
inited = 1;
}
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 19de8695a..28cb6abdd 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -362,7 +362,7 @@ EXTRACT_HELPER(TO, 21, 5);
EXTRACT_HELPER(CRM, 12, 8);
EXTRACT_HELPER(FM, 17, 8);
EXTRACT_HELPER(SR, 16, 4);
-EXTRACT_HELPER(FPIMM, 20, 4);
+EXTRACT_HELPER(FPIMM, 12, 4);
/*** Jump target decoding ***/
/* Displacement */
diff --git a/target-sparc/TODO b/target-sparc/TODO
index 5a4937ba0..0d39994b2 100644
--- a/target-sparc/TODO
+++ b/target-sparc/TODO
@@ -6,7 +6,6 @@ CPU common:
slot next page)
- Atomical instructions
- CPU features should match real CPUs (also ASI selection)
- - Allow choosing of NWINDOWS (CPU model specific and as a CPU feature)
- Optimizations/improvements:
- Condition code/branch handling like x86, also for FPU?
- Remove remaining explicit alignment checks
@@ -54,8 +53,7 @@ Sun4m:
- Unimplemented features/bugs:
- Hardware devices do not match real boards
- Floppy does not work
- - Missing keys, sendkey support for all keys
- - CS4231 (DMA, make sound, make available for PC)
+ - CS4231: merge with cs4231a, add DMA
- Add cg6, bwtwo
- Arbitrary resolution support
- PCI for MicroSparc-IIe
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
index ad9aeb867..b56dc91ff 100644
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -170,8 +170,9 @@
#define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT)
#define PG_CACHE_MASK (1 << PG_CACHE_BIT)
-/* 2 <= NWINDOWS <= 32. In QEMU it must also be a power of two. */
-#define NWINDOWS 8
+/* 3 <= NWINDOWS <= 32. */
+#define MIN_NWINDOWS 3
+#define MAX_NWINDOWS 32
#if !defined(TARGET_SPARC64)
#define NB_MMU_MODES 2
@@ -222,8 +223,9 @@ typedef struct CPUSPARCState {
uint32_t mmu_cxr_mask;
uint32_t mmu_sfsr_mask;
uint32_t mmu_trcr_mask;
+ uint32_t nwindows;
/* NOTE: we allow 8 more registers to handle wrapping */
- target_ulong regbase[NWINDOWS * 16 + 8];
+ target_ulong regbase[MAX_NWINDOWS * 16 + 8];
CPU_COMMON
@@ -330,6 +332,20 @@ void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
#ifndef NO_CPU_IO_DEFS
void cpu_set_cwp(CPUSPARCState *env1, int new_cwp);
+
+static inline int cpu_cwp_inc(CPUSPARCState *env1, int cwp)
+{
+ if (unlikely(cwp >= env1->nwindows))
+ cwp -= env1->nwindows;
+ return cwp;
+}
+
+static inline int cpu_cwp_dec(CPUSPARCState *env1, int cwp)
+{
+ if (unlikely(cwp < 0))
+ cwp += env1->nwindows;
+ return cwp;
+}
#endif
#define PUT_PSR(env, val) do { int _tmp = val; \
@@ -348,10 +364,16 @@ void cpu_set_cwp(CPUSPARCState *env1, int new_cwp);
env->xcc = (_tmp >> 4) << 20; \
env->psr = (_tmp & 0xf) << 20; \
} while (0)
-#define GET_CWP64(env) (NWINDOWS - 1 - (env)->cwp)
-#define PUT_CWP64(env, val) \
- cpu_set_cwp(env, NWINDOWS - 1 - ((val) & (NWINDOWS - 1)))
+#define GET_CWP64(env) (env->nwindows - 1 - (env)->cwp)
+#ifndef NO_CPU_IO_DEFS
+static inline void PUT_CWP64(CPUSPARCState *env1, int cwp)
+{
+ if (unlikely(cwp >= env1->nwindows || cwp < 0))
+ cwp = 0;
+ cpu_set_cwp(env1, env1->nwindows - 1 - cwp);
+}
+#endif
#endif
int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
diff --git a/target-sparc/helper.c b/target-sparc/helper.c
index bf74c0d29..a3e50bd4c 100644
--- a/target-sparc/helper.c
+++ b/target-sparc/helper.c
@@ -47,6 +47,7 @@ struct sparc_def_t {
uint32_t mmu_sfsr_mask;
uint32_t mmu_trcr_mask;
uint32_t features;
+ uint32_t nwindows;
};
static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model);
@@ -86,14 +87,14 @@ int cpu_sparc_handle_mmu_fault(CPUState *env1, target_ulong address, int rw,
* Sparc V8 Reference MMU (SRMMU)
*/
static const int access_table[8][8] = {
- { 0, 0, 0, 0, 2, 0, 3, 3 },
- { 0, 0, 0, 0, 2, 0, 0, 0 },
- { 2, 2, 0, 0, 0, 2, 3, 3 },
- { 2, 2, 0, 0, 0, 2, 0, 0 },
- { 2, 0, 2, 0, 2, 2, 3, 3 },
- { 2, 0, 2, 0, 2, 0, 2, 0 },
- { 2, 2, 2, 0, 2, 2, 3, 3 },
- { 2, 2, 2, 0, 2, 2, 2, 0 }
+ { 0, 0, 0, 0, 8, 0, 12, 12 },
+ { 0, 0, 0, 0, 8, 0, 0, 0 },
+ { 8, 8, 0, 0, 0, 8, 12, 12 },
+ { 8, 8, 0, 0, 0, 8, 0, 0 },
+ { 8, 0, 8, 0, 8, 8, 12, 12 },
+ { 8, 0, 8, 0, 8, 0, 8, 0 },
+ { 8, 8, 8, 0, 8, 8, 12, 12 },
+ { 8, 8, 8, 0, 8, 8, 8, 0 }
};
static const int perm_table[2][8] = {
@@ -750,11 +751,11 @@ void do_interrupt(CPUState *env)
change_pstate(PS_PEF | PS_PRIV | PS_AG);
if (intno == TT_CLRWIN)
- cpu_set_cwp(env, (env->cwp - 1) & (NWINDOWS - 1));
+ cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
else if ((intno & 0x1c0) == TT_SPILL)
- cpu_set_cwp(env, (env->cwp - env->cansave - 2) & (NWINDOWS - 1));
+ cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
else if ((intno & 0x1c0) == TT_FILL)
- cpu_set_cwp(env, (env->cwp + 1) & (NWINDOWS - 1));
+ cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
env->tbr &= ~0x7fffULL;
env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
if (env->tl < MAXTL - 1) {
@@ -853,7 +854,7 @@ void do_interrupt(CPUState *env)
}
#endif
env->psret = 0;
- cwp = (env->cwp - 1) & (NWINDOWS - 1);
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
cpu_set_cwp(env, cwp);
env->regwptr[9] = env->pc;
env->regwptr[10] = env->npc;
@@ -887,8 +888,8 @@ void cpu_reset(CPUSPARCState *env)
#if defined(CONFIG_USER_ONLY)
env->user_mode_only = 1;
#ifdef TARGET_SPARC64
- env->cleanwin = NWINDOWS - 2;
- env->cansave = NWINDOWS - 2;
+ env->cleanwin = env->nwindows - 2;
+ env->cansave = env->nwindows - 2;
env->pstate = PS_RMO | PS_PEF | PS_IE;
env->asi = 0x82; // Primary no-fault
#endif
@@ -921,6 +922,7 @@ static int cpu_sparc_register(CPUSPARCState *env, const char *cpu_model)
env->cpu_model_str = cpu_model;
env->version = def->iu_version;
env->fsr = def->fpu_version;
+ env->nwindows = def->nwindows;
#if !defined(TARGET_SPARC64)
env->mmu_bm = def->mmu_bm;
env->mmu_ctpr_mask = def->mmu_ctpr_mask;
@@ -929,6 +931,8 @@ static int cpu_sparc_register(CPUSPARCState *env, const char *cpu_model)
env->mmu_trcr_mask = def->mmu_trcr_mask;
env->mmuregs[0] |= def->mmu_version;
cpu_sparc_set_id(env, 0);
+#else
+ env->version |= def->nwindows - 1;
#endif
return 0;
}
@@ -970,121 +974,136 @@ static const sparc_def_t sparc_defs[] = {
{
.name = "Fujitsu Sparc64",
.iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 4,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Fujitsu Sparc64 III",
.iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 5,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Fujitsu Sparc64 IV",
.iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Fujitsu Sparc64 V",
.iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "TI UltraSparc I",
.iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "TI UltraSparc II",
.iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "TI UltraSparc IIi",
.iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "TI UltraSparc IIe",
.iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Sun UltraSparc III",
.iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Sun UltraSparc III Cu",
.iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Sun UltraSparc IIIi",
.iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Sun UltraSparc IV",
.iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Sun UltraSparc IV+",
.iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "Sun UltraSparc IIIi+",
.iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
.name = "NEC UltraSparc I",
.iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)
- | (MAXTL << 8) | (NWINDOWS - 1)),
+ | (MAXTL << 8)),
.fpu_version = 0x00000000,
.mmu_version = 0,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
#else
@@ -1098,6 +1117,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 7,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_FSMULD,
},
{
@@ -1110,6 +1130,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x000000ff,
.mmu_sfsr_mask = 0x00016fff,
.mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1122,6 +1143,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x000000ff,
.mmu_sfsr_mask = 0x00016fff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1134,6 +1156,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
CPU_FEATURE_FSMULD,
},
@@ -1147,6 +1170,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
CPU_FEATURE_FSMULD,
},
@@ -1160,6 +1184,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
CPU_FEATURE_FSMULD,
},
@@ -1173,6 +1198,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000ffff,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1185,6 +1211,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0x00016fff,
.mmu_trcr_mask = 0x0000003f,
+ .nwindows = 7,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL |
CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT |
CPU_FEATURE_FMUL,
@@ -1199,6 +1226,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x000000ff,
.mmu_sfsr_mask = 0x00016fff,
.mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1211,6 +1239,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x000000ff,
.mmu_sfsr_mask = 0x00016bff,
.mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1223,6 +1252,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000ffff,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1235,6 +1265,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000ffff,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1247,6 +1278,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000ffff,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1259,6 +1291,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000ffff,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1271,6 +1304,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000ffff,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1283,6 +1317,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1295,6 +1330,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1307,6 +1343,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_FSQRT |
CPU_FEATURE_FSMULD,
},
@@ -1320,6 +1357,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_FEATURE_FLOAT | CPU_FEATURE_MUL | CPU_FEATURE_FSQRT |
CPU_FEATURE_FSMULD,
},
@@ -1333,6 +1371,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1345,6 +1384,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
{
@@ -1357,6 +1397,7 @@ static const sparc_def_t sparc_defs[] = {
.mmu_cxr_mask = 0x0000003f,
.mmu_sfsr_mask = 0xffffffff,
.mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
.features = CPU_DEFAULT_FEATURES,
},
#endif
@@ -1411,7 +1452,7 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
uint32_t plus_features = 0;
uint32_t minus_features = 0;
long long iu_version;
- uint32_t fpu_version, mmu_version;
+ uint32_t fpu_version, mmu_version, nwindows;
for (i = 0; i < sizeof(sparc_defs) / sizeof(sparc_def_t); i++) {
if (strcasecmp(name, sparc_defs[i].name) == 0) {
@@ -1468,6 +1509,19 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
#ifdef DEBUG_FEATURES
fprintf(stderr, "mmu_version %llx\n", mmu_version);
#endif
+ } else if (!strcmp(featurestr, "nwindows")) {
+ char *err;
+
+ nwindows = strtol(val, &err, 0);
+ if (!*val || *err || nwindows > MAX_NWINDOWS ||
+ nwindows < MIN_NWINDOWS) {
+ fprintf(stderr, "bad numerical value %s\n", val);
+ goto error;
+ }
+ cpu_def->nwindows = nwindows;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "nwindows %d\n", nwindows);
+#endif
} else {
fprintf(stderr, "unrecognized feature %s\n", featurestr);
goto error;
@@ -1497,11 +1551,12 @@ void sparc_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
unsigned int i;
for (i = 0; i < sizeof(sparc_defs) / sizeof(sparc_def_t); i++) {
- (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx " FPU %08x MMU %08x ",
+ (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx " FPU %08x MMU %08x NWINS %d ",
sparc_defs[i].name,
sparc_defs[i].iu_version,
sparc_defs[i].fpu_version,
- sparc_defs[i].mmu_version);
+ sparc_defs[i].mmu_version,
+ sparc_defs[i].nwindows);
print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES &
~sparc_defs[i].features, "-");
print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES &
@@ -1512,7 +1567,7 @@ void sparc_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
print_features(f, cpu_fprintf, -1, NULL);
(*cpu_fprintf)(f, "\n");
(*cpu_fprintf)(f, "Numerical features (=): iu_version fpu_version "
- "mmu_version\n");
+ "mmu_version nwindows\n");
}
#define GET_FLAG(a,b) ((env->psr & a)?b:'-')
@@ -1558,7 +1613,7 @@ void cpu_dump_state(CPUState *env, FILE *f,
cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate %d "
"cleanwin %d cwp %d\n",
env->cansave, env->canrestore, env->otherwin, env->wstate,
- env->cleanwin, NWINDOWS - 1 - env->cwp);
+ env->cleanwin, env->nwindows - 1 - env->cwp);
#else
cpu_fprintf(f, "psr: 0x%08x -> %c%c%c%c %c%c%c wim: 0x%08x\n",
GET_PSR(env), GET_FLAG(PSR_ZERO, 'Z'), GET_FLAG(PSR_OVF, 'V'),
diff --git a/target-sparc/machine.c b/target-sparc/machine.c
index 0e7a23e9f..aaac30b1e 100644
--- a/target-sparc/machine.c
+++ b/target-sparc/machine.c
@@ -31,7 +31,8 @@ void cpu_save(QEMUFile *f, void *opaque)
for(i = 0; i < 8; i++)
qemu_put_betls(f, &env->gregs[i]);
- for(i = 0; i < NWINDOWS * 16; i++)
+ qemu_put_be32s(f, &env->nwindows);
+ for(i = 0; i < env->nwindows * 16; i++)
qemu_put_betls(f, &env->regbase[i]);
/* FPU */
@@ -65,9 +66,12 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
int i;
uint32_t tmp;
+ if (version_id != 4)
+ return -EINVAL;
for(i = 0; i < 8; i++)
qemu_get_betls(f, &env->gregs[i]);
- for(i = 0; i < NWINDOWS * 16; i++)
+ qemu_get_be32s(f, &env->nwindows);
+ for(i = 0; i < env->nwindows * 16; i++)
qemu_get_betls(f, &env->regbase[i]);
/* FPU */
diff --git a/target-sparc/op_helper.c b/target-sparc/op_helper.c
index 3c04fe54f..f15cc224b 100644
--- a/target-sparc/op_helper.c
+++ b/target-sparc/op_helper.c
@@ -2178,7 +2178,7 @@ void helper_rett(void)
raise_exception(TT_ILL_INSN);
env->psret = 1;
- cwp = (env->cwp + 1) & (NWINDOWS - 1);
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
if (env->wim & (1 << cwp)) {
raise_exception(TT_WIN_UNF);
}
@@ -2399,7 +2399,7 @@ void helper_save(void)
{
uint32_t cwp;
- cwp = (env->cwp - 1) & (NWINDOWS - 1);
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
if (env->wim & (1 << cwp)) {
raise_exception(TT_WIN_OVF);
}
@@ -2410,7 +2410,7 @@ void helper_restore(void)
{
uint32_t cwp;
- cwp = (env->cwp + 1) & (NWINDOWS - 1);
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
if (env->wim & (1 << cwp)) {
raise_exception(TT_WIN_UNF);
}
@@ -2419,7 +2419,7 @@ void helper_restore(void)
void helper_wrpsr(target_ulong new_psr)
{
- if ((new_psr & PSR_CWP) >= NWINDOWS)
+ if ((new_psr & PSR_CWP) >= env->nwindows)
raise_exception(TT_ILL_INSN);
else
PUT_PSR(env, new_psr);
@@ -2437,7 +2437,7 @@ void helper_save(void)
{
uint32_t cwp;
- cwp = (env->cwp - 1) & (NWINDOWS - 1);
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
if (env->cansave == 0) {
raise_exception(TT_SPILL | (env->otherwin != 0 ?
(TT_WOTHER | ((env->wstate & 0x38) >> 1)):
@@ -2458,7 +2458,7 @@ void helper_restore(void)
{
uint32_t cwp;
- cwp = (env->cwp + 1) & (NWINDOWS - 1);
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
if (env->canrestore == 0) {
raise_exception(TT_FILL | (env->otherwin != 0 ?
(TT_WOTHER | ((env->wstate & 0x38) >> 1)):
@@ -2472,7 +2472,7 @@ void helper_restore(void)
void helper_flushw(void)
{
- if (env->cansave != NWINDOWS - 2) {
+ if (env->cansave != env->nwindows - 2) {
raise_exception(TT_SPILL | (env->otherwin != 0 ?
(TT_WOTHER | ((env->wstate & 0x38) >> 1)):
((env->wstate & 0x7) << 2)));
@@ -2491,7 +2491,7 @@ void helper_saved(void)
void helper_restored(void)
{
env->canrestore++;
- if (env->cleanwin < NWINDOWS - 1)
+ if (env->cleanwin < env->nwindows - 1)
env->cleanwin++;
if (env->otherwin == 0)
env->cansave--;
@@ -2622,12 +2622,12 @@ void helper_retry(void)
void cpu_set_cwp(CPUState *env1, int new_cwp)
{
/* put the modified wrap registers at their proper location */
- if (env1->cwp == (NWINDOWS - 1))
- memcpy32(env1->regbase, env1->regbase + NWINDOWS * 16);
+ if (env1->cwp == env1->nwindows - 1)
+ memcpy32(env1->regbase, env1->regbase + env1->nwindows * 16);
env1->cwp = new_cwp;
/* put the wrap registers at their temporary location */
- if (new_cwp == (NWINDOWS - 1))
- memcpy32(env1->regbase + NWINDOWS * 16, env1->regbase);
+ if (new_cwp == env1->nwindows - 1)
+ memcpy32(env1->regbase + env1->nwindows * 16, env1->regbase);
env1->regwptr = env1->regbase + (new_cwp * 16);
}
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index eb0ab3343..98c629150 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -38,7 +38,7 @@
according to jump_pc[T2] */
/* global register indexes */
-static TCGv cpu_env, cpu_T[2], cpu_regwptr;
+static TCGv cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv cpu_psr, cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_cond, cpu_src1, cpu_src2, cpu_dst, cpu_addr, cpu_val;
@@ -450,8 +450,7 @@ static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_add_tl(dst, src1, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_add_icc(cpu_cc_dst, cpu_cc_src);
@@ -462,6 +461,7 @@ static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_add_xcc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_add_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_addx_cc(TCGv dst, TCGv src1, TCGv src2)
@@ -469,15 +469,14 @@ static inline void gen_op_addx_cc(TCGv dst, TCGv src1, TCGv src2)
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
gen_mov_reg_C(cpu_tmp0, cpu_psr);
- tcg_gen_add_tl(dst, src1, cpu_tmp0);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_tmp0);
gen_cc_clear_icc();
- gen_cc_C_add_icc(dst, cpu_cc_src);
+ gen_cc_C_add_icc(cpu_cc_dst, cpu_cc_src);
#ifdef TARGET_SPARC64
gen_cc_clear_xcc();
- gen_cc_C_add_xcc(dst, cpu_cc_src);
+ gen_cc_C_add_xcc(cpu_cc_dst, cpu_cc_src);
#endif
- tcg_gen_add_tl(dst, dst, cpu_cc_src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_dst, cpu_cc_src2);
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_add_icc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_add_icc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
@@ -486,14 +485,14 @@ static inline void gen_op_addx_cc(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_add_xcc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_add_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_tadd_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_add_tl(dst, src1, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_add_icc(cpu_cc_dst, cpu_cc_src);
@@ -505,6 +504,7 @@ static inline void gen_op_tadd_cc(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_add_xcc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_add_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_tadd_ccTV(TCGv dst, TCGv src1, TCGv src2)
@@ -512,9 +512,8 @@ static inline void gen_op_tadd_ccTV(TCGv dst, TCGv src1, TCGv src2)
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
gen_tag_tv(cpu_cc_src, cpu_cc_src2);
- tcg_gen_add_tl(dst, src1, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
- gen_add_tv(dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ gen_add_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_add_icc(cpu_cc_dst, cpu_cc_src);
@@ -524,6 +523,7 @@ static inline void gen_op_tadd_ccTV(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_add_xcc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_add_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
/* old op:
@@ -619,8 +619,7 @@ static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_sub_tl(dst, src1, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_sub_icc(cpu_cc_src, cpu_cc_src2);
@@ -631,6 +630,7 @@ static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_sub_xcc(cpu_cc_src, cpu_cc_src2);
gen_cc_V_sub_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_subx_cc(TCGv dst, TCGv src1, TCGv src2)
@@ -638,15 +638,14 @@ static inline void gen_op_subx_cc(TCGv dst, TCGv src1, TCGv src2)
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
gen_mov_reg_C(cpu_tmp0, cpu_psr);
- tcg_gen_sub_tl(dst, src1, cpu_tmp0);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_tmp0);
gen_cc_clear_icc();
- gen_cc_C_sub_icc(dst, cpu_cc_src);
+ gen_cc_C_sub_icc(cpu_cc_dst, cpu_cc_src);
#ifdef TARGET_SPARC64
gen_cc_clear_xcc();
- gen_cc_C_sub_xcc(dst, cpu_cc_src);
+ gen_cc_C_sub_xcc(cpu_cc_dst, cpu_cc_src);
#endif
- tcg_gen_sub_tl(dst, dst, cpu_cc_src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_dst, cpu_cc_src2);
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_sub_icc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_sub_icc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
@@ -655,14 +654,14 @@ static inline void gen_op_subx_cc(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_sub_xcc(cpu_cc_dst, cpu_cc_src);
gen_cc_V_sub_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_tsub_cc(TCGv dst, TCGv src1, TCGv src2)
{
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- tcg_gen_sub_tl(dst, src1, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_sub_icc(cpu_cc_src, cpu_cc_src2);
@@ -674,6 +673,7 @@ static inline void gen_op_tsub_cc(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_sub_xcc(cpu_cc_src, cpu_cc_src2);
gen_cc_V_sub_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_tsub_ccTV(TCGv dst, TCGv src1, TCGv src2)
@@ -681,9 +681,8 @@ static inline void gen_op_tsub_ccTV(TCGv dst, TCGv src1, TCGv src2)
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
gen_tag_tv(cpu_cc_src, cpu_cc_src2);
- tcg_gen_sub_tl(dst, src1, src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
- gen_sub_tv(dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ gen_sub_tv(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_C_sub_icc(cpu_cc_src, cpu_cc_src2);
@@ -693,6 +692,7 @@ static inline void gen_op_tsub_ccTV(TCGv dst, TCGv src1, TCGv src2)
gen_cc_C_sub_xcc(cpu_cc_src, cpu_cc_src2);
gen_cc_V_sub_xcc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
#endif
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
@@ -741,13 +741,13 @@ static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
/* do addition and update flags */
- tcg_gen_add_tl(dst, cpu_cc_src, cpu_cc_src2);
- tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
gen_cc_V_add_icc(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
gen_cc_C_add_icc(cpu_cc_dst, cpu_cc_src);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
@@ -818,7 +818,7 @@ static inline void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
l2 = gen_new_label();
tcg_gen_mov_tl(cpu_cc_src, src1);
tcg_gen_mov_tl(cpu_cc_src2, src2);
- gen_trap_ifdivzero_tl(src2);
+ gen_trap_ifdivzero_tl(cpu_cc_src2);
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_cc_src, INT64_MIN, l1);
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_cc_src2, -1, l1);
tcg_gen_movi_i64(dst, INT64_MIN);
@@ -837,8 +837,7 @@ static inline void gen_op_div_cc(TCGv dst)
gen_cc_clear_icc();
gen_cc_NZ_icc(cpu_cc_dst);
l1 = gen_new_label();
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUSPARCState, cc_src2));
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_src2, 0, l1);
tcg_gen_ori_i32(cpu_psr, cpu_psr, PSR_OVF);
gen_set_label(l1);
}
@@ -1866,11 +1865,9 @@ static inline TCGv get_src1(unsigned int insn, TCGv def)
rs1 = GET_FIELD(insn, 13, 17);
if (rs1 == 0)
- //r_rs1 = tcg_const_tl(0);
- tcg_gen_movi_tl(def, 0);
+ r_rs1 = tcg_const_tl(0); // XXX how to free?
else if (rs1 < 8)
- //r_rs1 = cpu_gregs[rs1];
- tcg_gen_mov_tl(def, cpu_gregs[rs1]);
+ r_rs1 = cpu_gregs[rs1];
else
tcg_gen_ld_tl(def, cpu_regwptr, (rs1 - 8) * sizeof(target_ulong));
return r_rs1;
@@ -1915,13 +1912,8 @@ static void disas_sparc_insn(DisasContext * dc)
rd = GET_FIELD(insn, 2, 6);
- cpu_dst = cpu_T[0];
- cpu_src1 = cpu_T[0]; // const
- cpu_src2 = cpu_T[1]; // const
-
- // loads and stores
- cpu_addr = cpu_T[0];
- cpu_val = cpu_T[1];
+ cpu_src1 = tcg_temp_new(TCG_TYPE_TL); // const
+ cpu_src2 = tcg_temp_new(TCG_TYPE_TL); // const
switch (opc) {
case 0: /* branches/sethi */
@@ -2081,9 +2073,9 @@ static void disas_sparc_insn(DisasContext * dc)
SPARCv8 manual, rdy on the
microSPARC II */
#endif
- tcg_gen_ld_tl(cpu_dst, cpu_env,
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, y));
- gen_movl_TN_reg(rd, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_tmp0);
break;
#ifdef TARGET_SPARC64
case 0x2: /* V9 rdccr */
@@ -2129,14 +2121,14 @@ static void disas_sparc_insn(DisasContext * dc)
case 0x13: /* Graphics Status */
if (gen_trap_ifnofpu(dc, cpu_cond))
goto jmp_insn;
- tcg_gen_ld_tl(cpu_dst, cpu_env,
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, gsr));
- gen_movl_TN_reg(rd, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_tmp0);
break;
case 0x17: /* Tick compare */
- tcg_gen_ld_tl(cpu_dst, cpu_env,
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, tick_cmpr));
- gen_movl_TN_reg(rd, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_tmp0);
break;
case 0x18: /* System tick */
{
@@ -2152,9 +2144,9 @@ static void disas_sparc_insn(DisasContext * dc)
}
break;
case 0x19: /* System tick compare */
- tcg_gen_ld_tl(cpu_dst, cpu_env,
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, stick_cmpr));
- gen_movl_TN_reg(rd, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_tmp0);
break;
case 0x10: /* Performance Control */
case 0x11: /* Performance Instrumentation Counter */
@@ -2222,7 +2214,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_ld_tl(cpu_dst, r_tsptr,
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free(r_tsptr);
}
@@ -2234,7 +2226,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_ld_tl(cpu_dst, r_tsptr,
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free(r_tsptr);
}
@@ -2246,7 +2238,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_ld_tl(cpu_dst, r_tsptr,
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tstate));
tcg_temp_free(r_tsptr);
}
@@ -2258,7 +2250,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_ld_i32(cpu_dst, r_tsptr,
+ tcg_gen_ld_i32(cpu_tmp0, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free(r_tsptr);
}
@@ -2270,73 +2262,73 @@ static void disas_sparc_insn(DisasContext * dc)
r_tickptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUState, tick));
- tcg_gen_helper_1_1(helper_tick_get_count, cpu_dst,
+ tcg_gen_helper_1_1(helper_tick_get_count, cpu_tmp0,
r_tickptr);
- gen_movl_TN_reg(rd, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_tmp0);
tcg_temp_free(r_tickptr);
}
break;
case 5: // tba
- tcg_gen_ld_tl(cpu_dst, cpu_env,
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, tbr));
break;
case 6: // pstate
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, pstate));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 7: // tl
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 8: // pil
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, psrpil));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 9: // cwp
- tcg_gen_helper_1_0(helper_rdcwp, cpu_dst);
+ tcg_gen_helper_1_0(helper_rdcwp, cpu_tmp0);
break;
case 10: // cansave
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cansave));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 11: // canrestore
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, canrestore));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 12: // cleanwin
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cleanwin));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 13: // otherwin
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, otherwin));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 14: // wstate
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wstate));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 16: // UA2005 gl
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 26: // UA2005 strand status
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, ssr));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 31: // ver
- tcg_gen_ld_tl(cpu_dst, cpu_env,
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, version));
break;
case 15: // fq
@@ -2346,9 +2338,9 @@ static void disas_sparc_insn(DisasContext * dc)
#else
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wim));
- tcg_gen_ext_i32_tl(cpu_dst, cpu_tmp32);
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
#endif
- gen_movl_TN_reg(rd, cpu_dst);
+ gen_movl_TN_reg(rd, cpu_tmp0);
break;
} else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
@@ -2356,8 +2348,8 @@ static void disas_sparc_insn(DisasContext * dc)
#else
if (!supervisor(dc))
goto priv_insn;
- tcg_gen_ld_tl(cpu_dst, cpu_env, offsetof(CPUSPARCState, tbr));
- gen_movl_TN_reg(rd, cpu_dst);
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUSPARCState, tbr));
+ gen_movl_TN_reg(rd, cpu_tmp0);
#endif
break;
#endif
@@ -3068,8 +3060,10 @@ static void disas_sparc_insn(DisasContext * dc)
break;
#ifdef TARGET_SPARC64
case 0xd: /* V9 udivx */
- gen_trap_ifdivzero_tl(cpu_src2);
- tcg_gen_divu_i64(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_src1);
+ tcg_gen_mov_tl(cpu_cc_src2, cpu_src2);
+ gen_trap_ifdivzero_tl(cpu_cc_src2);
+ tcg_gen_divu_i64(cpu_dst, cpu_cc_src, cpu_cc_src2);
break;
#endif
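
For V9 udivx the operands are first parked in the cpu_cc_src/cpu_cc_src2 globals before gen_trap_ifdivzero_tl() runs, presumably because that helper emits a conditional branch and plain temporaries do not survive across the label, while globals do. The guarded divide amounts to the following C-like sketch (not part of the patch; the trap name is hypothetical):

/* Sketch of the emitted sequence's effect: */
if (src2 == 0)
    raise_division_by_zero_trap();          /* hypothetical name */
dst = (uint64_t)src1 / (uint64_t)src2;      /* divisor known nonzero here */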
case 0xe:
@@ -3150,8 +3144,8 @@ static void disas_sparc_insn(DisasContext * dc)
{
switch(rd) {
case 0: /* wry */
- tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, y));
break;
#ifndef TARGET_SPARC64
@@ -3194,8 +3188,8 @@ static void disas_sparc_insn(DisasContext * dc)
case 0x13: /* Graphics Status */
if (gen_trap_ifnofpu(dc, cpu_cond))
goto jmp_insn;
- tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, gsr));
break;
case 0x17: /* Tick compare */
@@ -3206,16 +3200,16 @@ static void disas_sparc_insn(DisasContext * dc)
{
TCGv r_tickptr;
- tcg_gen_xor_tl(cpu_dst, cpu_src1,
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
cpu_src2);
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
tick_cmpr));
r_tickptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUState, tick));
tcg_gen_helper_0_2(helper_tick_set_limit,
- r_tickptr, cpu_dst);
+ r_tickptr, cpu_tmp0);
tcg_temp_free(r_tickptr);
}
break;
@@ -3245,16 +3239,16 @@ static void disas_sparc_insn(DisasContext * dc)
{
TCGv r_tickptr;
- tcg_gen_xor_tl(cpu_dst, cpu_src1,
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
cpu_src2);
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
stick_cmpr));
r_tickptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUState, stick));
tcg_gen_helper_0_2(helper_tick_set_limit,
- r_tickptr, cpu_dst);
+ r_tickptr, cpu_tmp0);
tcg_temp_free(r_tickptr);
}
break;
@@ -3307,7 +3301,7 @@ static void disas_sparc_insn(DisasContext * dc)
{
if (!supervisor(dc))
goto priv_insn;
- tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
switch (rd) {
case 0: // tpc
@@ -3317,7 +3311,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_st_tl(cpu_dst, r_tsptr,
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free(r_tsptr);
}
@@ -3329,7 +3323,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_st_tl(cpu_dst, r_tsptr,
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free(r_tsptr);
}
@@ -3341,7 +3335,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_st_tl(cpu_dst, r_tsptr,
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state,
tstate));
tcg_temp_free(r_tsptr);
@@ -3354,7 +3348,7 @@ static void disas_sparc_insn(DisasContext * dc)
r_tsptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tsptr, cpu_env,
offsetof(CPUState, tsptr));
- tcg_gen_st_i32(cpu_dst, r_tsptr,
+ tcg_gen_st_i32(cpu_tmp0, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free(r_tsptr);
}
@@ -3367,74 +3361,74 @@ static void disas_sparc_insn(DisasContext * dc)
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUState, tick));
tcg_gen_helper_0_2(helper_tick_set_count,
- r_tickptr, cpu_dst);
+ r_tickptr, cpu_tmp0);
tcg_temp_free(r_tickptr);
}
break;
case 5: // tba
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, tbr));
break;
case 6: // pstate
save_state(dc, cpu_cond);
- tcg_gen_helper_0_1(helper_wrpstate, cpu_dst);
+ tcg_gen_helper_0_1(helper_wrpstate, cpu_tmp0);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 7: // tl
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
break;
case 8: // pil
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
psrpil));
break;
case 9: // cwp
- tcg_gen_helper_0_1(helper_wrcwp, cpu_dst);
+ tcg_gen_helper_0_1(helper_wrcwp, cpu_tmp0);
break;
case 10: // cansave
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cansave));
break;
case 11: // canrestore
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
canrestore));
break;
case 12: // cleanwin
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cleanwin));
break;
case 13: // otherwin
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
otherwin));
break;
case 14: // wstate
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
wstate));
break;
case 16: // UA2005 gl
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
break;
case 26: // UA2005 strand status
if (!hypervisor(dc))
goto priv_insn;
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, ssr));
break;
@@ -3442,9 +3436,7 @@ static void disas_sparc_insn(DisasContext * dc)
goto illegal_insn;
}
#else
- tcg_gen_andi_tl(cpu_dst, cpu_dst,
- ((1 << NWINDOWS) - 1));
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wim));
#endif
@@ -3455,13 +3447,13 @@ static void disas_sparc_insn(DisasContext * dc)
#ifndef TARGET_SPARC64
if (!supervisor(dc))
goto priv_insn;
- tcg_gen_xor_tl(cpu_dst, cpu_dst, cpu_src2);
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, tbr));
#else
if (!hypervisor(dc))
goto priv_insn;
- tcg_gen_xor_tl(cpu_dst, cpu_dst, cpu_src2);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (rd) {
case 0: // hpstate
// XXX gen_op_wrhpstate();
@@ -3474,12 +3466,12 @@ static void disas_sparc_insn(DisasContext * dc)
// XXX gen_op_wrhtstate();
break;
case 3: // hintp
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, hintp));
break;
case 5: // htba
- tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_dst);
+ tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, htba));
break;
@@ -3487,14 +3479,14 @@ static void disas_sparc_insn(DisasContext * dc)
{
TCGv r_tickptr;
- tcg_gen_st_tl(cpu_dst, cpu_env,
+ tcg_gen_st_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
hstick_cmpr));
r_tickptr = tcg_temp_new(TCG_TYPE_PTR);
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUState, hstick));
tcg_gen_helper_0_2(helper_tick_set_limit,
- r_tickptr, cpu_dst);
+ r_tickptr, cpu_tmp0);
tcg_temp_free(r_tickptr);
}
break;
@@ -4224,7 +4216,7 @@ static void disas_sparc_insn(DisasContext * dc)
save_state(dc, cpu_cond);
r_const = tcg_const_i32(7);
- tcg_gen_helper_0_2(helper_check_align, cpu_dst,
+ tcg_gen_helper_0_2(helper_check_align, cpu_addr,
r_const); // XXX remove
tcg_temp_free(r_const);
ABI32_MASK(cpu_addr);
@@ -4749,6 +4741,12 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb,
cpu_tmp32 = tcg_temp_new(TCG_TYPE_I32);
cpu_tmp64 = tcg_temp_new(TCG_TYPE_I64);
+ cpu_dst = tcg_temp_local_new(TCG_TYPE_TL);
+
+ // loads and stores
+ cpu_val = tcg_temp_local_new(TCG_TYPE_TL);
+ cpu_addr = tcg_temp_local_new(TCG_TYPE_TL);
+
do {
if (env->nb_breakpoints > 0) {
for(j = 0; j < env->nb_breakpoints; j++) {
@@ -4798,6 +4796,9 @@ static inline int gen_intermediate_code_internal(TranslationBlock * tb,
(dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
exit_gen_loop:
+ tcg_temp_free(cpu_addr);
+ tcg_temp_free(cpu_val);
+ tcg_temp_free(cpu_dst);
tcg_temp_free(cpu_tmp64);
tcg_temp_free(cpu_tmp32);
tcg_temp_free(cpu_tmp0);
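
cpu_dst, cpu_val and cpu_addr replace the old cpu_T[0]/cpu_T[1] globals; they are now allocated once per translation block as local temporaries and freed at exit_gen_loop. Local temps are used because, unlike plain temps, they keep their value across the labels that conditionally generated code emits. A minimal sketch of the two kinds (the calls match the ones above; the usage comment is an assumption):

TCGv t  = tcg_temp_new(TCG_TYPE_TL);        /* dead after the next label     */
TCGv lt = tcg_temp_local_new(TCG_TYPE_TL);  /* survives across branch labels */
/* ... generate code ... */
tcg_temp_free(lt);
tcg_temp_free(t);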
@@ -4878,11 +4879,6 @@ void gen_intermediate_code_init(CPUSPARCState *env)
TCG_AREG0, offsetof(CPUState, xcc),
"xcc");
#endif
- /* XXX: T0 and T1 should be temporaries */
- cpu_T[0] = tcg_global_mem_new(TCG_TYPE_TL,
- TCG_AREG0, offsetof(CPUState, t0), "T0");
- cpu_T[1] = tcg_global_mem_new(TCG_TYPE_TL,
- TCG_AREG0, offsetof(CPUState, t1), "T1");
cpu_cond = tcg_global_mem_new(TCG_TYPE_TL,
TCG_AREG0, offsetof(CPUState, cond),
"cond");
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 6d955c949..39319903c 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -67,9 +67,20 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
};
static const int tcg_target_reg_alloc_order[] = {
- TCG_REG_R0,
- TCG_REG_R1,
- TCG_REG_R2,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R16,
+ TCG_REG_R17,
+ TCG_REG_R18,
+ TCG_REG_R19,
+ TCG_REG_R20,
+ TCG_REG_R21,
+ TCG_REG_R22,
+ TCG_REG_R23,
+ TCG_REG_R28,
+ TCG_REG_R29,
+ TCG_REG_R30,
+ TCG_REG_R31,
TCG_REG_R3,
TCG_REG_R4,
TCG_REG_R5,
@@ -81,24 +92,13 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_R11,
TCG_REG_R12,
TCG_REG_R13,
- TCG_REG_R14,
- TCG_REG_R15,
- TCG_REG_R16,
- TCG_REG_R17,
- TCG_REG_R18,
- TCG_REG_R19,
- TCG_REG_R20,
- TCG_REG_R21,
- TCG_REG_R22,
- TCG_REG_R23,
+ TCG_REG_R0,
+ TCG_REG_R1,
+ TCG_REG_R2,
TCG_REG_R24,
TCG_REG_R25,
TCG_REG_R26,
- TCG_REG_R27,
- TCG_REG_R28,
- TCG_REG_R29,
- TCG_REG_R30,
- TCG_REG_R31
+ TCG_REG_R27
};
static const int tcg_target_call_iarg_regs[] = {
@@ -118,7 +118,6 @@ static const int tcg_target_call_oarg_regs[2] = {
};
static const int tcg_target_callee_save_regs[] = {
- TCG_REG_R13, /* sould r13 be saved? */
TCG_REG_R14,
TCG_REG_R15,
TCG_REG_R16,
@@ -198,6 +197,10 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
ct_str = *pct_str;
switch (ct_str[0]) {
+ case 'A': case 'B': case 'C': case 'D':
+ ct->ct |= TCG_CT_REG;
+ tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
+ break;
case 'r':
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
@@ -278,9 +281,12 @@ static int tcg_target_const_match(tcg_target_long val,
#define RLWINM OPCD(21)
-#define BCLR XO19(16)
+#define BCLR XO19( 16)
#define BCCTR XO19(528)
#define CRAND XO19(257)
+#define CRANDC XO19(129)
+#define CRNAND XO19(225)
+#define CROR XO19(449)
#define EXTSB XO31(954)
#define EXTSH XO31(922)
@@ -388,7 +394,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
else {
tcg_out32 (s, ADDIS | RT (ret) | RA (0) | ((arg >> 16) & 0xffff));
if (arg & 0xffff)
- tcg_out32 (s, ORI | RT (ret) | RA (ret) | (arg & 0xffff));
+ tcg_out32 (s, ORI | RS (ret) | RA (ret) | (arg & 0xffff));
}
}
@@ -816,7 +822,7 @@ void tcg_target_qemu_prologue (TCGContext *s)
| (i * 4 + 8 + TCG_STATIC_CALL_ARGS_SIZE)
)
);
- tcg_out32 (s, STW | RS (0) | RA (1) | (frame_size - 4));
+ tcg_out32 (s, STW | RS (0) | RA (1) | (frame_size + 4));
tcg_out32 (s, MTSPR | RS (3) | CTR);
tcg_out32 (s, BCCTR | BO_ALWAYS);
@@ -829,7 +835,7 @@ void tcg_target_qemu_prologue (TCGContext *s)
| (i * 4 + 8 + TCG_STATIC_CALL_ARGS_SIZE)
)
);
- tcg_out32 (s, LWZ | RT (0) | RA (1) | (frame_size - 4));
+ tcg_out32 (s, LWZ | RT (0) | RA (1) | (frame_size + 4));
tcg_out32 (s, MTSPR | RS (0) | LR);
tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
tcg_out32 (s, BCLR | BO_ALWAYS);
@@ -866,11 +872,9 @@ static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
ppc_addi (s, reg, reg, val);
}
-static void tcg_out_brcond(TCGContext *s, int cond,
- TCGArg arg1, TCGArg arg2, int const_arg2,
- int label_index)
+static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
+ int const_arg2, int cr)
{
- TCGLabel *l = &s->labels[label_index];
int imm;
uint32_t op;
@@ -926,7 +930,7 @@ static void tcg_out_brcond(TCGContext *s, int cond,
default:
tcg_abort ();
}
- op |= BF (7);
+ op |= BF (cr);
if (imm)
tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
@@ -939,83 +943,79 @@ static void tcg_out_brcond(TCGContext *s, int cond,
tcg_out32 (s, op | RA (arg1) | RB (arg2));
}
- if (l->has_value) {
- tcg_target_long disp;
+}
- disp = (tcg_target_long) s->code_ptr - l->u.value;
- if (disp != (int16_t) disp)
- tcg_abort ();
+static void tcg_out_bc (TCGContext *s, int bc, int label_index)
+{
+ TCGLabel *l = &s->labels[label_index];
- tcg_out32 (s, tcg_to_bc[cond] | reloc_pc14_val (s->code_ptr,
- l->u.value));
- }
+ if (l->has_value)
+ tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
else {
- tcg_out32 (s, tcg_to_bc[cond]);
+ uint16_t val = *(uint16_t *) &s->code_ptr[2];
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, bc | (val & 0xfffc));
tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
}
}
-/* brcond2 is taken verbatim from i386 tcg-target */
+static void tcg_out_brcond (TCGContext *s, int cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index)
+{
+ tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7);
+ tcg_out_bc (s, tcg_to_bc[cond], label_index);
+}
+
/* XXX: we implement it at the target level to avoid having to
handle cross basic blocks temporaries */
-static void tcg_out_brcond2(TCGContext *s,
- const TCGArg *args, const int *const_args)
+static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
+ const int *const_args)
{
- int label_next;
- label_next = gen_new_label();
- switch(args[4]) {
+ int cond = args[4], label_index = args[5], op;
+ struct { int bit1; int bit2; int cond2; } bits[] = {
+ [TCG_COND_LT ] = { CR_LT, CR_LT, TCG_COND_LT },
+ [TCG_COND_LE ] = { CR_LT, CR_GT, TCG_COND_LT },
+ [TCG_COND_GT ] = { CR_GT, CR_GT, TCG_COND_GT },
+ [TCG_COND_GE ] = { CR_GT, CR_LT, TCG_COND_GT },
+ [TCG_COND_LTU] = { CR_LT, CR_LT, TCG_COND_LTU },
+ [TCG_COND_LEU] = { CR_LT, CR_GT, TCG_COND_LTU },
+ [TCG_COND_GTU] = { CR_GT, CR_GT, TCG_COND_GTU },
+ [TCG_COND_GEU] = { CR_GT, CR_LT, TCG_COND_GTU },
+ }, *b = &bits[cond];
+
+ switch (cond) {
case TCG_COND_EQ:
- tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next);
- tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]);
+ tcg_out_cmp (s, TCG_COND_EQ, args[0], args[2], const_args[2], 6);
+ tcg_out_cmp (s, TCG_COND_EQ, args[1], args[3], const_args[3], 7);
+ tcg_out32 (s, CRAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
break;
case TCG_COND_NE:
- tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]);
+ tcg_out_cmp (s, TCG_COND_NE, args[0], args[2], const_args[2], 6);
+ tcg_out_cmp (s, TCG_COND_NE, args[1], args[3], const_args[3], 7);
+ tcg_out32 (s, CRNAND | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, CR_EQ));
break;
case TCG_COND_LT:
- tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_LT, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_LE:
- tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_LE, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_GT:
- tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_GT, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_GE:
- tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_GE, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_LTU:
- tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_LEU:
- tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_GTU:
- tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
- break;
case TCG_COND_GEU:
- tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], label_next);
- tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
+ op = (b->bit1 != b->bit2) ? CRANDC : CRAND;
+ tcg_out_cmp (s, b->cond2, args[1], args[3], const_args[3], 5);
+ tcg_out_cmp (s, TCG_COND_EQ, args[1], args[3], const_args[3], 6);
+ tcg_out_cmp (s, cond, args[0], args[2], const_args[2], 7);
+ tcg_out32 (s, op | BT (7, CR_EQ) | BA (6, CR_EQ) | BB (7, b->bit2));
+ tcg_out32 (s, CROR | BT (7, CR_EQ) | BA (5, b->bit1) | BB (7, CR_EQ));
break;
default:
tcg_abort();
}
- tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
+
+ tcg_out_bc (s, (BC | BI (7, CR_EQ) | BO_COND_TRUE), label_index);
}
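
tcg_out_brcond2() no longer emits a chain of up to three conditional branches per 64-bit comparison; it compares the high and low halves into separate condition-register fields, folds them with CR logic, and branches once. The predicate it evaluates for the ordered cases is the usual two-word comparison, shown here as a plain C sketch (signed "less than" on a hi:lo pair; illustrative only):

static int lt64_sketch(int32_t ah, uint32_t al, int32_t bh, uint32_t bl)
{
    /* high halves compared with the signed/unsigned condition, low halves
       always unsigned; equal high halves defer to the low comparison. */
    return (ah < bh) || (ah == bh && al < bl);
}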
static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
@@ -1029,24 +1029,9 @@ static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
/* direct jump method */
- uint32_t val;
- uint16_t *p;
s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
- /* Thanks to Andrzej Zaborowski for this */
- val = *(uint32_t *) s->code_ptr & 0x3fffffc;
-
- tcg_out32 (s, B | val);
-
- /* For branches outside of LL range
- This must be in concord with tb_set_jmp_target1 */
- p = (uint16_t *) s->code_ptr;
- p[0] = (ADDIS | RT (0) | RA (0)) >> 16;
- p[2] = (ORI | RT (0) | RA (0)) >> 16;
- s->code_ptr += 8;
-
- tcg_out32 (s, MTSPR | RS (0) | CTR);
- tcg_out32 (s, BCCTR | BO_ALWAYS);
+ s->code_ptr += 16;
}
else {
tcg_abort ();
@@ -1061,7 +1046,10 @@ static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
tcg_out_b (s, 0, l->u.value);
}
else {
- tcg_out32 (s, B);
+ uint32_t val = *(uint32_t *) s->code_ptr;
+
+ /* Thanks to Andrzej Zaborowski */
+ tcg_out32 (s, B | (val & 0x3fffffc));
tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
}
}
@@ -1208,6 +1196,27 @@ static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
else
tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2]));
break;
+
+ case INDEX_op_div_i32:
+ tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
+ tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
+ tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
+ break;
+
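
div_i32/divu_i32/rem_i32/remu_i32 replace the old double-word div2/divu2 ops; the remainder is derived from the quotient with a multiply and a reverse subtract (SUBF rd,ra,rb computes rb - ra). In C terms the three-instruction sequence is (sketch, divisor assumed nonzero):

static uint32_t remu_sketch(uint32_t a, uint32_t b)
{
    uint32_t q = a / b;     /* DIVWU r0, a, b  */
    uint32_t p = q * b;     /* MULLW r0, r0, b */
    return a - p;           /* SUBF  rd, r0, a */
}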
case INDEX_op_mulu2_i32:
if (args[0] == args[2] || args[0] == args[3]) {
tcg_out32 (s, MULLW | TAB (0, args[2], args[3]));
@@ -1219,34 +1228,6 @@ static void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
tcg_out32 (s, MULHWU | TAB (args[1], args[2], args[3]));
}
break;
- case INDEX_op_div2_i32:
- if (args[0] == args[2] || args[0] == args[3]) {
- tcg_out32 (s, DIVW | TAB (0, args[2], args[3]));
- tcg_out32 (s, MULLW | TAB (0, 0, args[3]));
- tcg_out32 (s, SUBF | TAB (0, 0, args[2]));
- tcg_out32 (s, DIVW | TAB (args[0], args[2], args[3]));
- tcg_out_mov (s, args[1], 0);
- }
- else {
- tcg_out32 (s, DIVW | TAB (args[0], args[2], args[3]));
- tcg_out32 (s, MULLW | TAB (0, args[0], args[3]));
- tcg_out32 (s, SUBF | TAB (args[1], 0, args[2]));
- }
- break;
- case INDEX_op_divu2_i32:
- if (args[0] == args[2] || args[0] == args[3]) {
- tcg_out32 (s, DIVWU | TAB (0, args[2], args[3]));
- tcg_out32 (s, MULLW | TAB (0, 0, args[3]));
- tcg_out32 (s, SUBF | TAB (0, 0, args[2]));
- tcg_out32 (s, DIVWU | TAB (args[0], args[2], args[3]));
- tcg_out_mov (s, args[1], 0);
- }
- else {
- tcg_out32 (s, DIVWU | TAB (args[0], args[2], args[3]));
- tcg_out32 (s, MULLW | TAB (0, args[0], args[3]));
- tcg_out32 (s, SUBF | TAB (args[1], 0, args[2]));
- }
- break;
case INDEX_op_shl_i32:
if (const_args[2]) {
@@ -1387,9 +1368,11 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_add_i32, { "r", "r", "ri" } },
{ INDEX_op_mul_i32, { "r", "r", "ri" } },
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
{ INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
- { INDEX_op_div2_i32, { "r", "r", "r", "r", "r" } },
- { INDEX_op_divu2_i32, { "r", "r", "r", "r", "r" } },
{ INDEX_op_sub_i32, { "r", "r", "ri" } },
{ INDEX_op_and_i32, { "r", "r", "ri" } },
{ INDEX_op_or_i32, { "r", "r", "ri" } },
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index e7a1c2a0d..db1b12170 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -70,6 +70,7 @@ enum {
/* optional instructions */
#define TCG_TARGET_HAS_neg_i32
+#define TCG_TARGET_HAS_div_i32
#define TCG_AREG0 TCG_REG_R27
#define TCG_AREG1 TCG_REG_R24
diff --git a/tcg/tcg.c b/tcg/tcg.c
index ef4460d25..e97605411 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -353,7 +353,7 @@ TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset,
#if TCG_TARGET_REG_BITS == 32
if (type == TCG_TYPE_I64) {
char buf[64];
- tcg_temp_alloc(s, s->nb_globals + 1);
+ tcg_temp_alloc(s, s->nb_globals + 2);
ts = &s->temps[s->nb_globals];
ts->base_type = type;
ts->type = TCG_TYPE_I32;
@@ -423,7 +423,7 @@ TCGv tcg_temp_new_internal(TCGType type, int temp_local)
idx = s->nb_temps;
#if TCG_TARGET_REG_BITS == 32
if (type == TCG_TYPE_I64) {
- tcg_temp_alloc(s, s->nb_temps + 1);
+ tcg_temp_alloc(s, s->nb_temps + 2);
ts = &s->temps[s->nb_temps];
ts->base_type = type;
ts->type = TCG_TYPE_I32;
@@ -1961,7 +1961,7 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
break;
}
args += def->nb_args;
- next: ;
+ next:
if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
return op_index;
}
diff --git a/tests/cris/Makefile b/tests/cris/Makefile
index 69068996f..cfe494c3a 100644
--- a/tests/cris/Makefile
+++ b/tests/cris/Makefile
@@ -140,9 +140,9 @@ build: $(CRT) $(SYS) $(TESTCASES)
check: $(CRT) $(SYS) $(TESTCASES)
@echo -e "\nQEMU simulator."
- @for case in $(TESTCASES); do \
+ for case in $(TESTCASES); do \
echo -n "$$case "; \
- $(SIM) $$case; \
+ $(SIM) ./$$case; \
done
check-g: $(CRT) $(SYS) $(TESTCASES)
@echo -e "\nGDB simulator."
diff --git a/tests/cris/check_movei.s b/tests/cris/check_movei.s
index 2defda5f3..bbfa63337 100644
--- a/tests/cris/check_movei.s
+++ b/tests/cris/check_movei.s
@@ -7,8 +7,11 @@
.include "testutils.inc"
start
+ move.d 0, $r3
; A write that works. Check that flags are set correspondingly.
move.d d,r4
+ ;; store to bring it into the tlb with the right prot bits
+ move.d r3,[r4]
moveq -2,r5
setf c
clearf p
diff --git a/tests/cris/check_swap.c b/tests/cris/check_swap.c
index c1eac88e5..743cfc54d 100644
--- a/tests/cris/check_swap.c
+++ b/tests/cris/check_swap.c
@@ -72,4 +72,5 @@ int main(void)
{
check_swap();
pass();
+ return 0;
}
diff --git a/tests/cris/check_xarith.s b/tests/cris/check_xarith.s
index d0356abed..80038b2ab 100644
--- a/tests/cris/check_xarith.s
+++ b/tests/cris/check_xarith.s
@@ -42,5 +42,31 @@
nop
fail
1:
+
+ ;; test for broken X sequence, run it several times.
+ moveq 8, $r0
+1:
+ moveq 0, $r3
+ move.d $r0, $r1
+ andq 1, $r1
+ lslq 4, $r1
+ moveq 1, $r2
+ or.d $r1, $r2
+ ba 2f
+ move $r2, $ccs
+2:
+ addq 0, $r3
+ move.d $r0, $r4
+ move.d $r1, $r5
+ move.d $r2, $r6
+ move.d $r3, $r7
+ lsrq 4, $r1
+ move.d $r1, $r8
+ xor $r1, $r3
+ checkr3 0
+ subq 1, $r0
+ bne 1b
+ nop
+
pass
quit
diff --git a/tests/cris/crisutils.h b/tests/cris/crisutils.h
index 63c713897..7d1ea86f7 100644
--- a/tests/cris/crisutils.h
+++ b/tests/cris/crisutils.h
@@ -4,7 +4,7 @@ static char *tst_cc_loc = NULL;
do { tst_cc_loc = "test_cc failed at " CURRENT_LOCATION; } while(0)
/* We need a real symbol to signal error. */
-static void _err(void) {
+void _err(void) {
if (!tst_cc_loc)
tst_cc_loc = "tst_cc_failed\n";
_fail(tst_cc_loc);
diff --git a/tests/cris/sys.c b/tests/cris/sys.c
index 264ec06f3..551c5dd7c 100644
--- a/tests/cris/sys.c
+++ b/tests/cris/sys.c
@@ -16,7 +16,7 @@ void pass(void) {
}
void _fail(char *reason) {
- char s[] = "failed: ";
+ char s[] = "\nfailed: ";
int len = mystrlen(reason);
write (1, s, sizeof (s) - 1);
write (1, reason, len);
@@ -41,8 +41,11 @@ void exit (int status) {
ssize_t write (int fd, const void *buf, size_t count) {
int r;
- asm volatile ("moveq 4, $r9\n" /* NR_write. */
- "break 13\n" : : : "memory");
- asm volatile ("move.d $r10, %0\n" : "=r" (r));
+ asm ("move.d %0, $r10\n"
+ "move.d %1, $r11\n"
+ "move.d %2, $r12\n"
+ "moveq 4, $r9\n" /* NR_write. */
+ "break 13\n" : : "r" (fd), "r" (buf), "r" (count) : "memory");
+ asm ("move.d $r10, %0\n" : "=r" (r));
return r;
}
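
The rewritten write() passes fd, buf and count to the syscall through explicit asm operands instead of assuming they are already sitting in r10-r12. An equivalent formulation using GCC's local register variable extension (a sketch under that assumption, not what the patch uses):

ssize_t write_sketch(int fd, const void *buf, size_t count)
{
    register int         r9  asm ("r9")  = 4;      /* NR_write */
    register int         r10 asm ("r10") = fd;
    register const void *r11 asm ("r11") = buf;
    register size_t      r12 asm ("r12") = count;
    asm volatile ("break 13"
                  : "+r" (r10)
                  : "r" (r9), "r" (r11), "r" (r12)
                  : "memory");
    return r10;                                    /* result comes back in r10 */
}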
diff --git a/tests/cris/testutils.inc b/tests/cris/testutils.inc
index 4f434e1e4..aa1641b2e 100644
--- a/tests/cris/testutils.inc
+++ b/tests/cris/testutils.inc
@@ -23,7 +23,7 @@ main:
.macro fail
.data
99:
- .asciz " checkr3 failed"
+ .asciz " checkr3 failed\n"
.text
move.d 99b, $r10
jsr _fail
@@ -36,7 +36,7 @@ main:
nop
.data
99:
- .asciz "checkr3 failed"
+ .asciz "checkr3 failed\n"
.text
move.d 99b, $r10
jsr _fail
@@ -79,7 +79,7 @@ main:
9:
.data
99:
- .asciz "test_move_cc failed"
+ .asciz "test_move_cc failed\n"
.text
move.d 99b, $r10
jsr _fail
@@ -108,7 +108,7 @@ main:
9:
.data
99:
- .asciz "test_move_cc failed"
+ .asciz "test_move_cc failed\n"
.text
move.d 99b, $r10
jsr _fail
diff --git a/vl.c b/vl.c
index f573dced3..3032eafe5 100644
--- a/vl.c
+++ b/vl.c
@@ -3429,8 +3429,7 @@ static CharDriverState *qemu_chr_open_tcp(const char *host_str,
#ifndef _WIN32
if (is_unix) {
char path[109];
- strncpy(path, uaddr.sun_path, 108);
- path[108] = 0;
+ pstrcpy(path, sizeof(path), uaddr.sun_path);
unlink(path);
} else
#endif
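
strncpy() is replaced by QEMU's pstrcpy(), which takes the destination size and always NUL-terminates, so the manual path[108] = 0 is no longer needed. The calling convention, as used above:

char path[109];
pstrcpy(path, sizeof(path), uaddr.sun_path);  /* truncates and terminates */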
@@ -5291,7 +5290,7 @@ int drive_init(struct drive_opt *arg, int snapshot,
}
if (get_param_value(buf, sizeof(buf), "if", str)) {
- strncpy(devname, buf, sizeof(devname));
+ pstrcpy(devname, sizeof(devname), buf);
if (!strcmp(buf, "ide")) {
type = IF_IDE;
max_devs = MAX_IDE_DEVS;
@@ -6340,6 +6339,10 @@ static int qemu_savevm_state(QEMUFile *f)
qemu_put_be64(f, 0); /* total size */
for(se = first_se; se != NULL; se = se->next) {
+ if (se->save_state == NULL)
+ /* this one has a loader only, for backwards compatibility */
+ continue;
+
/* ID string */
len = strlen(se->idstr);
qemu_put_byte(f, len);
@@ -7406,14 +7409,6 @@ void qemu_system_powerdown_request(void)
cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
}
-/* boot_set handler */
-QEMUBootSetHandler *qemu_boot_set_handler = NULL;
-
-void qemu_register_boot_set(QEMUBootSetHandler *func)
-{
- qemu_boot_set_handler = func;
-}
-
static int qemu_select(int max_fd, fd_set *rfds, fd_set *wfds, fd_set *xfds,
struct timeval *tv)
{
@@ -8121,6 +8116,16 @@ struct soundhw soundhw[] = {
{ .init_isa = SB16_init }
},
+#ifdef CONFIG_CS4231A
+ {
+ "cs4231a",
+ "CS4231A",
+ 0,
+ 1,
+ { .init_isa = cs4231a_init }
+ },
+#endif
+
#ifdef CONFIG_ADLIB
{
"adlib",