Diffstat (limited to 'linux-user/main.c')
-rw-r--r--  linux-user/main.c  184
1 file changed, 160 insertions, 24 deletions
diff --git a/linux-user/main.c b/linux-user/main.c
index 21c2d3ba0..a4ffea3fd 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -26,6 +26,8 @@
#include "qemu.h"
#include "qemu-common.h"
+/* For tb_lock */
+#include "exec-all.h"
#define DEBUG_LOGFILE "/tmp/qemu.log"
@@ -123,6 +125,135 @@ int64_t cpu_get_real_ticks(void)
#endif
+#if defined(USE_NPTL)
+/***********************************************************/
+/* Helper routines for implementing atomic operations. */
+
+/* To implement exclusive operations we force all cpus to synchronise.
+   We don't require a full sync, only that no cpus are executing guest code.
+   The alternative is to map target atomic ops onto host equivalents,
+   which requires quite a lot of per host/target work. */
+static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static int pending_cpus;
+
+/* Make sure everything is in a consistent state for calling fork(). */
+void fork_start(void)
+{
+    mmap_fork_start();
+    pthread_mutex_lock(&tb_lock);
+    pthread_mutex_lock(&exclusive_lock);
+}
+
+void fork_end(int child)
+{
+    if (child) {
+        /* Child processes created by fork() only have a single thread.
+           Discard information about the parent threads. */
+        first_cpu = thread_env;
+        thread_env->next_cpu = NULL;
+        pending_cpus = 0;
+        pthread_mutex_init(&exclusive_lock, NULL);
+        pthread_cond_init(&exclusive_cond, NULL);
+        pthread_cond_init(&exclusive_resume, NULL);
+        pthread_mutex_init(&tb_lock, NULL);
+    } else {
+        pthread_mutex_unlock(&exclusive_lock);
+        pthread_mutex_unlock(&tb_lock);
+    }
+    mmap_fork_end(child);
+}
+
+/* Wait for pending exclusive operations to complete. The exclusive lock
+   must be held. */
+static inline void exclusive_idle(void)
+{
+    while (pending_cpus) {
+        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+    }
+}
+
+/* Start an exclusive operation.
+   Must only be called from outside cpu_arm_exec. */
+static inline void start_exclusive(void)
+{
+    CPUState *other;
+    pthread_mutex_lock(&exclusive_lock);
+    exclusive_idle();
+
+    pending_cpus = 1;
+    /* Make all other cpus stop executing. */
+    for (other = first_cpu; other; other = other->next_cpu) {
+        if (other->running) {
+            pending_cpus++;
+            cpu_interrupt(other, CPU_INTERRUPT_EXIT);
+        }
+    }
+    if (pending_cpus > 1) {
+        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+    }
+}
+
+/* Finish an exclusive operation. */
+static inline void end_exclusive(void)
+{
+    pending_cpus = 0;
+    pthread_cond_broadcast(&exclusive_resume);
+    pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Wait for exclusive ops to finish, and begin cpu execution. */
+static inline void cpu_exec_start(CPUState *env)
+{
+    pthread_mutex_lock(&exclusive_lock);
+    exclusive_idle();
+    env->running = 1;
+    pthread_mutex_unlock(&exclusive_lock);
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops. */
+static inline void cpu_exec_end(CPUState *env)
+{
+    pthread_mutex_lock(&exclusive_lock);
+    env->running = 0;
+    if (pending_cpus > 1) {
+        pending_cpus--;
+        if (pending_cpus == 1) {
+            pthread_cond_signal(&exclusive_cond);
+        }
+    }
+    exclusive_idle();
+    pthread_mutex_unlock(&exclusive_lock);
+}
+#else /* if !USE_NPTL */
+/* These are no-ops because we are not thread-safe. */
+static inline void cpu_exec_start(CPUState *env)
+{
+}
+
+static inline void cpu_exec_end(CPUState *env)
+{
+}
+
+static inline void start_exclusive(void)
+{
+}
+
+static inline void end_exclusive(void)
+{
+}
+
+void fork_start(void)
+{
+}
+
+void fork_end(int child)
+{
+}
+#endif
+
+
#ifdef TARGET_I386
/***********************************************************/
/* CPUX86 core interface */
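
The handshake added above can be exercised outside QEMU. Below is a minimal,
self-contained sketch of the same protocol, assuming plain worker threads in
place of vCPUs and a polled flag in place of cpu_interrupt(env,
CPU_INTERRUPT_EXIT); every name here is hypothetical, and only the locking
protocol matches the patch (the sketch waits in a loop where the patch relies
on a single pthread_cond_wait). Build with -lpthread.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NR_WORKERS 2

    static pthread_mutex_t excl_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t excl_cond = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t excl_resume = PTHREAD_COND_INITIALIZER;
    static int pending;               /* plays the role of pending_cpus */
    static int running[NR_WORKERS];   /* plays the role of env->running */
    static volatile int exit_request; /* stands in for CPU_INTERRUPT_EXIT */
    static volatile int done;

    static void exclusive_idle_demo(void) /* excl_lock must be held */
    {
        while (pending)
            pthread_cond_wait(&excl_resume, &excl_lock);
    }

    static void exec_start_demo(int id)   /* as cpu_exec_start() */
    {
        pthread_mutex_lock(&excl_lock);
        exclusive_idle_demo();
        running[id] = 1;
        pthread_mutex_unlock(&excl_lock);
    }

    static void exec_end_demo(int id)     /* as cpu_exec_end() */
    {
        pthread_mutex_lock(&excl_lock);
        running[id] = 0;
        if (pending > 1 && --pending == 1)
            pthread_cond_signal(&excl_cond);
        exclusive_idle_demo();
        pthread_mutex_unlock(&excl_lock);
    }

    static void start_exclusive_demo(void)
    {
        int i;

        pthread_mutex_lock(&excl_lock);
        exclusive_idle_demo();
        pending = 1;
        exit_request = 1;             /* kick workers out of "guest code" */
        for (i = 0; i < NR_WORKERS; i++)
            if (running[i])
                pending++;
        while (pending > 1)           /* wait until all runners stopped */
            pthread_cond_wait(&excl_cond, &excl_lock);
    }

    static void end_exclusive_demo(void)
    {
        pending = 0;
        exit_request = 0;
        pthread_cond_broadcast(&excl_resume);
        pthread_mutex_unlock(&excl_lock);
    }

    static void *worker(void *arg)
    {
        int id = (int)(long)arg;

        while (!done) {
            exec_start_demo(id);
            while (!exit_request && !done)
                usleep(1000);         /* "executing guest code" */
            exec_end_demo(id);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t th[NR_WORKERS];
        long i;

        for (i = 0; i < NR_WORKERS; i++)
            pthread_create(&th[i], NULL, worker, (void *)i);
        usleep(10000);
        start_exclusive_demo();
        printf("exclusive: no worker is executing\n");
        done = 1;
        end_exclusive_demo();
        for (i = 0; i < NR_WORKERS; i++)
            pthread_join(th[i], NULL);
        return 0;
    }
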
@@ -378,8 +509,11 @@ do_kernel_trap(CPUARMState *env)
        /* ??? No-op. Will need to do better for SMP. */
        break;
    case 0xffff0fc0: /* __kernel_cmpxchg */
-        /* ??? This is not really atomic. However we don't support
-           threads anyway, so it doesn't realy matter. */
+        /* XXX: This only works between threads, not between processes.
+           It's probably possible to implement this with native host
+           operations. However, things like ldrex/strex are much harder,
+           so there's not much point trying. */
+        start_exclusive();
        cpsr = cpsr_read(env);
        addr = env->regs[2];
        /* FIXME: This should SEGV if the access fails. */
@@ -396,6 +530,7 @@ do_kernel_trap(CPUARMState *env)
            cpsr &= ~CPSR_C;
        }
        cpsr_write(env, cpsr, CPSR_C);
+        end_exclusive();
        break;
    case 0xffff0fe0: /* __kernel_get_tls */
        env->regs[0] = env->cp15.c13_tls2;
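
With every other cpu parked by start_exclusive(), the cmpxchg itself reduces
to a plain read-compare-write. A sketch of the guarded sequence, using a
hypothetical helper: a raw pointer stands in for the get_user()/put_user()
guest accesses, and a boolean return for the CPSR.C update.

    /* Hypothetical helper; only the bracketing with start_exclusive()/
       end_exclusive() mirrors the patch above. */
    static int kernel_cmpxchg_demo(uint32_t *addr, uint32_t oldval,
                                   uint32_t newval)
    {
        int success;

        start_exclusive();      /* no other cpu is executing guest code */
        success = (*addr == oldval);
        if (success)
            *addr = newval;     /* whole read-modify-write is exclusive */
        end_exclusive();
        return success;         /* the real code sets CPSR.C instead */
    }
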
@@ -422,7 +557,9 @@ void cpu_loop(CPUARMState *env)
    uint32_t addr;
    for(;;) {
+        cpu_exec_start(env);
        trapnr = cpu_arm_exec(env);
+        cpu_exec_end(env);
        switch(trapnr) {
        case EXCP_UDEF:
            {
@@ -626,11 +763,11 @@ void cpu_loop(CPUARMState *env)
   can be found at http://www.sics.se/~psm/sparcstack.html */
static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
{
-    index = (index + cwp * 16) & (16 * NWINDOWS - 1);
+    index = (index + cwp * 16) % (16 * env->nwindows);
    /* wrap handling : if cwp is on the last window, then we use the
       registers 'after' the end */
-    if (index < 8 && env->cwp == (NWINDOWS - 1))
-        index += (16 * NWINDOWS);
+    if (index < 8 && env->cwp == env->nwindows - 1)
+        index += 16 * env->nwindows;
    return index;
}
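
The switch from the "& (16 * NWINDOWS - 1)" masks to "%" and to
cpu_cwp_inc()/cpu_cwp_dec() is what allows a window count that is not a power
of two; the old bitmask only wraps correctly when the operand count is a
power of two. The helpers themselves live in the SPARC target code, not this
file; a sketch of their assumed behaviour:

    /* Assumed semantics of cpu_cwp_inc()/cpu_cwp_dec(): wrap a window
       index that has strayed at most one full ring past either end. */
    static inline int cwp_inc_demo(int nwindows, int cwp)
    {
        if (cwp >= nwindows)
            cwp -= nwindows;   /* e.g. nwindows = 7: cwp 7 -> 0, 8 -> 1 */
        return cwp;
    }

    static inline int cwp_dec_demo(int nwindows, int cwp)
    {
        if (cwp < 0)
            cwp += nwindows;   /* e.g. nwindows = 7: cwp -2 -> 5 */
        return cwp;
    }
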
@@ -642,8 +779,8 @@ static inline void save_window_offset(CPUSPARCState *env, int cwp1)
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#if defined(DEBUG_WIN)
-    printf("win_overflow: sp_ptr=0x%x save_cwp=%d\n",
-           (int)sp_ptr, cwp1);
+    printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
+           sp_ptr, cwp1);
#endif
    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if put_user() fails? */
@@ -656,12 +793,12 @@ static void save_window(CPUSPARCState *env)
{
#ifndef TARGET_SPARC64
    unsigned int new_wim;
-    new_wim = ((env->wim >> 1) | (env->wim << (NWINDOWS - 1))) &
-        ((1LL << NWINDOWS) - 1);
-    save_window_offset(env, (env->cwp - 2) & (NWINDOWS - 1));
+    new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
+        ((1LL << env->nwindows) - 1);
+    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->wim = new_wim;
#else
-    save_window_offset(env, (env->cwp - 2) & (NWINDOWS - 1));
+    save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
    env->cansave++;
    env->canrestore--;
#endif
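
The new_wim expression is a one-bit rotate of the window-invalid mask within
nwindows bits: rotate-right here in save_window(), and the mirror rotate-left
in restore_window() below. A hypothetical helper spelling out the save
direction:

    /* Rotate the invalid-window mask right by one within nwindows bits;
       e.g. nwindows = 8, wim = 0x04 (window 2 invalid) -> 0x02 (window 1). */
    static inline unsigned int wim_ror_demo(unsigned int wim, int nwindows)
    {
        return ((wim >> 1) | (wim << (nwindows - 1))) &
               ((1U << nwindows) - 1);
    }
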
@@ -672,15 +809,15 @@ static void restore_window(CPUSPARCState *env)
    unsigned int new_wim, i, cwp1;
    abi_ulong sp_ptr;
-    new_wim = ((env->wim << 1) | (env->wim >> (NWINDOWS - 1))) &
-        ((1LL << NWINDOWS) - 1);
+    new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
+        ((1LL << env->nwindows) - 1);
    /* restore the invalid window */
-    cwp1 = (env->cwp + 1) & (NWINDOWS - 1);
+    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
#if defined(DEBUG_WIN)
-    printf("win_underflow: sp_ptr=0x%x load_cwp=%d\n",
-           (int)sp_ptr, cwp1);
+    printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
+           sp_ptr, cwp1);
#endif
    for(i = 0; i < 16; i++) {
        /* FIXME - what to do if get_user() fails? */
@@ -690,8 +827,8 @@ static void restore_window(CPUSPARCState *env)
    env->wim = new_wim;
#ifdef TARGET_SPARC64
    env->canrestore++;
-    if (env->cleanwin < NWINDOWS - 1)
-        env->cleanwin++;
+    if (env->cleanwin < env->nwindows - 1)
+        env->cleanwin++;
    env->cansave--;
#endif
}
@@ -703,14 +840,14 @@ static void flush_windows(CPUSPARCState *env)
    offset = 1;
    for(;;) {
        /* if restore would invoke restore_window(), then we can stop */
-        cwp1 = (env->cwp + offset) & (NWINDOWS - 1);
+        cwp1 = cpu_cwp_inc(env, env->cwp + offset);
        if (env->wim & (1 << cwp1))
            break;
        save_window_offset(env, cwp1);
        offset++;
    }
    /* set wim so that restore will reload the registers */
-    cwp1 = (env->cwp + 1) & (NWINDOWS - 1);
+    cwp1 = cpu_cwp_inc(env, env->cwp + 1);
    env->wim = 1 << cwp1;
#if defined(DEBUG_WIN)
    printf("flush_windows: nb=%d\n", offset - 1);
@@ -2044,14 +2181,13 @@ void usage(void)
    _exit(1);
}
-/* XXX: currently only used for async signals (see signal.c) */
-CPUState *global_env;
+THREAD CPUState *thread_env;
+/* Assumes contents are already zeroed. */
void init_task_state(TaskState *ts)
{
    int i;
-    memset(ts, 0, sizeof(TaskState));
    ts->used = 1;
    ts->first_free = ts->sigqueue_table;
    for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
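
The THREAD qualifier on thread_env is defined elsewhere (qemu.h); presumably
it expands to GCC's __thread storage class under USE_NPTL and to nothing
otherwise, so each emulation thread sees its own thread_env while
single-threaded builds keep a plain global. A sketch of the assumed
definition:

    /* Assumed definition (the real one is in qemu.h, not this patch). */
    #ifdef USE_NPTL
    #define THREAD __thread   /* one thread_env per host thread */
    #else
    #define THREAD            /* single-threaded: ordinary global */
    #endif
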
@@ -2203,7 +2339,7 @@ int main(int argc, char **argv)
        fprintf(stderr, "Unable to find CPU definition\n");
        exit(1);
    }
-    global_env = env;
+    thread_env = env;
    if (getenv("QEMU_STRACE")) {
        do_strace = 1;