[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH] [RFC] aio/timers: Drop alarm timers; introduce QEMU
From: |
Alex Bligh |
Subject: |
[Qemu-devel] [PATCH] [RFC] aio/timers: Drop alarm timers; introduce QEMUClock to AioContext; run timers in aio_poll |
Date: |
Fri, 19 Jul 2013 18:26:23 +0100 |
[ This is a patch for RFC purposes only. It is compile-tested on Linux x86_64
only and passes make check (or rather, it did before make check started dying
in the boot-order test - a different bug). I'd like to know whether I'm going
in the right direction ]
We no longer need alarm timers to trigger QEMUTimer as we'll be polling
them in aio_poll.
Remove static declaration from qemu_new_clock and introduce qemu_free_clock.
Maintain a list of QEMUClocks.
Introduce qemu_clock_deadline_ns and qemu_clock_deadline_all_ns, which
calculate how long aio_poll etc. should wait, plus (for the time being) a
conversion to milliseconds.
Make qemu_run_timers return a bool to indicate progress.
Add QEMUClock to AioContext.
Run timers attached to clock in aio_poll
Signed-off-by: Alex Bligh <address@hidden>
---
aio-posix.c | 16 +-
aio-win32.c | 20 +-
async.c | 2 +
include/block/aio.h | 5 +
include/qemu/timer.h | 15 +-
main-loop.c | 9 +-
qemu-timer.c | 599 ++++++++------------------------------------------
vl.c | 5 +-
8 files changed, 150 insertions(+), 521 deletions(-)
diff --git a/aio-posix.c b/aio-posix.c
index b68eccd..6401259 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -173,6 +173,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandler *node;
int ret;
+ int timeout;
bool busy, progress;
progress = false;
@@ -195,6 +196,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
return true;
}
+ /* Run our timers */
+ progress |= qemu_run_timers(ctx->clock);
+
ctx->walking_handlers++;
g_array_set_size(ctx->pollfds, 0);
@@ -232,9 +236,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
/* wait until next event */
+ timeout = qemu_timeout_ns_to_ms(qemu_clock_deadline_all_ns());
ret = g_poll((GPollFD *)ctx->pollfds->data,
ctx->pollfds->len,
- blocking ? -1 : 0);
+ blocking ? timeout : 0);
/* if we have any readable fds, dispatch event */
if (ret > 0) {
@@ -250,6 +255,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
}
+ if (blocking) {
+ /* Run the timers a second time. We do this because otherwise aio_wait
+ * will not note progress - and will stop a drain early - if we have
+ * a timer that was not ready to run entering g_poll but is ready
+ * after g_poll. This will only do anything if a timer has expired.
+ */
+ progress |= qemu_run_timers(ctx->clock);
+ }
+
assert(progress || busy);
return true;
}
diff --git a/aio-win32.c b/aio-win32.c
index 38723bf..68343ba 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -98,6 +98,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
bool busy, progress;
int count;
+ int timeout;
progress = false;
@@ -111,6 +112,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
progress = true;
}
+ /* Run timers */
+ progress |= qemu_run_timers(ctx->clock);
+
/*
* Then dispatch any pending callbacks from the GSource.
*
@@ -174,8 +178,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* wait until next event */
while (count > 0) {
- int timeout = blocking ? INFINITE : 0;
- int ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+ int ret;
+
+ timeout = blocking ?
+ qemu_timeout_ns_to_ms(qemu_clock_deadline_all_ns()) : 0;
+ ret = WaitForMultipleObjects(count, events, FALSE, timeout);
/* if we have any signaled events, dispatch event */
if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
@@ -214,6 +221,15 @@ bool aio_poll(AioContext *ctx, bool blocking)
events[ret - WAIT_OBJECT_0] = events[--count];
}
+ if (blocking) {
+ /* Run the timers a second time. We do this because otherwise aio_wait
+ * will not note progress - and will stop a drain early - if we have
+ * a timer that was not ready to run entering WaitForMultipleObjects
+ * but is ready after it returns. This will only do anything if a timer
+ * has expired.
+ */
+ progress |= qemu_run_timers(ctx->clock);
+ }
+
assert(progress || busy);
return true;
}
diff --git a/async.c b/async.c
index 90fe906..0d41431 100644
--- a/async.c
+++ b/async.c
@@ -177,6 +177,7 @@ aio_ctx_finalize(GSource *source)
aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
g_array_free(ctx->pollfds, TRUE);
+ qemu_free_clock(ctx->clock);
}
static GSourceFuncs aio_source_funcs = {
@@ -215,6 +216,7 @@ AioContext *aio_context_new(void)
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
event_notifier_test_and_clear, NULL);
+ ctx->clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
return ctx;
}
diff --git a/include/block/aio.h b/include/block/aio.h
index 1836793..0835a4d 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -41,6 +41,8 @@ typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);
+typedef struct QEMUClock QEMUClock;
+
typedef struct AioContext {
GSource source;
@@ -69,6 +71,9 @@ typedef struct AioContext {
/* Thread pool for performing work and receiving completion callbacks */
struct ThreadPool *thread_pool;
+
+ /* Clock for calling timers */
+ QEMUClock *clock;
} AioContext;
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 9dd206c..0649064 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -11,6 +11,10 @@
#define SCALE_US 1000
#define SCALE_NS 1
+#define QEMU_CLOCK_REALTIME 0
+#define QEMU_CLOCK_VIRTUAL 1
+#define QEMU_CLOCK_HOST 2
+
typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);
@@ -32,10 +36,15 @@ extern QEMUClock *vm_clock;
the virtual clock. */
extern QEMUClock *host_clock;
+QEMUClock *qemu_new_clock(int type);
+void qemu_free_clock(QEMUClock *clock);
int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);
+int64_t qemu_clock_deadline_ns(QEMUClock *clock);
+int64_t qemu_clock_deadline_all_ns(void);
+int qemu_timeout_ns_to_ms(int64_t ns);
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);
@@ -53,11 +62,9 @@ bool qemu_timer_pending(QEMUTimer *ts);
bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
-void qemu_run_timers(QEMUClock *clock);
-void qemu_run_all_timers(void);
-void configure_alarms(char const *opt);
+bool qemu_run_timers(QEMUClock *clock);
+bool qemu_run_all_timers(void);
void init_clocks(void);
-int init_timer_alarm(void);
int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
diff --git a/main-loop.c b/main-loop.c
index a44fff6..626eda2 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -131,10 +131,6 @@ int qemu_init_main_loop(void)
GSource *src;
init_clocks();
- if (init_timer_alarm() < 0) {
- fprintf(stderr, "could not initialize alarm timer\n");
- exit(1);
- }
ret = qemu_signal_init();
if (ret) {
@@ -449,6 +445,11 @@ int main_loop_wait(int nonblocking)
{
int ret;
uint32_t timeout = UINT32_MAX;
+ int32_t timer_timeout =
qemu_timeout_ns_to_ms(qemu_clock_deadline_all_ns());
+
+ if (timer_timeout >= 0) {
+ timeout = timer_timeout;
+ }
if (nonblocking) {
timeout = 0;
diff --git a/qemu-timer.c b/qemu-timer.c
index b2d95e2..e643d94 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -33,17 +33,9 @@
#include <pthread.h>
#endif
-#ifdef _WIN32
-#include <mmsystem.h>
-#endif
-
/***********************************************************/
/* timers */
-#define QEMU_CLOCK_REALTIME 0
-#define QEMU_CLOCK_VIRTUAL 1
-#define QEMU_CLOCK_HOST 2
-
struct QEMUClock {
QEMUTimer *active_timers;
@@ -52,6 +44,8 @@ struct QEMUClock {
int type;
bool enabled;
+
+ QLIST_ENTRY(QEMUClock) list;
};
struct QEMUTimer {
@@ -63,175 +57,19 @@ struct QEMUTimer {
int scale;
};
-struct qemu_alarm_timer {
- char const *name;
- int (*start)(struct qemu_alarm_timer *t);
- void (*stop)(struct qemu_alarm_timer *t);
- void (*rearm)(struct qemu_alarm_timer *t, int64_t nearest_delta_ns);
-#if defined(__linux__)
- timer_t timer;
- int fd;
-#elif defined(_WIN32)
- HANDLE timer;
-#endif
- bool expired;
- bool pending;
-};
-
-static struct qemu_alarm_timer *alarm_timer;
+static QLIST_HEAD(, QEMUClock) qemu_clocks =
+ QLIST_HEAD_INITIALIZER(qemu_clocks);
static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
{
return timer_head && (timer_head->expire_time <= current_time);
}
-static int64_t qemu_next_alarm_deadline(void)
-{
- int64_t delta = INT64_MAX;
- int64_t rtdelta;
-
- if (!use_icount && vm_clock->enabled && vm_clock->active_timers) {
- delta = vm_clock->active_timers->expire_time -
- qemu_get_clock_ns(vm_clock);
- }
- if (host_clock->enabled && host_clock->active_timers) {
- int64_t hdelta = host_clock->active_timers->expire_time -
- qemu_get_clock_ns(host_clock);
- if (hdelta < delta) {
- delta = hdelta;
- }
- }
- if (rt_clock->enabled && rt_clock->active_timers) {
- rtdelta = (rt_clock->active_timers->expire_time -
- qemu_get_clock_ns(rt_clock));
- if (rtdelta < delta) {
- delta = rtdelta;
- }
- }
-
- return delta;
-}
-
-static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
-{
- int64_t nearest_delta_ns = qemu_next_alarm_deadline();
- if (nearest_delta_ns < INT64_MAX) {
- t->rearm(t, nearest_delta_ns);
- }
-}
-
-/* TODO: MIN_TIMER_REARM_NS should be optimized */
-#define MIN_TIMER_REARM_NS 250000
-
-#ifdef _WIN32
-
-static int mm_start_timer(struct qemu_alarm_timer *t);
-static void mm_stop_timer(struct qemu_alarm_timer *t);
-static void mm_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-static int win32_start_timer(struct qemu_alarm_timer *t);
-static void win32_stop_timer(struct qemu_alarm_timer *t);
-static void win32_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-#else
-
-static int unix_start_timer(struct qemu_alarm_timer *t);
-static void unix_stop_timer(struct qemu_alarm_timer *t);
-static void unix_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-#ifdef __linux__
-
-static int dynticks_start_timer(struct qemu_alarm_timer *t);
-static void dynticks_stop_timer(struct qemu_alarm_timer *t);
-static void dynticks_rearm_timer(struct qemu_alarm_timer *t, int64_t delta);
-
-#endif /* __linux__ */
-
-#endif /* _WIN32 */
-
-static struct qemu_alarm_timer alarm_timers[] = {
-#ifndef _WIN32
-#ifdef __linux__
- {"dynticks", dynticks_start_timer,
- dynticks_stop_timer, dynticks_rearm_timer},
-#endif
- {"unix", unix_start_timer, unix_stop_timer, unix_rearm_timer},
-#else
- {"mmtimer", mm_start_timer, mm_stop_timer, mm_rearm_timer},
- {"dynticks", win32_start_timer, win32_stop_timer, win32_rearm_timer},
-#endif
- {NULL, }
-};
-
-static void show_available_alarms(void)
-{
- int i;
-
- printf("Available alarm timers, in order of precedence:\n");
- for (i = 0; alarm_timers[i].name; i++)
- printf("%s\n", alarm_timers[i].name);
-}
-
-void configure_alarms(char const *opt)
-{
- int i;
- int cur = 0;
- int count = ARRAY_SIZE(alarm_timers) - 1;
- char *arg;
- char *name;
- struct qemu_alarm_timer tmp;
-
- if (is_help_option(opt)) {
- show_available_alarms();
- exit(0);
- }
-
- arg = g_strdup(opt);
-
- /* Reorder the array */
- name = strtok(arg, ",");
- while (name) {
- for (i = 0; i < count && alarm_timers[i].name; i++) {
- if (!strcmp(alarm_timers[i].name, name))
- break;
- }
-
- if (i == count) {
- fprintf(stderr, "Unknown clock %s\n", name);
- goto next;
- }
-
- if (i < cur)
- /* Ignore */
- goto next;
-
- /* Swap */
- tmp = alarm_timers[i];
- alarm_timers[i] = alarm_timers[cur];
- alarm_timers[cur] = tmp;
-
- cur++;
-next:
- name = strtok(NULL, ",");
- }
-
- g_free(arg);
-
- if (cur) {
- /* Disable remaining timers */
- for (i = cur; i < count; i++)
- alarm_timers[i].name = NULL;
- } else {
- show_available_alarms();
- exit(1);
- }
-}
-
QEMUClock *rt_clock;
QEMUClock *vm_clock;
QEMUClock *host_clock;
-static QEMUClock *qemu_new_clock(int type)
+QEMUClock *qemu_new_clock(int type)
{
QEMUClock *clock;
@@ -240,16 +78,19 @@ static QEMUClock *qemu_new_clock(int type)
clock->enabled = true;
clock->last = INT64_MIN;
notifier_list_init(&clock->reset_notifiers);
+ QLIST_INSERT_HEAD(&qemu_clocks, clock, list);
return clock;
}
+void qemu_free_clock(QEMUClock *clock)
+{
+ QLIST_REMOVE(clock, list);
+ g_free(clock);
+}
+
void qemu_clock_enable(QEMUClock *clock, bool enabled)
{
- bool old = clock->enabled;
clock->enabled = enabled;
- if (enabled && !old) {
- qemu_rearm_alarm_timer(alarm_timer);
- }
}
int64_t qemu_clock_has_timers(QEMUClock *clock)
@@ -268,7 +109,7 @@ int64_t qemu_clock_deadline(QEMUClock *clock)
/* To avoid problems with overflow limit this to 2^32. */
int64_t delta = INT32_MAX;
- if (clock->active_timers) {
+ if (clock->enabled && clock->active_timers) {
delta = clock->active_timers->expire_time - qemu_get_clock_ns(clock);
}
if (delta < 0) {
@@ -277,6 +118,71 @@ int64_t qemu_clock_deadline(QEMUClock *clock)
return delta;
}
+/*
+ * As above, but return -1 for no deadline, and do not cap to 2^32
+ * as we know the result is always positive.
+ */
+
+int64_t qemu_clock_deadline_ns(QEMUClock *clock)
+{
+ int64_t delta;
+
+ if (!clock->enabled || !clock->active_timers) {
+ return -1;
+ }
+
+ delta = clock->active_timers->expire_time - qemu_get_clock_ns(clock);
+
+ if (delta <= 0) {
+ return 0;
+ }
+
+ return delta;
+}
+
+/* Return the minimum deadline across all clocks or -1 for no
+ * deadline
+ */
+int64_t qemu_clock_deadline_all_ns(void)
+{
+ QEMUClock *clock;
+ int64_t ret = -1;
+ QLIST_FOREACH(clock, &qemu_clocks, list) {
+ int64_t ns = qemu_clock_deadline_ns(clock);
+ if ((ns >= 0) && ((ret == -1) || (ns < ret))) {
+ ret = ns;
+ }
+ }
+ return ret;
+}
+
+/* Transition function to convert a nanosecond timeout to ms
+ * This will be deleted when we switch to ppoll
+ */
+int qemu_timeout_ns_to_ms(int64_t ns)
+{
+ int64_t ms;
+ if (ns < 0) {
+ return -1;
+ }
+
+ if (!ns) {
+ return 0;
+ }
+
+ /* Always round up, because it's better to wait too long than to wait too
+ * little and effectively busy-wait
+ */
+ ms = (ns + SCALE_MS - 1) / SCALE_MS;
+
+ /* To avoid overflow problems, limit this to 2^31, i.e. approx 25 days */
+ if (ms > (int64_t) INT32_MAX) {
+ ms = INT32_MAX;
+ }
+
+ return (int) ms;
+}
+
QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
QEMUTimerCB *cb, void *opaque)
{
@@ -340,13 +246,10 @@ void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
/* Rearm if necessary */
if (pt == &ts->clock->active_timers) {
- if (!alarm_timer->pending) {
- qemu_rearm_alarm_timer(alarm_timer);
- }
/* Interrupt execution to force deadline recalculation. */
qemu_clock_warp(ts->clock);
if (use_icount) {
- qemu_notify_event();
+ qemu_notify_event(); /* FIXME: do we need this now? */
}
}
}
@@ -372,13 +275,14 @@ bool qemu_timer_expired(QEMUTimer *timer_head, int64_t
current_time)
return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
}
-void qemu_run_timers(QEMUClock *clock)
+bool qemu_run_timers(QEMUClock *clock)
{
QEMUTimer *ts;
int64_t current_time;
+ bool progress = false;
if (!clock->enabled)
- return;
+ return progress;
current_time = qemu_get_clock_ns(clock);
for(;;) {
@@ -392,7 +296,9 @@ void qemu_run_timers(QEMUClock *clock)
/* run the callback (the timer list can be modified) */
ts->cb(ts->opaque);
+ progress = true;
}
+ return progress;
}
int64_t qemu_get_clock_ns(QEMUClock *clock)
@@ -444,337 +350,12 @@ uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts)
return qemu_timer_pending(ts) ? ts->expire_time : -1;
}
-void qemu_run_all_timers(void)
+bool qemu_run_all_timers(void)
{
- alarm_timer->pending = false;
-
/* vm time timers */
- qemu_run_timers(vm_clock);
- qemu_run_timers(rt_clock);
- qemu_run_timers(host_clock);
-
- /* rearm timer, if not periodic */
- if (alarm_timer->expired) {
- alarm_timer->expired = false;
- qemu_rearm_alarm_timer(alarm_timer);
- }
-}
-
-#ifdef _WIN32
-static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
-#else
-static void host_alarm_handler(int host_signum)
-#endif
-{
- struct qemu_alarm_timer *t = alarm_timer;
- if (!t)
- return;
-
- t->expired = true;
- t->pending = true;
- qemu_notify_event();
-}
-
-#if defined(__linux__)
-
-#include "qemu/compatfd.h"
-
-static int dynticks_start_timer(struct qemu_alarm_timer *t)
-{
- struct sigevent ev;
- timer_t host_timer;
- struct sigaction act;
-
- sigfillset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = host_alarm_handler;
-
- sigaction(SIGALRM, &act, NULL);
-
- /*
- * Initialize ev struct to 0 to avoid valgrind complaining
- * about uninitialized data in timer_create call
- */
- memset(&ev, 0, sizeof(ev));
- ev.sigev_value.sival_int = 0;
- ev.sigev_notify = SIGEV_SIGNAL;
-#ifdef CONFIG_SIGEV_THREAD_ID
- if (qemu_signalfd_available()) {
- ev.sigev_notify = SIGEV_THREAD_ID;
- ev._sigev_un._tid = qemu_get_thread_id();
- }
-#endif /* CONFIG_SIGEV_THREAD_ID */
- ev.sigev_signo = SIGALRM;
-
- if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
- perror("timer_create");
- return -1;
- }
-
- t->timer = host_timer;
-
- return 0;
-}
-
-static void dynticks_stop_timer(struct qemu_alarm_timer *t)
-{
- timer_t host_timer = t->timer;
-
- timer_delete(host_timer);
-}
-
-static void dynticks_rearm_timer(struct qemu_alarm_timer *t,
- int64_t nearest_delta_ns)
-{
- timer_t host_timer = t->timer;
- struct itimerspec timeout;
- int64_t current_ns;
-
- if (nearest_delta_ns < MIN_TIMER_REARM_NS)
- nearest_delta_ns = MIN_TIMER_REARM_NS;
-
- /* check whether a timer is already running */
- if (timer_gettime(host_timer, &timeout)) {
- perror("gettime");
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
- current_ns = timeout.it_value.tv_sec * 1000000000LL +
timeout.it_value.tv_nsec;
- if (current_ns && current_ns <= nearest_delta_ns)
- return;
-
- timeout.it_interval.tv_sec = 0;
- timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
- timeout.it_value.tv_sec = nearest_delta_ns / 1000000000;
- timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
- if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
- perror("settime");
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
-}
-
-#endif /* defined(__linux__) */
-
-#if !defined(_WIN32)
-
-static int unix_start_timer(struct qemu_alarm_timer *t)
-{
- struct sigaction act;
-
- /* timer signal */
- sigfillset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = host_alarm_handler;
-
- sigaction(SIGALRM, &act, NULL);
- return 0;
-}
-
-static void unix_rearm_timer(struct qemu_alarm_timer *t,
- int64_t nearest_delta_ns)
-{
- struct itimerval itv;
- int err;
-
- if (nearest_delta_ns < MIN_TIMER_REARM_NS)
- nearest_delta_ns = MIN_TIMER_REARM_NS;
-
- itv.it_interval.tv_sec = 0;
- itv.it_interval.tv_usec = 0; /* 0 for one-shot timer */
- itv.it_value.tv_sec = nearest_delta_ns / 1000000000;
- itv.it_value.tv_usec = (nearest_delta_ns % 1000000000) / 1000;
- err = setitimer(ITIMER_REAL, &itv, NULL);
- if (err) {
- perror("setitimer");
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
-}
-
-static void unix_stop_timer(struct qemu_alarm_timer *t)
-{
- struct itimerval itv;
-
- memset(&itv, 0, sizeof(itv));
- setitimer(ITIMER_REAL, &itv, NULL);
-}
-
-#endif /* !defined(_WIN32) */
-
-
-#ifdef _WIN32
-
-static MMRESULT mm_timer;
-static TIMECAPS mm_tc;
-
-static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
- DWORD_PTR dwUser, DWORD_PTR dw1,
- DWORD_PTR dw2)
-{
- struct qemu_alarm_timer *t = alarm_timer;
- if (!t) {
- return;
- }
- t->expired = true;
- t->pending = true;
- qemu_notify_event();
-}
-
-static int mm_start_timer(struct qemu_alarm_timer *t)
-{
- timeGetDevCaps(&mm_tc, sizeof(mm_tc));
- return 0;
-}
-
-static void mm_stop_timer(struct qemu_alarm_timer *t)
-{
- if (mm_timer) {
- timeKillEvent(mm_timer);
- }
+ bool progress = false;
+ progress |= qemu_run_timers(vm_clock);
+ progress |= qemu_run_timers(rt_clock);
+ progress |= qemu_run_timers(host_clock);
+ return progress;
}
-
-static void mm_rearm_timer(struct qemu_alarm_timer *t, int64_t delta)
-{
- int64_t nearest_delta_ms = delta / 1000000;
- if (nearest_delta_ms < mm_tc.wPeriodMin) {
- nearest_delta_ms = mm_tc.wPeriodMin;
- } else if (nearest_delta_ms > mm_tc.wPeriodMax) {
- nearest_delta_ms = mm_tc.wPeriodMax;
- }
-
- if (mm_timer) {
- timeKillEvent(mm_timer);
- }
- mm_timer = timeSetEvent((UINT)nearest_delta_ms,
- mm_tc.wPeriodMin,
- mm_alarm_handler,
- (DWORD_PTR)t,
- TIME_ONESHOT | TIME_CALLBACK_FUNCTION);
-
- if (!mm_timer) {
- fprintf(stderr, "Failed to re-arm win32 alarm timer\n");
- timeEndPeriod(mm_tc.wPeriodMin);
- exit(1);
- }
-}
-
-static int win32_start_timer(struct qemu_alarm_timer *t)
-{
- HANDLE hTimer;
- BOOLEAN success;
-
- /* If you call ChangeTimerQueueTimer on a one-shot timer (its period
- is zero) that has already expired, the timer is not updated. Since
- creating a new timer is relatively expensive, set a bogus one-hour
- interval in the dynticks case. */
- success = CreateTimerQueueTimer(&hTimer,
- NULL,
- host_alarm_handler,
- t,
- 1,
- 3600000,
- WT_EXECUTEINTIMERTHREAD);
-
- if (!success) {
- fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
- GetLastError());
- return -1;
- }
-
- t->timer = hTimer;
- return 0;
-}
-
-static void win32_stop_timer(struct qemu_alarm_timer *t)
-{
- HANDLE hTimer = t->timer;
-
- if (hTimer) {
- DeleteTimerQueueTimer(NULL, hTimer, NULL);
- }
-}
-
-static void win32_rearm_timer(struct qemu_alarm_timer *t,
- int64_t nearest_delta_ns)
-{
- HANDLE hTimer = t->timer;
- int64_t nearest_delta_ms;
- BOOLEAN success;
-
- nearest_delta_ms = nearest_delta_ns / 1000000;
- if (nearest_delta_ms < 1) {
- nearest_delta_ms = 1;
- }
- /* ULONG_MAX can be 32 bit */
- if (nearest_delta_ms > ULONG_MAX) {
- nearest_delta_ms = ULONG_MAX;
- }
- success = ChangeTimerQueueTimer(NULL,
- hTimer,
- (unsigned long) nearest_delta_ms,
- 3600000);
-
- if (!success) {
- fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
- GetLastError());
- exit(-1);
- }
-
-}
-
-#endif /* _WIN32 */
-
-static void quit_timers(void)
-{
- struct qemu_alarm_timer *t = alarm_timer;
- alarm_timer = NULL;
- t->stop(t);
-}
-
-#ifdef CONFIG_POSIX
-static void reinit_timers(void)
-{
- struct qemu_alarm_timer *t = alarm_timer;
- t->stop(t);
- if (t->start(t)) {
- fprintf(stderr, "Internal timer error: aborting\n");
- exit(1);
- }
- qemu_rearm_alarm_timer(t);
-}
-#endif /* CONFIG_POSIX */
-
-int init_timer_alarm(void)
-{
- struct qemu_alarm_timer *t = NULL;
- int i, err = -1;
-
- if (alarm_timer) {
- return 0;
- }
-
- for (i = 0; alarm_timers[i].name; i++) {
- t = &alarm_timers[i];
-
- err = t->start(t);
- if (!err)
- break;
- }
-
- if (err) {
- err = -ENOENT;
- goto fail;
- }
-
- atexit(quit_timers);
-#ifdef CONFIG_POSIX
- pthread_atfork(NULL, NULL, reinit_timers);
-#endif
- alarm_timer = t;
- return 0;
-
-fail:
- return err;
-}
-
diff --git a/vl.c b/vl.c
index 25b8f2f..612c609 100644
--- a/vl.c
+++ b/vl.c
@@ -3714,7 +3714,10 @@ int main(int argc, char **argv, char **envp)
old_param = 1;
break;
case QEMU_OPTION_clock:
- configure_alarms(optarg);
+ /* Once upon a time we did:
+ * configure_alarms(optarg);
+ * here. This is stubbed out for compatibility.
+ */
break;
case QEMU_OPTION_startdate:
configure_rtc_date_offset(optarg, 1);
--
1.7.9.5
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, (continued)
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Alex Bligh, 2013/07/16
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Stefan Hajnoczi, 2013/07/16
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Alex Bligh, 2013/07/17
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Paolo Bonzini, 2013/07/17
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Alex Bligh, 2013/07/17
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Alex Bligh, 2013/07/18
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Stefan Hajnoczi, 2013/07/18
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Paolo Bonzini, 2013/07/19
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Alex Bligh, 2013/07/19
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Paolo Bonzini, 2013/07/19
- [Qemu-devel] [PATCH] [RFC] aio/timers: Drop alarm timers; introduce QEMUClock to AioContext; run timers in aio_poll,
Alex Bligh <=
- Re: [Qemu-devel] [PATCH] [RFC] aio/timers: Drop alarm timers; introduce QEMUClock to AioContext; run timers in aio_poll, Stefan Hajnoczi, 2013/07/25
- Re: [Qemu-devel] [PATCH] [RFC] aio/timers: Drop alarm timers; introduce QEMUClock to AioContext; run timers in aio_poll, Stefan Hajnoczi, 2013/07/25
- Re: [Qemu-devel] [PATCH] [RFC] aio/async: Add timed bottom-halves, Kevin Wolf, 2013/07/17