#include <SDL/SDL.h>
#include "debug.hh"
+#include "runloop.hh"
#include "timer.hh"
// NOTE(review): this region still carries unified-diff +/- markers and the
// hunks have no @@ headers, so context lines may be missing.  The brace at
// L30 does not balance unless a hidden hunk (presumably an
// `if (mode == absolute) ...` branch) sits between the `}` and the `else`
// below — confirm against the post-patch file before stripping markers here.
//
// Intent (from the visible + lines): (re)arm the timer with a callback,
// a time in seconds, a scheduling mode, and an explicit timer_source,
// replacing the old runloop* parameter.
void timer::init(const function& function,
scalar seconds,
- mode mode,
- runloop* runloop)
+ enum mode mode,
+ timer_source& source)
{
- invalidate();
- ASSERT(runloop && "can't schedule timer without a runloop");
+ source_ = &source;
+ invalidate();
if ((mode_ = mode) != invalid)
{
function_ = function;
}
else
{
// NOTE(review): expiry switched from `seconds - ticks()` to an absolute
// deadline `seconds + source_->ticks()` — presumably the old form was a bug.
- absolute_ = seconds - ticks();
+ absolute_ = seconds + source_->ticks();
interval_ = seconds;
}
-
- runloop->add_timer(this);
- runloop_ = runloop;
}
}
+timer::~timer()
+{
+ detach_from_runloop();
+}
void timer::invalidate()
{
- if (mode_ != invalid)
- {
- mode_ = invalid;
- absolute_ = SCALAR(0.0);
+ mode_ = invalid;
+ absolute_ = SCALAR(0.0);
+}
+
- runloop_->remove_timer(this);
+void timer::added_to_runloop(runloop& runloop)
+{
+ detach_from_runloop();
+ runloop_ = &runloop;
+}
+
+void timer::detach_from_runloop()
+{
+ if (runloop_)
+ {
+ runloop_->remove_timer(*this);
runloop_ = 0;
}
}
// NOTE(review): orphaned diff hunk — the enclosing signature was lost at a
// hunk boundary; from the body this is presumably timer::fire(scalar t):
// invoke the callback, then, for repeating timers, schedule the next
// expiry (advancing by interval_ when we fired close to the deadline,
// otherwise restarting the interval from now).  Confirm against the
// post-patch file before stripping the +/- markers.
{
if (function_) function_(*this, t);
- if (is_repeating())
+ if (mode_ == repeat)
{
if (is_equal(absolute_, t, 1.0)) absolute_ += interval_;
else absolute_ = interval_ + t;
}
-#if ENABLE_CLOCK_GETTIME
+scalar timer::ticks()
+{
+ return default_source().ticks();
+}
+
+
-// Since the monotonic clock will provide us with the time since the
-// computer started, the number of seconds since that time could easily
-// become so large that it cannot be accurately stored in a float (even
-// with as little two days uptime), therefore we need to start from a more
-// recent reference (when the program starts). Of course this isn't much
-// of an issue if scalar is a double-precision number.
+#if ENABLE_CLOCK_GETTIME
-static time_t set_reference()
+class real_time : public timer_source
{
- struct timespec ts;
+public:
- if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
+ real_time() :
+ scale_(SCALAR(1.0))
{
- return 0;
+ reset();
}
- return ts.tv_sec;
-}
-static const time_t reference_ = set_reference();
+ scalar ticks() const
+ {
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT(result == 0 && "monotonic clock not available");
+
+ return reference_ +
+ (scalar(ts.tv_sec - start_.tv_sec) +
+ scalar(ts.tv_nsec - start_.tv_nsec) *
+ SCALAR(0.000000001)) * scale_;
+ }
+ void reset()
+ {
+ reference_ = SCALAR(0.0);
+ clock_gettime(CLOCK_MONOTONIC, &start_);
+ }
-scalar timer::ticks()
-{
- struct timespec ts;
+ void scale(scalar factor)
+ {
+ reference_ = ticks();
+ clock_gettime(CLOCK_MONOTONIC, &start_);
+ scale_ = factor;
+ }
- int result = clock_gettime(CLOCK_MONOTONIC, &ts);
- ASSERT(result == 0 && "cannot access clock");
- return scalar(ts.tv_sec - reference_) +
- scalar(ts.tv_nsec) * SCALAR(0.000000001);
-}
+private:
-void timer::sleep(scalar seconds, mode mode)
+ scalar reference_;
+ struct timespec start_;
+ scalar scale_;
+};
+
+
// NOTE(review): truncated diff hunk — the body of this sleep() continues
// past the #else below and is not fully visible here; markers left intact.
// Visible intent: convert an absolute deadline to a relative delay and
// bail out early when the delay has already passed.
+void timer::sleep(scalar seconds, enum mode mode)
{
if (mode == absolute) seconds -= ticks();
if (seconds < SCALAR(0.0)) return;
#else // ! ENABLE_CLOCK_GETTIME
-// If we don't have posix timers, we'll have to use a different timing
-// method. SDL only promises centisecond accuracy, but that's better than
-// a kick in the pants. It could end up being just as good anyway.
-
-scalar timer::ticks()
+class real_time : public timer_source
{
- return scalar(SDL_GetTicks()) * SCALAR(0.001);
-}
+public:
+
+ real_time() :
+ scale_(SCALAR(1.0))
+ {
+ reset();
+ }
+
+
+ scalar ticks() const
+ {
+ return reference_ + scalar(SDL_GetTicks() - start_) * scale_;
+ }
+
+ void reset()
+ {
+ reference_ = SCALAR(0.0);
+ start_ = SDL_GetTicks();
+ }
+
+ void scale(scalar factor)
+ {
+ reference_ = ticks();
+ start_ = SDL_GetTicks();
+ scale_ = factor * SCALAR(0.001);
+ }
+
+
+private:
+
+ scalar reference_;
+ Uint32 start_;
+ scalar scale_;
+};
+
// NOTE(review): truncated diff hunk — only the opening of the SDL-based
// sleep() is visible before the #endif; markers and bytes left intact.
// Also note the signature says `mode mode` while the clock_gettime branch
// uses `enum mode mode` — confirm they match in the post-patch file.
void timer::sleep(scalar seconds, mode mode)
{
#endif // ENABLE_CLOCK_GETTIME
+timer_source& timer::default_source()
+{
+ static real_time t;
+ return t;
+}
+
+
} // namespace moof