From f39ab3906b9102004bfaa675a96b89d3ce1fdbd0 Mon Sep 17 00:00:00 2001 From: Tim Paine <3105306+timkpaine@users.noreply.github.com> Date: Thu, 29 Jan 2026 19:05:52 -0500 Subject: [PATCH] csp event loop, async bridges Signed-off-by: Tim Paine <3105306+timkpaine@users.noreply.github.com> --- CMakeLists.txt | 2 +- conda/dev-environment-unix.yml | 2 + conda/dev-environment-win.yml | 2 + cpp/csp/core/QueueWaiter.h | 206 +++ cpp/csp/engine/RootEngine.cpp | 201 ++- cpp/csp/engine/RootEngine.h | 37 +- cpp/csp/python/CMakeLists.txt | 2 + cpp/csp/python/PyEngine.cpp | 118 +- cpp/csp/python/PyEventLoop.cpp | 412 ++++++ cpp/csp/python/PyEventLoop.h | 126 ++ csp/__init__.py | 13 + csp/event_loop/__init__.py | 17 + csp/event_loop/bridge.py | 465 +++++++ csp/event_loop/loop.py | 1215 +++++++++++++++++ csp/impl/async_adapter.py | 1077 +++++++++++++++ csp/impl/types/common_definitions.py | 3 +- csp/impl/types/instantiation_type_resolver.py | 2 +- csp/impl/wiring/node.py | 2 +- csp/impl/wiring/node_parser.py | 208 ++- csp/impl/wiring/runtime.py | 172 ++- csp/impl/wiring/signature.py | 6 +- csp/tests/test_async_adapter.py | 645 +++++++++ csp/tests/test_asyncio_mode.py | 436 ++++++ csp/tests/test_event_loop.py | 881 ++++++++++++ csp/tests/test_event_loop_bridge.py | 574 ++++++++ csp/tests/test_examples.py | 9 +- csp/tests/test_fd_wakeup.py | 321 +++++ csp/tests/test_parsing.py | 38 +- docs/wiki/_Sidebar.md | 3 + docs/wiki/dev-guides/Architecture.md | 546 ++++++++ docs/wiki/how-tos/Async.md | 594 ++++++++ docs/wiki/how-tos/Asyncio-Integration.md | 803 +++++++++++ docs/wiki/how-tos/Event-Loop-Integration.md | 796 +++++++++++ .../06_advanced/e3_asyncio_integration.py | 354 +++++ .../06_advanced/e4_csp_asyncio_integration.py | 611 +++++++++ examples/98_just_for_fun/e2_csp_fastapi.py | 301 ++++ pyproject.toml | 2 + 37 files changed, 11088 insertions(+), 114 deletions(-) create mode 100644 cpp/csp/python/PyEventLoop.cpp create mode 100644 cpp/csp/python/PyEventLoop.h create mode 100644 
csp/event_loop/__init__.py create mode 100644 csp/event_loop/bridge.py create mode 100644 csp/event_loop/loop.py create mode 100644 csp/impl/async_adapter.py create mode 100644 csp/tests/test_async_adapter.py create mode 100644 csp/tests/test_asyncio_mode.py create mode 100644 csp/tests/test_event_loop.py create mode 100644 csp/tests/test_event_loop_bridge.py create mode 100644 csp/tests/test_fd_wakeup.py create mode 100644 docs/wiki/dev-guides/Architecture.md create mode 100644 docs/wiki/how-tos/Async.md create mode 100644 docs/wiki/how-tos/Asyncio-Integration.md create mode 100644 docs/wiki/how-tos/Event-Loop-Integration.md create mode 100644 examples/06_advanced/e3_asyncio_integration.py create mode 100644 examples/06_advanced/e4_csp_asyncio_integration.py create mode 100644 examples/98_just_for_fun/e2_csp_fastapi.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 68b35e6de..fb13ee9e5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -169,7 +169,7 @@ if(WIN32) foreach(warning 4244 4251 4267 4275 4290 4786 4305 4996) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd${warning}") endforeach(warning) - add_compile_definitions(WIN32 _WIN32) + add_compile_definitions(WIN32 _WIN32 WIN32_LEAN_AND_MEAN NOMINMAX) else() if(CSP_BUILD_NO_CXX_ABI) diff --git a/conda/dev-environment-unix.yml b/conda/dev-environment-unix.yml index e892d7e13..10b884684 100644 --- a/conda/dev-environment-unix.yml +++ b/conda/dev-environment-unix.yml @@ -14,6 +14,7 @@ dependencies: - deprecated - docutils<0.22.1 - exprtk + - fastapi - flex - graphviz - gtest @@ -57,6 +58,7 @@ dependencies: - twine - typing-extensions - unzip + - uvicorn - wheel - zip - zlib diff --git a/conda/dev-environment-win.yml b/conda/dev-environment-win.yml index 65ca65cb4..6f2f1634b 100644 --- a/conda/dev-environment-win.yml +++ b/conda/dev-environment-win.yml @@ -14,6 +14,7 @@ dependencies: - deprecated - docutils<0.22.1 - exprtk + - fastapi # - flex # not available on windows - graphviz - gtest @@ -57,6 +58,7 @@ 
dependencies: - twine - typing-extensions # - unzip # not available on windows + - uvicorn - wheel # - zip # not available on windows - zlib diff --git a/cpp/csp/core/QueueWaiter.h b/cpp/csp/core/QueueWaiter.h index 4c496f7ac..d586d63e2 100644 --- a/cpp/csp/core/QueueWaiter.h +++ b/cpp/csp/core/QueueWaiter.h @@ -1,11 +1,26 @@ #ifndef _IN_CSP_CORE_QUEUEBLOCKINGWAIT_H #define _IN_CSP_CORE_QUEUEBLOCKINGWAIT_H +// Windows: Include winsock2.h for FdWaiter socket pair implementation +// WIN32_LEAN_AND_MEAN is defined project-wide to prevent winsock.h/winsock2.h conflicts +#ifdef _WIN32 +#include +#pragma comment(lib, "ws2_32.lib") +#endif + #include #include #include #include +#ifdef __linux__ +#include +#include +#elif defined(__APPLE__) +#include +#include +#endif + namespace csp { @@ -43,6 +58,197 @@ class QueueWaiter bool m_eventsPending; }; +// FdWaiter provides file descriptor based signaling for integration with +// external event loops like asyncio. The read fd can be registered with +// select/poll/epoll and will become readable when notify() is called. 
+class FdWaiter +{ +public: + FdWaiter() + { +#ifdef __linux__ + // Linux: use eventfd (single fd, most efficient) + m_eventfd = eventfd( 0, EFD_NONBLOCK | EFD_CLOEXEC ); + m_readFd = m_eventfd; + m_writeFd = m_eventfd; +#elif defined(__APPLE__) + // macOS: use pipe + int fds[2]; + if( pipe( fds ) == 0 ) + { + m_readFd = fds[0]; + m_writeFd = fds[1]; + // Set non-blocking + fcntl( m_readFd, F_SETFL, O_NONBLOCK ); + fcntl( m_writeFd, F_SETFL, O_NONBLOCK ); + } + else + { + m_readFd = -1; + m_writeFd = -1; + } +#elif defined(_WIN32) + // Windows: use socket pair (localhost loopback) + m_readFd = INVALID_SOCKET; + m_writeFd = INVALID_SOCKET; + createSocketPair(); +#endif + } + + ~FdWaiter() + { +#ifdef __linux__ + if( m_eventfd >= 0 ) + close( m_eventfd ); +#elif defined(__APPLE__) + if( m_readFd >= 0 ) + close( m_readFd ); + if( m_writeFd >= 0 ) + close( m_writeFd ); +#elif defined(_WIN32) + if( m_readFd != INVALID_SOCKET ) + closesocket( m_readFd ); + if( m_writeFd != INVALID_SOCKET ) + closesocket( m_writeFd ); +#endif + } + + // Get the file descriptor for select/poll registration + // Returns -1 (or INVALID_SOCKET on Windows) if not available +#ifdef _WIN32 + SOCKET readFd() const { return m_readFd; } +#else + int readFd() const { return m_readFd; } +#endif + + // Signal the fd (makes it readable) + void notify() + { + std::lock_guard guard( m_lock ); + if( m_notified ) + return; // Already notified, avoid filling buffer + + m_notified = true; + +#ifdef __linux__ + uint64_t val = 1; + [[maybe_unused]] auto rv = write( m_eventfd, &val, sizeof( val ) ); +#elif defined(__APPLE__) + char c = 1; + [[maybe_unused]] auto rv = write( m_writeFd, &c, 1 ); +#elif defined(_WIN32) + char c = 1; + send( m_writeFd, &c, 1, 0 ); +#endif + } + + // Clear the notification (call after processing) + void clear() + { + std::lock_guard guard( m_lock ); + m_notified = false; + +#ifdef __linux__ + uint64_t val; + [[maybe_unused]] auto rv = read( m_eventfd, &val, sizeof( val ) ); +#elif 
defined(__APPLE__) + char buf[64]; + while( read( m_readFd, buf, sizeof( buf ) ) > 0 ) {} +#elif defined(_WIN32) + char buf[64]; + while( recv( m_readFd, buf, sizeof( buf ), 0 ) > 0 ) {} +#endif + } + + bool isValid() const + { +#ifdef _WIN32 + return m_readFd != INVALID_SOCKET; +#else + return m_readFd >= 0; +#endif + } + +private: +#ifdef _WIN32 + void createSocketPair() + { + // Create a listening socket on localhost + SOCKET listener = socket( AF_INET, SOCK_STREAM, IPPROTO_TCP ); + if( listener == INVALID_SOCKET ) + return; + + struct sockaddr_in addr; + memset( &addr, 0, sizeof( addr ) ); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl( INADDR_LOOPBACK ); + addr.sin_port = 0; // Let OS pick a port + + if( bind( listener, (struct sockaddr*)&addr, sizeof( addr ) ) == SOCKET_ERROR ) + { + closesocket( listener ); + return; + } + + int addrlen = sizeof( addr ); + if( getsockname( listener, (struct sockaddr*)&addr, &addrlen ) == SOCKET_ERROR ) + { + closesocket( listener ); + return; + } + + if( listen( listener, 1 ) == SOCKET_ERROR ) + { + closesocket( listener ); + return; + } + + // Create client socket and connect + m_writeFd = socket( AF_INET, SOCK_STREAM, IPPROTO_TCP ); + if( m_writeFd == INVALID_SOCKET ) + { + closesocket( listener ); + return; + } + + if( connect( m_writeFd, (struct sockaddr*)&addr, sizeof( addr ) ) == SOCKET_ERROR ) + { + closesocket( m_writeFd ); + closesocket( listener ); + m_writeFd = INVALID_SOCKET; + return; + } + + // Accept the connection + m_readFd = accept( listener, NULL, NULL ); + closesocket( listener ); // Done with listener + + if( m_readFd == INVALID_SOCKET ) + { + closesocket( m_writeFd ); + m_writeFd = INVALID_SOCKET; + return; + } + + // Set non-blocking + u_long mode = 1; + ioctlsocket( m_readFd, FIONBIO, &mode ); + ioctlsocket( m_writeFd, FIONBIO, &mode ); + } + + SOCKET m_readFd; + SOCKET m_writeFd; +#else + int m_readFd; + int m_writeFd; +#ifdef __linux__ + int m_eventfd; +#endif +#endif + std::mutex 
m_lock; + bool m_notified = false; +}; + } #endif diff --git a/cpp/csp/engine/RootEngine.cpp b/cpp/csp/engine/RootEngine.cpp index a0325f959..3b808660a 100644 --- a/cpp/csp/engine/RootEngine.cpp +++ b/cpp/csp/engine/RootEngine.cpp @@ -75,6 +75,7 @@ RootEngine::RootEngine( const Dictionary & settings ) : Engine( m_cycleStepTable m_cycleCount( 0 ), m_settings( settings ), m_inRealtime( false ), + m_haveEvents( false ), m_initSignalCount( g_SIGNAL_COUNT ), m_pushEventQueue( m_settings.queueWaitTime > TimeDelta::ZERO() ) { @@ -147,120 +148,155 @@ void RootEngine::processEndCycle() m_endCycleListeners.clear(); } -void RootEngine::runSim( DateTime end ) +bool RootEngine::processOneCycle( TimeDelta maxWait ) { - m_inRealtime = false; - while( m_scheduler.hasEvents() && m_state == State::RUNNING && !interrupted() ) - { - m_now = m_scheduler.nextTime(); - if( m_now > end ) - break; + if( m_state != State::RUNNING || interrupted() ) + return false; - ++m_cycleCount; + // Check if we've passed the end time + if( m_now > m_endTime ) + return false; - m_scheduler.executeNextEvents( m_now ); - m_cycleStepTable.executeCycle( m_profiler.get() ); + bool hasWork = false; - processEndCycle(); - } - - m_now = std::min( m_now, end ); -} - -void RootEngine::runRealtime( DateTime end ) -{ - struct DialectReleaseGIL + if( m_inRealtime ) { - DialectReleaseGIL( RootEngine * e ) : engine( e ) { engine -> dialectUnlockGIL(); } - ~DialectReleaseGIL() { engine -> dialectLockGIL(); } - RootEngine * engine; - }; - - std::vector dirtyGroups; + // Realtime mode: check for push events and timers + struct DialectReleaseGIL + { + DialectReleaseGIL( RootEngine * e ) : engine( e ) { engine -> dialectUnlockGIL(); } + ~DialectReleaseGIL() { engine -> dialectLockGIL(); } + RootEngine * engine; + }; - m_inRealtime = true; - bool haveEvents = false; - while( m_state == State::RUNNING && !interrupted() ) - { - TimeDelta waitTime; + TimeDelta waitTime = maxWait; if( !m_pendingPushEvents.hasEvents() ) { 
DateTime now = DateTime::now(); - waitTime = std::min( m_endTime - now, m_settings.queueWaitTime ); + TimeDelta timeToEnd = m_endTime - now; + if( timeToEnd < waitTime || waitTime == TimeDelta::ZERO() ) + waitTime = timeToEnd; if( m_scheduler.hasEvents() ) - waitTime = std::min( m_scheduler.nextTime() - DateTime::now(), waitTime ); + { + TimeDelta timeToNext = m_scheduler.nextTime() - now; + if( timeToNext < waitTime ) + waitTime = timeToNext; + } } - if( !haveEvents ) + if( !m_haveEvents && waitTime > TimeDelta::ZERO() ) { - //We keep the haveEvents flag in case there were events, but we only decided to execute - //timers in the previous cycle, then we shouldnt wait again ( which can actually lead to cases - //where we miss triggers ) + // We keep the m_haveEvents flag in case there were events, but we only decided to execute + // timers in the previous cycle, then we shouldn't wait again (which can lead to missed triggers) DialectReleaseGIL release( this ); - haveEvents = m_pushEventQueue.wait( waitTime ); + m_haveEvents = m_pushEventQueue.wait( waitTime ); } - //grab time after waitForEvents so that we dont grab events with time > now + // Grab time after wait so we don't grab events with time > now m_now = DateTime::now(); - if( m_now > end ) - break; + if( m_now > m_endTime ) + { + m_now = m_endTime; + return false; + } ++m_cycleCount; - //We made a conscious decision to execute timers exactly on their requested time in realtime mode - //therefore timers that are ready are executed on their own time cycle before realtime events all processed + // Execute timers exactly on their requested time - timers that are ready + // are executed on their own time cycle before realtime push events if( m_scheduler.hasEvents() && m_scheduler.nextTime() < m_now ) { m_now = m_scheduler.nextTime(); m_scheduler.executeNextEvents( m_now ); + hasWork = true; } - else + else if( m_haveEvents || m_pendingPushEvents.hasEvents() ) { + // Process push events PushEvent * events = 
m_pushEventQueue.popAll(); - processPendingPushEvents( dirtyGroups ); - processPushEventQueue( events, dirtyGroups ); + processPendingPushEvents( m_dirtyGroups ); + processPushEventQueue( events, m_dirtyGroups ); - for( auto * group : dirtyGroups ) + for( auto * group : m_dirtyGroups ) group -> state = PushGroup::NONE; - - dirtyGroups.clear(); - haveEvents = false; + + m_dirtyGroups.clear(); + m_haveEvents = false; + hasWork = true; + } + } + else + { + // Sim mode: process next scheduled event + if( m_scheduler.hasEvents() ) + { + m_now = m_scheduler.nextTime(); + if( m_now <= m_endTime ) + { + ++m_cycleCount; + m_scheduler.executeNextEvents( m_now ); + hasWork = true; + } + else + { + m_now = m_endTime; + } } + } + if( hasWork ) + { m_cycleStepTable.executeCycle( m_profiler.get() ); - processEndCycle(); } - m_now = std::min( m_now, end ); + // Return true if there's more work to do + return m_state == State::RUNNING && !interrupted() && + ( m_scheduler.hasEvents() || m_pendingPushEvents.hasEvents() ); } -void RootEngine::run( DateTime start, DateTime end ) +void RootEngine::start( DateTime startTime, DateTime end ) { - try - { - preRun( start, end ); - m_exception_mutex.lock(); - if( m_state != State::SHUTDOWN ) - m_state = State::RUNNING; - m_exception_mutex.unlock(); + preRun( startTime, end ); - if( m_settings.realtime ) + m_exception_mutex.lock(); + if( m_state != State::SHUTDOWN ) + m_state = State::RUNNING; + m_exception_mutex.unlock(); + + m_inRealtime = m_settings.realtime; + m_haveEvents = false; + m_dirtyGroups.clear(); + + // In realtime mode, if start is in the past, first run through historical events + if( m_settings.realtime ) + { + DateTime rtNow = DateTime::now(); + if( startTime < rtNow && rtNow < end ) { - DateTime rtNow = DateTime::now(); - runSim( std::min( rtNow, end ) ); - if( end > rtNow ) - runRealtime( end ); + // Temporarily disable realtime to process historical sim events + m_inRealtime = false; + DateTime simEnd = std::min( rtNow, 
end ); + while( m_scheduler.hasEvents() && m_state == State::RUNNING && !interrupted() ) + { + DateTime nextTime = m_scheduler.nextTime(); + if( nextTime > simEnd ) + break; + m_now = nextTime; + ++m_cycleCount; + m_scheduler.executeNextEvents( m_now ); + m_cycleStepTable.executeCycle( m_profiler.get() ); + processEndCycle(); + } + m_now = std::min( m_now, simEnd ); + m_inRealtime = true; // Now switch to realtime } - else - runSim( end ); - } - catch( ... ) - { - m_exception_ptr = std::current_exception(); } +} +void RootEngine::finish() +{ try { postRun(); @@ -272,11 +308,29 @@ void RootEngine::run( DateTime start, DateTime end ) } m_state = State::DONE; + m_dirtyGroups.clear(); if( m_exception_ptr ) std::rethrow_exception( m_exception_ptr ); } +void RootEngine::run( DateTime startTime, DateTime end ) +{ + try + { + start( startTime, end ); + + // Main loop - same code path whether called via run() or step() + while( processOneCycle( m_settings.queueWaitTime ) ) {} + } + catch( ... ) + { + m_exception_ptr = std::current_exception(); + } + + finish(); +} + void RootEngine::shutdown() { m_state = State::SHUTDOWN; @@ -290,6 +344,13 @@ void RootEngine::shutdown( std::exception_ptr except_ptr ) m_exception_ptr = except_ptr; } +DateTime RootEngine::nextScheduledTime() +{ + if( m_scheduler.hasEvents() ) + return m_scheduler.nextTime(); + return DateTime::NONE(); +} + DictionaryPtr RootEngine::engine_stats() const { if( !m_profiler ) diff --git a/cpp/csp/engine/RootEngine.h b/cpp/csp/engine/RootEngine.h index 78b54ea5f..ec57024e1 100644 --- a/cpp/csp/engine/RootEngine.h +++ b/cpp/csp/engine/RootEngine.h @@ -2,6 +2,7 @@ #define _IN_CSP_ENGINE_ROOTENGINE_H #include +#include #include #include #include @@ -25,7 +26,7 @@ class EndCycleListener public: virtual ~EndCycleListener() {}; virtual void onEndCycle() = 0; - + bool isDirty() const { return m_dirty; } void setDirtyFlag() { m_dirty = true; } void clearDirtyFlag() { m_dirty = false; } @@ -54,9 +55,22 @@ class RootEngine 
: public Engine csp::Profiler* profiler() const { return m_profiler.get(); } void run( DateTime start, DateTime end ); + + // Backward compatibility wrappers - mode is determined by engine settings + void runSim( DateTime start, DateTime end ) { run( start, end ); } + void runRealtime( DateTime start, DateTime end ) { run( start, end ); } + void shutdown(); void shutdown( std::exception_ptr except ); + // Decomposed execution API - run() uses these internally + // External event loops can call start/processOneCycle/finish directly + void start( DateTime start, DateTime end ); + bool processOneCycle( TimeDelta maxWait = TimeDelta::ZERO() ); // Returns true if more work pending + void finish(); + bool isRunning() const { return m_state == State::RUNNING; } + DateTime nextScheduledTime(); // Returns next scheduled event time, or NONE if none + Scheduler::Handle reserveSchedulerHandle(); Scheduler::Handle scheduleCallback( TimeDelta delta, Scheduler::Callback cb ); Scheduler::Handle scheduleCallback( DateTime time, Scheduler::Callback cb ); @@ -67,8 +81,8 @@ class RootEngine : public Engine void cancelCallback( Scheduler::Handle handle ); - void schedulePushEvent( PushEvent * event ) { m_pushEventQueue.push( event ); } - void schedulePushBatch( PushEventQueue::Batch & batch ) { m_pushEventQueue.push( batch ); } + void schedulePushEvent( PushEvent * event ) { m_pushEventQueue.push( event ); m_fdWaiter.notify(); } + void schedulePushBatch( PushEventQueue::Batch & batch ) { m_pushEventQueue.push( batch ); m_fdWaiter.notify(); } bool scheduleEndCycleListener( EndCycleListener * l ); @@ -90,7 +104,12 @@ class RootEngine : public Engine bool interrupted() const; PushPullEventQueue & pushPullEventQueue() { return m_pushPullEventQueue; } - + + // Native fd-based wakeup for external event loops (asyncio, etc.) 
+ // Returns a file descriptor that becomes readable when events are queued + int getWakeupFd() const { return m_fdWaiter.readFd(); } + void clearWakeupFd() { m_fdWaiter.clear(); } + protected: enum State { NONE, STARTING, RUNNING, SHUTDOWN, DONE }; using EndCycleListeners = std::vector; @@ -99,9 +118,6 @@ class RootEngine : public Engine void preRun( DateTime start, DateTime end ); void postRun(); - void runSim( DateTime end ); - void runRealtime( DateTime end ); - void processPendingPushEvents( std::vector & dirtyGroups ); void processPushEventQueue( PushEvent * events, std::vector & dirtyGroups ); @@ -131,8 +147,12 @@ class RootEngine : public Engine PendingPushEvents m_pendingPushEvents; Settings m_settings; bool m_inRealtime; + bool m_haveEvents; // Tracks pending events across cycles in realtime mode int m_initSignalCount; + // Shared across cycles for event processing + std::vector m_dirtyGroups; + PushEventQueue m_pushEventQueue; //This queue is managed entirely from the PushPullInputAdapter PushPullEventQueue m_pushPullEventQueue; @@ -140,6 +160,7 @@ class RootEngine : public Engine std::exception_ptr m_exception_ptr; std::mutex m_exception_mutex; std::unique_ptr m_profiler; + mutable FdWaiter m_fdWaiter; // For native fd-based event loop integration }; @@ -168,7 +189,7 @@ inline Scheduler::Handle RootEngine::scheduleCallback( Scheduler::Handle reserve if( time < m_now ) [[unlikely]] CSP_THROW( ValueError, "Cannot schedule event in the past. 
new time: " << time << " now: " << m_now ); - return m_scheduler.scheduleCallback( reservedHandle, time, std::move( cb ) ); + return m_scheduler.scheduleCallback( reservedHandle, time, std::move( cb ) ); } inline Scheduler::Handle RootEngine::rescheduleCallback( Scheduler::Handle id, csp::DateTime time ) diff --git a/cpp/csp/python/CMakeLists.txt b/cpp/csp/python/CMakeLists.txt index 195b99332..6a81e0900 100644 --- a/cpp/csp/python/CMakeLists.txt +++ b/cpp/csp/python/CMakeLists.txt @@ -32,6 +32,7 @@ set(CSPIMPL_PUBLIC_HEADERS NumpyConversions.h NumpyInputAdapter.h PyAdapterManagerWrapper.h + PyEventLoop.h PyBasketInputProxy.h PyBasketOutputProxy.h PyCppNode.h @@ -56,6 +57,7 @@ add_library(cspimpl SHARED NumpyConversions.cpp PyAdapterManager.cpp PyAdapterManagerWrapper.cpp + PyEventLoop.cpp PyConstAdapter.cpp PyCppNode.cpp PyEngine.cpp diff --git a/cpp/csp/python/PyEngine.cpp b/cpp/csp/python/PyEngine.cpp index e6f90de0e..6a6673454 100644 --- a/cpp/csp/python/PyEngine.cpp +++ b/cpp/csp/python/PyEngine.cpp @@ -95,7 +95,7 @@ static void PyEngine_dealloc( PyEngine * self ) { CSP_BEGIN_METHOD; self -> ~PyEngine(); - Py_TYPE( self ) -> tp_free( self ); + Py_TYPE( self ) -> tp_free( self ); CSP_RETURN; } @@ -105,7 +105,7 @@ static PyObject * PyEngine_run( PyEngine * self, PyObject * args ) PyObject * pyStart; PyObject * pyEnd; - if( !PyArg_ParseTuple( args, "OO", &pyStart, &pyEnd ) ) + if( !PyArg_ParseTuple( args, "OO", &pyStart, &pyEnd ) ) return nullptr; auto start = fromPython( pyStart ); @@ -118,8 +118,120 @@ static PyObject * PyEngine_run( PyEngine * self, PyObject * args ) CSP_RETURN_NONE; } +static PyObject * PyEngine_start( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + PyObject * pyStart; + PyObject * pyEnd; + if( !PyArg_ParseTuple( args, "OO", &pyStart, &pyEnd ) ) + return nullptr; + + auto start = fromPython( pyStart ); + auto end = fromPython( pyEnd ); + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" 
); + self -> rootEngine() -> start( start, end ); + + Py_RETURN_NONE; + CSP_RETURN_NONE; +} + +static PyObject * PyEngine_processOneCycle( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + double maxWaitSeconds = 0.0; + if( !PyArg_ParseTuple( args, "|d", &maxWaitSeconds ) ) + return nullptr; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + + // Convert double seconds to nanoseconds to preserve sub-second precision + // fromSeconds takes int64_t which would truncate 0.001 to 0 + int64_t maxWaitNanos = static_cast( maxWaitSeconds * 1e9 ); + TimeDelta maxWait = TimeDelta::fromNanoseconds( maxWaitNanos ); + bool hasMore = self -> rootEngine() -> processOneCycle( maxWait ); + + return PyBool_FromLong( hasMore ); + CSP_RETURN_NONE; +} + +static PyObject * PyEngine_finish( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + self -> rootEngine() -> finish(); + + return self -> collectOutputs(); + CSP_RETURN_NONE; +} + +static PyObject * PyEngine_isRunning( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + bool running = self -> rootEngine() -> isRunning(); + + return PyBool_FromLong( running ); + CSP_RETURN_NONE; +} + +static PyObject * PyEngine_now( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + DateTime now = self -> rootEngine() -> now(); + + return toPython( now ); + CSP_RETURN_NONE; +} + +static PyObject * PyEngine_nextScheduledTime( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + DateTime nextTime = self -> rootEngine() -> nextScheduledTime(); + + return toPython( nextTime ); + 
CSP_RETURN_NONE; +} + +static PyObject * PyEngine_getWakeupFd( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + int fd = self -> rootEngine() -> getWakeupFd(); + + return PyLong_FromLong( fd ); + CSP_RETURN_NONE; +} + +static PyObject * PyEngine_clearWakeupFd( PyEngine * self, PyObject * args ) +{ + CSP_BEGIN_METHOD; + + CSP_TRUE_OR_THROW_RUNTIME( self -> engine() -> isRootEngine(), "engine is not root engine" ); + self -> rootEngine() -> clearWakeupFd(); + + CSP_RETURN_NONE; +} + static PyMethodDef PyEngine_methods[] = { - { "run", (PyCFunction) PyEngine_run, METH_VARARGS, "start and run engine" }, + { "run", ( PyCFunction ) PyEngine_run, METH_VARARGS, "start and run engine" }, + { "start", ( PyCFunction ) PyEngine_start, METH_VARARGS, "start engine (call before process_one_cycle)" }, + { "process_one_cycle", ( PyCFunction ) PyEngine_processOneCycle, METH_VARARGS, "execute one cycle, returns True if more work pending" }, + { "finish", ( PyCFunction ) PyEngine_finish, METH_NOARGS, "finish execution and cleanup" }, + { "is_running", ( PyCFunction ) PyEngine_isRunning, METH_NOARGS, "check if engine is running" }, + { "now", ( PyCFunction ) PyEngine_now, METH_NOARGS, "get current engine time" }, + { "next_scheduled_time", ( PyCFunction ) PyEngine_nextScheduledTime, METH_NOARGS, "get next scheduled event time" }, + { "get_wakeup_fd", ( PyCFunction ) PyEngine_getWakeupFd, METH_NOARGS, "get fd that becomes readable when events are queued" }, + { "clear_wakeup_fd", ( PyCFunction ) PyEngine_clearWakeupFd, METH_NOARGS, "clear the wakeup fd after processing events" }, { NULL } }; diff --git a/cpp/csp/python/PyEventLoop.cpp b/cpp/csp/python/PyEventLoop.cpp new file mode 100644 index 000000000..a5cfc80e3 --- /dev/null +++ b/cpp/csp/python/PyEventLoop.cpp @@ -0,0 +1,412 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace csp::python +{ + 
+PyEventLoopAdapter::PyEventLoopAdapter( PythonEngine * engine ) : m_engine( engine ), + m_nextCallbackId( 1 ), + m_stopRequested( false ) +{ +} + +PyEventLoopAdapter::~PyEventLoopAdapter() +{ + // Clean up any remaining callbacks + for( auto & entry : m_callbacks ) + cleanupEntry( &entry ); + + m_callbacks.clear(); + + // Clean up threadsafe queue + std::lock_guard lock( m_threadSafeMutex ); + for( auto & item : m_threadSafeQueue ) + { + Py_XDECREF( std::get<0>( item ) ); + Py_XDECREF( std::get<1>( item ) ); + Py_XDECREF( std::get<2>( item ) ); + } + m_threadSafeQueue.clear(); +} + +double PyEventLoopAdapter::time() const +{ + DateTime now = m_engine -> now(); + if( now == DateTime::NONE() ) + { + // If engine hasn't started, return current wall time + now = DateTime::now(); + } + // Convert to seconds since Unix epoch + return static_cast( now.asNanoseconds() ) / 1e9; +} + +uint64_t PyEventLoopAdapter::scheduleCallback( DateTime time, PyObject * callback, + PyObject * args, PyObject * context ) +{ + uint64_t callbackId = m_nextCallbackId++; + + // Hold references + Py_INCREF( callback ); + Py_XINCREF( args ); + Py_XINCREF( context ); + + CallbackEntry entry; + entry.callback = callback; + entry.args = args; + entry.context = context; + entry.id = callbackId; + entry.cancelled = false; + + auto it = m_callbacks.insert( m_callbacks.end(), entry ); + + // Schedule with CSP's scheduler + auto handle = m_engine -> scheduleCallback( + time, + [this, it]() -> const InputAdapter * + { + if( !it -> cancelled ) + executeCallback( &( *it ) ); + + cleanupEntry( &( *it ) ); + m_callbacks.erase( it ); + return nullptr; + } + ); + + it -> handle = handle; + + return callbackId; +} + +uint64_t PyEventLoopAdapter::callSoon( PyObject * callback, PyObject * args, PyObject * context ) +{ + // Schedule for "now" - will execute in the next cycle + return scheduleCallback( m_engine -> now(), callback, args, context ); +} + +uint64_t PyEventLoopAdapter::callLater( double delay, PyObject 
* callback, + PyObject * args, PyObject * context ) +{ + if( delay < 0 ) + delay = 0; + + TimeDelta delta = TimeDelta::fromSeconds( delay ); + DateTime targetTime = m_engine -> now() + delta; + + return scheduleCallback( targetTime, callback, args, context ); +} + +uint64_t PyEventLoopAdapter::callAt( double when, PyObject * callback, + PyObject * args, PyObject * context ) +{ + // Convert seconds since epoch to DateTime + int64_t nanos = static_cast( when * 1e9 ); + DateTime targetTime = DateTime::fromNanoseconds( nanos ); + + return scheduleCallback( targetTime, callback, args, context ); +} + +bool PyEventLoopAdapter::cancelCallback( uint64_t callbackId ) +{ + for( auto & entry : m_callbacks ) + { + if( entry.id == callbackId && !entry.cancelled ) + { + entry.cancelled = true; + m_engine -> cancelCallback( entry.handle ); + return true; + } + } + return false; +} + +bool PyEventLoopAdapter::isCallbackPending( uint64_t callbackId ) const +{ + for( const auto & entry : m_callbacks ) + { + if( entry.id == callbackId && !entry.cancelled ) + return true; + } + return false; +} + +uint64_t PyEventLoopAdapter::callSoonThreadsafe( PyObject * callback, PyObject * args, PyObject * context ) +{ + // Add to threadsafe queue + Py_INCREF( callback ); + Py_XINCREF( args ); + Py_XINCREF( context ); + + { + std::lock_guard lock( m_threadSafeMutex ); + m_threadSafeQueue.emplace_back( callback, args, context ); + } + + // The engine will pick these up on the next cycle + // In a real implementation, we'd wake up the engine here + return 0; // No ID for threadsafe callbacks currently +} + +void PyEventLoopAdapter::processPendingThreadsafeCallbacks() +{ + std::vector> pending; + + { + std::lock_guard lock( m_threadSafeMutex ); + pending.swap( m_threadSafeQueue ); + } + + for( auto & item : pending ) + { + callSoon( std::get<0>( item ), std::get<1>( item ), std::get<2>( item ) ); + // callSoon will incref, so decref the ones we added in callSoonThreadsafe + Py_DECREF( std::get<0>( 
item ) ); + Py_XDECREF( std::get<1>( item ) ); + Py_XDECREF( std::get<2>( item ) ); + } +} + +void PyEventLoopAdapter::stop() +{ + m_stopRequested = true; + m_engine -> shutdown(); +} + +void PyEventLoopAdapter::executeCallback( CallbackEntry * entry ) +{ + PyObject * result = nullptr; + + if( entry -> context ) + { + // Run in context + PyObject * contextRun = PyObject_GetAttrString( entry -> context, "run" ); + if( contextRun ) + { + if( entry -> args ) + { + // Prepend callback to args + Py_ssize_t argsSize = PyTuple_Size( entry -> args ); + PyObjectPtr newArgs = PyObjectPtr::own( PyTuple_New( argsSize + 1 ) ); + Py_INCREF( entry -> callback ); + PyTuple_SET_ITEM( newArgs.ptr(), 0, entry -> callback ); + for( Py_ssize_t i = 0; i < argsSize; ++i ) + { + PyObject * item = PyTuple_GET_ITEM( entry -> args, i ); + Py_INCREF( item ); + PyTuple_SET_ITEM( newArgs.ptr(), i + 1, item ); + } + result = PyObject_Call( contextRun, newArgs.ptr(), nullptr ); + } + else + { + PyObjectPtr args = PyObjectPtr::own( PyTuple_Pack( 1, entry -> callback ) ); + result = PyObject_Call( contextRun, args.ptr(), nullptr ); + } + Py_DECREF( contextRun ); + } + } + else + { + // Direct call + if( entry -> args ) + result = PyObject_Call( entry -> callback, entry -> args, nullptr ); + else + result = PyObject_CallNoArgs( entry -> callback ); + } + + if( result ) + Py_DECREF( result ); + else + // Handle exception - for now just print it + PyErr_Print(); +} + +void PyEventLoopAdapter::cleanupEntry( CallbackEntry * entry ) +{ + Py_DECREF( entry -> callback ); + Py_XDECREF( entry -> args ); + Py_XDECREF( entry -> context ); +} + +static void PyAsyncioHandle_dealloc( PyAsyncioHandle * self ) +{ + CSP_BEGIN_METHOD; + Py_XDECREF( self -> callback ); + Py_XDECREF( ( PyObject * ) self -> loop ); + Py_TYPE( self ) -> tp_free( ( PyObject * ) self ); + CSP_RETURN; +} + +static PyObject * PyAsyncioHandle_cancel( PyAsyncioHandle * self ) +{ + CSP_BEGIN_METHOD; + if( !self -> cancelled && self -> loop && 
self -> loop -> adapter ) + { + self -> loop -> adapter -> cancelCallback( self -> callback_id ); + self -> cancelled = true; + } + Py_RETURN_NONE; + CSP_RETURN_NONE; +} + +static PyObject * PyAsyncioHandle_cancelled( PyAsyncioHandle * self ) +{ + if( self -> cancelled ) + Py_RETURN_TRUE; + Py_RETURN_FALSE; +} + +static PyObject * PyAsyncioHandle_repr( PyAsyncioHandle * self ) +{ + return PyUnicode_FromFormat( "", + self -> callback, + self -> cancelled ? "True" : "False" ); +} + +static PyMethodDef PyAsyncioHandle_methods[] = { + { "cancel", ( PyCFunction ) PyAsyncioHandle_cancel, METH_NOARGS, "Cancel the callback." }, + { "cancelled", ( PyCFunction ) PyAsyncioHandle_cancelled, METH_NOARGS, "Return True if the callback was cancelled." }, + { NULL } +}; + +PyTypeObject PyAsyncioHandle::PyType = { + PyVarObject_HEAD_INIT(NULL, 0) + "_cspimpl.AsyncioHandle", /* tp_name */ + sizeof(PyAsyncioHandle), /* tp_basicsize */ + 0, /* tp_itemsize */ + ( destructor ) PyAsyncioHandle_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_reserved */ + ( reprfunc ) PyAsyncioHandle_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + "asyncio Handle wrapper", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + PyAsyncioHandle_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + PyType_GenericNew, /* tp_new */ +}; + +static void PyAsyncioTimerHandle_dealloc( PyAsyncioTimerHandle * self ) +{ + CSP_BEGIN_METHOD; + Py_XDECREF( self -> callback ); + Py_XDECREF( ( PyObject 
* ) self -> loop ); + Py_TYPE( self ) -> tp_free( ( PyObject * ) self ); + CSP_RETURN; +} + +static PyObject * PyAsyncioTimerHandle_cancel( PyAsyncioTimerHandle * self ) +{ + CSP_BEGIN_METHOD; + if( !self -> cancelled && self -> loop && self -> loop -> adapter ) + { + self -> loop -> adapter -> cancelCallback( self -> callback_id ); + self -> cancelled = true; + } + Py_RETURN_NONE; + CSP_RETURN_NONE; +} + +static PyObject * PyAsyncioTimerHandle_cancelled( PyAsyncioTimerHandle * self ) +{ + if( self -> cancelled ) + Py_RETURN_TRUE; + Py_RETURN_FALSE; +} + +static PyObject * PyAsyncioTimerHandle_when( PyAsyncioTimerHandle * self ) +{ + return PyFloat_FromDouble( self -> when ); +} + +static PyObject * PyAsyncioTimerHandle_repr( PyAsyncioTimerHandle * self ) +{ + return PyUnicode_FromFormat( "", + self -> callback, + PyFloat_FromDouble( self -> when ), + self -> cancelled ? "True" : "False" ); +} + +static PyMethodDef PyAsyncioTimerHandle_methods[] = { + { "cancel", ( PyCFunction ) PyAsyncioTimerHandle_cancel, METH_NOARGS, "Cancel the callback." }, + { "cancelled", ( PyCFunction ) PyAsyncioTimerHandle_cancelled, METH_NOARGS, "Return True if the callback was cancelled." }, + { "when", ( PyCFunction ) PyAsyncioTimerHandle_when, METH_NOARGS, "Return scheduled callback time." 
}, + { NULL } +}; + +PyTypeObject PyAsyncioTimerHandle::PyType = { + PyVarObject_HEAD_INIT(NULL, 0) + "_cspimpl.AsyncioTimerHandle", /* tp_name */ + sizeof(PyAsyncioTimerHandle), /* tp_basicsize */ + 0, /* tp_itemsize */ + ( destructor ) PyAsyncioTimerHandle_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_reserved */ + ( reprfunc ) PyAsyncioTimerHandle_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + "asyncio TimerHandle wrapper", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + PyAsyncioTimerHandle_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + PyType_GenericNew, /* tp_new */ +}; + +// Register types +REGISTER_TYPE_INIT( &PyAsyncioHandle::PyType, "AsyncioHandle" ); +REGISTER_TYPE_INIT( &PyAsyncioTimerHandle::PyType, "AsyncioTimerHandle" ); + +} diff --git a/cpp/csp/python/PyEventLoop.h b/cpp/csp/python/PyEventLoop.h new file mode 100644 index 000000000..100288e75 --- /dev/null +++ b/cpp/csp/python/PyEventLoop.h @@ -0,0 +1,126 @@ +#ifndef _IN_CSP_PYTHON_PYEVENTLOOP_H +#define _IN_CSP_PYTHON_PYEVENTLOOP_H + +#include +#include +#include +#include +#include +#include +#include + +namespace csp::python +{ + +//PyEventLoopAdapter provides the bridge between CSP's scheduler and Python's asyncio. +//It allows asyncio callbacks to be scheduled and executed within CSP's event loop. 
+class PyEventLoopAdapter +{ +public: + struct CallbackEntry + { + PyObject * callback; + PyObject * args; // Can be nullptr + PyObject * context; // Can be nullptr + Scheduler::Handle handle; + uint64_t id; + bool cancelled; + }; + + PyEventLoopAdapter( PythonEngine * engine ); + ~PyEventLoopAdapter(); + + // Schedule a callback to be called as soon as possible + // Returns a unique callback ID that can be used for cancellation + uint64_t callSoon( PyObject * callback, PyObject * args, PyObject * context ); + + // Schedule a callback to be called at a specific time + // delay is in seconds + uint64_t callLater( double delay, PyObject * callback, PyObject * args, PyObject * context ); + + // Schedule a callback to be called at a specific absolute time + // when is in seconds since epoch (loop.time() compatible) + uint64_t callAt( double when, PyObject * callback, PyObject * args, PyObject * context ); + + // Cancel a scheduled callback + bool cancelCallback( uint64_t callbackId ); + + // Check if a callback is still pending + bool isCallbackPending( uint64_t callbackId ) const; + + // Get the current loop time in seconds + double time() const; + + // Thread-safe version of callSoon (wakes up the engine if needed) + uint64_t callSoonThreadsafe( PyObject * callback, PyObject * args, PyObject * context ); + + // Process any pending threadsafe callbacks (called from engine thread) + void processPendingThreadsafeCallbacks(); + + // Stop the loop + void stop(); + + // Check if stop was requested + bool stopRequested() const { return m_stopRequested; } + +private: + uint64_t scheduleCallback( DateTime time, PyObject * callback, PyObject * args, PyObject * context ); + void executeCallback( CallbackEntry * entry ); + void cleanupEntry( CallbackEntry * entry ); + + PythonEngine * m_engine; + uint64_t m_nextCallbackId; + std::list m_callbacks; + + // Thread-safe callback queue + std::mutex m_threadSafeMutex; + std::vector> m_threadSafeQueue; + + bool m_stopRequested; +}; + 
//PyAsyncioLoop is the Python wrapper for the asyncio event loop.
// Holds the C++ adapter plus the loop-level state asyncio expects
// (debug flag, exception handler, task factory, owning thread id).
struct PyAsyncioLoop
{
    PyObject_HEAD
    PyEventLoopAdapter * adapter;
    PyEngine * pyengine;
    bool running;
    bool closed;
    bool debug;
    PyObject * exception_handler;
    PyObject * task_factory;
    PyObject * ready; // deque of ready callbacks
    uint64_t thread_id;

    static PyTypeObject PyType;
};

// Handle wrapper for Python
// Mirrors asyncio.Handle; callback_id identifies the scheduled callback in the adapter
struct PyAsyncioHandle
{
    PyObject_HEAD
    PyAsyncioLoop * loop;
    uint64_t callback_id;
    PyObject * callback; // For repr
    bool cancelled;

    static PyTypeObject PyType;
};

// TimerHandle wrapper for Python
// Mirrors asyncio.TimerHandle; adds the scheduled time for when()
struct PyAsyncioTimerHandle
{
    PyObject_HEAD
    PyAsyncioLoop * loop;
    uint64_t callback_id;
    PyObject * callback; // For repr
    double when; // Scheduled time
    bool cancelled;

    static PyTypeObject PyType;
};

}

#endif
diff --git a/csp/__init__.py b/csp/__init__.py
index 439dc792f..b7aaa5fde 100644
--- a/csp/__init__.py
+++ b/csp/__init__.py
@@ -1,3 +1,5 @@
+# ruff: noqa: I001
+# Import order is intentional - async_adapter must be imported after csp.node is defined
 import os

 from csp.baselib import *
@@ -31,6 +33,17 @@
 from . 
import stats +# Import async adapter LAST since it uses @csp.node decorator which must be defined first +from csp.impl.async_adapter import ( # noqa: E402 + async_alarm, + async_for, + async_in, + async_node, + async_out, + await_, + schedule_async_alarm, +) + __version__ = "0.14.0" diff --git a/csp/event_loop/__init__.py b/csp/event_loop/__init__.py new file mode 100644 index 000000000..aa7963dbb --- /dev/null +++ b/csp/event_loop/__init__.py @@ -0,0 +1,17 @@ +from csp.event_loop.bridge import AsyncioBridge, BidirectionalBridge +from csp.event_loop.loop import CspEventLoop, CspEventLoopPolicy, new_event_loop, run + +__all__ = [ + # Standalone event loop + "CspEventLoop", + "CspEventLoopPolicy", + "EventLoopPolicy", # Alias for compatibility + "new_event_loop", + "run", + # Bridge with running graph + "AsyncioBridge", + "BidirectionalBridge", +] + +# Alias for compatibility with uvloop naming +EventLoopPolicy = CspEventLoopPolicy diff --git a/csp/event_loop/bridge.py b/csp/event_loop/bridge.py new file mode 100644 index 000000000..aeffe0fbc --- /dev/null +++ b/csp/event_loop/bridge.py @@ -0,0 +1,465 @@ +import asyncio +import threading +import time +from datetime import datetime, timedelta +from typing import Any, Callable, Coroutine, List, Optional, TypeVar + +from csp.impl.genericpushadapter import GenericPushAdapter + +_T = TypeVar("_T") + +__all__ = ("AsyncioBridge", "BidirectionalBridge") + + +class AsyncioBridge: + """ + Bridge between asyncio event loop and CSP's realtime engine. + + This class provides the ability to schedule callbacks that execute + within a running CSP graph, interacting with csp.now() and CSP's time. + + The bridge runs its own asyncio event loop in a background thread, + allowing you to schedule callbacks and run coroutines that push + data to CSP via a GenericPushAdapter. + + Attributes: + adapter: The GenericPushAdapter used to push data to CSP. 
+ + Example: + >>> bridge = AsyncioBridge(int, "counter") + >>> bridge.start() + >>> + >>> # Schedule a callback + >>> bridge.call_later(1.0, lambda: bridge.push(42)) + >>> + >>> # Run a coroutine + >>> async def fetch_and_push(): + ... data = await some_async_operation() + ... bridge.push(data) + >>> bridge.run_coroutine(fetch_and_push()) + >>> + >>> bridge.stop() + """ + + def __init__(self, adapter_type: type = object, name: str = "asyncio_bridge"): + """ + Initialize the asyncio bridge. + + Args: + adapter_type: The type of data to push through the adapter. + This should match the type expected by your CSP nodes. + name: Name for the push adapter (for debugging/identification). + """ + self._adapter = GenericPushAdapter(adapter_type, name) + self._loop: Optional[asyncio.AbstractEventLoop] = None + self._thread: Optional[threading.Thread] = None + self._running = False + self._start_time: Optional[datetime] = None + self._lock = threading.Lock() + + @property + def adapter(self) -> GenericPushAdapter: + """ + Get the underlying push adapter to wire into CSP graph. + + Use this in your graph definition to get the edge that receives + data pushed via this bridge. + + Returns: + The GenericPushAdapter instance. + + Example: + >>> @csp.graph + ... def my_graph(): + ... data = bridge.adapter.out() + ... # data is now a ts[adapter_type] edge + """ + return self._adapter + + @property + def is_running(self) -> bool: + """Check if the bridge is currently running.""" + return self._running + + @property + def loop(self) -> Optional[asyncio.AbstractEventLoop]: + """Get the underlying asyncio event loop (if started).""" + return self._loop + + def start(self, start_time: Optional[datetime] = None) -> None: + """ + Start the asyncio event loop in a background thread. + + This must be called before scheduling callbacks or running coroutines. + The bridge can be started before or after the CSP graph starts. 
+ + Args: + start_time: The CSP engine start time (used for time calculations + with call_at). If not provided, uses current UTC time. + + Raises: + RuntimeError: If the bridge is already running. + """ + if self._running: + raise RuntimeError("Bridge is already running") + + self._start_time = start_time or datetime.utcnow() + self._running = True + self._thread = threading.Thread(target=self._run_loop, daemon=True) + self._thread.start() + + # Wait for loop to be ready + timeout = 5.0 + start_wait = time.time() + while self._loop is None and self._running: + if time.time() - start_wait > timeout: + raise RuntimeError("Timeout waiting for event loop to start") + time.sleep(0.001) + + def stop(self, timeout: float = 5.0) -> None: + """ + Stop the asyncio event loop. + + This should be called after the CSP graph has finished to clean up + the background thread. + + Args: + timeout: Maximum time to wait for the thread to stop. + """ + if not self._running: + return + + self._running = False + if self._loop: + try: + self._loop.call_soon_threadsafe(self._loop.stop) + except RuntimeError: + pass # Loop already closed + if self._thread: + self._thread.join(timeout=timeout) + self._loop = None + self._thread = None + + def _run_loop(self) -> None: + """Run the asyncio event loop in a background thread.""" + self._loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) + try: + self._loop.run_forever() + finally: + try: + self._loop.close() + except Exception: + pass + + def push(self, value: Any) -> bool: + """ + Push a value to the CSP graph through the adapter. + + This is thread-safe and can be called from any thread, including + asyncio callbacks. + + Args: + value: The value to push. Must be compatible with the adapter_type. + + Returns: + True if the push was successful, False if the adapter is not + yet bound to a running graph. 
+ """ + return self._adapter.push_tick(value) + + def call_soon(self, callback: Callable[..., Any], *args: Any) -> None: + """ + Schedule a callback to run as soon as possible. + + The callback will execute in the asyncio thread but can push + data to the CSP graph. + + Args: + callback: The callback to execute. + *args: Arguments to pass to the callback. + + Raises: + RuntimeError: If the bridge has not been started. + """ + if self._loop is None: + raise RuntimeError("Bridge not started - call start() first") + + def wrapped(): + try: + callback(*args) + except Exception as e: + import sys + + print(f"Error in callback: {e}", file=sys.stderr) + + self._loop.call_soon_threadsafe(wrapped) + + def call_later(self, delay: float, callback: Callable[..., Any], *args: Any) -> "asyncio.TimerHandle": + """ + Schedule a callback after delay seconds. + + The callback will execute in the asyncio thread after the specified + delay. This uses wall-clock time, not CSP engine time. + + Args: + delay: Seconds to wait before calling. Must be non-negative. + callback: The callback to execute. + *args: Arguments to pass to the callback. + + Returns: + A TimerHandle that can be used to cancel the callback. + + Raises: + RuntimeError: If the bridge has not been started. + ValueError: If delay is negative. 
+ """ + if self._loop is None: + raise RuntimeError("Bridge not started - call start() first") + if delay < 0: + raise ValueError("delay must be non-negative") + + # Use call_soon_threadsafe to schedule the call_later + handle_container = [] + + def schedule(): + handle = self._loop.call_later(delay, callback, *args) + handle_container.append(handle) + + self._loop.call_soon_threadsafe(schedule) + + # Return a wrapper that will eventually contain the handle + # Note: The actual handle may not be available immediately + return _DeferredHandle(handle_container, self._loop) + + def call_at(self, when: datetime, callback: Callable[..., Any], *args: Any) -> "asyncio.TimerHandle": + """ + Schedule a callback at a specific datetime. + + This calculates the delay from the current wall-clock time to the + target time and schedules accordingly. If the target time is in + the past, the callback is scheduled immediately. + + Args: + when: The datetime to execute the callback. + callback: The callback to execute. + *args: Arguments to pass to the callback. + + Returns: + A TimerHandle that can be used to cancel the callback. + + Raises: + RuntimeError: If the bridge has not been started. + """ + now = datetime.utcnow() + delay = max(0.0, (when - now).total_seconds()) + return self.call_later(delay, callback, *args) + + def call_at_offset(self, offset: timedelta, callback: Callable[..., Any], *args: Any) -> "asyncio.TimerHandle": + """ + Schedule a callback at a specific offset from the start time. + + This is useful for scheduling callbacks aligned with CSP's engine + start time. The offset is from the start_time provided to start(). + + Args: + offset: Time offset from start_time. + callback: The callback to execute. + *args: Arguments to pass to the callback. + + Returns: + A TimerHandle that can be used to cancel the callback. + + Raises: + RuntimeError: If the bridge has not been started. 
+ """ + if self._start_time is None: + raise RuntimeError("Bridge not started - call start() first") + + target_time = self._start_time + offset + return self.call_at(target_time, callback, *args) + + def run_coroutine(self, coro: Coroutine[Any, Any, _T]) -> "asyncio.Future[_T]": + """ + Run an asyncio coroutine in the bridge's event loop. + + The coroutine runs in the background thread and can push data + to the CSP graph. + + Args: + coro: The coroutine to run. + + Returns: + A Future representing the coroutine's result. + + Raises: + RuntimeError: If the bridge has not been started. + + Example: + >>> async def fetch_and_push(): + ... data = await fetch_data() + ... bridge.push(data) + ... + >>> future = bridge.run_coroutine(fetch_and_push()) + >>> result = future.result() # Wait for completion + """ + if self._loop is None: + raise RuntimeError("Bridge not started - call start() first") + return asyncio.run_coroutine_threadsafe(coro, self._loop) + + def time(self) -> float: + """ + Get current time in seconds since epoch. + + This uses wall clock time, similar to asyncio's loop.time(). + + Returns: + Current time in seconds since epoch. + """ + return time.time() + + def elapsed_since_start(self) -> timedelta: + """ + Get time elapsed since the bridge started. + + Returns: + Time elapsed since start() was called. + """ + if self._start_time is None: + return timedelta(0) + return datetime.utcnow() - self._start_time + + def wait_for_adapter(self, timeout: Optional[float] = None) -> bool: + """ + Wait for the adapter to be bound to a running graph. + + This is useful to ensure the CSP graph has started and the adapter + is ready to receive data. + + Args: + timeout: Maximum time to wait in seconds. None means wait forever. + + Returns: + True if the adapter is ready, False if timeout occurred. 
+ """ + self._adapter.wait_for_start(timeout) + return self._adapter.started() + + +class _DeferredHandle: + """ + A handle wrapper that may not have its underlying handle immediately. + + This is used because call_later is scheduled via call_soon_threadsafe, + so the actual TimerHandle isn't available until the event loop processes it. + """ + + def __init__(self, container: List, loop: asyncio.AbstractEventLoop): + self._container = container + self._loop = loop + self._cancelled = False + + def cancel(self) -> None: + """Cancel the callback.""" + self._cancelled = True + if self._container: + self._container[0].cancel() + else: + # Schedule the cancel for when the handle is available + def do_cancel(): + if self._container: + self._container[0].cancel() + + try: + self._loop.call_soon_threadsafe(do_cancel) + except RuntimeError: + pass + + def cancelled(self) -> bool: + """Return True if the callback was cancelled.""" + if self._cancelled: + return True + if self._container: + return self._container[0].cancelled() + return False + + +class BidirectionalBridge(AsyncioBridge): + """ + Bridge supporting bidirectional communication between asyncio and CSP. + + This extends AsyncioBridge to allow not only pushing data to CSP, + but also receiving events from CSP nodes. + + Example: + >>> bridge = BidirectionalBridge(str) + >>> + >>> # Register callback to receive from CSP + >>> bridge.on_event(lambda data: print(f"Received: {data}")) + >>> + >>> @csp.node + >>> def my_node(data: ts[str], bridge_ref: object) -> ts[str]: + ... if csp.ticked(data): + ... # Emit back to asyncio + ... bridge_ref.emit({"response": data}) + ... 
return data + """ + + def __init__(self, adapter_type: type = object, name: str = "bidi_bridge"): + super().__init__(adapter_type, name) + self._event_callbacks: List[Callable[[Any], None]] = [] + self._callback_lock = threading.Lock() + + def on_event(self, callback: Callable[[Any], None]) -> None: + """ + Register a callback to receive events from CSP. + + The callback will be invoked in the asyncio thread when CSP + nodes call emit(). + + Args: + callback: Function to call with each event. + """ + with self._callback_lock: + self._event_callbacks.append(callback) + + def off_event(self, callback: Callable[[Any], None]) -> bool: + """ + Unregister an event callback. + + Args: + callback: The callback to remove. + + Returns: + True if the callback was found and removed. + """ + with self._callback_lock: + try: + self._event_callbacks.remove(callback) + return True + except ValueError: + return False + + def emit(self, value: Any) -> None: + """ + Emit an event from CSP to asyncio callbacks. + + This should be called from within CSP nodes to send data back + to the asyncio side. + + Args: + value: The value to emit to registered callbacks. 
+ """ + with self._callback_lock: + callbacks = list(self._event_callbacks) + + for callback in callbacks: + if self._loop: + try: + self._loop.call_soon_threadsafe(callback, value) + except RuntimeError: + pass # Loop closed + + +# Export for convenience +__all__ = ["AsyncioBridge", "BidirectionalBridge"] diff --git a/csp/event_loop/loop.py b/csp/event_loop/loop.py new file mode 100644 index 000000000..89cf0ea99 --- /dev/null +++ b/csp/event_loop/loop.py @@ -0,0 +1,1215 @@ +import asyncio +import concurrent.futures +import contextvars +import os +import selectors +import signal +import socket +import subprocess +import sys +import threading +import time +import warnings +from collections import deque +from datetime import datetime +from typing import Any, Callable, Coroutine, Dict, List, Optional, Tuple, TypeVar, Union + +from csp.impl.__cspimpl import _cspimpl + +_T = TypeVar("_T") + +__all__ = ( + "CspEventLoop", + "CspEventLoopPolicy", + "new_event_loop", + "run", +) + + +class _CspHandle: + """Handle for a scheduled callback, compatible with asyncio.Handle.""" + + __slots__ = ("_callback", "_args", "_context", "_loop", "_cancelled", "_repr") + + def __init__( + self, + callback: Callable[..., Any], + args: Optional[Tuple[Any, ...]] = None, + loop: Optional["CspEventLoop"] = None, + context: Optional[contextvars.Context] = None, + ): + self._callback = callback + self._args = args if args else () + self._context = context + self._loop = loop + self._cancelled = False + self._repr = None + + def cancel(self) -> None: + """Cancel the callback.""" + if not self._cancelled: + self._cancelled = True + self._callback = None + self._args = None + + def cancelled(self) -> bool: + """Return True if the callback was cancelled.""" + return self._cancelled + + def _run(self) -> None: + """Execute the callback.""" + if self._cancelled: + return + try: + if self._context is not None: + self._context.run(self._callback, *self._args) + else: + self._callback(*self._args) + 
except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if self._loop is not None: + self._loop.call_exception_handler( + { + "message": "Exception in callback", + "exception": exc, + "handle": self, + } + ) + else: + raise + + def __repr__(self) -> str: + if self._repr is None: + info = [self.__class__.__name__] + if self._cancelled: + info.append("cancelled") + if self._callback is not None: + info.append(f"callback={self._callback!r}") + self._repr = f"<{' '.join(info)}>" + return self._repr + + +class _CspTimerHandle(_CspHandle): + """Handle for a scheduled timer callback.""" + + __slots__ = ("_when",) + + def __init__( + self, + when: float, + callback: Callable[..., Any], + args: Optional[Tuple[Any, ...]] = None, + loop: Optional["CspEventLoop"] = None, + context: Optional[contextvars.Context] = None, + ): + super().__init__(callback, args, loop, context) + self._when = when + + def when(self) -> float: + """Return the scheduled time as a float.""" + return self._when + + def __lt__(self, other: "_CspTimerHandle") -> bool: + return self._when < other._when + + def __le__(self, other: "_CspTimerHandle") -> bool: + return self._when <= other._when + + def __gt__(self, other: "_CspTimerHandle") -> bool: + return self._when > other._when + + def __ge__(self, other: "_CspTimerHandle") -> bool: + return self._when >= other._when + + def __eq__(self, other: object) -> bool: + if isinstance(other, _CspTimerHandle): + return self._when == other._when and self._callback == other._callback + return NotImplemented + + def __hash__(self) -> int: + return hash((self._when, id(self._callback))) + + +class CspEventLoop(asyncio.AbstractEventLoop): + """ + An asyncio-compatible event loop backed by CSP's scheduler. + + This event loop integrates with CSP's realtime event processing capabilities, + allowing asyncio coroutines to be scheduled alongside CSP graph computations. 
+ """ + + def __init__(self, realtime: bool = True): + """ + Initialize the CSP event loop. + + Args: + realtime: If True, run in realtime mode (wall clock time). + If False, run in simulation mode. + """ + self._realtime = realtime + self._closed = False + self._running = False + self._stopping = False + self._thread_id: Optional[int] = None + self._debug = bool(os.environ.get("PYTHONASYNCIODEBUG")) + + # Callback queues + self._ready: deque = deque() + self._scheduled: List[_CspTimerHandle] = [] + + # Selector for I/O + self._selector = selectors.DefaultSelector() + self._readers: Dict[int, _CspHandle] = {} + self._writers: Dict[int, _CspHandle] = {} + + # Signal handling + self._signal_handlers: Dict[int, _CspHandle] = {} + self._ssock: Optional[socket.socket] = None + self._csock: Optional[socket.socket] = None + + # Task factory and exception handler + self._task_factory: Optional[Callable] = None + self._exception_handler: Optional[Callable] = None + self._default_executor: Optional[concurrent.futures.Executor] = None + + # For asyncgens + self._asyncgens: set = set() + self._asyncgens_shutdown_called = False + + # Time tracking + self._clock_resolution = time.get_clock_info("monotonic").resolution + self._start_time = time.monotonic() + + # CSP engine - now actively used for scheduling + self._csp_engine: Optional[_cspimpl.PyEngine] = None + self._csp_active = False + self._starttime: Optional[datetime] = None + self._endtime: Optional[datetime] = None + self._sim_start_time: Optional[datetime] = None # Track sim start for time() + + # Threadsafe callback queue + self._csock_lock = threading.Lock() + self._threadsafe_callbacks: deque = deque() + + # CSP wakeup fd for native event loop integration + self._csp_wakeup_fd: Optional[int] = None + + def _init_csp_engine(self) -> None: + """Initialize the CSP engine for use.""" + if self._csp_engine is None: + self._csp_engine = _cspimpl.PyEngine(realtime=self._realtime) + + def _start_csp_engine(self) -> None: + 
"""Start CSP engine.""" + if not self._csp_active: + self._init_csp_engine() + from datetime import timedelta + + from csp.utils.datetime import utc_now + + if self._realtime: + # Realtime mode: use current wall-clock time + start = self._starttime or utc_now() + end = self._endtime or (start + timedelta(days=365 * 100)) + else: + # Simulation mode: use configured times or defaults + # Default to Unix epoch for simulation if not specified + start = self._starttime or datetime(1970, 1, 1) + end = self._endtime or (start + timedelta(days=365 * 100)) + + self._csp_engine.start(start, end) + self._csp_active = True + self._sim_start_time = start # Track simulation start for time() + + # Register wakeup fd with selector for native event integration + self._csp_wakeup_fd = self._csp_engine.get_wakeup_fd() + if self._csp_wakeup_fd >= 0: + try: + self._selector.register(self._csp_wakeup_fd, selectors.EVENT_READ) + except (ValueError, OSError): + # Fd already registered or invalid, fall back to polling + self._csp_wakeup_fd = None + + def _stop_csp_engine(self) -> None: + """Stop CSP engine.""" + if self._csp_active and self._csp_engine is not None: + # Unregister wakeup fd from selector + if self._csp_wakeup_fd is not None: + try: + self._selector.unregister(self._csp_wakeup_fd) + except (ValueError, OSError, KeyError): + pass # Already unregistered or invalid + self._csp_wakeup_fd = None + try: + self._csp_engine.finish() + except Exception: + pass # Ignore errors during cleanup + self._csp_active = False + + def set_simulation_time_range(self, start: Optional[datetime] = None, end: Optional[datetime] = None) -> None: + """Configure the time range for simulation mode. + + This must be called before run_forever() or run_until_complete() + to take effect. + + Args: + start: Start time for simulation. If None, defaults to Unix epoch. + end: End time for simulation. If None, defaults to 100 years from start. + + Raises: + RuntimeError: If called on a realtime event loop. 
+ RuntimeError: If called while the loop is running. + """ + if self._realtime: + raise RuntimeError("Cannot set simulation time range on a realtime event loop") + if self._running: + raise RuntimeError("Cannot set simulation time range while the loop is running") + self._starttime = start + self._endtime = end + + def _check_closed(self) -> None: + """Raise RuntimeError if the loop is closed.""" + if self._closed: + raise RuntimeError("Event loop is closed") + + def _check_running(self) -> None: + """Raise RuntimeError if the loop is running.""" + if self._running: + raise RuntimeError("This event loop is already running") + if asyncio._get_running_loop() is not None: + raise RuntimeError("Cannot run the event loop while another loop is running") + + def _check_thread(self) -> None: + """Raise RuntimeError if called from wrong thread.""" + if self._thread_id is not None and self._thread_id != threading.get_ident(): + raise RuntimeError("Non-thread-safe operation invoked on an event loop other than the current one") + + def run_forever(self) -> None: + """Run the event loop until stop() is called.""" + self._check_closed() + self._check_running() + + self._running = True + self._thread_id = threading.get_ident() + + old_loop = asyncio._get_running_loop() + try: + asyncio._set_running_loop(self) + self._start_csp_engine() + self._run_until_stopped() + finally: + self._stop_csp_engine() + asyncio._set_running_loop(old_loop) + self._running = False + self._thread_id = None + + def _run_until_stopped(self) -> None: + """Internal implementation of run_forever.""" + while not self._stopping: + self._run_once() + self._stopping = False + + def _run_once(self, timeout: Optional[float] = None) -> None: + """Run one iteration of the event loop. + + This integrates CSP's scheduler with asyncio's I/O handling. + CSP handles timing/scheduling while selectors handle I/O. 
+ + In realtime mode: + - Uses wall-clock time + - Waits on selectors for I/O events + - CSP processes push events from external sources + + In simulation mode: + - CSP's scheduler drives time progression + - No waiting - events are processed as fast as possible + - Time jumps instantly to next scheduled event + """ + # Process threadsafe callbacks first + self._process_threadsafe_callbacks() + + if self._realtime: + # Realtime mode: use wall-clock time and wait on I/O + self._run_once_realtime(timeout) + else: + # Simulation mode: CSP drives time, no waiting + self._run_once_simulation() + + def _run_once_realtime(self, timeout: Optional[float] = None) -> None: + """Run one iteration in realtime mode.""" + + # Calculate timeout for selector + if self._ready: + # Have ready callbacks, don't wait + timeout = 0 + elif self._scheduled: + # Calculate based on next scheduled Python callback + when = self._scheduled[0]._when + timeout = max(0, when - self.time()) + else: + timeout = timeout if timeout is not None else 1.0 + + # Also check CSP's next scheduled time if active + if self._csp_active and self._csp_engine is not None: + csp_next = self._csp_engine.next_scheduled_time() + if csp_next is not None: + csp_now = self._csp_engine.now() + if csp_now is not None: + csp_wait = (csp_next - csp_now).total_seconds() + if csp_wait >= 0: + timeout = min(timeout, csp_wait) + + # Poll for I/O events + try: + events = self._selector.select(timeout) + except (OSError, ValueError): + events = [] + + # Track if CSP wakeup fd was signaled + csp_wakeup_signaled = False + + # Process I/O events + for key, mask in events: + # Check if this is the CSP wakeup fd + if self._csp_wakeup_fd is not None and key.fd == self._csp_wakeup_fd: + csp_wakeup_signaled = True + continue # Don't add to readers dict + if mask & selectors.EVENT_READ and key.fd in self._readers: + self._ready.append(self._readers[key.fd]) + if mask & selectors.EVENT_WRITE and key.fd in self._writers: + 
self._ready.append(self._writers[key.fd]) + + # Step CSP engine if active and (wakeup signaled OR scheduled events due) + if self._csp_active and self._csp_engine is not None: + if csp_wakeup_signaled: + # Clear the wakeup fd before processing + self._csp_engine.clear_wakeup_fd() + try: + # Step with 0 wait - just process what's ready + self._csp_engine.process_one_cycle(0.0) + except Exception: + pass # CSP cycle errors handled elsewhere + + # Process Python scheduled callbacks that are due + now = self.time() + while self._scheduled: + handle = self._scheduled[0] + if handle._when > now: + break + handle = self._scheduled.pop(0) + if not handle.cancelled(): + self._ready.append(handle) + + # Run ready callbacks + ntodo = len(self._ready) + for _ in range(ntodo): + handle = self._ready.popleft() + if not handle.cancelled(): + handle._run() + + def _run_once_simulation(self) -> None: + """Run one iteration in simulation mode. + + In simulation mode: + - Time jumps instantly to the next scheduled event + - No waiting on selectors (just poll for ready I/O) + - CSP's scheduler drives the time progression + """ + + # Poll for I/O events without waiting (timeout=0) + try: + events = self._selector.select(0) + except (OSError, ValueError): + events = [] + + # Process I/O events + for key, mask in events: + if mask & selectors.EVENT_READ and key.fd in self._readers: + self._ready.append(self._readers[key.fd]) + if mask & selectors.EVENT_WRITE and key.fd in self._writers: + self._ready.append(self._writers[key.fd]) + + # Step CSP engine - this advances simulated time to next event + if self._csp_active and self._csp_engine is not None: + try: + # Step with 0 wait - in sim mode this jumps to next event + self._csp_engine.process_one_cycle(0.0) + except Exception: + pass # CSP cycle errors handled elsewhere + + # Process Python scheduled callbacks that are due + # In sim mode, time() returns CSP's simulated time + now = self.time() + while self._scheduled: + handle = 
self._scheduled[0] + if handle._when > now: + break + handle = self._scheduled.pop(0) + if not handle.cancelled(): + self._ready.append(handle) + + # Run ready callbacks + ntodo = len(self._ready) + for _ in range(ntodo): + handle = self._ready.popleft() + if not handle.cancelled(): + handle._run() + + def _process_threadsafe_callbacks(self) -> None: + """Process callbacks added via call_soon_threadsafe.""" + while True: + try: + handle = self._threadsafe_callbacks.popleft() + self._ready.append(handle) + except IndexError: + break + + def run_until_complete(self, future: Union[asyncio.Future, Coroutine]) -> Any: + """Run until the future is complete.""" + self._check_closed() + self._check_running() + + new_task = not asyncio.isfuture(future) + future = asyncio.ensure_future(future, loop=self) + if new_task: + future._log_destroy_pending = False + + def done_callback(fut: asyncio.Future) -> None: + if not fut.cancelled(): + exc = fut.exception() + if isinstance(exc, (SystemExit, KeyboardInterrupt)): + return + self.stop() + + future.add_done_callback(done_callback) + + try: + self.run_forever() + except BaseException: + if new_task and future.done() and not future.cancelled(): + future.exception() + raise + finally: + future.remove_done_callback(done_callback) + + if not future.done(): + raise RuntimeError("Event loop stopped before Future completed.") + + return future.result() + + def stop(self) -> None: + """Stop the event loop.""" + self._stopping = True + + def is_running(self) -> bool: + """Return True if the loop is running.""" + return self._running + + def is_closed(self) -> bool: + """Return True if the loop is closed.""" + return self._closed + + def close(self) -> None: + """Close the event loop.""" + if self._running: + raise RuntimeError("Cannot close a running event loop") + if self._closed: + return + + self._closed = True + + # Stop CSP engine if active + self._stop_csp_engine() + self._csp_engine = None + + # Clear callbacks + self._ready.clear() 
+ self._scheduled.clear() + + # Close selector + self._selector.close() + + # Shutdown default executor + if self._default_executor is not None: + self._default_executor.shutdown(wait=False) + self._default_executor = None + + async def shutdown_asyncgens(self) -> None: + """Shutdown all active asynchronous generators.""" + self._asyncgens_shutdown_called = True + + if not self._asyncgens: + return + + closing_agens = list(self._asyncgens) + self._asyncgens.clear() + + results = await asyncio.gather(*[ag.aclose() for ag in closing_agens], return_exceptions=True) + + for result, agen in zip(results, closing_agens): + if isinstance(result, Exception): + self.call_exception_handler( + { + "message": f"an error occurred during closing of asynchronous generator {agen!r}", + "exception": result, + "asyncgen": agen, + } + ) + + async def shutdown_default_executor(self, timeout: Optional[float] = None) -> None: + """Schedule the shutdown of the default executor.""" + if self._default_executor is None: + return + + executor = self._default_executor + self._default_executor = None + + def shutdown_executor(): + executor.shutdown(wait=True) + + # Create a new single-use executor to run the shutdown + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as temp_executor: + future = self.create_future() + + def callback(result: concurrent.futures.Future) -> None: + if future.cancelled(): + return + try: + result.result() + except Exception as exc: + self.call_soon_threadsafe(future.set_exception, exc) + else: + self.call_soon_threadsafe(future.set_result, None) + + temp_executor.submit(shutdown_executor).add_done_callback(callback) + + if timeout is not None: + try: + await asyncio.wait_for(future, timeout) + except asyncio.TimeoutError: + warnings.warn( + f"The default executor did not finish shutting down in {timeout} seconds", + RuntimeWarning, + stacklevel=2, + ) + else: + await future + + def call_soon( + self, + callback: Callable[..., Any], + *args: Any, + context: 
Optional[contextvars.Context] = None,
+    ) -> _CspHandle:
+        """Schedule a callback to be called as soon as possible."""
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+
+        handle = _CspHandle(callback, args if args else None, self, context)
+        self._ready.append(handle)
+        return handle
+
+    def call_soon_threadsafe(
+        self,
+        callback: Callable[..., Any],
+        *args: Any,
+        context: Optional[contextvars.Context] = None,
+    ) -> _CspHandle:
+        """Thread-safe version of call_soon."""
+        self._check_closed()
+
+        handle = _CspHandle(callback, args if args else None, self, context)
+        self._threadsafe_callbacks.append(handle)
+
+        # Wake up the event loop if needed
+        with self._csock_lock:
+            if self._csock is not None:
+                try:
+                    self._csock.send(b"\x00")
+                except OSError:
+                    pass
+
+        return handle
+
+    def call_later(
+        self,
+        delay: float,
+        callback: Callable[..., Any],
+        *args: Any,
+        context: Optional[contextvars.Context] = None,
+    ) -> _CspTimerHandle:
+        """Schedule a callback to be called after delay seconds."""
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+
+        if delay < 0:
+            delay = 0
+
+        when = self.time() + delay
+        return self.call_at(when, callback, *args, context=context)
+
+    def call_at(
+        self,
+        when: float,
+        callback: Callable[..., Any],
+        *args: Any,
+        context: Optional[contextvars.Context] = None,
+    ) -> _CspTimerHandle:
+        """Schedule a callback to be called at absolute time when."""
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+
+        handle = _CspTimerHandle(when, callback, args if args else None, self, context)
+
+        # Insert in sorted order; bisect.insort finds the insertion point by
+        # binary search (the underlying list shift is still O(n))
+        import bisect
+
+        bisect.insort(self._scheduled, handle)
+
+        return handle
+
+    def time(self) -> float:
+        """Return the current time according to the event loop's clock.
+
+        In realtime mode, returns the monotonic clock time.
+        In simulation mode, returns CSP's simulated time as seconds since epoch.
+ """ + if not self._realtime and self._csp_active and self._csp_engine is not None: + # Simulation mode: use CSP's simulated time + csp_now = self._csp_engine.now() + if csp_now is not None: + # Convert datetime to float seconds since start + return csp_now.timestamp() + # Realtime mode or no CSP engine: use monotonic clock + return time.monotonic() + + def create_future(self) -> asyncio.Future: + """Create and return a new Future.""" + return asyncio.Future(loop=self) + + def create_task( + self, + coro: Coroutine[Any, Any, _T], + *, + name: Optional[str] = None, + context: Optional[contextvars.Context] = None, + ) -> asyncio.Task[_T]: + """Schedule a coroutine to run as a Task.""" + self._check_closed() + + if self._task_factory is None: + # context parameter was added in Python 3.11 + if sys.version_info >= (3, 11): + task = asyncio.Task(coro, loop=self, name=name, context=context) + else: + task = asyncio.Task(coro, loop=self, name=name) + else: + if context is None: + task = self._task_factory(self, coro) + else: + task = context.run(self._task_factory, self, coro) + if name is not None and hasattr(task, "set_name"): + task.set_name(name) + + return task + + def set_task_factory(self, factory: Optional[Callable]) -> None: + """Set a task factory.""" + if factory is not None and not callable(factory): + raise TypeError("task factory must be a callable or None") + self._task_factory = factory + + def get_task_factory(self) -> Optional[Callable]: + """Get the current task factory.""" + return self._task_factory + + def add_reader(self, fd: int, callback: Callable[..., Any], *args: Any) -> None: + """Add a reader callback for a file descriptor.""" + self._check_closed() + handle = _CspHandle(callback, args if args else None, self) + + if fd in self._readers: + self.remove_reader(fd) + + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_READ, None) + else: + mask = key.events | selectors.EVENT_READ + 
self._selector.modify(fd, mask, None) + + self._readers[fd] = handle + + def remove_reader(self, fd: int) -> bool: + """Remove a reader callback for a file descriptor.""" + if fd not in self._readers: + return False + + del self._readers[fd] + + try: + key = self._selector.get_key(fd) + except KeyError: + return True + + if key.events & selectors.EVENT_WRITE: + self._selector.modify(fd, selectors.EVENT_WRITE, None) + else: + self._selector.unregister(fd) + + return True + + def add_writer(self, fd: int, callback: Callable[..., Any], *args: Any) -> None: + """Add a writer callback for a file descriptor.""" + self._check_closed() + handle = _CspHandle(callback, args if args else None, self) + + if fd in self._writers: + self.remove_writer(fd) + + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_WRITE, None) + else: + mask = key.events | selectors.EVENT_WRITE + self._selector.modify(fd, mask, None) + + self._writers[fd] = handle + + def remove_writer(self, fd: int) -> bool: + """Remove a writer callback for a file descriptor.""" + if fd not in self._writers: + return False + + del self._writers[fd] + + try: + key = self._selector.get_key(fd) + except KeyError: + return True + + if key.events & selectors.EVENT_READ: + self._selector.modify(fd, selectors.EVENT_READ, None) + else: + self._selector.unregister(fd) + + return True + + async def sock_recv(self, sock: socket.socket, nbytes: int) -> bytes: + """Receive data from a socket.""" + fut = self.create_future() + fd = sock.fileno() + + def callback() -> None: + try: + data = sock.recv(nbytes) + except (BlockingIOError, InterruptedError): + return # Try again + except Exception as exc: + self.remove_reader(fd) + fut.set_exception(exc) + else: + self.remove_reader(fd) + fut.set_result(data) + + self.add_reader(fd, callback) + return await fut + + async def sock_recv_into(self, sock: socket.socket, buf: bytearray) -> int: + """Receive data from a socket into a 
buffer.""" + fut = self.create_future() + fd = sock.fileno() + + def callback() -> None: + try: + nbytes = sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return # Try again + except Exception as exc: + self.remove_reader(fd) + fut.set_exception(exc) + else: + self.remove_reader(fd) + fut.set_result(nbytes) + + self.add_reader(fd, callback) + return await fut + + async def sock_sendall(self, sock: socket.socket, data: bytes) -> None: + """Send data to a socket.""" + fut = self.create_future() + fd = sock.fileno() + view = memoryview(data) + + def callback() -> None: + nonlocal view + try: + n = sock.send(view) + except (BlockingIOError, InterruptedError): + return # Try again + except Exception as exc: + self.remove_writer(fd) + fut.set_exception(exc) + return + + if n == len(view): + self.remove_writer(fd) + fut.set_result(None) + else: + view = view[n:] + + self.add_writer(fd, callback) + await fut + + async def sock_connect(self, sock: socket.socket, address: Tuple[str, int]) -> None: + """Connect a socket to a remote address.""" + try: + sock.connect(address) + return + except (BlockingIOError, InterruptedError): + pass + + fut = self.create_future() + fd = sock.fileno() + + def callback() -> None: + try: + err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + raise OSError(err, f"Connect call failed {address}") + except Exception as exc: + self.remove_writer(fd) + fut.set_exception(exc) + else: + self.remove_writer(fd) + fut.set_result(None) + + self.add_writer(fd, callback) + await fut + + async def sock_accept(self, sock: socket.socket) -> Tuple[socket.socket, Tuple[str, int]]: + """Accept a connection on a socket.""" + fut = self.create_future() + fd = sock.fileno() + + def callback() -> None: + try: + conn, addr = sock.accept() + conn.setblocking(False) + except (BlockingIOError, InterruptedError): + return # Try again + except Exception as exc: + self.remove_reader(fd) + fut.set_exception(exc) + else: + 
self.remove_reader(fd) + fut.set_result((conn, addr)) + + self.add_reader(fd, callback) + return await fut + + async def getaddrinfo( + self, + host: Optional[str], + port: Optional[Union[str, int]], + *, + family: int = 0, + type: int = 0, + proto: int = 0, + flags: int = 0, + ) -> List[Tuple[int, int, int, str, Tuple[str, int]]]: + """Look up address info for a host.""" + return await self.run_in_executor( + None, + socket.getaddrinfo, + host, + port, + family, + type, + proto, + flags, + ) + + async def getnameinfo(self, sockaddr: Tuple[str, int], flags: int = 0) -> Tuple[str, str]: + """Look up name info for an address.""" + return await self.run_in_executor(None, socket.getnameinfo, sockaddr, flags) + + def run_in_executor( + self, + executor: Optional[concurrent.futures.Executor], + func: Callable[..., _T], + *args: Any, + ) -> asyncio.Future[_T]: + """Run a function in an executor.""" + self._check_closed() + + if executor is None: + if self._default_executor is None: + self._default_executor = concurrent.futures.ThreadPoolExecutor() + executor = self._default_executor + + fut = asyncio.Future(loop=self) + + def callback(result: concurrent.futures.Future) -> None: + if fut.cancelled(): + return + try: + res = result.result() + except Exception as exc: + self.call_soon_threadsafe(fut.set_exception, exc) + else: + self.call_soon_threadsafe(fut.set_result, res) + + executor.submit(func, *args).add_done_callback(callback) + return fut + + def set_default_executor(self, executor: Optional[concurrent.futures.Executor]) -> None: + """Set the default executor.""" + if not isinstance(executor, (type(None), concurrent.futures.Executor)): + raise TypeError("executor must be Executor instance or None") + self._default_executor = executor + + def set_exception_handler(self, handler: Optional[Callable]) -> None: + """Set an exception handler.""" + if handler is not None and not callable(handler): + raise TypeError("handler must be callable or None") + 
self._exception_handler = handler + + def get_exception_handler(self) -> Optional[Callable]: + """Get the current exception handler.""" + return self._exception_handler + + def default_exception_handler(self, context: Dict[str, Any]) -> None: + """Default exception handler.""" + message = context.get("message") + if not message: + message = "Unhandled exception in event loop" + + exception = context.get("exception") + + log_lines = [message] + for key, value in sorted(context.items()): + if key in {"message", "exception"}: + continue + log_lines.append(f"{key}: {value!r}") + + if exception is not None: + import traceback + + exc_info = (type(exception), exception, exception.__traceback__) + log_lines.append("Exception:") + log_lines.extend(traceback.format_exception(*exc_info)) + + print("\n".join(log_lines), file=sys.stderr) + + def call_exception_handler(self, context: Dict[str, Any]) -> None: + """Call the exception handler.""" + if self._exception_handler is None: + try: + self.default_exception_handler(context) + except Exception: + print("Exception in default exception handler:", file=sys.stderr) + import traceback + + traceback.print_exc() + else: + try: + self._exception_handler(self, context) + except Exception as exc: + try: + self.default_exception_handler( + { + "message": "Exception in exception handler", + "exception": exc, + "context": context, + } + ) + except Exception: + print("Exception in exception handler:", file=sys.stderr) + import traceback + + traceback.print_exc() + + def get_debug(self) -> bool: + """Return True if debug mode is enabled.""" + return self._debug + + def set_debug(self, enabled: bool) -> None: + """Set debug mode.""" + self._debug = enabled + + async def subprocess_exec( + self, + protocol_factory: Callable[[], asyncio.SubprocessProtocol], + *args: Any, + stdin: Any = subprocess.PIPE, + stdout: Any = subprocess.PIPE, + stderr: Any = subprocess.PIPE, + **kwargs: Any, + ) -> Tuple[asyncio.SubprocessTransport, 
asyncio.SubprocessProtocol]: + """Execute a subprocess.""" + raise NotImplementedError("subprocess_exec is not yet implemented for CSP event loop") + + async def subprocess_shell( + self, + protocol_factory: Callable[[], asyncio.SubprocessProtocol], + cmd: str, + *, + stdin: Any = subprocess.PIPE, + stdout: Any = subprocess.PIPE, + stderr: Any = subprocess.PIPE, + **kwargs: Any, + ) -> Tuple[asyncio.SubprocessTransport, asyncio.SubprocessProtocol]: + """Execute a shell command as a subprocess.""" + raise NotImplementedError("subprocess_shell is not yet implemented for CSP event loop") + + def add_signal_handler(self, sig: int, callback: Callable[..., Any], *args: Any) -> None: + """Add a handler for a signal.""" + if threading.current_thread() is not threading.main_thread(): + raise ValueError("Signal handlers can only be set in the main thread") + + self._check_closed() + handle = _CspHandle(callback, args if args else None, self) + self._signal_handlers[sig] = handle + + try: + signal.signal(sig, lambda s, f: self.call_soon_threadsafe(callback, *args)) + except OSError: + del self._signal_handlers[sig] + raise + + def remove_signal_handler(self, sig: int) -> bool: + """Remove a handler for a signal.""" + if sig not in self._signal_handlers: + return False + + del self._signal_handlers[sig] + + try: + signal.signal(sig, signal.SIG_DFL) + except OSError: + pass + + return True + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} running={self._running} closed={self._closed} debug={self._debug}>" + + +class CspEventLoopPolicy(asyncio.AbstractEventLoopPolicy): + """ + Event loop policy for CSP-backed asyncio. + + This policy creates CspEventLoop instances for asyncio operations. 
+ """ + + class _Local(threading.local): + _loop: Optional[CspEventLoop] = None + + def __init__(self) -> None: + self._local = self._Local() + + def get_event_loop(self) -> CspEventLoop: + """Get the event loop for the current context.""" + if self._local._loop is None: + raise RuntimeError(f"There is no current event loop in thread {threading.current_thread().name!r}.") + return self._local._loop + + def set_event_loop(self, loop: Optional[asyncio.AbstractEventLoop]) -> None: + """Set the event loop for the current context.""" + if loop is not None and not isinstance(loop, asyncio.AbstractEventLoop): + raise TypeError(f"loop must be an instance of AbstractEventLoop or None, not '{type(loop).__name__}'") + self._local._loop = loop + + def new_event_loop(self) -> CspEventLoop: + """Create and return a new event loop.""" + return CspEventLoop() + + +def new_event_loop() -> CspEventLoop: + """Create and return a new CSP event loop.""" + return CspEventLoop() + + +def run( + main: Coroutine[Any, Any, _T], + *, + loop_factory: Optional[Callable[[], CspEventLoop]] = None, + debug: Optional[bool] = None, +) -> _T: + """ + Run a coroutine using the CSP event loop. + + This is the preferred way to run asyncio code with the CSP event loop. + + Args: + main: The coroutine to run + loop_factory: Optional factory function to create the event loop. + Defaults to new_event_loop. + debug: If True, run in debug mode. + + Returns: + The result of the coroutine. 
+ + Example: + async def main(): + await asyncio.sleep(1) + return "done" + + result = csp.event_loop.run(main()) + """ + if loop_factory is None: + loop_factory = new_event_loop + + if asyncio._get_running_loop() is not None: + raise RuntimeError("asyncio.run() cannot be called from a running event loop") + + if not asyncio.iscoroutine(main): + raise ValueError(f"a coroutine was expected, got {main!r}") + + loop = loop_factory() + try: + asyncio.set_event_loop(loop) + if debug is not None: + loop.set_debug(debug) + return loop.run_until_complete(main) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + if hasattr(loop, "shutdown_default_executor"): + loop.run_until_complete(loop.shutdown_default_executor()) + finally: + asyncio.set_event_loop(None) + loop.close() + + +def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None: + """Cancel all pending tasks.""" + to_cancel = asyncio.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler( + { + "message": "unhandled exception during asyncio.run() shutdown", + "exception": task.exception(), + "task": task, + } + ) diff --git a/csp/impl/async_adapter.py b/csp/impl/async_adapter.py new file mode 100644 index 000000000..36db2fe9f --- /dev/null +++ b/csp/impl/async_adapter.py @@ -0,0 +1,1077 @@ +import asyncio +import atexit +import concurrent.futures +import queue +import threading +from typing import ( + AsyncIterator, + Awaitable, + Callable, + Coroutine, + Optional, + TypeVar, + get_args, + get_origin, + get_type_hints, +) + +import csp +from csp.impl.pushadapter import PushInputAdapter +from csp.impl.types.tstype import ts +from csp.impl.wiring import py_push_adapter_def + +__all__ = [ + "async_for", + "async_in", + "async_out", + 
"async_node", + "await_", + "async_alarm", + "schedule_async_alarm", + "get_async_loop", + "get_shared_loop", + "get_csp_asyncio_loop", + "is_csp_asyncio_mode", + "shutdown_shared_loop", +] + +T = TypeVar("T") +U = TypeVar("U") + + +_shared_loop: Optional[asyncio.AbstractEventLoop] = None +_shared_thread: Optional[threading.Thread] = None +_shared_lock = threading.Lock() +_shared_ready = threading.Event() + + +def get_running_loop_or_none() -> Optional[asyncio.AbstractEventLoop]: + """ + Get the currently running asyncio event loop, or None if not in an async context. + + This is used to detect if we're inside a CspEventLoop or other asyncio loop, + in which case we can schedule async operations directly without a background thread. + """ + try: + return asyncio.get_running_loop() + except RuntimeError: + return None + + +def get_csp_asyncio_loop() -> Optional[asyncio.AbstractEventLoop]: + """ + Get the asyncio event loop from CSP's asyncio mode, if enabled. + + Returns: + The asyncio loop if CSP is running in asyncio mode (realtime=True with + asyncio_on_thread=False, the default), else None. + """ + from csp.impl.wiring.runtime import GraphRunInfo + + try: + info = GraphRunInfo.get_cur_run_times_info(raise_if_missing=False) + if info is not None and info.is_asyncio: + return info.asyncio_loop + except Exception: + pass + return None + + +def is_csp_asyncio_mode() -> bool: + """ + Check if CSP is currently running in asyncio mode. + + Returns: + True if CSP is running in asyncio mode (realtime=True with + asyncio_on_thread=False, the default), else False. + """ + return get_csp_asyncio_loop() is not None + + +def get_async_loop() -> asyncio.AbstractEventLoop: + """ + Get the appropriate asyncio event loop for running async operations. + + Priority: + 1. If CSP is running in asyncio mode (realtime with asyncio_on_thread=False), use that loop + 2. If there's a running asyncio loop (e.g., CspEventLoop), use it directly + 3. 
Otherwise, use/create the shared background loop + + This allows async adapters to integrate directly with CSP's asyncio mode + or CspEventLoop when available, avoiding the overhead of a separate background thread. + + Returns: + An asyncio event loop suitable for scheduling coroutines. + """ + # Check if CSP is running in asyncio mode + csp_loop = get_csp_asyncio_loop() + if csp_loop is not None: + return csp_loop + + # Check if we're already inside an asyncio context (e.g., CspEventLoop) + running_loop = get_running_loop_or_none() + if running_loop is not None: + return running_loop + + # Fall back to shared background loop + return get_shared_loop() + + +def get_shared_loop() -> asyncio.AbstractEventLoop: + """ + Get the shared asyncio event loop for running async operations. + + This returns a lazily-initialized event loop running in a background thread. + All async adapters can reuse this loop instead of creating their own threads. + + The loop is automatically shut down when the process exits. + + Returns: + The shared asyncio event loop. + """ + global _shared_loop, _shared_thread + + if _shared_loop is not None and _shared_loop.is_running(): + return _shared_loop + + with _shared_lock: + # Double-check after acquiring lock + if _shared_loop is not None and _shared_loop.is_running(): + return _shared_loop + + _shared_ready.clear() + + def run_loop(): + global _shared_loop + _shared_loop = asyncio.new_event_loop() + asyncio.set_event_loop(_shared_loop) + _shared_ready.set() + try: + _shared_loop.run_forever() + finally: + try: + _shared_loop.close() + except Exception: + pass + + _shared_thread = threading.Thread(target=run_loop, daemon=True, name="csp-async-loop") + _shared_thread.start() + _shared_ready.wait(timeout=5.0) + + if _shared_loop is None: + raise RuntimeError("Failed to start shared async loop") + + return _shared_loop + + +def shutdown_shared_loop() -> None: + """ + Shut down the shared async loop. 
+
+    This is called automatically at process exit, but can be called manually
+    if you need to cleanly shut down before exit.
+    """
+    global _shared_loop, _shared_thread
+
+    with _shared_lock:
+        if _shared_loop is not None and _shared_loop.is_running():
+            _shared_loop.call_soon_threadsafe(_shared_loop.stop)
+
+        if _shared_thread is not None:
+            _shared_thread.join(timeout=2.0)
+            _shared_thread = None
+
+        _shared_loop = None
+
+
+# Register cleanup at exit
+atexit.register(shutdown_shared_loop)
+
+
+def _run_on_async_loop(coro: Awaitable[T], timeout: Optional[float] = None) -> T:
+    """
+    Run a coroutine on the shared background loop and block until the result is ready.
+
+    Raises RuntimeError if an asyncio loop is already running on this thread
+    (e.g., CspEventLoop), since blocking here would deadlock -- use 'await' directly.
+
+    Args:
+        coro: The coroutine to run.
+        timeout: Optional timeout in seconds.
+
+    Returns:
+        The result of the coroutine.
+    """
+    # Check if we're already inside an asyncio context
+    running_loop = get_running_loop_or_none()
+    if running_loop is not None:
+        # We're inside an async context - can't block, but can schedule
+        # This case shouldn't happen for blocking await_(), caller should use await directly
+        raise RuntimeError(
+            "Cannot use blocking await_() inside an async context. Use 'await' directly instead, or use block=False."
+        )
+
+    # We're in sync context - use the shared loop
+    loop = get_shared_loop()
+    future = concurrent.futures.Future()
+
+    async def wrapper():
+        try:
+            if timeout is not None:
+                result = await asyncio.wait_for(coro, timeout)
+            else:
+                result = await coro
+            future.set_result(result)
+        except Exception as e:
+            future.set_exception(e)
+
+    loop.call_soon_threadsafe(lambda: asyncio.ensure_future(wrapper(), loop=loop))
+    return future.result(timeout=timeout)
+
+
+def _schedule_on_loop(loop: asyncio.AbstractEventLoop, callback, *args):
+    """
+    Schedule a callback on the event loop, handling both same-thread and cross-thread cases.
+ + If called from within the loop's thread and the loop is running, uses call_soon. + Otherwise uses call_soon_threadsafe. + """ + try: + running_loop = asyncio.get_running_loop() + if running_loop is loop: + # We're on the same thread as the loop, use call_soon + loop.call_soon(callback, *args) + return + except RuntimeError: + pass + + # Cross-thread or no running loop, use threadsafe version + loop.call_soon_threadsafe(callback, *args) + + +def _schedule_coro_on_loop(loop: asyncio.AbstractEventLoop, coro) -> None: + """ + Schedule a coroutine on the event loop, handling both same-thread and cross-thread cases. + + Args: + loop: The event loop to schedule on. + coro: The coroutine to schedule. + """ + + def schedule(): + asyncio.ensure_future(coro, loop=loop) + + _schedule_on_loop(loop, schedule) + + +def _schedule_on_async_loop(coro: Awaitable[T]) -> concurrent.futures.Future: + """ + Schedule a coroutine on the best available loop without waiting. + + If we're inside a running asyncio loop (e.g., CspEventLoop), schedules there. + Otherwise, uses the shared background loop. + + Args: + coro: The coroutine to run. + + Returns: + A Future that will contain the result. + """ + loop = get_async_loop() + future = concurrent.futures.Future() + + async def wrapper(): + try: + result = await coro + future.set_result(result) + except Exception as e: + future.set_exception(e) + + _schedule_coro_on_loop(loop, wrapper()) + return future + + +def _extract_async_iterator_type(type_hint) -> type: + """ + Extract the element type from an AsyncIterator[T] or AsyncGenerator[T, ...] type hint. 
+ + For example: + AsyncIterator[int] -> int + AsyncGenerator[str, None] -> str + """ + origin = get_origin(type_hint) + if origin is not None: + # Check if it's AsyncIterator, AsyncGenerator, or similar + args = get_args(type_hint) + if args: + return args[0] # First type argument is the yield type + return type_hint + + +class _AsyncForAdapterImpl(PushInputAdapter): + """Push adapter implementation that consumes an async generator and pushes values to CSP.""" + + def __init__( + self, + async_gen: AsyncIterator, + output_type: type, + loop: Optional[asyncio.AbstractEventLoop] = None, + ): + self._async_gen = async_gen + self._output_type = output_type + self._provided_loop = loop + self._thread: threading.Thread = None + self._loop: asyncio.AbstractEventLoop = None + self._active = False + self._task: asyncio.Task = None + + def start(self, starttime, endtime): + self._active = True + if self._provided_loop is not None: + # Use the provided loop + self._loop = self._provided_loop + else: + # Use the best available loop (running loop or shared loop) + self._loop = get_async_loop() + _schedule_on_loop(self._loop, self._schedule_consumer) + + def _schedule_consumer(self): + """Schedule the consumer coroutine on the shared loop.""" + self._task = asyncio.ensure_future(self._consume_generator(), loop=self._loop) + + def stop(self): + self._active = False + # Cancel the task on the loop + if self._task is not None and not self._task.done(): + _schedule_on_loop(self._loop, self._task.cancel) + + async def _consume_generator(self): + """Consume the async generator and push each value to CSP.""" + try: + async for value in self._async_gen: + if not self._active: + break + self.push_tick(value) + except asyncio.CancelledError: + pass + + +_AsyncForAdapter = py_push_adapter_def( + "AsyncForAdapter", + _AsyncForAdapterImpl, + ts["T"], + async_gen=object, + output_type="T", + loop=object, +) + + +def async_for( + async_gen_or_func: AsyncIterator[T], + *, + loop: 
Optional[asyncio.AbstractEventLoop] = None, +) -> ts[T]: + """ + Bridge an async generator to CSP, creating a time series that ticks on each yielded value. + + The async generator function must have a return type annotation specifying the output type. + + Args: + async_gen_or_func: An async generator instance (result of calling an async generator function). + loop: Event loop to use for running async operations. If None (default), uses CSP's + shared async loop which is efficient as all adapters share one background thread. + + Returns: + A CSP time series (ts[T]) that ticks whenever the async generator yields a value. + + Example: + async def my_async_gen(n: int) -> AsyncIterator[int]: + for i in range(n): + await asyncio.sleep(0.1) + yield i + + @csp.graph + def my_graph(): + values = csp.async_for(my_async_gen(10)) + csp.print("value", values) + """ + # Get the output type from the async generator + if hasattr(async_gen_or_func, "ag_frame"): + # It's an async generator instance - get the function from the code object + ag_code = async_gen_or_func.ag_code + # Try to get type hints from the frame's globals + func_name = ag_code.co_name + func_globals = async_gen_or_func.ag_frame.f_globals + + # Look for the function in globals to get type hints + if func_name in func_globals: + func = func_globals[func_name] + try: + hints = get_type_hints(func) + return_hint = hints.get("return", object) + # Extract the element type from AsyncIterator[T] or similar + output_type = _extract_async_iterator_type(return_hint) + except Exception: + output_type = object + else: + output_type = object + else: + raise TypeError( + "async_for expects an async generator instance. 
" + "Make sure to call the async generator function, e.g., async_for(my_gen(args)) not async_for(my_gen)" + ) + + return _AsyncForAdapter(async_gen_or_func, output_type, loop) + + +class _AsyncInAdapterImpl(PushInputAdapter): + """Push adapter that runs a coroutine and pushes the result when ready.""" + + def __init__( + self, + coro: Coroutine, + output_type: type, + loop: Optional[asyncio.AbstractEventLoop] = None, + ): + self._coro = coro + self._output_type = output_type + self._provided_loop = loop + self._loop: asyncio.AbstractEventLoop = None + + def start(self, starttime, endtime): + # Use provided loop, running loop, or shared loop + self._loop = self._provided_loop if self._provided_loop is not None else get_async_loop() + + async def run_and_push(): + try: + result = await self._coro + self.push_tick(result) + except Exception: + pass + + _schedule_coro_on_loop(self._loop, run_and_push()) + + def stop(self): + pass # Nothing to clean up - loop is managed externally + + +_AsyncInAdapter = py_push_adapter_def( + "AsyncInAdapter", + _AsyncInAdapterImpl, + ts["T"], + coro=object, + output_type="T", + loop=object, +) + + +def async_in( + coro: Awaitable[T], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> ts[T]: + """ + Run an async coroutine and create a time series that ticks once when it completes. + + Args: + coro: A coroutine instance (result of calling an async function). + loop: Event loop to use for running async operations. If None (default), uses CSP's + shared async loop which is efficient as all adapters share one background thread. + + Returns: + A CSP time series (ts[T]) that ticks once with the coroutine's return value. 
+ + Example: + async def fetch_data() -> int: + await asyncio.sleep(0.1) + return 42 + + @csp.graph + def my_graph(): + value = csp.async_in(fetch_data()) + csp.print("value", value) + """ + # Get output type from the coroutine + if hasattr(coro, "cr_code"): + # It's a coroutine - get type hints from the function + func_name = coro.cr_code.co_name + func_globals = coro.cr_frame.f_globals if coro.cr_frame else {} + + if func_name in func_globals: + func = func_globals[func_name] + try: + hints = get_type_hints(func) + output_type = hints.get("return", object) + except Exception: + output_type = object + else: + output_type = object + else: + raise TypeError( + "async_in expects a coroutine instance. " + "Make sure to call the async function, e.g., async_in(my_func()) not async_in(my_func)" + ) + + return _AsyncInAdapter(coro, output_type, loop) + + +@csp.node +def async_out( + x: ts["T"], + async_func: Callable[["T"], Awaitable[None]], + loop: object = None, # Optional[asyncio.AbstractEventLoop], but object for csp.node compatibility +): + """ + Invoke an async function whenever the input time series ticks. + + Args: + x: Input time series that triggers the async function. + async_func: An async function that takes the ticked value. Should return None. + loop: Event loop to use for running async operations. If None (default), uses CSP's + shared async loop which is efficient as all adapters share one background thread. + + Example: + async def send_data(n: int) -> None: + await asyncio.sleep(0.1) + print(f"Sent: {n}") + + @csp.graph + def my_graph(): + values = ... 
# some ts[int] + csp.async_out(values, send_data) + """ + with csp.state(): + s_loop = None + + with csp.start(): + # Use provided loop, running loop, or shared loop + s_loop = loop if loop is not None else get_async_loop() + + if csp.ticked(x): + if s_loop is not None: + # Schedule directly on the loop + _schedule_coro_on_loop(s_loop, async_func(x)) + + +class _AsyncNodeState: + """Shared state for the async node pattern.""" + + def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None): + self.provided_loop = loop + self.loop: asyncio.AbstractEventLoop = None + self.active = True + self.input_queue: asyncio.Queue = None + self.push_adapter = None + + +class _AsyncNodeOutputAdapterImpl(PushInputAdapter): + """Push adapter for async node output.""" + + def __init__(self, state: _AsyncNodeState, async_func: Callable, output_type: type): + self._state = state + self._async_func = async_func + self._output_type = output_type + self._tasks = [] + + def start(self, starttime, endtime): + self._state.push_adapter = self + + # Use provided loop, running loop, or shared loop + self._state.loop = self._state.provided_loop if self._state.provided_loop is not None else get_async_loop() + self._state.input_queue = asyncio.Queue() + # Schedule the processor on the loop + _schedule_on_loop(self._state.loop, self._schedule_processor) + + def _schedule_processor(self): + """Schedule the async processor on the shared loop.""" + + async def process_one(value): + try: + result = await self._async_func(value) + self.push_tick(result) + except Exception: + pass + + async def queue_processor(): + while self._state.active: + try: + value = await asyncio.wait_for(self._state.input_queue.get(), timeout=0.1) + # Process each value as a separate task for concurrency + task = asyncio.ensure_future(process_one(value), loop=self._state.loop) + self._tasks.append(task) + except asyncio.TimeoutError: + continue + except asyncio.CancelledError: + break + except Exception: + pass + + 
self._processor_task = asyncio.ensure_future(queue_processor(), loop=self._state.loop) + + def stop(self): + self._state.active = False + # Cancel the processor task + if hasattr(self, "_processor_task") and not self._processor_task.done(): + _schedule_on_loop(self._state.loop, self._processor_task.cancel) + # Cancel pending tasks + for task in self._tasks: + if not task.done(): + _schedule_on_loop(self._state.loop, task.cancel) + + +_AsyncNodeOutputAdapter = py_push_adapter_def( + "AsyncNodeOutputAdapter", + _AsyncNodeOutputAdapterImpl, + ts["T"], + state=object, + async_func=object, + output_type="T", +) + + +@csp.node +def _async_node_input(x: ts["T"], state: _AsyncNodeState): + """Helper node that feeds input values to the async processing queue.""" + if csp.ticked(x): + if state.loop is not None and state.loop.is_running(): + _schedule_on_loop(state.loop, state.input_queue.put_nowait, x) + + +def async_node( + x: ts["T"], + async_func: Callable[["T"], Awaitable["U"]], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> ts["U"]: + """ + Apply an async function to each tick of the input, outputting the results. + + Takes a CSP input, runs an async function on each value, and outputs + the results as a new time series. + + Args: + x: Input time series. + async_func: An async function that transforms the input value. + loop: Event loop to use for running async operations. If None (default), uses CSP's + shared async loop which is efficient as all adapters share one background thread. + + Returns: + A CSP time series with the async function's results. + + Example: + async def process(n: int) -> int: + await asyncio.sleep(0.1) + return n * 2 + + @csp.graph + def my_graph(): + values = ... 
# some ts[int] + results = csp.async_node(values, process) + csp.print("results", results) + """ + # Get output type from the async function + try: + hints = get_type_hints(async_func) + output_type = hints.get("return", object) + except Exception: + output_type = object + + state = _AsyncNodeState(loop) + + # Wire up the input feeder and output adapter + _async_node_input(x, state) + return _AsyncNodeOutputAdapter(state, async_func, output_type) + + +def await_( + coro: Awaitable[T], + block: bool = True, + timeout: float = None, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> T: + """ + Await an async coroutine from synchronous code. + + This function allows calling async code from within CSP nodes or other + synchronous contexts. + + Args: + coro: A coroutine instance (result of calling an async function). + block: If True (default), blocks until the coroutine completes. + If False, returns a Future that can be checked later. + timeout: Optional timeout in seconds. + loop: Event loop to use. If None (default), uses CSP's shared loop + which is more efficient as it reuses a single background thread. + Pass your own loop for custom behavior. + + Returns: + When block=True: The result of the coroutine. + When block=False: A Future object that will contain the result. 
+ + Example: + @csp.node + def my_node(x: ts[int]) -> ts[int]: + if csp.ticked(x): + # Uses CSP's shared async loop (efficient, default) + result = csp.await_(async_func(x)) + return result + + # Or with a custom loop: + result = csp.await_(async_func(x), loop=my_custom_loop) + """ + if loop is None: + # Use the best available loop + if block: + return _run_on_async_loop(coro, timeout) + else: + return _schedule_on_async_loop(coro) + else: + # Use the provided loop + if block: + future = concurrent.futures.Future() + + async def wrapper(): + try: + if timeout is not None: + result = await asyncio.wait_for(coro, timeout) + else: + result = await coro + future.set_result(result) + except Exception as e: + future.set_exception(e) + + _schedule_coro_on_loop(loop, wrapper()) + return future.result(timeout=timeout) + else: + future = concurrent.futures.Future() + + async def wrapper(): + try: + result = await coro + future.set_result(result) + except Exception as e: + future.set_exception(e) + + _schedule_coro_on_loop(loop, wrapper()) + return future + + +class AsyncContext: + """ + Context manager for managing async operations within CSP nodes. + + Provides a shared event loop and thread for running async operations, + avoiding the overhead of creating new loops for each operation. 
+ + Example: + @csp.node + def my_node(x: ts[int]) -> ts[int]: + with csp.state(): + s_ctx = None + + with csp.start(): + s_ctx = csp.AsyncContext() + s_ctx.start() + + with csp.stop(): + if s_ctx: + s_ctx.stop() + + if csp.ticked(x): + result = s_ctx.run(async_func(x)) + return result + """ + + def __init__(self): + self._loop: asyncio.AbstractEventLoop = None + self._thread: threading.Thread = None + self._active = False + self._ready = threading.Event() + + def start(self): + """Start the async context's event loop in a background thread.""" + if self._active: + return + + self._active = True + + def run_loop(): + self._loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) + self._ready.set() + try: + self._loop.run_forever() + finally: + try: + self._loop.close() + except Exception: + pass + + self._thread = threading.Thread(target=run_loop, daemon=True) + self._thread.start() + self._ready.wait(timeout=5.0) + + def stop(self): + """Stop the async context's event loop.""" + self._active = False + if self._loop is not None and self._loop.is_running(): + self._loop.call_soon_threadsafe(self._loop.stop) + if self._thread is not None: + self._thread.join(timeout=1.0) + + def run(self, coro: Awaitable[T], timeout: float = None) -> T: + """ + Run a coroutine in this context's event loop and wait for result. + + Args: + coro: A coroutine to run. + timeout: Optional timeout in seconds. + + Returns: + The result of the coroutine. + """ + if not self._ready.is_set(): + raise RuntimeError("AsyncContext not started. 
Call start() first.") + + import concurrent.futures + + future = concurrent.futures.Future() + + async def wrapper(): + try: + if timeout is not None: + result = await asyncio.wait_for(coro, timeout) + else: + result = await coro + future.set_result(result) + except Exception as e: + future.set_exception(e) + + _schedule_coro_on_loop(self._loop, wrapper()) + return future.result(timeout=timeout) + + def run_nowait(self, coro: Awaitable[T]) -> "concurrent.futures.Future[T]": + """ + Schedule a coroutine to run without waiting for the result. + + Args: + coro: A coroutine to run. + + Returns: + A Future that will contain the result when complete. + """ + import concurrent.futures + + if not self._ready.is_set(): + raise RuntimeError("AsyncContext not started. Call start() first.") + + future = concurrent.futures.Future() + + async def wrapper(): + try: + result = await coro + future.set_result(result) + except Exception as e: + future.set_exception(e) + + _schedule_coro_on_loop(self._loop, wrapper()) + return future + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + return False + + +class AsyncAlarm: + """ + Async alarm that fires when async operations complete. + + This class provides an alarm-like interface for async operations within CSP nodes. + When an async operation completes, the alarm "ticks" with the result value. + + The internal mechanism uses a background thread with an event loop to run async + operations, and a polling alarm to check for completed results. 
+ + Example: + @csp.node + def my_node() -> ts[int]: + with csp.alarms(): + poll_alarm = csp.alarm(bool) + async_alarm = csp.async_alarm(int) + + with csp.state(): + s_counter = 0 + s_pending = False + + with csp.start(): + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(poll_alarm): + # Only schedule a new async operation if one isn't already pending + if not s_pending: + s_counter += 1 + csp.schedule_async_alarm(async_alarm, async_func(s_counter)) + s_pending = True + + # Keep polling + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(async_alarm): + # Async operation completed - we can schedule another one now + s_pending = False + return async_alarm + """ + + def __init__(self, output_type: type = object): + self._output_type = output_type + self._loop: asyncio.AbstractEventLoop = None + self._thread: threading.Thread = None + self._active = False + self._ready = threading.Event() + self._results: queue.Queue = queue.Queue() + self._pending_count = 0 + self._lock = threading.Lock() + self._last_result = None # Store the last result for value access + + def start(self): + """Start the async alarm's event loop in a background thread.""" + if self._active: + return + + self._active = True + + def run_loop(): + self._loop = asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) + self._ready.set() + try: + self._loop.run_forever() + finally: + try: + self._loop.close() + except Exception: + pass + + self._thread = threading.Thread(target=run_loop, daemon=True) + self._thread.start() + self._ready.wait(timeout=5.0) + + def stop(self): + """Stop the async alarm's event loop and cancel pending tasks.""" + self._active = False + if self._loop is not None and self._loop.is_running(): + # Cancel all pending tasks before stopping + def cancel_all(): + for task in asyncio.all_tasks(self._loop): + task.cancel() + self._loop.stop() + + self._loop.call_soon_threadsafe(cancel_all) + if self._thread is 
not None: + self._thread.join(timeout=1.0) + + def schedule(self, coro: Awaitable[T]) -> None: + """ + Schedule an async operation. When it completes, the result will be available. + + Args: + coro: A coroutine to run. + """ + if not self._ready.is_set(): + raise RuntimeError("AsyncAlarm not started. Call start() first.") + + with self._lock: + self._pending_count += 1 + + async def wrapper(): + try: + result = await coro + self._results.put(("success", result)) + except Exception as e: + self._results.put(("error", e)) + finally: + with self._lock: + self._pending_count -= 1 + + _schedule_coro_on_loop(self._loop, wrapper()) + + def has_result(self) -> bool: + """Check if any async operation has completed and has a result waiting.""" + return not self._results.empty() + + def get_result(self) -> T: + """ + Get the next completed result. Also stores it as _last_result for value access. + + Returns: + The result of the completed async operation. + + Raises: + queue.Empty: If no result is available. + Exception: If the async operation raised an exception. + """ + try: + status, value = self._results.get_nowait() + if status == "error": + raise value + self._last_result = value + return value + except queue.Empty: + raise + + @property + def value(self) -> T: + """Get the last result value. Used when accessing the alarm as a value.""" + return self._last_result + + def pending_count(self) -> int: + """Return the number of pending async operations.""" + with self._lock: + return self._pending_count + + +# Convenience functions for alarm-like syntax +def async_alarm(output_type: type = object) -> AsyncAlarm: + """ + Create an async alarm for use in CSP nodes. + + This is meant to be used in a pattern similar to csp.alarm(), but for async operations. + The async alarm is automatically started when created and should be stopped in the + node's stop block. + + Args: + output_type: The type of values that will be produced by async operations. 
+ + Returns: + An AsyncAlarm instance (already started). + + Example: + with csp.alarms(): + async_alarm = csp.async_alarm(int) + + with csp.stop(): + async_alarm.stop() + """ + alarm = AsyncAlarm(output_type) + alarm.start() # Auto-start for convenience + return alarm + + +def schedule_async_alarm(alarm: AsyncAlarm, coro: Awaitable[T]) -> None: + """ + Schedule an async operation on an async alarm. + + When the async operation completes, the alarm will have a result available. + + Args: + alarm: The AsyncAlarm to schedule on. + coro: The coroutine to run. + + Example: + csp.schedule_async_alarm(s_async_alarm, fetch_data(url)) + """ + alarm.schedule(coro) diff --git a/csp/impl/types/common_definitions.py b/csp/impl/types/common_definitions.py index ce94a8dd4..5962cbbce 100644 --- a/csp/impl/types/common_definitions.py +++ b/csp/impl/types/common_definitions.py @@ -241,6 +241,7 @@ class ArgKind(IntEnum): BASKET_TS = TS | 0x4 DYNAMIC_BASKET_TS = BASKET_TS | 0x8 ALARM = TS | 0x10 + ASYNC_ALARM = TS | 0x20 def is_any_ts(self): return self & ArgKind.TS @@ -262,7 +263,7 @@ def is_dynamic_basket(self): return self == ArgKind.DYNAMIC_BASKET_TS def is_alarm(self): - return self == ArgKind.ALARM + return self == ArgKind.ALARM or self == ArgKind.ASYNC_ALARM class BasketKind(Enum): diff --git a/csp/impl/types/instantiation_type_resolver.py b/csp/impl/types/instantiation_type_resolver.py index 36f1ddb11..cd0d35844 100644 --- a/csp/impl/types/instantiation_type_resolver.py +++ b/csp/impl/types/instantiation_type_resolver.py @@ -231,7 +231,7 @@ def _resolve_types(self): self._add_container_scalar_value(arg, in_out_def) else: self._add_scalar_value(arg, in_out_def) - elif in_out_def.kind == ArgKind.ALARM: + elif in_out_def.kind.is_alarm(): # TODO: Handle alarms better? 
pass elif in_out_def.kind.is_non_dynamic_basket(): diff --git a/csp/impl/wiring/node.py b/csp/impl/wiring/node.py index a905bee36..aefd9b954 100644 --- a/csp/impl/wiring/node.py +++ b/csp/impl/wiring/node.py @@ -156,7 +156,7 @@ def _create(self, engine, memo): node = None if self._cppimpl: cppinputs = [ - (input_def.name, input_type, input_def.ts_idx, input_def.kind == ArgKind.ALARM) + (input_def.name, input_type, input_def.ts_idx, input_def.kind.is_alarm()) for input_type, input_def in zip(inputs, self._signature.ts_inputs) ] cppoutputs = [ diff --git a/csp/impl/wiring/node_parser.py b/csp/impl/wiring/node_parser.py index f1204bd2c..87c567cf0 100644 --- a/csp/impl/wiring/node_parser.py +++ b/csp/impl/wiring/node_parser.py @@ -14,6 +14,13 @@ from csp.impl.wiring.base_parser import BaseParser, CspParseError, _pythonic_depr_warning +def _get_async_alarm_class(): + """Lazy import to avoid circular dependency.""" + from csp.impl.async_adapter import AsyncAlarm + + return AsyncAlarm + + class _SingleProxyFuncArgResolver(object): class INVALID_VALUE: pass @@ -163,22 +170,40 @@ def _parse_alarms(self, node): raise CspParseError("Exactly one alarm can be assigned per line", node.lineno) name = node.targets[0].id - if not (isinstance(node.value, ast.Call) and BaseParser._is_csp_special_func_call(node.value, "alarm")): - raise CspParseError("Alarms must be initialized with csp.alarm in __alarms__ block", node.lineno) + is_alarm = isinstance(node.value, ast.Call) and BaseParser._is_csp_special_func_call( + node.value, "alarm" + ) + is_async_alarm = isinstance(node.value, ast.Call) and BaseParser._is_csp_special_func_call( + node.value, "async_alarm" + ) + + if not (is_alarm or is_async_alarm): + raise CspParseError( + "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block", node.lineno + ) call_node = node.value - # handle the initial scheduling via `csp.alarm` + # handle the initial scheduling via `csp.alarm` or `csp.async_alarm` if 
len(call_node.keywords): - raise TypeError("function `csp.alarm` does not take keyword arguments") + raise TypeError("function `csp.alarm`/`csp.async_alarm` does not take keyword arguments") if len(call_node.args) != 1: raise TypeError( - f"function `csp.alarm` requires a single type argument: {len(call_node.args)} arguments given" + f"function `csp.alarm`/`csp.async_alarm` requires a single type argument: {len(call_node.args)} arguments given" ) typ = self._eval_expr(call_node.args[0]) ts_type_arg = tstype.TsType[typ] - self._inputs.insert(num_alarms, InputDef(name, ts_type_arg, ArgKind.ALARM, None, num_alarms, -1)) + if is_async_alarm: + # Track async alarms for special handling + if not hasattr(self, "_async_alarms"): + self._async_alarms = {} + self._async_alarms[name] = typ + self._inputs.insert( + num_alarms, InputDef(name, ts_type_arg, ArgKind.ASYNC_ALARM, None, num_alarms, -1) + ) + else: + self._inputs.insert(num_alarms, InputDef(name, ts_type_arg, ArgKind.ALARM, None, num_alarms, -1)) num_alarms += 1 # Re-assign tsidx on inputs that have been pushed further out @@ -299,6 +324,18 @@ def visit_Expr(self, node: ast.Expr): else: return res + def visit_Name(self, node: ast.Name): + """Handle name access for async alarms - return manager's value instead of alarm proxy.""" + if hasattr(self, "_async_alarms") and node.id in self._async_alarms: + # For async alarms, accessing by name should get the manager's value + mgr_name = f"#async_mgr_{node.id}" + return ast.Attribute( + value=ast.Name(id=mgr_name, ctx=ast.Load()), + attr="value", + ctx=node.ctx, + ) + return node + def _visit_node_or_list(self, node_or_list): if isinstance(node_or_list, list): res = [] @@ -556,9 +593,64 @@ def _parse_remove_dynamic_key(self, node): def _create_single_input_ticked_expression(self, arg): return ast.UnaryOp(op=ast.UAdd(), operand=self._ts_inproxy_expr(arg)) + def _create_async_alarm_ticked_expression(self, arg, input_def): + """ + For async alarms, we check if the manager has 
results and get the result if so. + This generates code equivalent to: + (#async_mgr_name.has_result() and (#async_mgr_name.get_result(), True)[-1]) + + The get_result() call will store the result in _last_result, making it available + via the .value property when the alarm name is accessed. + """ + mgr_name = f"#async_mgr_{input_def.name}" + + # #async_mgr_name.has_result() + has_result_call = ast.Call( + func=ast.Attribute( + value=ast.Name(id=mgr_name, ctx=ast.Load()), + attr="has_result", + ctx=ast.Load(), + ), + args=[], + keywords=[], + ) + + # #async_mgr_name.get_result() + get_result_call = ast.Call( + func=ast.Attribute( + value=ast.Name(id=mgr_name, ctx=ast.Load()), + attr="get_result", + ctx=ast.Load(), + ), + args=[], + keywords=[], + ) + + # (get_result_call, True)[-1] - evaluates get_result (storing in _last_result) and returns True + result_tuple = ast.Subscript( + value=ast.Tuple(elts=[get_result_call, ast.Constant(True)], ctx=ast.Load()), + slice=ast.Constant(-1), + ctx=ast.Load(), + ) + + # has_result() and (get_result(), True)[-1] + return ast.BoolOp( + op=ast.And(), + values=[has_result_call, result_tuple], + ) + def _parse_ticked(self, node): """ORed together all values ticked converted to unary add +""" - exprs = [self._create_single_input_ticked_expression(arg) for arg in node.args] + exprs = [] + for arg in node.args: + # Check if this is an async alarm + if isinstance(arg, ast.Name): + input_def = self._signature.input(arg.id, allow_missing=True) + if input_def and input_def.kind == ArgKind.ASYNC_ALARM: + exprs.append(self._create_async_alarm_ticked_expression(arg, input_def)) + continue + exprs.append(self._create_single_input_ticked_expression(arg)) + if len(exprs) == 1: return exprs[0] @@ -638,6 +730,42 @@ def _parse_reschedule_alarm(self, node): def _parse_cancel_alarm(self, node): return self._parse_schedule_alarm_func(node, "cancel_alarm") + def _parse_schedule_async_alarm(self, node): + """ + Parse 
csp.schedule_async_alarm(async_alarm, coro). + Transforms to: #async_mgr_name.schedule(coro) + """ + if len(node.args) < 2: + raise CspParseError("csp.schedule_async_alarm requires alarm and coroutine arguments", node.lineno) + + name_node = node.args[0] + if not isinstance(name_node, ast.Name): + raise CspParseError("csp.schedule_async_alarm expects async alarm name as first argument", node.lineno) + + name = name_node.id + input_def = self._signature.input(name, allow_missing=True) + if not input_def: + raise CspParseError(f"unrecognized async alarm '{name}'", node.lineno) + + if input_def.kind != ArgKind.ASYNC_ALARM: + raise CspParseError( + f"csp.schedule_async_alarm can only be used with async alarms, not '{name}'", node.lineno + ) + + mgr_name = f"#async_mgr_{name}" + coro_arg = node.args[1] + + # #async_mgr_name.schedule(coro) + return ast.Call( + func=ast.Attribute( + value=ast.Name(id=mgr_name, ctx=ast.Load()), + attr="schedule", + ctx=ast.Load(), + ), + args=[coro_arg], + keywords=node.keywords, + ) + def _parse_set_buffering_policy(self, node): proxy = self._ts_inproxy_expr(node.args[0]) args = list(node.args[1:]) @@ -833,16 +961,70 @@ def _parse_impl(self): self._stateblock = [self.visit(node) for node in self._stateblock] self._startblock = [self.visit(node) for node in self._startblock] + # Inject async alarm manager state and lifecycle code + async_alarm_state = [] + async_alarm_start = [] + async_alarm_stop = [] + if hasattr(self, "_async_alarms") and self._async_alarms: + # Add AsyncAlarm to globals only when needed (lazy import to avoid circular dependency) + self._func_globals_modified["AsyncAlarm"] = _get_async_alarm_class() + for name, typ in self._async_alarms.items(): + mgr_name = f"#async_mgr_{name}" + # State: create the AsyncAlarm manager + # We need to create a reference to the type, not embed it as a constant + type_name = typ.__name__ if hasattr(typ, "__name__") else str(typ) + async_alarm_state.append( + ast.Assign( + 
targets=[ast.Name(id=mgr_name, ctx=ast.Store())], + value=ast.Call( + func=ast.Name(id="AsyncAlarm", ctx=ast.Load()), + args=[ast.Name(id=type_name, ctx=ast.Load())], + keywords=[], + ), + ) + ) + # Start: start the manager + async_alarm_start.append( + ast.Expr( + value=ast.Call( + func=ast.Attribute( + value=ast.Name(id=mgr_name, ctx=ast.Load()), + attr="start", + ctx=ast.Load(), + ), + args=[], + keywords=[], + ) + ) + ) + # Stop: stop the manager + async_alarm_stop.append( + ast.Expr( + value=ast.Call( + func=ast.Attribute( + value=ast.Name(id=mgr_name, ctx=ast.Load()), + attr="stop", + ctx=ast.Load(), + ), + args=[], + keywords=[], + ) + ) + ) + init_block = node_proxy + ts_in_proxies + ts_out_proxies + ts_vars - startblock = self._stateblock + self._startblock + startblock = async_alarm_state + self._stateblock + async_alarm_start + self._startblock body = [ast.While(test=ast.Constant(value=True), orelse=[], body=innerbody)] + # Combine user stopblock with async alarm stop if self._stopblock: self._stopblock = [self.visit(node) for node in self._stopblock] + combined_stopblock = (self._stopblock or []) + async_alarm_stop + if combined_stopblock: # For stop we wrap the body of a node in a try / finally # If the init block fails it's unrecoverable, and if the start block raises we don't want to stop that specific node - start_and_body = startblock + [ast.Try(body=body, finalbody=self._stopblock, handlers=[], orelse=[])] + start_and_body = startblock + [ast.Try(body=body, finalbody=combined_stopblock, handlers=[], orelse=[])] else: start_and_body = startblock + body @@ -891,6 +1073,11 @@ def f(node_parser, node): return f + @classmethod + def _parse_passthrough(cls, self, node): + """Passthrough parser that keeps the csp.xxx call unchanged.""" + return self.generic_visit(node) + @classmethod def _init_internal_maps(cls): cls.METHOD_MAP = { @@ -921,6 +1108,9 @@ def _init_internal_maps(cls): "csp.engine_start_time": cls._parse_engine_start_time, 
"csp.engine_end_time": cls._parse_engine_end_time, "csp.engine_stats": cls._parse_csp_engine_stats, + "csp.await_": cls._parse_passthrough, + "csp.async_alarm": cls._parse_passthrough, + "csp.schedule_async_alarm": cls._parse_schedule_async_alarm, } diff --git a/csp/impl/wiring/runtime.py b/csp/impl/wiring/runtime.py index f18cc0f78..40d507cda 100644 --- a/csp/impl/wiring/runtime.py +++ b/csp/impl/wiring/runtime.py @@ -1,7 +1,9 @@ +import asyncio import threading import time from collections import deque from datetime import datetime, timedelta +from typing import Optional import pytz @@ -116,10 +118,21 @@ def _build_engine(engine, context, memo=None): class GraphRunInfo: TLS = threading.local() - def __init__(self, starttime, endtime, realtime): + def __init__( + self, + starttime, + endtime, + realtime, + asyncio_on_thread=False, + asyncio_loop: Optional[asyncio.AbstractEventLoop] = None, + ): self._starttime = starttime self._endtime = endtime self._realtime = realtime + # is_asyncio means same-thread asyncio execution + # This is True when realtime=True and asyncio_on_thread=False + self._is_asyncio = realtime and not asyncio_on_thread + self._asyncio_loop = asyncio_loop self._prev = None @property @@ -134,6 +147,15 @@ def endtime(self): def is_realtime(self): return self._realtime + @property + def is_asyncio(self): + return self._is_asyncio + + @property + def asyncio_loop(self) -> Optional[asyncio.AbstractEventLoop]: + """Get the asyncio event loop for this run, if asyncio mode is enabled.""" + return self._asyncio_loop + @classmethod def get_cur_run_times_info(cls, raise_if_missing=True): info = getattr(cls.TLS, "instance", None) @@ -157,6 +179,113 @@ def __init__(self, context): self.context = context +async def _run_asyncio_engine(engine, starttime, endtime): + """Run the engine in asyncio mode using step-based execution. + + This function integrates CSP's engine with asyncio by: + 1. Using the engine's wakeup FD to detect push events + 2. 
Yielding to asyncio between cycles to allow other coroutines to run + 3. Running until the engine signals completion via process_one_cycle + """ + loop = asyncio.get_running_loop() + wakeup_fd = engine.get_wakeup_fd() + fd_event = asyncio.Event() + + def on_fd_readable(): + """Called when the wakeup FD becomes readable (push event queued).""" + fd_event.set() + + # Register the wakeup FD with asyncio + if wakeup_fd >= 0: + loop.add_reader(wakeup_fd, on_fd_readable) + + try: + engine.start(starttime, endtime) + + # Allow any async tasks scheduled during start() to begin + await asyncio.sleep(0) + + consecutive_no_work = 0 # Track consecutive cycles with no work + + while True: + # Check if engine is still active + if not engine.is_running(): + break + + # Process any pending work + # Note: We pass a tiny value (1 microsecond) instead of 0.0 + # because 0.0 means "use default wait time until endtime" in C++. + # This value needs to be at least 1 nanosecond when converted to int64_t + # in the C++ code: fromNanoseconds(1e-6 * 1e9) = fromNanoseconds(1000) + has_more = engine.process_one_cycle(1e-6) # 1 microsecond + + if has_more: + # More work available, yield briefly then continue + consecutive_no_work = 0 + await asyncio.sleep(0) + continue + + # No immediate work from process_one_cycle + consecutive_no_work += 1 + + # Calculate time until endtime + now = datetime.now(pytz.UTC).replace(tzinfo=None) + time_remaining = (endtime - now).total_seconds() + + if time_remaining <= 0: + # Past endtime - but give async tasks a chance to complete + # by waiting briefly for any pending push events + if consecutive_no_work > 3: + # Multiple cycles with no work after endtime -> done + break + # Brief wait for any final push events + fd_event.clear() + try: + await asyncio.wait_for(fd_event.wait(), timeout=0.01) + except asyncio.TimeoutError: + pass + else: + # Before endtime - wait for next event + next_time = engine.next_scheduled_time() + + if next_time is not None and 
next_time != datetime.max: + # Calculate wait time until next scheduled event + wait_seconds = (next_time - now).total_seconds() + if wait_seconds > 0: + fd_event.clear() + try: + await asyncio.wait_for(fd_event.wait(), timeout=min(wait_seconds, 0.1)) + except asyncio.TimeoutError: + pass + else: + await asyncio.sleep(0) + else: + # No scheduled events - wait for push events via FD + fd_event.clear() + try: + # Wait for FD or until endtime + await asyncio.wait_for(fd_event.wait(), timeout=min(time_remaining, 0.1)) + except asyncio.TimeoutError: + pass + + # Clear the wakeup FD after processing + if wakeup_fd >= 0: + engine.clear_wakeup_fd() + + except Exception: + # On error, finish and re-raise + if wakeup_fd >= 0: + loop.remove_reader(wakeup_fd) + engine.finish() + raise + + # Cleanup and return results + if wakeup_fd >= 0: + loop.remove_reader(wakeup_fd) + + return engine.finish() + + def run( g, *args, @@ -165,8 +294,13 @@ def run( queue_wait_time=None, realtime=False, output_numpy=False, + asyncio_on_thread=False, **kwargs, ): + # Determine if we run asyncio on the same thread (asyncio mode) + # When realtime=True and asyncio_on_thread=False (default), we run in asyncio mode + run_asyncio_mode = realtime and not asyncio_on_thread + with ExceptionContext(): starttime, endtime = _normalize_run_times(starttime, endtime, realtime) @@ -207,14 +341,42 @@ def run( time.sleep((starttime - now).total_seconds()) with mem_cache: - return engine.run(starttime, endtime) + if run_asyncio_mode: + # Run in asyncio mode using step-based execution + # Recalculate endtime for realtime mode to account for graph building time + # This ensures the user gets the expected runtime duration + actual_now = datetime.now(pytz.UTC).replace(tzinfo=None) + runtime_duration = endtime - starttime + actual_endtime = actual_now + runtime_duration + + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + # Set up GraphRunInfo with asyncio context + with GraphRunInfo( + 
starttime=actual_now, + endtime=actual_endtime, + realtime=realtime, + asyncio_on_thread=False, + asyncio_loop=loop, + ): + return loop.run_until_complete(_run_asyncio_engine(engine, actual_now, actual_endtime)) + finally: + loop.close() + asyncio.set_event_loop(None) + else: + return engine.run(starttime, endtime) if isinstance(g, Edge): - return run(lambda: g, starttime=starttime, endtime=endtime, **engine_settings) + return run( + lambda: g, starttime=starttime, endtime=endtime, asyncio_on_thread=asyncio_on_thread, **engine_settings + ) # wrapped in a _WrappedContext so that we can give up the mem before run graph = _WrappedContext( build_graph(g, *args, starttime=starttime, endtime=endtime, realtime=realtime, **kwargs) ) - with GraphRunInfo(starttime=starttime, endtime=endtime, realtime=realtime): - return run(graph, starttime=starttime, endtime=endtime, **engine_settings) + with GraphRunInfo(starttime=starttime, endtime=endtime, realtime=realtime, asyncio_on_thread=asyncio_on_thread): + return run( + graph, starttime=starttime, endtime=endtime, asyncio_on_thread=asyncio_on_thread, **engine_settings + ) diff --git a/csp/impl/wiring/signature.py b/csp/impl/wiring/signature.py index 79285b011..d20d202a5 100644 --- a/csp/impl/wiring/signature.py +++ b/csp/impl/wiring/signature.py @@ -65,7 +65,7 @@ def _create_pydantic_models(self, name, inputs, outputs, defaults): # Prefix all names with INPUT_PREFIX to avoid conflicts with pydantic names (i.e. model_validate) input_fields = {} for defn in inputs: - if defn.kind != ArgKind.ALARM: + if not defn.kind.is_alarm(): default = defaults.get(defn.name, ...) 
typ = adjust_annotations(defn.typ, make_optional=True) if defn.kind.is_scalar(): # Allow for SnapType and SnapKeyType @@ -116,7 +116,7 @@ def copy(self, drop_alarms=False): new_inputs = [] cur_ts_dx = 0 for input in self._inputs: - if input.kind == ArgKind.ALARM: + if input.kind.is_alarm(): continue else: new_inputs.append( @@ -179,7 +179,7 @@ def parse_inputs(self, forced_tvars, *args, allow_none_ts=False, **kwargs): # We need to do some special handling here of int arguments connected to float tvars = type_resolver.tvars resolved_ts_inputs = type_resolver.ts_inputs - non_alarm_inputs = (e for e in self._ts_inputs if e.kind != ArgKind.ALARM) + non_alarm_inputs = (e for e in self._ts_inputs if not e.kind.is_alarm()) for i, (input, resolved_input) in enumerate(zip(non_alarm_inputs, resolved_ts_inputs)): if isinstance(resolved_input, Edge): if getattr(resolved_input.tstype, "typ", None) is int: diff --git a/csp/tests/test_async_adapter.py b/csp/tests/test_async_adapter.py new file mode 100644 index 000000000..4df9f67d5 --- /dev/null +++ b/csp/tests/test_async_adapter.py @@ -0,0 +1,645 @@ +import asyncio +import sys +import time +import unittest +from datetime import datetime, timedelta +from typing import AsyncIterator + +import pytest + +import csp +from csp import ts +from csp.impl.async_adapter import AsyncContext + +# Windows has lower timer resolution (~15.6ms vs ~1ms on Unix) +IS_WINDOWS = sys.platform == "win32" +# Use longer delays on Windows to avoid timing issues +DEFAULT_DELAY = 0.05 if IS_WINDOWS else 0.02 + + +def _has_decomposed_api(): + """Check if the CSP decomposed execution API is available.""" + try: + from csp.impl.__cspimpl import _cspimpl + + engine = _cspimpl.PyEngine(realtime=True) + return hasattr(engine, "start") + except Exception: + return False + + +HAS_DECOMPOSED_API = _has_decomposed_api() + + +async def async_return_value(value: int, delay: float = 0.05) -> int: + """Async function that returns a value after a delay.""" + await 
asyncio.sleep(delay) + return value + + +async def async_double(n: int, delay: float = 0.05) -> int: + """Async function that doubles a value after a delay.""" + await asyncio.sleep(delay) + return n * 2 + + +async def async_generator(count: int, delay: float = 0.05) -> AsyncIterator[int]: + """Async generator that yields count values.""" + for i in range(count): + await asyncio.sleep(delay) + yield i + + +async def async_side_effect(value: int, results: list, delay: float = 0.05) -> None: + """Async function with side effect (appends to list).""" + await asyncio.sleep(delay) + results.append(value) + + +class TestAsyncFor(unittest.TestCase): + """Tests for csp.async_for - async generator to time series.""" + + def test_async_for_basic(self): + """Test basic async_for functionality.""" + + @csp.graph + def graph(): + values = csp.async_for(async_generator(5, delay=DEFAULT_DELAY)) + csp.add_graph_output("values", values) + + # Use longer endtime on Windows due to longer delays + endtime = timedelta(seconds=1.0) if IS_WINDOWS else timedelta(seconds=0.5) + results = csp.run(graph, realtime=True, endtime=endtime) + values = [v for _, v in results["values"]] + self.assertEqual(values, [0, 1, 2, 3, 4]) + + def test_async_for_type_inference(self): + """Test that async_for correctly infers the output type.""" + + async def typed_generator() -> AsyncIterator[str]: + for s in ["a", "b", "c"]: + await asyncio.sleep(0.02) + yield s + + @csp.graph + def graph(): + values = csp.async_for(typed_generator()) + csp.add_graph_output("values", values) + + results = csp.run(graph, realtime=True, endtime=timedelta(seconds=0.3)) + values = [v for _, v in results["values"]] + self.assertEqual(values, ["a", "b", "c"]) + + +class TestAsyncIn(unittest.TestCase): + """Tests for csp.async_in - single async value to time series.""" + + def test_async_in_basic(self): + """Test basic async_in functionality.""" + + @csp.graph + def graph(): + result = csp.async_in(async_return_value(42, 
delay=DEFAULT_DELAY)) + csp.add_graph_output("result", result) + + endtime = timedelta(seconds=0.3) if IS_WINDOWS else timedelta(seconds=0.2) + results = csp.run(graph, realtime=True, endtime=endtime) + values = [v for _, v in results["result"]] + self.assertEqual(values, [42]) + + def test_async_in_ticks_once(self): + """Test that async_in only ticks once.""" + + @csp.graph + def graph(): + result = csp.async_in(async_return_value(100, delay=DEFAULT_DELAY)) + csp.add_graph_output("result", result) + + endtime = timedelta(seconds=0.4) if IS_WINDOWS else timedelta(seconds=0.3) + results = csp.run(graph, realtime=True, endtime=endtime) + # Should only have one tick + self.assertEqual(len(results["result"]), 1) + self.assertEqual(results["result"][0][1], 100) + + +class TestAsyncOut(unittest.TestCase): + """Tests for csp.async_out - time series to async function (side effects).""" + + def test_async_out_basic(self): + """Test basic async_out functionality.""" + collected = [] + + async def collector(value: int) -> None: + await asyncio.sleep(0.01) + collected.append(value) + + @csp.graph + def graph(): + trigger = csp.timer(timedelta(milliseconds=50), True) + counter = csp.count(trigger) + csp.async_out(counter, collector) + + csp.run(graph, realtime=True, endtime=timedelta(seconds=0.25)) + + # Give async operations time to complete + import time + + time.sleep(0.2) + + # Should have collected multiple values + self.assertGreater(len(collected), 0) + self.assertEqual(collected, sorted(collected)) # Should be in order + + +class TestAsyncNode(unittest.TestCase): + """Tests for csp.async_node - transform time series via async function.""" + + def test_async_node_basic(self): + """Test basic async_node functionality.""" + + @csp.graph + def graph(): + trigger = csp.timer(timedelta(milliseconds=50), True) + counter = csp.count(trigger) + doubled = csp.async_node(counter, async_double) + csp.add_graph_output("doubled", doubled) + + results = csp.run(graph, realtime=True, 
endtime=timedelta(seconds=0.3)) + values = [v for _, v in results["doubled"]] + + # Each value should be doubled + for i, v in enumerate(values, 1): + self.assertEqual(v, i * 2) + + +class TestAwait(unittest.TestCase): + """Tests for csp.await_ - blocking await within nodes.""" + + def test_await_blocking(self): + """Test blocking await within a node.""" + + @csp.node + def node_with_await() -> ts[int]: + with csp.alarms(): + trigger = csp.alarm(bool) + + with csp.start(): + csp.schedule_alarm(trigger, timedelta(milliseconds=10), True) + + if csp.ticked(trigger): + # Blocking await + result = csp.await_(async_return_value(99, delay=DEFAULT_DELAY), block=True) + return result + + @csp.graph + def graph(): + result = node_with_await() + csp.add_graph_output("result", result) + + endtime = timedelta(seconds=0.3) if IS_WINDOWS else timedelta(seconds=0.2) + results = csp.run(graph, realtime=True, endtime=endtime) + values = [v for _, v in results["result"]] + self.assertIn(99, values) + + +class TestAsyncContext(unittest.TestCase): + """Tests for AsyncContext - persistent async event loop in nodes.""" + + def test_async_context_basic(self): + """Test AsyncContext for persistent async operations.""" + + @csp.node + def node_with_context() -> ts[int]: + with csp.alarms(): + trigger = csp.alarm(bool) + + with csp.state(): + s_ctx = None + s_counter = 0 + + with csp.start(): + s_ctx = AsyncContext() + s_ctx.start() + csp.schedule_alarm(trigger, timedelta(milliseconds=20), True) + + with csp.stop(): + if s_ctx: + s_ctx.stop() + + if csp.ticked(trigger): + s_counter += 1 + if s_counter <= 3: + result = s_ctx.run(async_double(s_counter, delay=DEFAULT_DELAY)) + csp.schedule_alarm(trigger, timedelta(milliseconds=50), True) + return result + + @csp.graph + def graph(): + result = node_with_context() + csp.add_graph_output("result", result) + + endtime = timedelta(seconds=0.8) if IS_WINDOWS else timedelta(seconds=0.5) + results = csp.run(graph, realtime=True, endtime=endtime) + 
values = [v for _, v in results["result"]] + + # Should have values 2, 4, 6 (1*2, 2*2, 3*2) + self.assertEqual(values, [2, 4, 6]) + + +class TestAsyncAlarm(unittest.TestCase): + """Tests for csp.async_alarm - alarm-like pattern for async operations.""" + + def test_async_alarm_basic(self): + """Test basic async_alarm functionality.""" + + @csp.node + def node_with_async_alarm() -> ts[int]: + with csp.alarms(): + poll_alarm = csp.alarm(bool) + async_alarm = csp.async_alarm(int) + + with csp.state(): + s_counter = 0 + s_pending = False + + with csp.start(): + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(poll_alarm): + if not s_pending and s_counter < 3: + s_counter += 1 + csp.schedule_async_alarm(async_alarm, async_double(s_counter, delay=0.05)) + s_pending = True + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(async_alarm): + s_pending = False + return async_alarm + + @csp.graph + def graph(): + result = node_with_async_alarm() + csp.add_graph_output("result", result) + + results = csp.run(graph, realtime=True, endtime=timedelta(seconds=0.5)) + values = [v for _, v in results["result"]] + + # Should have values 2, 4, 6 (1*2, 2*2, 3*2) + self.assertEqual(values, [2, 4, 6]) + + def test_async_alarm_multiple_operations(self): + """Test async_alarm with multiple sequential operations.""" + + @csp.node + def node_with_sequential_ops() -> ts[int]: + with csp.alarms(): + poll_alarm = csp.alarm(bool) + async_alarm = csp.async_alarm(int) + + with csp.state(): + s_values = [10, 20, 30, 40, 50] + s_index = 0 + s_pending = False + + with csp.start(): + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(poll_alarm): + if not s_pending and s_index < len(s_values): + csp.schedule_async_alarm(async_alarm, async_double(s_values[s_index], delay=0.03)) + s_index += 1 + s_pending = True + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if 
csp.ticked(async_alarm): + s_pending = False + return async_alarm + + @csp.graph + def graph(): + result = node_with_sequential_ops() + csp.add_graph_output("result", result) + + results = csp.run(graph, realtime=True, endtime=timedelta(seconds=0.6)) + values = [v for _, v in results["result"]] + + # Should have values 20, 40, 60, 80, 100 (each input doubled) + self.assertEqual(values, [20, 40, 60, 80, 100]) + + +class TestIntegration(unittest.TestCase): + """Integration tests combining multiple async features.""" + + def test_combined_async_features(self): + """Test combining async_for, async_in, async_out, and async_node.""" + output_collected = [] + + async def output_collector(value: int) -> None: + await asyncio.sleep(DEFAULT_DELAY / 2) + output_collected.append(value) + + @csp.graph + def graph(): + # async_for: async generator to time series + gen_values = csp.async_for(async_generator(3, delay=DEFAULT_DELAY)) + csp.add_graph_output("gen_values", gen_values) + + # async_in: single async value + single_value = csp.async_in(async_return_value(42, delay=DEFAULT_DELAY)) + csp.add_graph_output("single_value", single_value) + + # async_node: transform via async + doubled = csp.async_node(gen_values, async_double) + csp.add_graph_output("doubled", doubled) + + # async_out: side effects + csp.async_out(gen_values, output_collector) + + endtime = timedelta(seconds=0.8) if IS_WINDOWS else timedelta(seconds=0.4) + results = csp.run(graph, realtime=True, endtime=endtime) + + # Verify async_for results + gen_values = [v for _, v in results["gen_values"]] + self.assertEqual(gen_values, [0, 1, 2]) + + # Verify async_in results + single_values = [v for _, v in results["single_value"]] + self.assertEqual(single_values, [42]) + + # Verify async_node results (doubled values) + doubled_values = [v for _, v in results["doubled"]] + self.assertEqual(doubled_values, [0, 2, 4]) + + # Give time for async_out to complete + import time + + time.sleep(0.2) + 
self.assertEqual(sorted(output_collected), [0, 1, 2]) + + +class TestSharedLoop(unittest.TestCase): + """Tests for the shared async loop functionality.""" + + def test_shared_loop_is_reused(self): + """Verify that get_shared_loop returns the same loop instance.""" + from csp.impl.async_adapter import get_shared_loop, shutdown_shared_loop + + loop1 = get_shared_loop() + loop2 = get_shared_loop() + self.assertIs(loop1, loop2) + self.assertTrue(loop1.is_running()) + + def test_await_uses_shared_loop_by_default(self): + """Verify await_ uses the shared loop by default.""" + from csp.impl.async_adapter import get_shared_loop + + shared_loop = get_shared_loop() + loop_used = [] + + async def capture_loop() -> int: + loop_used.append(asyncio.get_running_loop()) + return 42 + + result = csp.await_(capture_loop()) + self.assertEqual(result, 42) + self.assertEqual(len(loop_used), 1) + self.assertIs(loop_used[0], shared_loop) + + def test_await_with_custom_loop(self): + """Verify await_ can use a custom loop when specified.""" + import threading + + custom_loop = asyncio.new_event_loop() + loop_used = [] + loop_ready = threading.Event() + + async def capture_loop() -> int: + loop_used.append(asyncio.get_running_loop()) + return 42 + + def run_custom_loop(): + asyncio.set_event_loop(custom_loop) + loop_ready.set() + custom_loop.run_forever() + + thread = threading.Thread(target=run_custom_loop, daemon=True) + thread.start() + loop_ready.wait(timeout=2.0) + + try: + result = csp.await_(capture_loop(), loop=custom_loop) + self.assertEqual(result, 42) + self.assertEqual(len(loop_used), 1) + self.assertIs(loop_used[0], custom_loop) + finally: + custom_loop.call_soon_threadsafe(custom_loop.stop) + thread.join(timeout=1.0) + custom_loop.close() + + def test_multiple_adapters_use_shared_loop(self): + """Verify that multiple adapters all use the same shared loop.""" + from csp.impl.async_adapter import get_shared_loop + + shared_loop = get_shared_loop() + loops_observed = [] + + async 
def record_loop(n: int) -> int: + loops_observed.append(asyncio.get_running_loop()) + return n * 2 + + async def record_loop_gen(count: int) -> AsyncIterator[int]: + for i in range(count): + loops_observed.append(asyncio.get_running_loop()) + yield i + + @csp.graph + def graph(): + gen_values = csp.async_for(record_loop_gen(2)) + doubled = csp.async_node(gen_values, record_loop) + csp.add_graph_output("doubled", doubled) + + csp.run(graph, realtime=True, endtime=timedelta(seconds=0.3)) + + # All observed loops should be the shared loop + self.assertGreater(len(loops_observed), 0) + for loop in loops_observed: + self.assertIs(loop, shared_loop) + + @pytest.mark.skipif(not HAS_DECOMPOSED_API, reason="CSP decomposed API not available") + @pytest.mark.skipif(IS_WINDOWS, reason="CspEventLoop has Windows issues") + def test_csp_event_loop_integration(self): + """Verify that async adapters use CspEventLoop directly when available.""" + from csp.event_loop import CspEventLoop + from csp.impl.async_adapter import get_async_loop + + loops_observed = [] + csp_loop = CspEventLoop() + + async def record_and_return(n: int) -> int: + loops_observed.append(asyncio.get_running_loop()) + await asyncio.sleep(0.01) + return n * 2 + + async def main(): + # Inside CspEventLoop, get_async_loop should return the CspEventLoop + current_loop = get_async_loop() + self.assertIs(current_loop, csp_loop) + + # Schedule a coroutine + result = await record_and_return(21) + self.assertEqual(result, 42) + + # Verify the coroutine ran on the CspEventLoop + self.assertEqual(len(loops_observed), 1) + self.assertIs(loops_observed[0], csp_loop) + + try: + csp_loop.run_until_complete(main()) + finally: + csp_loop.close() + + +@pytest.mark.skipif(not HAS_DECOMPOSED_API, reason="CSP decomposed API not available") +@pytest.mark.skipif(IS_WINDOWS, reason="CspEventLoop has Windows issues") +class TestCspRunAsyncioMode(unittest.TestCase): + """Tests for csp.run with asyncio_on_thread parameter (default: False = 
same thread).""" + + def test_asyncio_on_thread_simulation_mode(self): + """Verify that asyncio_on_thread has no effect in simulation mode.""" + + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=10), 1) + csp.add_graph_output("count", csp.count(timer)) + + # In simulation mode, asyncio_on_thread parameter is ignored + # Both should complete successfully + result1 = csp.run(graph, starttime=datetime(2020, 1, 1), endtime=timedelta(seconds=0.1), realtime=False) + result2 = csp.run( + graph, + starttime=datetime(2020, 1, 1), + endtime=timedelta(seconds=0.1), + realtime=False, + asyncio_on_thread=True, + ) + + self.assertIn("count", result1) + self.assertIn("count", result2) + + def test_realtime_same_thread_asyncio_basic(self): + """Test basic csp.run with same-thread asyncio (default in realtime mode).""" + values = [] + + @csp.node + def collect(x: ts[int]): + if csp.ticked(x): + values.append(x) + + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=50), 1) + counter = csp.count(timer) + collect(counter) + + # Default in realtime is asyncio_on_thread=False (same thread) + csp.run(graph, realtime=True, endtime=timedelta(milliseconds=200)) + + # Should have some values collected + self.assertGreater(len(values), 0) + + def test_realtime_background_thread_asyncio_basic(self): + """Test csp.run with background thread asyncio (old default behavior).""" + values = [] + + @csp.node + def collect(x: ts[int]): + if csp.ticked(x): + values.append(x) + + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=50), 1) + counter = csp.count(timer) + collect(counter) + + # Explicitly use background thread for asyncio + csp.run(graph, realtime=True, endtime=timedelta(milliseconds=200), asyncio_on_thread=True) + + # Should have some values collected + self.assertGreater(len(values), 0) + + def test_same_thread_async_adapter_uses_csp_loop(self): + """Verify that async adapters use the CSP loop in same-thread mode.""" + from 
csp.impl.async_adapter import get_csp_asyncio_loop, is_csp_asyncio_mode + + loop_checks = [] + + async def async_double(n: int) -> int: + # Record whether we detect CSP asyncio mode + is_asyncio = is_csp_asyncio_mode() + loop = get_csp_asyncio_loop() + loop_checks.append((is_asyncio, loop is not None)) + await asyncio.sleep(0.01) + return n * 2 + + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=50), 1) + doubled = csp.async_node(timer, async_double) + csp.add_graph_output("doubled", doubled) + + # Default in realtime: same-thread asyncio + result = csp.run(graph, realtime=True, endtime=timedelta(milliseconds=300)) + + # All async operations should detect CSP asyncio mode + self.assertGreater(len(loop_checks), 0) + for is_asyncio, has_loop in loop_checks: + self.assertTrue(is_asyncio, "Should detect CSP asyncio mode") + self.assertTrue(has_loop, "Should have access to CSP's asyncio loop") + + def test_same_thread_async_for(self): + """Test async_for works with same-thread asyncio (default).""" + + async def async_gen() -> AsyncIterator[int]: + for i in range(3): + await asyncio.sleep(0.02) + yield i + + @csp.graph + def graph(): + gen = csp.async_for(async_gen()) + csp.add_graph_output("values", gen) + + # Default in realtime: same-thread asyncio + result = csp.run(graph, realtime=True, endtime=timedelta(milliseconds=300)) + + # Should have collected some values from async generator + self.assertIn("values", result) + if result["values"]: + values = [v[1] for v in result["values"]] + self.assertGreater(len(values), 0) + + def test_same_thread_async_in(self): + """Test async_in works with same-thread asyncio (default).""" + + async def slow_fetch() -> int: + await asyncio.sleep(0.02) + return 42 + + @csp.graph + def graph(): + result = csp.async_in(slow_fetch()) + csp.add_graph_output("result", result) + + # Default in realtime: same-thread asyncio + output = csp.run(graph, realtime=True, endtime=timedelta(milliseconds=200)) + + # Should have 
received the result + self.assertIn("result", output) + if output["result"]: + values = [v[1] for v in output["result"]] + self.assertIn(42, values) + + +if __name__ == "__main__": + unittest.main() diff --git a/csp/tests/test_asyncio_mode.py b/csp/tests/test_asyncio_mode.py new file mode 100644 index 000000000..403bb5784 --- /dev/null +++ b/csp/tests/test_asyncio_mode.py @@ -0,0 +1,436 @@ +import time +import unittest +from datetime import datetime, timedelta +from typing import List + +import csp +from csp import ts + + +@csp.node +def accumulator(x: ts[int]) -> ts[List[int]]: + """Accumulate all values into a list.""" + with csp.state(): + s_values = [] + + if csp.ticked(x): + s_values.append(x) + return list(s_values) + + +@csp.node +def rolling_sum(x: ts[int], window: int) -> ts[int]: + """Compute rolling sum over a window.""" + with csp.state(): + s_buffer = [] + + if csp.ticked(x): + s_buffer.append(x) + if len(s_buffer) > window: + s_buffer.pop(0) + return sum(s_buffer) + + +@csp.node +def filter_even(x: ts[int]) -> ts[int]: + """Filter to only even numbers.""" + if csp.ticked(x) and x % 2 == 0: + return x + + +@csp.node +def multiply(x: ts[int], factor: int) -> ts[int]: + """Multiply input by a factor.""" + if csp.ticked(x): + return x * factor + + +class AsyncioModeTestCase(unittest.TestCase): + """Base class that runs tests in both asyncio and background-thread modes.""" + + def run_graph_both_modes(self, graph_func, endtime=timedelta(milliseconds=300)): + """ + Run a graph in both modes and return results for comparison. 
+ + Args: + graph_func: A function that returns a csp.graph decorated function + endtime: Duration to run the graph + + Returns: + Tuple of (background_thread_results, same_thread_results) + """ + graph = graph_func() + + # Run with asyncio on background thread (old default behavior) + background_thread_results = csp.run(graph, realtime=True, endtime=endtime, asyncio_on_thread=True) + + # Run with asyncio on same thread (new default behavior) + graph = graph_func() # Fresh graph instance + same_thread_results = csp.run(graph, realtime=True, endtime=endtime) # asyncio_on_thread=False is default + + return background_thread_results, same_thread_results + + def extract_values(self, results, key): + """Extract just the values from timestamped results.""" + if key not in results or not results[key]: + return [] + return [v for _, v in results[key]] + + +class TestTimerEquivalence(AsyncioModeTestCase): + """Test timer functionality is equivalent in both modes.""" + + def test_timer_basic(self): + """Test that timers produce same count in both modes.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=50), 1) + counter = csp.count(timer) + csp.add_graph_output("count", counter) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=250)) + + normal_values = self.extract_values(normal, "count") + asyncio_values = self.extract_values(asyncio_result, "count") + + # Both should have multiple values + self.assertGreater(len(normal_values), 0) + self.assertGreater(len(asyncio_values), 0) + + # Values should be close (timing may cause slight differences) + # Check that we got approximately the same number of events + self.assertAlmostEqual(len(normal_values), len(asyncio_values), delta=2) + + def test_timer_with_processing(self): + """Test timer with node processing is equivalent.""" + + def make_graph(): + @csp.graph + def graph(): + timer = 
csp.timer(timedelta(milliseconds=40), 1) + counter = csp.count(timer) + doubled = multiply(counter, 2) + csp.add_graph_output("doubled", doubled) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=200)) + + normal_values = self.extract_values(normal, "doubled") + asyncio_values = self.extract_values(asyncio_result, "doubled") + + # Check both have values + self.assertGreater(len(normal_values), 0) + self.assertGreater(len(asyncio_values), 0) + + # All values should be even (doubled from count) + for v in normal_values + asyncio_values: + self.assertEqual(v % 2, 0) + + +class TestNodeEquivalence(AsyncioModeTestCase): + """Test node functionality is equivalent in both modes.""" + + def test_rolling_sum(self): + """Test rolling sum produces consistent results.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=30), 1) + counter = csp.count(timer) + rsum = rolling_sum(counter, 3) + csp.add_graph_output("rolling", rsum) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=200)) + + normal_values = self.extract_values(normal, "rolling") + asyncio_values = self.extract_values(asyncio_result, "rolling") + + # Both should produce results + self.assertGreater(len(normal_values), 0) + self.assertGreater(len(asyncio_values), 0) + + def test_filter_node(self): + """Test filter node works equivalently.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=25), 1) + counter = csp.count(timer) + evens = filter_even(counter) + csp.add_graph_output("evens", evens) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=250)) + + normal_values = self.extract_values(normal, "evens") + asyncio_values = self.extract_values(asyncio_result, "evens") + + # All values should be even + for v in normal_values: + 
self.assertEqual(v % 2, 0) + for v in asyncio_values: + self.assertEqual(v % 2, 0) + + def test_chained_nodes(self): + """Test chained nodes produce equivalent results.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=30), 1) + counter = csp.count(timer) + doubled = multiply(counter, 2) + tripled = multiply(doubled, 3) # Actually 6x + csp.add_graph_output("result", tripled) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=180)) + + normal_values = self.extract_values(normal, "result") + asyncio_values = self.extract_values(asyncio_result, "result") + + # All values should be multiples of 6 + for v in normal_values + asyncio_values: + self.assertEqual(v % 6, 0) + + +class TestBaselibEquivalence(AsyncioModeTestCase): + """Test baselib functions work equivalently in both modes.""" + + def test_sample(self): + """Test csp.sample works equivalently.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=20), 1) + trigger = csp.timer(timedelta(milliseconds=100)) + counter = csp.count(timer) + sampled = csp.sample(trigger, counter) + csp.add_graph_output("sampled", sampled) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=350)) + + normal_values = self.extract_values(normal, "sampled") + asyncio_values = self.extract_values(asyncio_result, "sampled") + + # Both should have some samples + self.assertGreater(len(normal_values), 0) + self.assertGreater(len(asyncio_values), 0) + + def test_delay(self): + """Test csp.delay works equivalently.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=50), 1) + counter = csp.count(timer) + delayed = csp.delay(counter, timedelta(milliseconds=25)) + csp.add_graph_output("delayed", delayed) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, 
endtime=timedelta(milliseconds=300)) + + normal_values = self.extract_values(normal, "delayed") + asyncio_values = self.extract_values(asyncio_result, "delayed") + + # Both should have delayed values + self.assertGreater(len(normal_values), 0) + self.assertGreater(len(asyncio_values), 0) + + def test_merge(self): + """Test csp.merge works equivalently.""" + + def make_graph(): + @csp.graph + def graph(): + timer1 = csp.timer(timedelta(milliseconds=40), 1) + timer2 = csp.timer(timedelta(milliseconds=60), 10) + merged = csp.merge(timer1, timer2) + csp.add_graph_output("merged", merged) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=250)) + + normal_values = self.extract_values(normal, "merged") + asyncio_values = self.extract_values(asyncio_result, "merged") + + # Both should have merged values (1s and 10s) + self.assertGreater(len(normal_values), 0) + self.assertGreater(len(asyncio_values), 0) + + # Check we get both types of values + self.assertIn(1, normal_values) + self.assertIn(10, normal_values) + + +class TestMathEquivalence(AsyncioModeTestCase): + """Test math operations work equivalently in both modes.""" + + def test_add(self): + """Test addition works equivalently.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=40), 5) + result = timer + 10 + csp.add_graph_output("result", result) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=200)) + + normal_values = self.extract_values(normal, "result") + asyncio_values = self.extract_values(asyncio_result, "result") + + # All values should be 15 + for v in normal_values + asyncio_values: + self.assertEqual(v, 15) + + def test_comparison(self): + """Test comparison operations work equivalently.""" + + def make_graph(): + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=30), 1) + counter = csp.count(timer) + gt_3 = 
counter > 3 + csp.add_graph_output("gt_3", gt_3) + + return graph + + normal, asyncio_result = self.run_graph_both_modes(make_graph, endtime=timedelta(milliseconds=200)) + + normal_values = self.extract_values(normal, "gt_3") + asyncio_values = self.extract_values(asyncio_result, "gt_3") + + # Should have both True and False values + self.assertIn(False, normal_values) + self.assertIn(True, normal_values) + + +class TestAsyncioModePerformance(unittest.TestCase): + """Test that asyncio mode performance is within acceptable bounds.""" + + def test_performance_comparison(self): + """ + Compare performance of normal vs asyncio mode. + + Asyncio mode has overhead from Python/C++ boundary crossings, + but should be within a reasonable margin for typical workloads. + """ + + @csp.graph + def benchmark_graph(): + # Create a moderately complex graph + timer = csp.timer(timedelta(milliseconds=5), 1) + counter = csp.count(timer) + + # Chain several operations + doubled = counter * 2 + tripled = counter * 3 + summed = doubled + tripled # 5x + + # Add some filtering using csp.filter with a boolean ts + is_even = (summed % 2) == 0 + evens = csp.filter(is_even, summed) + + csp.add_graph_output("result", evens) + + duration = timedelta(milliseconds=200) + iterations = 3 + + # Warm up + csp.run(benchmark_graph, realtime=True, endtime=timedelta(milliseconds=50), asyncio_on_thread=True) + csp.run(benchmark_graph, realtime=True, endtime=timedelta(milliseconds=50)) # default: asyncio on same thread + + # Benchmark background thread mode + background_times = [] + for _ in range(iterations): + start = time.perf_counter() + csp.run(benchmark_graph, realtime=True, endtime=duration, asyncio_on_thread=True) + background_times.append(time.perf_counter() - start) + + # Benchmark same-thread asyncio mode (default) + same_thread_times = [] + for _ in range(iterations): + start = time.perf_counter() + csp.run(benchmark_graph, realtime=True, endtime=duration) # default: asyncio on same thread + 
same_thread_times.append(time.perf_counter() - start) + + avg_background = sum(background_times) / len(background_times) + avg_same_thread = sum(same_thread_times) / len(same_thread_times) + + # In realtime mode, both should take approximately the same wall time + # (the duration), so the ratio should be close to 1.0 + # Allow up to 50% overhead for same-thread mode + ratio = avg_same_thread / avg_background if avg_background > 0 else 1.0 + + print("\nPerformance comparison:") + print(f" Background thread mode average: {avg_background:.4f}s") + print(f" Same-thread mode average: {avg_same_thread:.4f}s") + print(f" Ratio (same_thread/background): {ratio:.2f}x") + + # Both should complete close to the requested duration + self.assertAlmostEqual(avg_background, duration.total_seconds(), delta=0.1) + self.assertAlmostEqual(avg_same_thread, duration.total_seconds(), delta=0.1) + + # Same-thread mode should not be more than 50% slower + self.assertLess(ratio, 1.5, f"Same-thread mode too slow: {ratio:.2f}x background mode") + + def test_throughput_comparison(self): + """ + Compare event throughput between modes. + + Both modes should process approximately the same number of events + in the same time period. 
+ """ + + def count_events(asyncio_on_thread: bool) -> int: + @csp.graph + def graph(): + timer = csp.timer(timedelta(milliseconds=10), 1) + counter = csp.count(timer) + csp.add_graph_output("count", counter) + + result = csp.run( + graph, realtime=True, endtime=timedelta(milliseconds=200), asyncio_on_thread=asyncio_on_thread + ) + return len(result.get("count", [])) + + background_events = count_events(True) + same_thread_events = count_events(False) + + print("\nThroughput comparison:") + print(f" Background thread events: {background_events}") + print(f" Same-thread events: {same_thread_events}") + + # Both should process similar number of events (within 20%) + min_events = min(background_events, same_thread_events) + max_events = max(background_events, same_thread_events) + + if min_events > 0: + ratio = max_events / min_events + self.assertLess(ratio, 1.2, f"Event count differs too much: {ratio:.2f}x") + + +if __name__ == "__main__": + unittest.main() diff --git a/csp/tests/test_event_loop.py b/csp/tests/test_event_loop.py new file mode 100644 index 000000000..39e087426 --- /dev/null +++ b/csp/tests/test_event_loop.py @@ -0,0 +1,881 @@ +""" +Tests for CSP Event Loop Integration + +This module tests the integration between CSP's event loop and Python's asyncio. 
+""" + +import asyncio +import concurrent.futures +import socket +import sys +import threading +import time +import unittest +from datetime import datetime, timedelta +from unittest import mock + +import pytest + +import csp +from csp.event_loop import CspEventLoop, CspEventLoopPolicy, new_event_loop, run + +# Windows has lower timer resolution (~15.6ms vs ~1ms on Unix) +IS_WINDOWS = sys.platform == "win32" +TIMING_TOLERANCE = 0.05 if IS_WINDOWS else 0.01 + + +def _has_decomposed_api(): + """Check if the CSP decomposed execution API is available.""" + try: + from csp.impl.__cspimpl import _cspimpl + + engine = _cspimpl.PyEngine(realtime=True) + return hasattr(engine, "start") + except Exception: + return False + + +HAS_DECOMPOSED_API = _has_decomposed_api() + +# Skip entire module if decomposed API not available or on Windows +# Windows has issues with selectors and signal handling in the event loop +pytestmark = [ + pytest.mark.skipif(not HAS_DECOMPOSED_API, reason="CSP decomposed API not available - rebuild with PR changes"), + pytest.mark.skipif(IS_WINDOWS, reason="CspEventLoop has Windows-specific issues with selectors/signals"), +] + + +class TestCspEventLoopBasic(unittest.TestCase): + """Basic tests for CspEventLoop.""" + + def test_create_loop(self): + """Test creating a new event loop.""" + loop = new_event_loop() + self.assertIsInstance(loop, CspEventLoop) + self.assertFalse(loop.is_running()) + self.assertFalse(loop.is_closed()) + loop.close() + self.assertTrue(loop.is_closed()) + + def test_run_until_complete_simple(self): + """Test run_until_complete with a simple coroutine.""" + + async def simple_coro(): + return 42 + + loop = new_event_loop() + try: + result = loop.run_until_complete(simple_coro()) + self.assertEqual(result, 42) + finally: + loop.close() + + def test_run_until_complete_with_await(self): + """Test run_until_complete with a coroutine that awaits.""" + + async def coro_with_await(): + await asyncio.sleep(0.01) + return "done" + + loop = 
new_event_loop() + try: + result = loop.run_until_complete(coro_with_await()) + self.assertEqual(result, "done") + finally: + loop.close() + + def test_run_function(self): + """Test the run() convenience function.""" + + async def main(): + await asyncio.sleep(0.01) + return "result" + + result = run(main()) + self.assertEqual(result, "result") + + def test_stop(self): + """Test stopping the loop.""" + loop = new_event_loop() + + async def stopper(): + await asyncio.sleep(0.01) + loop.stop() + + try: + loop.create_task(stopper()) + loop.run_forever() + # If we get here, stop worked + self.assertFalse(loop.is_running()) + finally: + loop.close() + + +class TestCallbackScheduling(unittest.TestCase): + """Tests for callback scheduling methods.""" + + def setUp(self): + self.loop = new_event_loop() + self.calls = [] + + def tearDown(self): + self.loop.close() + + def test_call_soon(self): + """Test call_soon schedules a callback.""" + + def callback(value): + self.calls.append(value) + self.loop.stop() + + self.loop.call_soon(callback, "called") + self.loop.run_forever() + self.assertEqual(self.calls, ["called"]) + + def test_call_later(self): + """Test call_later schedules a delayed callback.""" + start = time.monotonic() + delay = 0.05 + + def callback(): + elapsed = time.monotonic() - start + self.calls.append(elapsed) + self.loop.stop() + + self.loop.call_later(delay, callback) + self.loop.run_forever() + + self.assertEqual(len(self.calls), 1) + self.assertGreaterEqual(self.calls[0], delay - TIMING_TOLERANCE) # Allow some tolerance + + def test_call_at(self): + """Test call_at schedules a callback at absolute time.""" + start = self.loop.time() + when = start + 0.05 + + def callback(): + self.calls.append(self.loop.time()) + self.loop.stop() + + self.loop.call_at(when, callback) + self.loop.run_forever() + + self.assertEqual(len(self.calls), 1) + self.assertGreaterEqual(self.calls[0], when - TIMING_TOLERANCE) + + def test_handle_cancel(self): + """Test cancelling a 
scheduled callback.""" + + def callback(): + self.calls.append("called") + + handle = self.loop.call_soon(callback) + handle.cancel() + self.assertTrue(handle.cancelled()) + + # Run a bit to ensure the cancelled callback doesn't run + self.loop.call_soon(self.loop.stop) + self.loop.run_forever() + + self.assertEqual(self.calls, []) + + def test_call_soon_threadsafe(self): + """Test thread-safe callback scheduling.""" + result = [] + + def background(): + time.sleep(0.01) + self.loop.call_soon_threadsafe(result.append, "threaded") + time.sleep(0.01) + self.loop.call_soon_threadsafe(self.loop.stop) + + thread = threading.Thread(target=background) + thread.start() + + self.loop.run_forever() + thread.join() + + self.assertEqual(result, ["threaded"]) + + def test_multiple_callbacks_order(self): + """Test that callbacks run in order.""" + for i in range(5): + self.loop.call_soon(lambda x=i: self.calls.append(x)) + self.loop.call_soon(self.loop.stop) + + self.loop.run_forever() + self.assertEqual(self.calls, [0, 1, 2, 3, 4]) + + def test_timer_handles_order(self): + """Test that timer handles run in time order.""" + now = self.loop.time() + # Use larger intervals on Windows due to lower timer resolution + base_interval = 0.05 if IS_WINDOWS else 0.01 + + self.loop.call_at(now + 3 * base_interval, lambda: self.calls.append(3)) + self.loop.call_at(now + 1 * base_interval, lambda: self.calls.append(1)) + self.loop.call_at(now + 2 * base_interval, lambda: self.calls.append(2)) + self.loop.call_at(now + 4 * base_interval, self.loop.stop) + + self.loop.run_forever() + self.assertEqual(self.calls, [1, 2, 3]) + + +class TestFuturesAndTasks(unittest.TestCase): + """Tests for Future and Task handling.""" + + def test_create_future(self): + """Test creating a Future.""" + loop = new_event_loop() + try: + future = loop.create_future() + self.assertIsInstance(future, asyncio.Future) + self.assertFalse(future.done()) + finally: + loop.close() + + def test_create_task(self): + """Test 
creating a Task.""" + + async def coro(): + return 42 + + loop = new_event_loop() + try: + # Need to set the loop as running to create tasks + async def wrapper(): + task = loop.create_task(coro()) + return await task + + result = loop.run_until_complete(wrapper()) + self.assertEqual(result, 42) + finally: + loop.close() + + def test_task_with_name(self): + """Test creating a named task.""" + + async def coro(): + return 42 + + async def main(): + loop = asyncio.get_running_loop() + task = loop.create_task(coro(), name="my_task") + self.assertEqual(task.get_name(), "my_task") + return await task + + result = run(main()) + self.assertEqual(result, 42) + + def test_gather(self): + """Test asyncio.gather works with CSP loop.""" + + async def coro(n): + await asyncio.sleep(0.01) + return n * 2 + + async def main(): + results = await asyncio.gather(coro(1), coro(2), coro(3)) + return results + + results = run(main()) + self.assertEqual(results, [2, 4, 6]) + + def test_wait_for_timeout(self): + """Test asyncio.wait_for with timeout.""" + + async def slow_coro(): + await asyncio.sleep(10) + return "done" + + async def main(): + with self.assertRaises(asyncio.TimeoutError): + await asyncio.wait_for(slow_coro(), timeout=0.01) + + run(main()) + + def test_shield(self): + """Test asyncio.shield.""" + + async def inner(): + await asyncio.sleep(0.01) + return "shielded" + + async def main(): + return await asyncio.shield(inner()) + + result = run(main()) + self.assertEqual(result, "shielded") + + +class TestExceptionHandling(unittest.TestCase): + """Tests for exception handling.""" + + def test_exception_in_callback(self): + """Test exception handling in callbacks.""" + loop = new_event_loop() + exceptions = [] + + def handler(loop, context): + exceptions.append(context.get("exception")) + + loop.set_exception_handler(handler) + + def bad_callback(): + raise ValueError("test error") + + def stopper(): + loop.stop() + + try: + loop.call_soon(bad_callback) + 
loop.call_soon(stopper) + loop.run_forever() + + self.assertEqual(len(exceptions), 1) + self.assertIsInstance(exceptions[0], ValueError) + finally: + loop.close() + + def test_exception_in_coroutine(self): + """Test exception handling in coroutines.""" + + async def bad_coro(): + raise RuntimeError("test error") + + loop = new_event_loop() + try: + with self.assertRaises(RuntimeError): + loop.run_until_complete(bad_coro()) + finally: + loop.close() + + def test_default_exception_handler(self): + """Test default exception handler is called.""" + loop = new_event_loop() + + with mock.patch.object(loop, "default_exception_handler") as mock_handler: + + def bad_callback(): + raise ValueError("test") + + def stopper(): + loop.stop() + + try: + loop.call_soon(bad_callback) + loop.call_soon(stopper) + loop.run_forever() + + mock_handler.assert_called_once() + finally: + loop.close() + + +@unittest.skipIf(IS_WINDOWS, "I/O selector tests may have issues on Windows") +class TestIOOperations(unittest.TestCase): + """Tests for I/O operations.""" + + def test_add_remove_reader(self): + """Test adding and removing file descriptor readers.""" + loop = new_event_loop() + try: + r, w = socket.socketpair() + r.setblocking(False) + w.setblocking(False) + + calls = [] + + def reader(): + data = r.recv(1024) + calls.append(data) + loop.stop() + + loop.add_reader(r.fileno(), reader) + w.send(b"test") + loop.run_forever() + + self.assertEqual(calls, [b"test"]) + + # Test remove + result = loop.remove_reader(r.fileno()) + self.assertTrue(result) + + result = loop.remove_reader(r.fileno()) + self.assertFalse(result) + + r.close() + w.close() + finally: + loop.close() + + def test_add_remove_writer(self): + """Test adding and removing file descriptor writers.""" + loop = new_event_loop() + try: + r, w = socket.socketpair() + r.setblocking(False) + w.setblocking(False) + + calls = [] + + def writer(): + w.send(b"test") + calls.append(True) + loop.remove_writer(w.fileno()) + loop.stop() + + 
loop.add_writer(w.fileno(), writer) + loop.run_forever() + + self.assertEqual(calls, [True]) + self.assertEqual(r.recv(1024), b"test") + + r.close() + w.close() + finally: + loop.close() + + +class TestExecutor(unittest.TestCase): + """Tests for executor operations.""" + + def test_run_in_executor(self): + """Test running functions in executor.""" + + def blocking_func(x, y): + time.sleep(0.01) + return x + y + + async def main(): + loop = asyncio.get_running_loop() + result = await loop.run_in_executor(None, blocking_func, 1, 2) + return result + + result = run(main()) + self.assertEqual(result, 3) + + def test_run_in_custom_executor(self): + """Test running with custom executor.""" + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: + + def blocking_func(): + return threading.current_thread().name + + async def main(): + loop = asyncio.get_running_loop() + result = await loop.run_in_executor(executor, blocking_func) + return result + + result = run(main()) + self.assertIn("ThreadPoolExecutor", result) + + +class TestEventLoopPolicy(unittest.TestCase): + """Tests for CspEventLoopPolicy.""" + + def test_policy_new_event_loop(self): + """Test policy creates CspEventLoop.""" + policy = CspEventLoopPolicy() + loop = policy.new_event_loop() + self.assertIsInstance(loop, CspEventLoop) + loop.close() + + def test_policy_get_set_event_loop(self): + """Test policy get/set event loop.""" + policy = CspEventLoopPolicy() + + with self.assertRaises(RuntimeError): + policy.get_event_loop() + + loop = policy.new_event_loop() + policy.set_event_loop(loop) + + self.assertIs(policy.get_event_loop(), loop) + + policy.set_event_loop(None) + loop.close() + + def test_set_as_global_policy(self): + """Test setting as global asyncio policy.""" + old_policy = asyncio.get_event_loop_policy() + try: + asyncio.set_event_loop_policy(CspEventLoopPolicy()) + loop = asyncio.new_event_loop() + self.assertIsInstance(loop, CspEventLoop) + loop.close() + finally: + 
asyncio.set_event_loop_policy(old_policy) + + +class TestAsyncioCompatibility(unittest.TestCase): + """Tests for asyncio compatibility.""" + + def test_asyncio_sleep(self): + """Test asyncio.sleep works.""" + + async def main(): + start = time.monotonic() + await asyncio.sleep(0.05) + elapsed = time.monotonic() - start + return elapsed + + result = run(main()) + self.assertGreaterEqual(result, 0.04) + + def test_asyncio_create_task(self): + """Test asyncio.create_task works.""" + + async def inner(): + await asyncio.sleep(0.01) + return 42 + + async def main(): + task = asyncio.create_task(inner()) + result = await task + return result + + result = run(main()) + self.assertEqual(result, 42) + + def test_asyncio_wait(self): + """Test asyncio.wait works.""" + + async def coro(n): + await asyncio.sleep(0.01) + return n + + async def main(): + tasks = [asyncio.create_task(coro(i)) for i in range(3)] + done, pending = await asyncio.wait(tasks) + return sorted([t.result() for t in done]) + + result = run(main()) + self.assertEqual(result, [0, 1, 2]) + + def test_asyncio_queue(self): + """Test asyncio.Queue works.""" + + async def main(): + queue = asyncio.Queue() + await queue.put(1) + await queue.put(2) + + results = [] + results.append(await queue.get()) + results.append(await queue.get()) + return results + + result = run(main()) + self.assertEqual(result, [1, 2]) + + def test_asyncio_event(self): + """Test asyncio.Event works.""" + + async def main(): + event = asyncio.Event() + + async def setter(): + await asyncio.sleep(0.01) + event.set() + + asyncio.create_task(setter()) + await event.wait() + return event.is_set() + + result = run(main()) + self.assertTrue(result) + + def test_asyncio_lock(self): + """Test asyncio.Lock works.""" + + async def main(): + lock = asyncio.Lock() + results = [] + + async def worker(n): + async with lock: + results.append(f"start-{n}") + await asyncio.sleep(0.01) + results.append(f"end-{n}") + + await asyncio.gather(worker(1), 
worker(2)) + return results + + result = run(main()) + # Due to lock, operations should be sequential + self.assertEqual(len(result), 4) + # First worker should complete before second starts + start_indices = [i for i, x in enumerate(result) if x.startswith("start")] + end_indices = [i for i, x in enumerate(result) if x.startswith("end")] + self.assertLess(end_indices[0], start_indices[1]) + + def test_asyncio_semaphore(self): + """Test asyncio.Semaphore works.""" + + async def main(): + sem = asyncio.Semaphore(2) + active = [] + max_active = [0] + + async def worker(n): + async with sem: + active.append(n) + max_active[0] = max(max_active[0], len(active)) + await asyncio.sleep(0.01) + active.remove(n) + + await asyncio.gather(*[worker(i) for i in range(5)]) + return max_active[0] + + result = run(main()) + self.assertLessEqual(result, 2) + + +class TestDebugMode(unittest.TestCase): + """Tests for debug mode.""" + + def test_debug_mode_default(self): + """Test debug mode is off by default.""" + loop = new_event_loop() + try: + self.assertFalse(loop.get_debug()) + finally: + loop.close() + + def test_set_debug_mode(self): + """Test setting debug mode.""" + loop = new_event_loop() + try: + loop.set_debug(True) + self.assertTrue(loop.get_debug()) + loop.set_debug(False) + self.assertFalse(loop.get_debug()) + finally: + loop.close() + + def test_run_with_debug(self): + """Test running with debug mode.""" + + async def main(): + loop = asyncio.get_running_loop() + return loop.get_debug() + + result = run(main(), debug=True) + self.assertTrue(result) + + +class TestLoopTime(unittest.TestCase): + """Tests for loop time operations.""" + + def test_time_increases(self): + """Test that loop time increases.""" + loop = new_event_loop() + try: + t1 = loop.time() + time.sleep(0.01) + t2 = loop.time() + self.assertGreater(t2, t1) + finally: + loop.close() + + +class TestContextVars(unittest.TestCase): + """Tests for context variable support.""" + + def 
test_context_in_callback(self): + """Test context variables in callbacks.""" + import contextvars + + cv = contextvars.ContextVar("test_cv", default="default") + results = [] + + def callback(): + results.append(cv.get()) + loop.stop() + + loop = new_event_loop() + try: + ctx = contextvars.copy_context() + ctx.run(cv.set, "modified") + loop.call_soon(callback, context=ctx) + loop.run_forever() + + self.assertEqual(results, ["modified"]) + finally: + loop.close() + + +class TestShutdown(unittest.TestCase): + """Tests for shutdown operations.""" + + def test_shutdown_asyncgens(self): + """Test shutdown_asyncgens.""" + cleanup_called = [] + + async def async_gen(): + try: + while True: + yield 1 + await asyncio.sleep(0.01) + finally: + cleanup_called.append(True) + + async def main(): + loop = asyncio.get_running_loop() + gen = async_gen() + await gen.__anext__() + # Don't complete the generator + return loop + + # Run and capture the loop + loop = new_event_loop() + try: + asyncio.set_event_loop(loop) + + async def wrapper(): + gen = async_gen() + await gen.__anext__() + + loop.run_until_complete(wrapper()) + loop.run_until_complete(loop.shutdown_asyncgens()) + finally: + loop.close() + + +class TestSimulationMode(unittest.TestCase): + """Tests for simulation (historical) mode.""" + + def test_create_simulation_loop(self): + """Test creating a simulation mode event loop.""" + loop = CspEventLoop(realtime=False) + self.assertIsInstance(loop, CspEventLoop) + self.assertFalse(loop._realtime) + loop.close() + + def test_set_simulation_time_range(self): + """Test setting the simulation time range.""" + loop = CspEventLoop(realtime=False) + start = datetime(2020, 1, 1, 9, 30, 0) + end = datetime(2020, 1, 1, 16, 0, 0) + loop.set_simulation_time_range(start=start, end=end) + self.assertEqual(loop._starttime, start) + self.assertEqual(loop._endtime, end) + loop.close() + + def test_set_simulation_time_range_on_realtime_raises(self): + """Test that set_simulation_time_range 
raises on realtime loop.""" + loop = CspEventLoop(realtime=True) + try: + with self.assertRaises(RuntimeError): + loop.set_simulation_time_range(start=datetime(2020, 1, 1)) + finally: + loop.close() + + def test_simulation_mode_time_returns_simulated_time(self): + """Test that time() returns simulated time in simulation mode.""" + loop = CspEventLoop(realtime=False) + start = datetime(2020, 6, 15, 12, 0, 0) + loop.set_simulation_time_range(start=start) + + recorded_time = None + + async def capture_time(): + nonlocal recorded_time + recorded_time = loop.time() + return "done" + + try: + loop.run_until_complete(capture_time()) + # Time should be the timestamp of the start time + expected_timestamp = start.timestamp() + self.assertEqual(recorded_time, expected_timestamp) + finally: + loop.close() + + def test_simulation_mode_no_waiting(self): + """Test that simulation mode doesn't wait on asyncio.sleep.""" + loop = CspEventLoop(realtime=False) + loop.set_simulation_time_range(start=datetime(2020, 1, 1)) + + async def slow_in_realtime(): + # In realtime, this would take 10 seconds + await asyncio.sleep(0) + await asyncio.sleep(0) + await asyncio.sleep(0) + return "done" + + try: + wall_start = time.monotonic() + result = loop.run_until_complete(slow_in_realtime()) + wall_elapsed = time.monotonic() - wall_start + + self.assertEqual(result, "done") + # Should complete in well under 1 second + self.assertLess(wall_elapsed, 1.0) + finally: + loop.close() + + def test_simulation_mode_simple_coroutine(self): + """Test running a simple coroutine in simulation mode.""" + loop = CspEventLoop(realtime=False) + + async def simple(): + return 42 + + try: + result = loop.run_until_complete(simple()) + self.assertEqual(result, 42) + finally: + loop.close() + + def test_simulation_mode_with_tasks(self): + """Test creating tasks in simulation mode.""" + loop = CspEventLoop(realtime=False) + loop.set_simulation_time_range(start=datetime(2020, 1, 1)) + + results = [] + + async def 
task1(): + results.append("task1") + return 1 + + async def task2(): + results.append("task2") + return 2 + + async def main(): + t1 = asyncio.create_task(task1()) + t2 = asyncio.create_task(task2()) + r1 = await t1 + r2 = await t2 + return r1 + r2 + + try: + result = loop.run_until_complete(main()) + self.assertEqual(result, 3) + self.assertIn("task1", results) + self.assertIn("task2", results) + finally: + loop.close() + + def test_simulation_mode_gather(self): + """Test asyncio.gather in simulation mode.""" + loop = CspEventLoop(realtime=False) + + async def coro(n): + return n * 2 + + async def main(): + results = await asyncio.gather(coro(1), coro(2), coro(3)) + return results + + try: + result = loop.run_until_complete(main()) + self.assertEqual(result, [2, 4, 6]) + finally: + loop.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/csp/tests/test_event_loop_bridge.py b/csp/tests/test_event_loop_bridge.py new file mode 100644 index 000000000..d80a786d0 --- /dev/null +++ b/csp/tests/test_event_loop_bridge.py @@ -0,0 +1,574 @@ +""" +Tests for CSP Event Loop Bridge Integration + +This module tests the integration between asyncio and running CSP graphs +using the AsyncioBridge and BidirectionalBridge classes. 
+""" + +import asyncio +import sys +import threading +import time +import unittest +from datetime import datetime, timedelta + +import csp +from csp import ts +from csp.event_loop.bridge import AsyncioBridge, BidirectionalBridge +from csp.utils.datetime import utc_now + +# Windows has lower timer resolution (~15.6ms vs ~1ms on Unix) +IS_WINDOWS = sys.platform == "win32" +TIMING_TOLERANCE = 0.05 if IS_WINDOWS else 0.01 + + +class TestAsyncioBridgeBasic(unittest.TestCase): + """Basic tests for AsyncioBridge.""" + + def test_create_bridge(self): + """Test creating a bridge.""" + bridge = AsyncioBridge(int, "test_bridge") + self.assertIsNotNone(bridge.adapter) + self.assertFalse(bridge.is_running) + self.assertIsNone(bridge.loop) + + def test_start_stop(self): + """Test starting and stopping the bridge.""" + bridge = AsyncioBridge(int, "test_bridge") + + bridge.start() + self.assertTrue(bridge.is_running) + self.assertIsNotNone(bridge.loop) + + bridge.stop() + self.assertFalse(bridge.is_running) + self.assertIsNone(bridge.loop) + + def test_start_with_time(self): + """Test starting with a specific start time.""" + bridge = AsyncioBridge(int, "test_bridge") + start_time = datetime(2026, 1, 1, 12, 0, 0) + + bridge.start(start_time) + self.assertEqual(bridge._start_time, start_time) + + bridge.stop() + + def test_double_start_raises(self): + """Test that starting twice raises an error.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + try: + with self.assertRaises(RuntimeError): + bridge.start() + finally: + bridge.stop() + + def test_push_before_start_fails(self): + """Test that push returns False before graph starts.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + # Push before adapter is bound to graph + result = bridge.push(42) + self.assertFalse(result) + + bridge.stop() + + +class TestAsyncioBridgeCallbacks(unittest.TestCase): + """Tests for callback scheduling methods.""" + + def test_call_soon(self): + """Test call_soon 
schedules a callback.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + results = [] + + bridge.call_soon(lambda: results.append("called")) + + # Wait for callback to execute + time.sleep(0.1) + + self.assertEqual(results, ["called"]) + bridge.stop() + + def test_call_soon_with_args(self): + """Test call_soon with arguments.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + results = [] + + bridge.call_soon(lambda x, y: results.append(x + y), 1, 2) + + time.sleep(0.1) + + self.assertEqual(results, [3]) + bridge.stop() + + def test_call_soon_before_start_raises(self): + """Test that call_soon before start raises an error.""" + bridge = AsyncioBridge(int, "test_bridge") + + with self.assertRaises(RuntimeError): + bridge.call_soon(lambda: None) + + def test_call_later(self): + """Test call_later schedules a delayed callback.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + results = [] + start = time.time() + + bridge.call_later(0.1, lambda: results.append(time.time() - start)) + + # Wait for callback + time.sleep(0.2) + + self.assertEqual(len(results), 1) + self.assertGreaterEqual(results[0], 0.09) + + bridge.stop() + + def test_call_later_negative_raises(self): + """Test that negative delay raises an error.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + try: + with self.assertRaises(ValueError): + bridge.call_later(-1.0, lambda: None) + finally: + bridge.stop() + + def test_call_at(self): + """Test call_at schedules at specific time.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + results = [] + target = datetime.utcnow() + timedelta(milliseconds=100) + + bridge.call_at(target, lambda: results.append(datetime.utcnow())) + + time.sleep(0.2) + + self.assertEqual(len(results), 1) + # Allow some tolerance + self.assertLess(abs((results[0] - target).total_seconds()), 0.05) + + bridge.stop() + + def test_call_at_past_time(self): + """Test call_at with past time schedules 
immediately.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + results = [] + past_time = datetime.utcnow() - timedelta(seconds=10) + + bridge.call_at(past_time, lambda: results.append(True)) + + time.sleep(0.1) + + self.assertEqual(results, [True]) + bridge.stop() + + def test_call_at_offset(self): + """Test call_at_offset from start time.""" + bridge = AsyncioBridge(int, "test_bridge") + start_time = datetime.utcnow() + bridge.start(start_time) + + results = [] + + bridge.call_at_offset(timedelta(milliseconds=100), lambda: results.append(True)) + + time.sleep(0.2) + + self.assertEqual(results, [True]) + bridge.stop() + + +class TestAsyncioBridgeCoroutines(unittest.TestCase): + """Tests for running coroutines.""" + + def test_run_coroutine(self): + """Test running a coroutine.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + async def my_coro(): + await asyncio.sleep(0.05) + return 42 + + future = bridge.run_coroutine(my_coro()) + result = future.result(timeout=1.0) + + self.assertEqual(result, 42) + bridge.stop() + + def test_run_coroutine_before_start_raises(self): + """Test that running coroutine before start raises.""" + bridge = AsyncioBridge(int, "test_bridge") + + async def my_coro(): + return 42 + + with self.assertRaises(RuntimeError): + bridge.run_coroutine(my_coro()) + + def test_run_coroutine_with_exception(self): + """Test coroutine that raises exception.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + async def failing_coro(): + raise ValueError("test error") + + future = bridge.run_coroutine(failing_coro()) + + with self.assertRaises(ValueError): + future.result(timeout=1.0) + + bridge.stop() + + +class TestAsyncioBridgeTime(unittest.TestCase): + """Tests for time-related methods.""" + + def test_time(self): + """Test time() returns current time.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + t1 = bridge.time() + time.sleep(0.05) + t2 = bridge.time() + + 
self.assertGreater(t2, t1) + self.assertGreaterEqual(t2 - t1, 0.04) + + bridge.stop() + + def test_elapsed_since_start(self): + """Test elapsed_since_start returns correct duration.""" + bridge = AsyncioBridge(int, "test_bridge") + bridge.start() + + time.sleep(0.1) + elapsed = bridge.elapsed_since_start() + + self.assertGreaterEqual(elapsed.total_seconds(), 0.09) + + bridge.stop() + + def test_elapsed_before_start(self): + """Test elapsed_since_start before start returns zero.""" + bridge = AsyncioBridge(int, "test_bridge") + elapsed = bridge.elapsed_since_start() + self.assertEqual(elapsed, timedelta(0)) + + +class TestAsyncioBridgeWithCSP(unittest.TestCase): + """Tests for AsyncioBridge with actual CSP graphs.""" + + def test_push_to_csp(self): + """Test pushing data from asyncio to CSP.""" + bridge = AsyncioBridge(int, "data") + collected = [] + + @csp.node + def collect(data: ts[int]) -> ts[int]: + if csp.ticked(data): + return data + + @csp.graph + def g(): + data = bridge.adapter.out() + result = collect(data) + csp.add_graph_output("data", result) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.05) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=1)) + + bridge.wait_for_adapter(timeout=1.0) + + # Push some data + for i in range(5): + bridge.push(i) + time.sleep(0.05) + + results = runner.join() + bridge.stop() + + data = results.get("data", []) + values = [v for _, v in data] + self.assertEqual(values, [0, 1, 2, 3, 4]) + + def test_call_later_with_csp(self): + """Test call_later scheduling with CSP graph.""" + bridge = AsyncioBridge(str, "messages") + + @csp.node + def collect(data: ts[str]) -> ts[str]: + if csp.ticked(data): + return data + + @csp.graph + def g(): + data = bridge.adapter.out() + result = collect(data) + csp.add_graph_output("messages", result) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.05) + + runner = csp.run_on_thread(g, realtime=True, 
starttime=start_time, endtime=timedelta(seconds=1)) + + bridge.wait_for_adapter(timeout=1.0) + + # Schedule messages with call_later + bridge.call_later(0.1, lambda: bridge.push("msg1")) + bridge.call_later(0.2, lambda: bridge.push("msg2")) + bridge.call_later(0.3, lambda: bridge.push("msg3")) + + results = runner.join() + bridge.stop() + + data = results.get("messages", []) + values = [v for _, v in data] + self.assertEqual(values, ["msg1", "msg2", "msg3"]) + + def test_async_coroutine_with_csp(self): + """Test running async coroutine that pushes to CSP.""" + bridge = AsyncioBridge(dict, "async_data") + + @csp.node + def collect(data: ts[dict]) -> ts[dict]: + if csp.ticked(data): + return data + + @csp.graph + def g(): + data = bridge.adapter.out() + result = collect(data) + csp.add_graph_output("data", result) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.05) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=1)) + + bridge.wait_for_adapter(timeout=1.0) + + # Run coroutine that pushes to CSP + async def fetch_and_push(): + for i in range(3): + await asyncio.sleep(0.05) + bridge.push({"value": i}) + + bridge.run_coroutine(fetch_and_push()) + + results = runner.join() + bridge.stop() + + data = results.get("data", []) + values = [v["value"] for _, v in data] + self.assertEqual(values, [0, 1, 2]) + + def test_csp_timer_with_async(self): + """Test CSP timer running alongside async callbacks.""" + bridge = AsyncioBridge(str, "async") + + @csp.node + def combine(timer: ts[bool], async_data: ts[str]) -> ts[str]: + if csp.ticked(timer): + return "timer" + if csp.ticked(async_data): + return f"async:{async_data}" + + @csp.graph + def g(): + timer = csp.timer(timedelta(milliseconds=100)) + async_data = bridge.adapter.out() + result = combine(timer, async_data) + csp.add_graph_output("events", result) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.05) + + runner = csp.run_on_thread(g, 
realtime=True, starttime=start_time, endtime=timedelta(milliseconds=500)) + + bridge.wait_for_adapter(timeout=1.0) + + # Push async data between timer ticks + bridge.call_later(0.05, lambda: bridge.push("a")) + bridge.call_later(0.15, lambda: bridge.push("b")) + bridge.call_later(0.25, lambda: bridge.push("c")) + + results = runner.join() + bridge.stop() + + data = results.get("events", []) + values = [v for _, v in data] + + # Should have mix of timer and async events + timer_count = sum(1 for v in values if v == "timer") + async_count = sum(1 for v in values if v.startswith("async:")) + + self.assertGreater(timer_count, 0) + self.assertGreater(async_count, 0) + + +class TestBidirectionalBridge(unittest.TestCase): + """Tests for BidirectionalBridge.""" + + def test_create_bidirectional(self): + """Test creating a bidirectional bridge.""" + bridge = BidirectionalBridge(str, "bidi") + self.assertIsNotNone(bridge.adapter) + + def test_on_event(self): + """Test registering event callbacks.""" + bridge = BidirectionalBridge(str, "bidi") + bridge.start() + + received = [] + bridge.on_event(lambda x: received.append(x)) + + # Emit from "CSP side" + bridge.emit({"test": 123}) + + time.sleep(0.1) + + self.assertEqual(received, [{"test": 123}]) + bridge.stop() + + def test_off_event(self): + """Test unregistering event callbacks.""" + bridge = BidirectionalBridge(str, "bidi") + bridge.start() + + received = [] + callback = lambda x: received.append(x) + + bridge.on_event(callback) + result = bridge.off_event(callback) + self.assertTrue(result) + + bridge.emit({"test": 123}) + time.sleep(0.1) + + self.assertEqual(received, []) + bridge.stop() + + def test_off_event_not_found(self): + """Test unregistering non-existent callback.""" + bridge = BidirectionalBridge(str, "bidi") + + result = bridge.off_event(lambda x: None) + self.assertFalse(result) + + def test_bidirectional_with_csp(self): + """Test bidirectional communication with CSP.""" + bridge = BidirectionalBridge(str, 
"messages") + received_in_async = [] + + @csp.node + def process_and_respond(data: ts[str], bridge_ref: object) -> ts[str]: + if csp.ticked(data): + response = f"processed:{data}" + bridge_ref.emit({"original": data, "response": response}) + return response + + @csp.graph + def g(): + data = bridge.adapter.out() + result = process_and_respond(data, bridge) + csp.add_graph_output("results", result) + + bridge.on_event(lambda x: received_in_async.append(x)) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.05) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=1)) + + bridge.wait_for_adapter(timeout=1.0) + + # Push data to CSP + bridge.push("hello") + time.sleep(0.1) + bridge.push("world") + time.sleep(0.1) + + results = runner.join() + bridge.stop() + + # Check CSP outputs + data = results.get("results", []) + values = [v for _, v in data] + self.assertEqual(values, ["processed:hello", "processed:world"]) + + # Check asyncio received events + self.assertEqual(len(received_in_async), 2) + self.assertEqual(received_in_async[0]["original"], "hello") + self.assertEqual(received_in_async[1]["original"], "world") + + def test_multiple_event_callbacks(self): + """Test multiple event callbacks receive events.""" + bridge = BidirectionalBridge(str, "bidi") + bridge.start() + + received1 = [] + received2 = [] + + bridge.on_event(lambda x: received1.append(x)) + bridge.on_event(lambda x: received2.append(x)) + + bridge.emit("test") + time.sleep(0.1) + + self.assertEqual(received1, ["test"]) + self.assertEqual(received2, ["test"]) + + bridge.stop() + + +class TestDeferredHandle(unittest.TestCase): + """Tests for _DeferredHandle.""" + + def test_cancel(self): + """Test cancelling a deferred handle.""" + bridge = AsyncioBridge(int, "test") + bridge.start() + + results = [] + handle = bridge.call_later(0.5, lambda: results.append(True)) + + # Cancel before it fires + handle.cancel() + 
self.assertTrue(handle.cancelled()) + + time.sleep(0.6) + + # Should not have been called + self.assertEqual(results, []) + + bridge.stop() + + +if __name__ == "__main__": + unittest.main() diff --git a/csp/tests/test_examples.py b/csp/tests/test_examples.py index 8dde99882..3fa38c792 100644 --- a/csp/tests/test_examples.py +++ b/csp/tests/test_examples.py @@ -6,6 +6,13 @@ EXAMPLES_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "examples")) +# Examples to skip in tests (may hang or require specific environment) +SKIP_EXAMPLES = { + "e3_asyncio_integration.py", # Requires CSP event loop features + "e4_csp_asyncio_integration.py", # Requires CSP event loop features + "e2_csp_fastapi.py", # Starts a server that runs forever +} + # make examples importable without keeping in source tree sys.path.append(EXAMPLES_ROOT) @@ -23,7 +30,7 @@ def _get_modules_to_test(*folders): return [ (file, _get_module(folder, file)) for file in os.listdir(os.path.join(EXAMPLES_ROOT, *folders)) - if file.endswith(".py") + if file.endswith(".py") and file not in SKIP_EXAMPLES ] diff --git a/csp/tests/test_fd_wakeup.py b/csp/tests/test_fd_wakeup.py new file mode 100644 index 000000000..ea6bdf79a --- /dev/null +++ b/csp/tests/test_fd_wakeup.py @@ -0,0 +1,321 @@ +"""Tests for the native fd-based wakeup mechanism for event loop integration. + +This tests the FdWaiter functionality that allows asyncio's selector to monitor +the CSP event queue without polling. 
+""" + +import asyncio +import os +import select +import sys +import threading +import time +import unittest +from datetime import datetime, timedelta + +import csp +from csp.impl.__cspimpl import _cspimpl + + +class TestFdWakeupBasics(unittest.TestCase): + """Test basic fd wakeup functionality.""" + + def test_get_wakeup_fd_returns_valid_fd(self): + """Test that get_wakeup_fd returns a valid file descriptor.""" + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + + engine.start(start, end) + try: + fd = engine.get_wakeup_fd() + # Should return a valid fd (>= 0) on Unix, or -1 if not supported + self.assertIsInstance(fd, int) + if sys.platform != "win32": + # On Unix, should be a valid fd + self.assertGreaterEqual(fd, 0) + finally: + engine.finish() + + def test_clear_wakeup_fd(self): + """Test that clear_wakeup_fd can be called without error.""" + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + + engine.start(start, end) + try: + # Should not raise + engine.clear_wakeup_fd() + finally: + engine.finish() + + +@unittest.skipIf(sys.platform == "win32", "select.select on fds not supported on Windows") +class TestFdWakeupReadable(unittest.TestCase): + """Test that the fd becomes readable when events are pushed.""" + + def test_fd_not_readable_initially(self): + """Test that the fd is not readable when no events are queued.""" + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + + engine.start(start, end) + try: + fd = engine.get_wakeup_fd() + if fd < 0: + self.skipTest("FdWaiter not supported on this platform") + + # Check if fd is readable with zero timeout (should not be) + readable, _, _ = select.select([fd], [], [], 0) + # Note: might be readable if there are initial scheduled events + # This is implementation-dependent + finally: + engine.finish() + + +class 
TestFdWakeupCrossThread(unittest.TestCase): + """Test fd-based wakeup across threads.""" + + @unittest.skipIf(sys.platform == "win32", "select.select on fds not supported on Windows") + def test_fd_wakeup_integration_with_selector(self): + """Test that the fd can be used with select for waiting.""" + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + + engine.start(start, end) + try: + fd = engine.get_wakeup_fd() + if fd < 0: + self.skipTest("FdWaiter not supported on this platform") + + # This should not block indefinitely - select with timeout + readable, _, _ = select.select([fd], [], [], 0.1) + # Test passes if it doesn't hang + finally: + engine.finish() + + +class TestEventLoopFdIntegration(unittest.TestCase): + """Test fd integration through CspEventLoop.""" + + def test_event_loop_registers_wakeup_fd(self): + """Test that CspEventLoop registers the wakeup fd with its selector.""" + from csp.event_loop import CspEventLoop + + loop = CspEventLoop(realtime=True) + try: + # Start the CSP engine + loop._start_csp_engine() + + # Check that wakeup fd was registered + if loop._csp_wakeup_fd is not None: + self.assertGreaterEqual(loop._csp_wakeup_fd, 0) + # Verify it's in the selector + try: + key = loop._selector.get_key(loop._csp_wakeup_fd) + self.assertIsNotNone(key) + except KeyError: + # Might not be registered if platform doesn't support it + pass + finally: + loop._stop_csp_engine() + loop.close() + + def test_event_loop_unregisters_wakeup_fd_on_stop(self): + """Test that CspEventLoop unregisters the wakeup fd when stopped.""" + from csp.event_loop import CspEventLoop + + loop = CspEventLoop(realtime=True) + try: + loop._start_csp_engine() + wakeup_fd = loop._csp_wakeup_fd + loop._stop_csp_engine() + + # Should be cleaned up + self.assertIsNone(loop._csp_wakeup_fd) + + # Should not be in selector anymore + if wakeup_fd is not None: + with self.assertRaises(KeyError): + loop._selector.get_key(wakeup_fd) + 
finally: + loop.close() + + +class TestEndToEndWithCsp(unittest.TestCase): + """End-to-end tests with CSP graphs and the event loop.""" + + @unittest.skip("Needs to run in a file context") + def test_csp_timer_in_simulation(self): + """Test that CSP timers work correctly in simulation mode.""" + # Use simulation mode which is faster and more reliable for testing + results = [] + + @csp.node + def collector(x: csp.ts[int]) -> csp.ts[int]: + if csp.ticked(x): + results.append(x) + return x + + @csp.graph + def test_graph(): + timer = csp.timer(timedelta(milliseconds=20)) + counter = csp.count(timer) + csp.add_graph_output("out", collector(counter)) + + # Run in simulation mode (fast, deterministic) + start = datetime(2024, 1, 1) + csp.run(test_graph, starttime=start, endtime=start + timedelta(milliseconds=150), realtime=False) + + # Should have received the events (7 timer ticks at 20ms intervals over 150ms) + self.assertGreater(len(results), 0) + self.assertTrue(all(isinstance(r, int) for r in results)) + + @unittest.skipIf(sys.platform == "win32", "select.select on fds not supported on Windows") + def test_fd_wakeup_with_event_loop(self): + """Test that the fd-based wakeup is properly registered with the event loop. + + This tests that the CspEventLoop registers the fd with its selector. 
+ """ + from csp.event_loop import CspEventLoop + + # Use the CspEventLoop directly + loop = CspEventLoop(realtime=True) + + try: + loop._start_csp_engine() + fd = loop._csp_wakeup_fd + + if fd is None or fd < 0: + self.skipTest("FdWaiter not supported") + + # Verify the fd is valid and registered + self.assertGreaterEqual(fd, 0) + + # Test that we can select on it without hanging (timeout=0) + readable, _, _ = select.select([fd], [], [], 0) + # Test passes if it doesn't hang + + finally: + loop._stop_csp_engine() + loop.close() + + +class TestFdWakeupPerformance(unittest.TestCase): + """Performance comparison between polling and fd-based wakeup.""" + + @unittest.skipIf(sys.platform == "win32", "select.select on fds not supported on Windows") + def test_select_latency(self): + """Measure baseline latency of select with the wakeup fd.""" + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + + engine.start(start, end) + try: + fd = engine.get_wakeup_fd() + if fd < 0: + self.skipTest("FdWaiter not supported") + + # Measure select latency (no events, immediate timeout) + iterations = 1000 + start_time = time.perf_counter() + for _ in range(iterations): + select.select([fd], [], [], 0) + end_time = time.perf_counter() + + avg_latency_us = (end_time - start_time) / iterations * 1_000_000 + print(f"\nSelect latency (no events): {avg_latency_us:.2f} µs/call") + + # Should be reasonable (< 100 µs per call on most systems) + self.assertLess(avg_latency_us, 1000) # < 1ms + finally: + engine.finish() + + @unittest.skipIf(sys.platform == "win32", "select.select on fds not supported on Windows") + def test_wakeup_roundtrip_latency(self): + """Measure roundtrip latency: notify -> select ready -> clear.""" + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + + engine.start(start, end) + try: + fd = engine.get_wakeup_fd() + if fd < 0: + self.skipTest("FdWaiter not 
supported") + + # We can't easily trigger notify from Python without pushing events + # So we just measure select + clear cycle time + iterations = 1000 + start_time = time.perf_counter() + for _ in range(iterations): + # Poll (no block) + select.select([fd], [], [], 0) + # Clear (even if nothing to clear) + engine.clear_wakeup_fd() + end_time = time.perf_counter() + + avg_latency_us = (end_time - start_time) / iterations * 1_000_000 + print(f"\nSelect + clear latency: {avg_latency_us:.2f} µs/call") + + self.assertLess(avg_latency_us, 1000) # < 1ms + finally: + engine.finish() + + def test_polling_overhead_comparison(self): + """Compare overhead of polling vs fd-based approach. + + This measures the cost difference between: + 1. Polling with time.sleep(interval) + 2. Using select with the wakeup fd + """ + # Polling approach: time.sleep costs + poll_iterations = 100 + poll_interval = 0.001 # 1ms + + start_time = time.perf_counter() + for _ in range(poll_iterations): + time.sleep(poll_interval) + poll_time = time.perf_counter() - start_time + + # Expected time: poll_iterations * poll_interval + expected_poll = poll_iterations * poll_interval + poll_overhead = poll_time - expected_poll + + print(f"\nPolling ({poll_iterations} x {poll_interval * 1000:.1f}ms sleep):") + print(f" Total time: {poll_time * 1000:.2f} ms") + print(f" Expected: {expected_poll * 1000:.2f} ms") + print(f" Overhead: {poll_overhead * 1000:.2f} ms") + + # Now measure fd-based select (instant return when no events) + if sys.platform != "win32": + engine = _cspimpl.PyEngine(realtime=True) + start = datetime.utcnow() + end = start + timedelta(hours=1) + engine.start(start, end) + + try: + fd = engine.get_wakeup_fd() + if fd >= 0: + start_time = time.perf_counter() + for _ in range(poll_iterations): + # With timeout 0, returns immediately + select.select([fd], [], [], 0) + select_time = time.perf_counter() - start_time + + print(f"\nFd-based select ({poll_iterations} iterations, no block):") + 
print(f" Total time: {select_time * 1000:.2f} ms") + print(f" Per-call: {select_time / poll_iterations * 1000000:.2f} µs") + + # Fd-based should be MUCH faster than polling with sleep + self.assertLess(select_time, poll_time / 10) + finally: + engine.finish() + + +if __name__ == "__main__": + unittest.main() diff --git a/csp/tests/test_parsing.py b/csp/tests/test_parsing.py index d89bfb037..60046dd4b 100644 --- a/csp/tests/test_parsing.py +++ b/csp/tests/test_parsing.py @@ -131,7 +131,9 @@ def foo(): __alarms__(x) pass - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def foo(): @@ -146,7 +148,9 @@ def foo(): __alarms__(x=int) pass - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def foo(): @@ -154,7 +158,9 @@ def foo(): x: int = 5 pass - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def foo(): @@ -162,7 +168,9 @@ def foo(): x: ts[int] pass - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def foo(): @@ -1658,21 +1666,27 @@ def g1(): self.assertEqual(res["y"], exp_out) # Now verify a ton of error messages - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + 
CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def n(): with csp.alarms(): a: ts[int] - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def n(): with csp.alarms(): a: ts[int] = csp.foo() - with self.assertRaisesRegex(TypeError, "function `csp.alarm` does not take keyword arguments"): + with self.assertRaisesRegex( + TypeError, "function `csp.alarm`/`csp.async_alarm` does not take keyword arguments" + ): @csp.node def n(): @@ -1680,7 +1694,7 @@ def n(): a: ts[int] = csp.alarm(typ=int) with self.assertRaisesRegex( - TypeError, "function `csp.alarm` requires a single type argument: 0 arguments given" + TypeError, "function `csp.alarm`/`csp.async_alarm` requires a single type argument: 0 arguments given" ): @csp.node @@ -1689,7 +1703,7 @@ def n(): a: ts[int] = csp.alarm() with self.assertRaisesRegex( - TypeError, "function `csp.alarm` requires a single type argument: 2 arguments given" + TypeError, "function `csp.alarm`/`csp.async_alarm` requires a single type argument: 2 arguments given" ): @csp.node @@ -1705,7 +1719,7 @@ def n(): a = csp.alarm(foo()) with self.assertRaisesRegex( - TypeError, "function `csp.alarm` requires a single type argument: 2 arguments given" + TypeError, "function `csp.alarm`/`csp.async_alarm` requires a single type argument: 2 arguments given" ): @csp.node @@ -1719,7 +1733,9 @@ def n(): with csp.alarms(): a: ts[StructA] = csp.alarm(StructB) - with self.assertRaisesRegex(CspParseError, "Alarms must be initialized with csp.alarm in __alarms__ block"): + with self.assertRaisesRegex( + CspParseError, "Alarms must be initialized with csp.alarm or csp.async_alarm in __alarms__ block" + ): @csp.node def n(): diff --git a/docs/wiki/_Sidebar.md b/docs/wiki/_Sidebar.md index 
c1ae37f8a..eff5c95a5 100644 --- a/docs/wiki/_Sidebar.md +++ b/docs/wiki/_Sidebar.md @@ -30,11 +30,13 @@ Notes for editors: - [Use Statistical Nodes](Use-Statistical-Nodes) - [Create Dynamic Baskets](Create-Dynamic-Baskets) +- [Use Async Operations](Async) - Write Adapters: - [Write Historical Input Adapters](Write-Historical-Input-Adapters) - [Write Realtime Input Adapters](Write-Realtime-Input-Adapters) - [Write Output Adapters](Write-Output-Adapters) - [Profile CSP Code](Profile-CSP-Code) +- [Event Loop Integration](Event-Loop-Integration) **References** @@ -56,6 +58,7 @@ Notes for editors: **Developer Guide** +- [Architecture](Architecture) - [Contributing](Contribute) - [Development Setup](Local-Development-Setup) - [Build CSP from Source](Build-CSP-from-Source) diff --git a/docs/wiki/dev-guides/Architecture.md b/docs/wiki/dev-guides/Architecture.md new file mode 100644 index 000000000..1d1929c43 --- /dev/null +++ b/docs/wiki/dev-guides/Architecture.md @@ -0,0 +1,546 @@ +# CSP Engine Architecture + +This document provides an in-depth technical overview of CSP's C++ engine implementation. It is intended for developers who want to understand or contribute to the core engine code. + +For conceptual introductions to CSP graphs and nodes, see the [concepts documentation](../concepts/). 
+ +## Table of Contents + +- [Overview](#overview) +- [Engine Lifecycle](#engine-lifecycle) +- [Core Components](#core-components) + - [RootEngine](#rootengine) + - [SRMWLockFreeQueue](#srmwlockfreequeue) + - [CycleStepTable](#cyclesteptable) + - [Scheduler](#scheduler) + - [QueueWaiter and FdWaiter](#queuewaiter-and-fdwaiter) +- [Execution Model](#execution-model) + - [Engine Cycles](#engine-cycles) + - [Push Event Processing](#push-event-processing) + - [Rank-Based Execution](#rank-based-execution) +- [Adapters](#adapters) + - [InputAdapter](#inputadapter) + - [OutputAdapter](#outputadapter) + - [PushInputAdapter](#pushinputadapter) + - [PushGroup Synchronization](#pushgroup-synchronization) +- [Asyncio Integration](#asyncio-integration) + - [Decomposed Execution API](#decomposed-execution-api) + - [FdWaiter for Native Event Loop Integration](#fdwaiter-for-native-event-loop-integration) +- [Thread Safety](#thread-safety) +- [Memory Management](#memory-management) + +______________________________________________________________________ + +## Overview + +CSP's engine is implemented in C++ for performance, with Python bindings via pybind11. The engine processes events in discrete **cycles**, where each cycle represents a single point in time. Within a cycle, nodes execute in **rank order** to ensure deterministic behavior. + +Key design principles: + +1. **Determinism**: Given the same inputs and same starttime, a CSP graph produces identical outputs +1. **Push-based events**: External data enters through lock-free queues, avoiding blocking the main engine thread +1. **Single-threaded execution**: The engine itself runs on a single thread; external threads push events that are processed in the next cycle +1. 
**Time-ordered processing**: Events are processed strictly in time order, with cycles executing at each unique timestamp + +______________________________________________________________________ + +## Engine Lifecycle + +The engine exposes a **decomposed execution API** that allows external event loops (like Python's asyncio) to drive execution: + +``` +┌─────────────────┐ +│ start() │ Initialize engine with starttime/endtime +└────────┬────────┘ + │ + ▼ +┌─────────────────┐ +│processOneCycle()│ Execute one engine cycle (repeatable) +└────────┬────────┘ + │ (repeat) + ▼ +┌─────────────────┐ +│ finish() │ Clean up and return outputs +└─────────────────┘ +``` + +### start(starttime, endtime) + +Initializes the engine: + +- Sets up the scheduler with the time range +- Prepares the cycle step table +- Initializes signal handlers for graceful shutdown +- Calls `start()` on all adapters + +### processOneCycle(maxWait) + +Executes a single engine cycle: + +1. In realtime mode, waits up to `maxWait` seconds for push events or scheduled callbacks +1. Processes all pending push events from the lock-free queue +1. Executes all scheduled callbacks due at the current time +1. Runs the cycle step table (executes nodes by rank) +1. Returns `true` if the engine should continue, `false` when done + +### finish() + +Shuts down the engine: + +- Calls `stop()` on all adapters +- Collects graph outputs +- Cleans up resources + +This decomposition allows the `CspEventLoop` to interleave CSP execution with Python asyncio operations. + +______________________________________________________________________ + +## Core Components + +### RootEngine + +**Header**: `cpp/csp/engine/RootEngine.h` + +The `RootEngine` is the top-level orchestrator. 
Key members: + +| Member | Type | Purpose | +| ------------------ | ------------------------------ | ------------------------------------------------- | +| `m_cycleStepTable` | `CycleStepTable` | Executes consumers by rank | +| `m_scheduler` | `Scheduler` | Time-based callback scheduling | +| `m_pushEventQueue` | `SRMWLockFreeQueue` | Thread-safe external event queue | +| `m_queueWaiter` | `QueueWaiter` | Blocks until events arrive (condition variable) | +| `m_fdWaiter` | `FdWaiter` | File descriptor for native event loop integration | +| `m_cycleCount` | `uint64_t` | Monotonically increasing cycle counter | + +The engine maintains the current time via `now()` which returns the timestamp of the current cycle. + +### SRMWLockFreeQueue + +**Header**: `cpp/csp/core/SRMWLockFreeQueue.h` + +A **S**ingle-**R**eader **M**ulti-**W**riter lock-free queue using atomic compare-exchange operations. + +**Design**: + +``` + Writer Threads Reader Thread + ┌──────────┐ ┌─────────────┐ + │ push(A) │──┐ │ │ + └──────────┘ │ ┌──────┐ │ popAll() │ + ├──▶ │ HEAD │ ──▶ │ returns │ + ┌──────────┐ │ └──────┘ │ linked list │ + │ push(B) │──┘ │ │ + └──────────┘ └─────────────┘ +``` + +**Key operations**: + +- **`push(T)`**: Writers atomically prepend to a linked list head. Uses `compare_exchange_weak` in a loop. +- **`popAll()`**: Reader atomically swaps the head to null and returns the entire list. Reverses it to maintain FIFO order. +- **`Batch`**: Groups multiple pushes into a single atomic operation, ensuring related events stay together. 
+ +**Example usage in CSP**: + +```cpp +// External thread pushing an event +m_pushEventQueue.push(new TypedPushEvent(adapter, 42)); +m_queueWaiter.notify(); // Wake up engine + +// Engine thread consuming events +auto events = m_pushEventQueue.popAll(); +for (auto& event : events) { + event->invoke(); +} +``` + +### CycleStepTable + +**Header**: `cpp/csp/engine/CycleStepTable.h` + +Manages **rank-based execution** of consumers (nodes and output adapters) within a cycle. + +**Concept**: Each consumer has a **rank** determined by its position in the graph topology. Lower ranks execute first, ensuring that upstream nodes tick before downstream consumers see the data. + +**Implementation**: + +``` +┌─────────────────────────────────────────┐ +│ DynamicBitSet: dirty ranks │ +│ ┌───┬───┬───┬───┬───┬───┬───┬───┐ │ +│ │ 0 │ 1 │ 0 │ 1 │ 0 │ 0 │ 1 │ 0 │ ... │ +│ └───┴───┴───┴───┴───┴───┴───┴───┘ │ +│ │ +│ m_table[rank] = vector │ +│ rank 1: [NodeA, NodeB] │ +│ rank 3: [NodeC] │ +│ rank 6: [OutputAdapterX] │ +└─────────────────────────────────────────┘ +``` + +When a consumer's input ticks: + +1. The consumer's rank is marked dirty in the bitset +1. During cycle execution, the table iterates through dirty ranks in order +1. All consumers at each rank execute via `Consumer::execute()` +1. Consumers may produce outputs, marking downstream ranks as dirty + +### Scheduler + +**Header**: `cpp/csp/engine/Scheduler.h` + +Handles time-based scheduling of callbacks (alarms, timers). 
+ +**Data structures**: + +- **`EventMap`**: A map from `DateTime` to a list of `Event` objects +- **`Handle`**: An opaque reference to a scheduled event (used for cancellation) +- **`DynamicEngineStart` monitor**: Detects adapters added during runtime (for dynamic graphs) + +**Key operations**: + +```cpp +// Schedule a callback for a specific time +Handle scheduleCallback(DateTime when, Callback cb); + +// Reschedule an existing callback +void rescheduleCallback(Handle h, DateTime newTime); + +// Cancel a scheduled callback +void cancelCallback(Handle h); + +// Get the next scheduled time +DateTime getNextTime(); + +// Execute all events at or before the given time +void executeEventsUpTo(DateTime time); +``` + +### QueueWaiter and FdWaiter + +**Header**: `cpp/csp/core/QueueWaiter.h` + +**QueueWaiter**: Traditional condition-variable-based waiting for push events. + +```cpp +// Writer thread +void notify() { + std::lock_guard guard(m_lock); + m_eventsPending = true; + m_condition.notify_one(); +} + +// Engine thread +bool wait(TimeDelta maxWait) { + std::unique_lock lock(m_lock); + m_condition.wait_for(lock, maxWait, [&]{ return m_eventsPending; }); + bool had_events = m_eventsPending; + m_eventsPending = false; + return had_events; +} +``` + +**FdWaiter**: File-descriptor-based signaling for native event loop integration. This allows external event loops (like asyncio) to use `select()`/`poll()`/`epoll()` to wait for CSP events. 
+ +Platform-specific implementations: + +| Platform | Mechanism | Description | +| -------- | ----------- | ------------------------- | +| Linux | `eventfd` | Single fd, most efficient | +| macOS | `pipe` | Pair of fds (read/write) | +| Windows | Socket pair | Localhost TCP connection | + +```cpp +// Get fd for registration with external selector +int fd = fdWaiter.readFd(); +selector.register(fd, EVENT_READ); + +// When events available, writer calls: +fdWaiter.notify(); + +// Event loop detects fd readable, then: +fdWaiter.clear(); // Reset for next notification +engine.processOneCycle(0); // Process events +``` + +______________________________________________________________________ + +## Execution Model + +### Engine Cycles + +Each cycle processes events at a single timestamp: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Engine Cycle N │ +│ Time: 2024-01-01 10:00:00.123 │ +├─────────────────────────────────────────────────────────────┤ +│ 1. Process push events from queue │ +│ - External data pushed by adapter threads │ +│ - Each tick schedules consumer in CycleStepTable │ +│ │ +│ 2. Execute scheduler callbacks │ +│ - Timer ticks, alarm callbacks │ +│ - May schedule more consumers │ +│ │ +│ 3. Execute CycleStepTable │ +│ - Iterate dirty ranks in order │ +│ - Execute all consumers at each rank │ +│ - Consumers may output, dirtying downstream ranks │ +│ │ +│ 4. 
Increment cycleCount │ +└─────────────────────────────────────────────────────────────┘ +``` + +Between cycles, the engine either: + +- **Simulation mode**: Jumps instantly to the next scheduled event time +- **Realtime mode**: Waits until wall-clock time reaches the next event, or push events arrive + +### Push Event Processing + +When external data arrives: + +``` +External Thread Engine Thread + │ │ + │ push(event) │ + │ ───────────▶ │ + │ notify() │ + │ ─────────────────────▶ │ + │ │ (wake from wait) + │ popAll() + │ │ + │ for each event: + │ adapter.consumeEvent() + │ │ + │ cycleStepTable.execute() +``` + +### Rank-Based Execution + +Rank assignment follows graph topology: + +``` +Input Adapters: rank 0 + │ + ▼ + ┌──────┐ + │Node A│ rank 1 + └──┬───┘ + │ + ┌───┴───┐ + ▼ ▼ +┌──────┐ ┌──────┐ +│Node B│ │Node C│ rank 2 +└──┬───┘ └──┬───┘ + │ │ + └───┬───┘ + ▼ + ┌─────────┐ + │ Node D │ rank 3 + └────┬────┘ + │ + ▼ +┌───────────────┐ +│Output Adapter │ rank 4 +└───────────────┘ +``` + +This ensures that when Node A ticks, Node B and C see A's output in the same cycle, and Node D sees B and C's outputs. + +______________________________________________________________________ + +## Adapters + +### InputAdapter + +**Header**: `cpp/csp/engine/InputAdapter.h` + +Base class for adapters that bring data into the graph. Inherits from `TimeSeriesProvider` (it *is* a time series). 
+
+**Key methods**:
+
+- `start(DateTime, DateTime)`: Called when engine starts
+- `stop()`: Called when engine stops
+- `consumeTick(value)`: Process an incoming value based on `PushMode`
+
+**PushMode handling**:
+
+| Mode | Behavior |
+| ---------------- | -------------------------------------------------- |
+| `LAST_VALUE` | Multiple ticks at same time collapse to last value |
+| `NON_COLLAPSING` | Each tick gets its own cycle (same timestamp) |
+| `BURST` | All ticks at same time grouped into a vector |
+
+### OutputAdapter
+
+**Header**: `cpp/csp/engine/OutputAdapter.h`
+
+Base class for adapters that send data out of the graph. Inherits from `Consumer` (it listens to a time series).
+
+**Key methods**:
+
+- `link(TimeSeriesProvider*)`: Connect to input time series
+- `executeImpl()`: Called when input ticks (override in subclasses)
+
+### PushInputAdapter
+
+**Header**: `cpp/csp/engine/PushInputAdapter.h`
+
+Extends `InputAdapter` for thread-safe pushing from external threads.
+
+**Key features**:
+
+- `pushTick(value)`: Thread-safe push via `SRMWLockFreeQueue`
+- Integrates with `PushGroup` for synchronization across multiple adapters
+
+```cpp
+template <typename T>
+void PushInputAdapter::pushTick(const T& value) {
+    auto event = new TypedPushEvent<T>(this, value);
+    if (m_pushGroup)
+        m_pushGroup->push(event);
+    else {
+        m_pushEventQueue.push(event);
+        m_queueWaiter.notify();
+    }
+}
+```
+
+### PushGroup Synchronization
+
+When multiple adapters need to push related events atomically:
+
+```cpp
+// PushGroup states:
+enum State { NONE, LOCKING, LOCKED };
+
+// Usage pattern:
+pushGroup.startBatch(); // Acquire lock
+adapter1.pushTick(value1); // Events buffered
+adapter2.pushTick(value2);
+pushGroup.endBatch(); // Atomically push all events
+```
+
+This ensures related events from different adapters arrive in the same engine cycle.
+ +______________________________________________________________________ + +## Asyncio Integration + +### Decomposed Execution API + +The engine exposes a Python-accessible decomposed API via `PyEngine`: + +```python +engine = _cspimpl.PyEngine(realtime=True) + +# Initialize +engine.start(starttime, endtime) + +# Run cycles manually +while engine.process_one_cycle(max_wait=0.1): + # Can do asyncio work here between cycles + await asyncio.sleep(0) + +# Cleanup +results = engine.finish() +``` + +This allows `CspEventLoop` to integrate CSP cycles with Python's asyncio. + +### FdWaiter for Native Event Loop Integration + +The engine provides a wakeup file descriptor: + +```python +# Get the fd for selector registration +fd = engine.get_wakeup_fd() +selector.register(fd, selectors.EVENT_READ) + +# When fd becomes readable, CSP has events +events = selector.select(timeout) +for key, mask in events: + if key.fd == fd: + engine.clear_wakeup_fd() + engine.process_one_cycle(0) +``` + +Benefits: + +- No polling required +- Native integration with asyncio's event loop +- Efficient wakeup via OS-level fd signaling + +______________________________________________________________________ + +## Thread Safety + +CSP uses a **single-threaded engine with thread-safe ingestion**: + +| Operation | Thread Safety | +| ---------------------------------- | ----------------------------- | +| `pushTick()` on adapters | Thread-safe (lock-free queue) | +| `processOneCycle()` | Single-threaded only | +| `scheduleCallback()` within engine | Single-threaded only | +| `SRMWLockFreeQueue.push()` | Thread-safe | +| `SRMWLockFreeQueue.popAll()` | Single reader only | + +**Rules for external code**: + +1. Push events via `PushInputAdapter` from any thread +1. Never call engine methods from external threads (except push) +1. 
Use `PushGroup` when multiple adapters need atomic batching
+
+______________________________________________________________________
+
+## Memory Management
+
+CSP uses **engine-owned** and **reference-counted** patterns:
+
+### EngineOwned
+
+Objects that inherit from `EngineOwned` are tracked by the engine and cleaned up on shutdown:
+
+```cpp
+class InputAdapter : public TimeSeriesProvider, public EngineOwned {
+    // Engine deletes this on shutdown
+};
+```
+
+### Push Events
+
+Push events are allocated by writers and deleted after processing:
+
+```cpp
+// Writer allocates
+auto event = new TypedPushEvent<int>(adapter, 42);
+queue.push(event);
+
+// Engine consumes and deletes
+auto events = queue.popAll();
+for (auto& event : events) {
+    event->invoke();
+    delete event;
+}
+```
+
+### Time Series Buffers
+
+Time series data uses ring buffers with configurable history:
+
+```python
+# Python API
+csp.set_buffering_policy(ts, tick_count=100)
+csp.set_buffering_policy(ts, tick_history=timedelta(minutes=5))
+```
+
+______________________________________________________________________
+
+## Further Reading
+
+- [CSP Node concepts](../concepts/CSP-Node.md) - Node anatomy and lifecycle hooks
+- [Execution Modes](../concepts/Execution-Modes.md) - Simulation vs realtime
+- [Adapters](../concepts/Adapters.md) - Writing custom adapters
+- [Asyncio Integration](../how-tos/Asyncio-Integration.md) - Using CSP with asyncio
diff --git a/docs/wiki/how-tos/Async.md b/docs/wiki/how-tos/Async.md
new file mode 100644
index 000000000..62112cbc9
--- /dev/null
+++ b/docs/wiki/how-tos/Async.md
@@ -0,0 +1,594 @@
+## Table of Contents
+
+- [Table of Contents](#table-of-contents)
+- [Introduction](#introduction)
+- [Event Loop Options](#event-loop-options)
+  - [Option 1: Default (Shared Background Loop)](#option-1-default-shared-background-loop)
+  - [Option 2: Same-Thread Asyncio Mode (Default in Realtime)](#option-2-same-thread-asyncio-mode-default-in-realtime)
+  - [Option 3: Custom Event Loop](#option-3-custom-event-loop)
+  - [Choosing the Right
Option](#choosing-the-right-option) +- [Graph-Level Async Adapters](#graph-level-async-adapters) + - [async_for - Async Generator to Time Series](#async_for---async-generator-to-time-series) + - [async_in - Single Async Value to Time Series](#async_in---single-async-value-to-time-series) + - [async_out - Time Series to Async Function](#async_out---time-series-to-async-function) + - [async_node - Transform Time Series via Async](#async_node---transform-time-series-via-async) +- [Node-Level Async Operations](#node-level-async-operations) + - [await\_ - Blocking Await in Nodes](#await_---blocking-await-in-nodes) + - [async_alarm - Alarm-Like Async Pattern](#async_alarm---alarm-like-async-pattern) + - [AsyncContext - Persistent Async Event Loop (Advanced)](#asynccontext---persistent-async-event-loop-advanced) +- [Complete Example](#complete-example) +- [Best Practices](#best-practices) + +## Introduction + +CSP is fundamentally a synchronous, deterministic event processing framework. However, modern applications often need to interact with async APIs, external services, or I/O-bound operations that are naturally asynchronous. + +The `csp.impl.async_adapter` module provides several tools to bridge Python's `asyncio` with CSP's synchronous graph processing: + +| Function | Purpose | +| ----------------- | ---------------------------------------------------------- | +| `csp.async_for` | Convert an async generator to a CSP time series | +| `csp.async_in` | Convert a single async value to a time series (ticks once) | +| `csp.async_out` | Invoke an async function on each tick (side effects) | +| `csp.async_node` | Transform time series values via an async function | +| `csp.await_` | Await async code inside a CSP node (blocking) | +| `csp.async_alarm` | Alarm-like pattern for async operations in nodes | + +## Event Loop Options + +All async adapters accept an optional `loop` parameter that controls which asyncio event loop is used for running async operations. 
CSP provides three ways to handle this: + +### Option 1: Default (Shared Background Loop) + +When you don't specify a loop, CSP automatically uses a shared background loop: + +```python +@csp.graph +def my_graph(): + # Uses shared background loop (default) + values = csp.async_for(my_async_gen(10)) + result = csp.async_in(fetch_config()) + csp.async_out(values, send_to_api) +``` + +This shared loop: + +- Runs in a dedicated background thread +- Is lazily initialized on first use +- Is shared by all async adapters (efficient) +- Is automatically cleaned up on process exit + +### Option 2: Same-Thread Asyncio Mode (Default in Realtime) + +In realtime mode, CSP runs async operations on the same thread by default. This allows async operations to run directly on CSP's engine loop, eliminating the need for a separate background thread: + +```python +@csp.graph +def my_graph(): + # All async adapters automatically use CSP's asyncio loop + updates = csp.async_for(fetch_updates(10)) + config = csp.async_in(get_config()) + doubled = csp.async_node(updates, async_double) + csp.print("doubled", doubled) + +# In realtime mode, same-thread asyncio is the default +csp.run(my_graph, realtime=True, endtime=timedelta(seconds=5)) +``` + +Key benefits: + +- **Same-thread execution**: Async operations run on the same thread as the CSP engine +- **No threading overhead**: No cross-thread synchronization needed +- **Direct integration**: Async code can interact more naturally with CSP's scheduler +- **Automatic detection**: All async adapters detect CSP asyncio mode and use it automatically + +To use the legacy background thread mode instead: + +```python +# Explicitly run async on a background thread +csp.run(my_graph, realtime=True, endtime=timedelta(seconds=5), asyncio_on_thread=True) +``` + +To check if you're in asyncio mode: + +```python +from csp.impl.async_adapter import is_csp_asyncio_mode, get_csp_asyncio_loop + +if is_csp_asyncio_mode(): + loop = get_csp_asyncio_loop() + 
print(f"Running in CSP asyncio mode on {loop}") +``` + +### Option 3: Custom Event Loop + +You can provide your own event loop to any async adapter: + +```python +import asyncio + +# Create a custom loop +my_loop = asyncio.new_event_loop() + +@csp.graph +def my_graph(): + # All operations use the custom loop + values = csp.async_for(my_async_gen(10), loop=my_loop) + result = csp.async_in(fetch_data(), loop=my_loop) + csp.async_out(values, send_data, loop=my_loop) + transformed = csp.async_node(values, transform, loop=my_loop) +``` + +### Choosing the Right Option + +| Scenario | Recommended Option | +| ------------------------------------------- | -------------------------------------------------- | +| Simple async I/O, realtime mode | **Default** (`realtime=True`, same-thread asyncio) | +| Need background thread for async | **Background thread** (`asyncio_on_thread=True`) | +| Need to coordinate with external async code | **Custom Loop** (pass `loop` parameter) | +| Using CspEventLoop for asyncio integration | **Default** (detects running loop automatically) | + +The async adapters use this priority for finding a loop: + +1. If CSP is running in realtime mode with same-thread asyncio (the default), use that loop +1. If there's a running asyncio loop (e.g., CspEventLoop), use it +1. Otherwise, use the shared background loop + +## Graph-Level Async Adapters + +These adapters work at the graph level to bridge async code with CSP time series. + +### async_for - Async Generator to Time Series + +`csp.async_for` converts an async generator into a CSP time series. Each yielded value becomes a tick. + +**Signature:** + +```python +def async_for( + async_gen_or_func: AsyncIterator[T], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> ts[T] +``` + +**Parameters:** + +- `async_gen_or_func`: An async generator instance (result of calling an async generator function) +- `loop`: Event loop to use. 
If None, uses the best available loop (see [Event Loop Options](#event-loop-options)) + +**Example:** + +```python +from typing import AsyncIterator +import asyncio +import csp +from datetime import timedelta + +async def fetch_updates(count: int) -> AsyncIterator[dict]: + """Async generator that yields updates from an external source.""" + for i in range(count): + await asyncio.sleep(0.1) # Simulate async I/O + yield {"id": i, "value": i * 10} + +@csp.graph +def my_graph(): + # Convert async generator to time series + updates = csp.async_for(fetch_updates(10)) + csp.print("update", updates) + +csp.run(my_graph, realtime=True, endtime=timedelta(seconds=2)) +``` + +**Key points:** + +- In realtime mode, the async generator runs on the same thread as CSP (or on a background thread if `asyncio_on_thread=True`) +- Type is inferred from the `AsyncIterator[T]` return annotation +- Runs until the generator is exhausted or the graph ends + +### async_in - Single Async Value to Time Series + +`csp.async_in` converts a single async coroutine result into a time series that ticks once. + +**Signature:** + +```python +def async_in( + coro: Awaitable[T], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> ts[T] +``` + +**Parameters:** + +- `coro`: A coroutine instance (result of calling an async function) +- `loop`: Event loop to use. 
If None, uses the best available loop (see [Event Loop Options](#event-loop-options)) + +**Example:** + +```python +async def fetch_config() -> dict: + """Fetch configuration from an async API.""" + await asyncio.sleep(0.1) + return {"setting": "value"} + +@csp.graph +def my_graph(): + # Single async value that ticks once when ready + config = csp.async_in(fetch_config()) + csp.print("config", config) +``` + +**Key points:** + +- Ticks exactly once when the coroutine completes +- Useful for initialization or one-time async fetches +- Type is inferred from the return annotation + +### async_out - Time Series to Async Function + +`csp.async_out` invokes an async function each time the input time series ticks. This is useful for async side effects like sending data to external services. + +**Signature:** + +```python +@csp.node +def async_out( + x: ts[T], + async_func: Callable[[T], Awaitable[None]], + loop: Optional[asyncio.AbstractEventLoop] = None, +) +``` + +**Parameters:** + +- `x`: Input time series that triggers the async function +- `async_func`: An async function that takes the ticked value and returns None +- `loop`: Event loop to use. If None, uses the best available loop (see [Event Loop Options](#event-loop-options)) + +**Example:** + +```python +async def send_to_api(value: int) -> None: + """Send value to an external async API.""" + await asyncio.sleep(0.1) # Simulate async I/O + print(f"Sent: {value}") + +@csp.graph +def my_graph(): + counter = csp.count(csp.timer(timedelta(seconds=0.5), True)) + + # Invoke async function on each tick + csp.async_out(counter, send_to_api) +``` + +**Key points:** + +- Each tick triggers an async operation +- Operations run concurrently on the event loop +- No return value (fire-and-forget for side effects) + +### async_node - Transform Time Series via Async + +`csp.async_node` transforms input time series values through an async function, producing an output time series. 
+ +**Signature:** + +```python +def async_node( + x: ts[T], + async_func: Callable[[T], Awaitable[U]], + *, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> ts[U] +``` + +**Parameters:** + +- `x`: Input time series +- `async_func`: An async function that transforms the input value +- `loop`: Event loop to use. If None, uses the best available loop (see [Event Loop Options](#event-loop-options)) + +**Example:** + +```python +async def fetch_details(id: int) -> dict: + """Fetch details for an ID from an async API.""" + await asyncio.sleep(0.1) + return {"id": id, "details": f"Details for {id}"} + +@csp.graph +def my_graph(): + ids = csp.count(csp.timer(timedelta(seconds=0.5), True)) + + # Transform each ID to details via async call + details = csp.async_node(ids, fetch_details) + csp.print("details", details) +``` + +**Key points:** + +- Each input tick triggers an async transformation +- Output ticks when the async operation completes +- Order of outputs matches order of operation completion (may differ from input order) + +## Node-Level Async Operations + +These tools allow async operations inside CSP node definitions. + +### await\_ - Blocking Await in Nodes + +`csp.await_` allows blocking on an async operation inside a node. The node execution pauses until the async operation completes. + +**Signature:** + +```python +def await_( + coro: Awaitable[T], + block: bool = True, + timeout: float = None, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> T +``` + +**Parameters:** + +- `coro`: A coroutine instance (result of calling an async function) +- `block`: If True (default), blocks until the coroutine completes. If False, returns a Future +- `timeout`: Optional timeout in seconds +- `loop`: Event loop to use. 
If None, uses the best available loop (see [Event Loop Options](#event-loop-options)) + +**Example:** + +```python +async def fetch_value(key: str) -> int: + await asyncio.sleep(0.1) + return hash(key) % 100 + +@csp.node +def node_with_await(key: ts[str]) -> ts[int]: + if csp.ticked(key): + # Block until async completes + result = csp.await_(fetch_value(key), block=True) + return result +``` + +**Key points:** + +- `block=True` (default) blocks until completion +- `block=False` returns a `Future` for later checking +- Uses the appropriate event loop automatically + +### async_alarm - Alarm-Like Async Pattern + +`csp.async_alarm` provides the most idiomatic CSP pattern for async operations. It works like a regular alarm but fires when an async operation completes. + +```python +async def async_process(value: int) -> int: + await asyncio.sleep(0.1) + return value * 2 + +@csp.node +def node_with_async_alarm() -> ts[int]: + with csp.alarms(): + poll_alarm = csp.alarm(bool) + async_alarm = csp.async_alarm(int) # Declare in alarms block + + with csp.state(): + s_counter = 0 + s_pending = False + + with csp.start(): + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(poll_alarm): + # Only schedule new async if previous completed + if not s_pending: + s_counter += 1 + csp.schedule_async_alarm(async_alarm, async_process(s_counter)) + s_pending = True + csp.schedule_alarm(poll_alarm, timedelta(milliseconds=10), True) + + if csp.ticked(async_alarm): + # Async operation completed + s_pending = False + return async_alarm # Returns the result value +``` + +**Key points:** + +- Declare with `csp.async_alarm(T)` in the `with csp.alarms():` block +- Schedule with `csp.schedule_async_alarm(alarm, coroutine)` +- Check completion with `if csp.ticked(async_alarm):` +- Access the result value directly with `async_alarm` +- Lifecycle (start/stop) is managed automatically + +### AsyncContext - Persistent Async Event Loop (Advanced) + +> **Note:** 
`AsyncContext` is an internal class and not part of the public API. For most use cases, prefer `csp.async_alarm` which handles lifecycle automatically. If you need `AsyncContext`, import it from the implementation module. + +`AsyncContext` provides a persistent async event loop for a node, avoiding the overhead of creating new loops for each operation. + +```python +from csp.impl.async_adapter import AsyncContext + +@csp.node +def node_with_context(x: ts[int]) -> ts[int]: + with csp.state(): + s_ctx = None + + with csp.start(): + s_ctx = AsyncContext() + s_ctx.start() + + with csp.stop(): + if s_ctx: + s_ctx.stop() + + if csp.ticked(x): + # Use the persistent event loop + result = s_ctx.run(fetch_value(str(x))) + return result +``` + +**Key points:** + +- Import from `csp.impl.async_adapter` (not part of public API) +- Reuses the same event loop across ticks (more efficient) +- Must call `start()` in `with csp.start()` and `stop()` in `with csp.stop()` +- `run()` blocks until completion, `run_nowait()` returns a Future + +## Complete Example + +Here's a complete example demonstrating multiple async features: + +```python +import csp +from csp import ts +from datetime import timedelta +from typing import AsyncIterator +import asyncio + + +async def async_fetch() -> int: + """Single async fetch.""" + await asyncio.sleep(0.1) + return 42 + + +async def async_double(n: int) -> int: + """Async transformation.""" + await asyncio.sleep(0.1) + return n * 2 + + +async def async_log(n: int) -> None: + """Async side effect.""" + await asyncio.sleep(0.05) + print(f"Logged: {n}") + + +async def async_stream(count: int) -> AsyncIterator[int]: + """Async generator stream.""" + for i in range(count): + await asyncio.sleep(0.1) + yield i + + +@csp.node +def counter_node() -> ts[int]: + with csp.alarms(): + tick = csp.alarm(bool) + + with csp.state(): + s_count = 0 + + with csp.start(): + csp.schedule_alarm(tick, timedelta(), True) + + if csp.ticked(tick): + s_count += 1 + 
csp.schedule_alarm(tick, timedelta(seconds=0.1), True) + return s_count + + +@csp.node +def async_alarm_node() -> ts[int]: + """Node using async_alarm pattern.""" + with csp.alarms(): + poll = csp.alarm(bool) + result_alarm = csp.async_alarm(int) + + with csp.state(): + s_counter = 0 + s_pending = False + + with csp.start(): + csp.schedule_alarm(poll, timedelta(milliseconds=10), True) + + if csp.ticked(poll): + if not s_pending: + s_counter += 1 + csp.schedule_async_alarm(result_alarm, async_double(s_counter)) + s_pending = True + csp.schedule_alarm(poll, timedelta(milliseconds=10), True) + + if csp.ticked(result_alarm): + s_pending = False + return result_alarm + + +@csp.graph +def main_graph(): + counter = counter_node() + + # async_for: stream from async generator + stream = csp.async_for(async_stream(10)) + csp.print("stream", stream) + + # async_in: single async value + initial = csp.async_in(async_fetch()) + csp.print("initial", initial) + + # async_out: async side effects + csp.async_out(counter, async_log) + + # async_node: transform via async + doubled = csp.async_node(counter, async_double) + csp.print("doubled", doubled) + + # async_alarm in a node + alarm_results = async_alarm_node() + csp.print("alarm_results", alarm_results) + + +if __name__ == "__main__": + csp.run(main_graph, realtime=True, endtime=timedelta(seconds=2)) +``` + +### Running with Same-Thread Asyncio Mode + +In realtime mode, CSP automatically runs async operations on the same thread (same-thread asyncio mode is the default): + +```python +if __name__ == "__main__": + # In realtime mode, same-thread asyncio is the default + csp.run(main_graph, realtime=True, endtime=timedelta(seconds=2)) + + # To use background thread for async instead: + # csp.run(main_graph, realtime=True, endtime=timedelta(seconds=2), asyncio_on_thread=True) +``` + +## Best Practices + +1. **Use `async_alarm` for node-internal async**: It's the most idiomatic CSP pattern and handles lifecycle automatically. + +1. 
**Same-thread asyncio is the default**: In realtime mode, async operations run on the same thread as CSP, eliminating cross-thread overhead. Use `asyncio_on_thread=True` if you need the legacy background thread behavior. + +1. **Track pending operations**: Use a state variable like `s_pending` to avoid scheduling overlapping async operations when order matters. + +1. **Handle errors**: Async operations can fail. Consider wrapping in try/except: + + ```python + try: + result = csp.await_(risky_async_call(), block=True) + except Exception as e: + # Handle error + pass + ``` + +1. **Mind the timing**: Async operations complete at unpredictable times. If order matters, wait for one to complete before starting another. + +1. **Use graph-level adapters when possible**: `async_for`, `async_in`, `async_out`, and `async_node` are simpler than node-internal async. + +1. **Realtime mode required**: Async adapters only work in realtime mode (`realtime=True`). In simulation mode, async operations are not supported. + +1. **Consider timeouts**: For blocking operations, consider using the `timeout` parameter: + + ```python + result = csp.await_(slow_operation(), block=True, timeout=5.0) + ``` + +1. **Event loop selection**: Let CSP automatically choose the best loop (default behavior). Only pass a custom `loop` parameter when you need to coordinate with external async code. + +1. **Check asyncio mode when needed**: Use `is_csp_asyncio_mode()` to check if you're running inside CSP's asyncio mode, useful for conditional behavior in adapters or nodes. diff --git a/docs/wiki/how-tos/Asyncio-Integration.md b/docs/wiki/how-tos/Asyncio-Integration.md new file mode 100644 index 000000000..121a72390 --- /dev/null +++ b/docs/wiki/how-tos/Asyncio-Integration.md @@ -0,0 +1,803 @@ +# CSP Event Loop Integration + +CSP provides three complementary integration patterns with Python's `asyncio` framework: + +1. 
**Same-Thread Asyncio Mode (Default)**: In realtime mode, CSP runs async operations on the same thread by default +1. **Standalone Event Loop**: Use CSP as the asyncio event loop backend (similar to uvloop) +1. **Bridge with Running Graph**: Interleave asyncio operations with a running CSP graph + +## Overview + +The `csp.run()` function in **realtime mode** automatically runs async operations on the same thread as CSP: + +- **`asyncio_on_thread=False`** (default in realtime): Same-thread asyncio execution, eliminating cross-thread synchronization overhead +- **`asyncio_on_thread=True`**: Run async operations on a background thread (legacy behavior) + +The `csp.event_loop` module provides: + +**Standalone Event Loop:** + +- **`CspEventLoop`**: An asyncio-compatible event loop backed by CSP's scheduler +- **`CspEventLoopPolicy`**: An event loop policy for using CSP loops with asyncio +- **`run()`**: A convenience function for running coroutines with CSP's event loop +- **`new_event_loop()`**: Factory function to create a new CSP event loop + +**Bridge with Running Graph:** + +- **`AsyncioBridge`**: Bridge for pushing data from asyncio to CSP graphs +- **`BidirectionalBridge`**: Bridge supporting two-way communication + +## Part 0: Same-Thread Asyncio Mode (Default) + +When running CSP graphs in realtime mode, async operations run on the same thread by default: + +```python +import csp +from datetime import timedelta +from typing import AsyncIterator +import asyncio + +async def fetch_data(count: int) -> AsyncIterator[int]: + """Async generator that fetches data.""" + for i in range(count): + await asyncio.sleep(0.1) + yield i * 10 + +@csp.graph +def my_graph(): + # Async adapters automatically use CSP's asyncio loop + data = csp.async_for(fetch_data(5)) + csp.print("data", data) + +# In realtime mode, asyncio runs on the same thread by default +csp.run(my_graph, realtime=True, endtime=timedelta(seconds=2)) +``` + +### How It Works + +When you call `csp.run(..., 
realtime=True)`: + +1. CSP creates a new asyncio event loop on the **current thread** +1. The CSP engine yields control to the asyncio loop between engine cycles, allowing async tasks to run +1. All async adapters (`async_for`, `async_in`, `async_out`, etc.) automatically detect this mode and run on the **same thread** as CSP +1. No background thread is needed - this eliminates cross-thread synchronization overhead + +**To use the legacy background thread mode**, set `asyncio_on_thread=True`: + +```python +# Explicitly use background thread for async operations +csp.run(my_graph, realtime=True, endtime=timedelta(seconds=2), asyncio_on_thread=True) +``` + +### Detecting Asyncio Mode + +You can check if you're running in CSP's asyncio mode: + +```python +from csp.impl.async_adapter import is_csp_asyncio_mode, get_csp_asyncio_loop + +@csp.node +def my_node(x: ts[int]) -> ts[int]: + if csp.ticked(x): + if is_csp_asyncio_mode(): + loop = get_csp_asyncio_loop() + # We're running in asyncio mode, can schedule directly on the loop + return x * 2 +``` + +### Requirements + +- Same-thread asyncio mode is only active in realtime mode (`realtime=True`) +- The graph runs synchronously (blocks until complete) +- The return value is the same as normal `csp.run()` + +## Part 1: Standalone Event Loop + +### Basic Usage + +The simplest way to use CSP's asyncio integration is through the `run()` function: + +```python +import csp.event_loop as csp_event_loop + +async def main(): + print("Hello from CSP asyncio!") + await asyncio.sleep(1) + return "done" + +result = csp_event_loop.run(main()) +print(result) # "done" +``` + +### Using as Event Loop Policy + +You can set CSP as the default event loop for all asyncio operations: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +# Set CSP as the event loop policy +asyncio.set_event_loop_policy(csp_event_loop.EventLoopPolicy()) + +# Now all asyncio operations use CSP's event loop +async def main(): + await 
asyncio.sleep(0.1) + return "Hello!" + +# This will use CSP's event loop +result = asyncio.run(main()) +``` + +### Creating Event Loops Manually + +For more control, you can create and manage event loops directly: + +```python +from csp.event_loop import new_event_loop + +loop = new_event_loop() +try: + result = loop.run_until_complete(my_coroutine()) +finally: + loop.close() +``` + +## Part 2: Bridge with Running CSP Graph + +The bridge integration allows asyncio operations to interact with a running CSP graph. +This is useful when you want to: + +- Push data from asyncio callbacks or coroutines into CSP +- Schedule callbacks using `call_later` and `call_at` that feed data to CSP +- Coordinate asyncio timing with CSP's engine time (`csp.now()`) +- Enable bidirectional communication between asyncio and CSP nodes + +### Quick Start with AsyncioBridge + +```python +import csp +from csp.event_loop import AsyncioBridge +from csp.utils.datetime import utc_now +from datetime import timedelta + +# Create the bridge +bridge = AsyncioBridge(int, "data_feed") + +@csp.node +def process(data: csp.ts[int]) -> csp.ts[str]: + if csp.ticked(data): + return f"Received {data} at {csp.now()}" + +@csp.graph +def my_graph(): + # Wire the bridge's adapter into the graph + data = bridge.adapter.out() + result = process(data) + csp.add_graph_output("result", result) + +# Start the bridge +start_time = utc_now() +bridge.start(start_time) + +# Run the CSP graph in a thread +runner = csp.run_on_thread( + my_graph, + realtime=True, + starttime=start_time, + endtime=timedelta(seconds=5) +) + +# Wait for adapter to be ready +bridge.wait_for_adapter(timeout=1.0) + +# Push data from asyncio +bridge.call_later(0.5, lambda: bridge.push(1)) +bridge.call_later(1.0, lambda: bridge.push(2)) +bridge.call_later(1.5, lambda: bridge.push(3)) + +# Wait for completion +results = runner.join() +bridge.stop() + +print(results["result"]) +``` + +### Scheduling with call_later and call_at + +The bridge provides 
asyncio-style scheduling methods:
+
+```python
+from csp.utils.datetime import utc_now
+from datetime import timedelta
+
+bridge = AsyncioBridge(str, "events")
+bridge.start()
+
+# Schedule callback after delay
+bridge.call_later(1.0, lambda: bridge.push("after 1 second"))
+
+# Schedule at specific datetime (use utc_now() to match CSP's UTC engine time)
+target_time = utc_now() + timedelta(seconds=2)
+bridge.call_at(target_time, lambda: bridge.push("at specific time"))
+
+# Schedule at offset from start time (aligned with CSP time)
+bridge.call_at_offset(
+    timedelta(milliseconds=500),
+    lambda: bridge.push("at 500ms from start")
+)
+```
+
+### Running Async Coroutines
+
+You can run full asyncio coroutines that interact with CSP:
+
+```python
+async def fetch_data_and_push():
+    """Coroutine that fetches data and pushes to CSP."""
+    for i in range(5):
+        await asyncio.sleep(0.2)
+        # Simulate fetching data
+        data = {"value": i, "timestamp": time.time()}
+        bridge.push(data)
+
+# Run the coroutine
+future = bridge.run_coroutine(fetch_data_and_push())
+
+# Optionally wait for completion
+future.result(timeout=10.0)
+```
+
+### Coordinating with CSP Time (csp.now())
+
+When scheduling callbacks, you can align with CSP's engine start time:
+
+```python
+@csp.node
+def log_with_time(data: csp.ts[str]) -> csp.ts[str]:
+    if csp.ticked(data):
+        # csp.now() shows the engine time
+        return f"[{csp.now()}] {data}"
+
+# The bridge uses wall-clock time, but you can align with CSP start
+start_time = utc_now()
+bridge.start(start_time)
+
+# This callback fires at start_time + 1 second
+# which aligns with csp.now() being approximately 1 second into the run
+bridge.call_at_offset(timedelta(seconds=1), lambda: bridge.push("1s mark"))
+```
+
+### Bidirectional Communication
+
+For two-way communication, use `BidirectionalBridge`:
+
+```python
+from csp.event_loop import BidirectionalBridge
+
+bridge = BidirectionalBridge(str, "bidi")
+
+@csp.node
+def process_and_respond(data: csp.ts[str], bridge_ref: object) -> csp.ts[str]:
+    if csp.ticked(data):
response = f"processed: {data}" + # Emit back to asyncio + bridge_ref.emit({"input": data, "output": response}) + return response + +@csp.graph +def my_graph(): + data = bridge.adapter.out() + result = process_and_respond(data, bridge) + csp.add_graph_output("results", result) + +# Register callback to receive events from CSP +def on_csp_event(event): + print(f"Received from CSP: {event}") + +bridge.on_event(on_csp_event) + +# Start everything +bridge.start() +runner = csp.run_on_thread(my_graph, realtime=True, ...) + +# Push to CSP +bridge.push("hello") + +# Later, the on_csp_event callback receives: +# {"input": "hello", "output": "processed: hello"} +``` + +### Combining CSP Timers with Async Callbacks + +You can run CSP's internal timers alongside async callbacks: + +```python +@csp.node +def combine(timer: csp.ts[bool], async_data: csp.ts[str]) -> csp.ts[str]: + if csp.ticked(timer): + return "timer tick" + if csp.ticked(async_data): + return f"async: {async_data}" + +@csp.graph +def my_graph(): + # CSP's timer fires every 100ms + timer = csp.timer(timedelta(milliseconds=100)) + # Async data comes from the bridge + async_data = bridge.adapter.out() + + result = combine(timer, async_data) + csp.add_graph_output("events", result) + +# Schedule async callbacks at different intervals +bridge.call_later(0.05, lambda: bridge.push("early")) +bridge.call_later(0.15, lambda: bridge.push("middle")) +bridge.call_later(0.25, lambda: bridge.push("late")) + +# The graph receives interleaved timer and async events +``` + +## Features + +### Full Asyncio Compatibility + +The CSP event loop is compatible with standard asyncio primitives: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def producer(queue): + for i in range(5): + await asyncio.sleep(0.1) + await queue.put(i) + await queue.put(None) # Signal end + +async def consumer(queue): + results = [] + while True: + item = await queue.get() + if item is None: + break + results.append(item) + 
return results + +async def main(): + queue = asyncio.Queue() + + # Run producer and consumer concurrently + producer_task = asyncio.create_task(producer(queue)) + consumer_task = asyncio.create_task(consumer(queue)) + + await producer_task + results = await consumer_task + + return results + +results = csp_event_loop.run(main()) +print(results) # [0, 1, 2, 3, 4] +``` + +### Synchronization Primitives + +All asyncio synchronization primitives work with CSP's event loop: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def main(): + # Locks + lock = asyncio.Lock() + async with lock: + print("Holding lock") + + # Events + event = asyncio.Event() + event.set() + await event.wait() + + # Semaphores + sem = asyncio.Semaphore(3) + async with sem: + print("Acquired semaphore") + + # Conditions + condition = asyncio.Condition() + async with condition: + condition.notify_all() + +csp_event_loop.run(main()) +``` + +### Concurrent Operations + +Use `asyncio.gather()`, `asyncio.wait()`, and other concurrent operations: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def fetch_data(url): + await asyncio.sleep(0.1) # Simulate network delay + return f"data from {url}" + +async def main(): + urls = ["url1", "url2", "url3"] + + # Gather results concurrently + results = await asyncio.gather(*[fetch_data(url) for url in urls]) + return results + +results = csp_event_loop.run(main()) +``` + +### Timeouts + +Use `asyncio.wait_for()` for timeout operations: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def slow_operation(): + await asyncio.sleep(10) + return "done" + +async def main(): + try: + result = await asyncio.wait_for(slow_operation(), timeout=1.0) + except asyncio.TimeoutError: + result = "timeout" + return result + +result = csp_event_loop.run(main()) # "timeout" +``` + +### Thread Pool Executor + +Run blocking operations in a thread pool: + +```python +import asyncio +import 
csp.event_loop as csp_event_loop +import time + +def blocking_io(): + time.sleep(0.1) + return "data" + +async def main(): + loop = asyncio.get_running_loop() + + # Run in default executor + result = await loop.run_in_executor(None, blocking_io) + return result + +result = csp_event_loop.run(main()) +``` + +### I/O Operations + +Socket and file descriptor operations: + +```python +import asyncio +import socket +import csp.event_loop as csp_event_loop + +async def main(): + loop = asyncio.get_running_loop() + + # Create a socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setblocking(False) + + # Connect asynchronously + try: + await loop.sock_connect(sock, ('example.com', 80)) + + # Send data + await loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n') + + # Receive data + data = await loop.sock_recv(sock, 1024) + return data[:50] + finally: + sock.close() + +# result = csp_event_loop.run(main()) +``` + +### Callback Scheduling + +Schedule callbacks directly on the event loop: + +```python +import csp.event_loop as csp_event_loop + +loop = csp_event_loop.new_event_loop() + +results = [] + +def my_callback(value): + results.append(value) + +# Schedule callbacks +loop.call_soon(my_callback, "immediate") +loop.call_later(0.1, my_callback, "delayed") +loop.call_soon(loop.stop) + +loop.run_forever() +loop.close() + +print(results) # ["immediate"] +``` + +### Exception Handling + +Custom exception handlers: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +def my_exception_handler(loop, context): + exception = context.get("exception") + message = context.get("message") + print(f"Caught exception: {exception}, message: {message}") + +async def main(): + loop = asyncio.get_running_loop() + loop.set_exception_handler(my_exception_handler) + + # This exception will be caught by our handler + async def bad_task(): + raise ValueError("oops") + + task = asyncio.create_task(bad_task()) + await asyncio.sleep(0.1) # Let the task run + 
+csp_event_loop.run(main()) +``` + +### Debug Mode + +Enable debug mode for development: + +```python +import csp.event_loop as csp_event_loop + +async def main(): + loop = asyncio.get_running_loop() + print(f"Debug mode: {loop.get_debug()}") + +# Enable debug mode +csp_event_loop.run(main(), debug=True) + +# Or set via environment variable +# PYTHONASYNCIODEBUG=1 python script.py +``` + +## API Reference + +### `csp.event_loop.run(main, *, loop_factory=None, debug=None)` + +Run a coroutine using the CSP event loop. + +**Parameters:** + +- `main`: The coroutine to run +- `loop_factory`: Optional factory function to create the event loop. Defaults to `new_event_loop`. +- `debug`: If True, run in debug mode. + +**Returns:** The result of the coroutine. + +### `csp.event_loop.new_event_loop()` + +Create and return a new CSP event loop. + +**Returns:** A new `CspEventLoop` instance. + +### `csp.event_loop.CspEventLoop` + +An asyncio-compatible event loop backed by CSP's scheduler. + +The loop implements the full `asyncio.AbstractEventLoop` interface, including: + +- `run_until_complete(future)`: Run until a future completes +- `run_forever()`: Run until `stop()` is called +- `stop()`: Stop the loop +- `close()`: Close the loop +- `is_running()`: Check if the loop is running +- `is_closed()`: Check if the loop is closed +- `call_soon(callback, *args)`: Schedule a callback +- `call_later(delay, callback, *args)`: Schedule a delayed callback +- `call_at(when, callback, *args)`: Schedule a callback at absolute time +- `call_soon_threadsafe(callback, *args)`: Thread-safe callback scheduling +- `create_future()`: Create a new Future +- `create_task(coro)`: Create a new Task +- `run_in_executor(executor, func, *args)`: Run in thread pool +- `add_reader(fd, callback, *args)`: Add file descriptor reader +- `remove_reader(fd)`: Remove file descriptor reader +- `add_writer(fd, callback, *args)`: Add file descriptor writer +- `remove_writer(fd)`: Remove file descriptor writer +- 
`time()`: Get current loop time +- `get_debug()`: Get debug mode status +- `set_debug(enabled)`: Set debug mode + +### `csp.event_loop.CspEventLoopPolicy` + +Event loop policy for CSP-backed asyncio. + +Methods: + +- `get_event_loop()`: Get the event loop for the current context +- `set_event_loop(loop)`: Set the event loop for the current context +- `new_event_loop()`: Create a new event loop + +Alias: `csp.event_loop.EventLoopPolicy` + +### `csp.event_loop.AsyncioBridge` + +Bridge between asyncio and running CSP graphs. + +**Constructor:** + +```python +AsyncioBridge(adapter_type: type = object, name: str = "asyncio_bridge") +``` + +**Parameters:** + +- `adapter_type`: The type of data to push through the adapter +- `name`: Name for the push adapter (for debugging) + +**Properties:** + +- `adapter`: The `GenericPushAdapter` to wire into your CSP graph +- `is_running`: Whether the bridge is currently running +- `loop`: The underlying asyncio event loop (if started) + +**Methods:** + +- `start(start_time=None)`: Start the asyncio event loop in a background thread +- `stop(timeout=5.0)`: Stop the asyncio event loop +- `push(value)`: Push a value to the CSP graph +- `call_soon(callback, *args)`: Schedule a callback immediately +- `call_later(delay, callback, *args)`: Schedule a callback after delay seconds +- `call_at(when, callback, *args)`: Schedule a callback at a specific datetime +- `call_at_offset(offset, callback, *args)`: Schedule at offset from start time +- `run_coroutine(coro)`: Run an asyncio coroutine +- `wait_for_adapter(timeout=None)`: Wait for adapter to be bound to graph +- `time()`: Get current time in seconds since epoch +- `elapsed_since_start()`: Get time elapsed since start + +### `csp.event_loop.BidirectionalBridge` + +Extended bridge supporting two-way communication. 
+ +Inherits all methods from `AsyncioBridge`, plus: + +**Additional Methods:** + +- `on_event(callback)`: Register a callback to receive events from CSP +- `off_event(callback)`: Unregister an event callback +- `emit(value)`: Emit an event from CSP to asyncio (call from CSP nodes) + +## Best Practices + +### 1. Use `run()` for simple scripts + +For simple scripts and applications, use `csp.event_loop.run()`: + +```python +import csp.event_loop as csp_event_loop + +async def main(): + # Your async code here + pass + +csp_event_loop.run(main()) +``` + +### 2. Always close loops and stop bridges + +When creating loops manually, always close them: + +```python +loop = csp_event_loop.new_event_loop() +try: + loop.run_until_complete(main()) +finally: + loop.close() +``` + +When using bridges, always stop them: + +```python +bridge = AsyncioBridge(int, "data") +bridge.start() +try: + # ... run your graph ... +finally: + bridge.stop() +``` + +### 3. Handle shutdown gracefully + +Shutdown async generators and executors: + +```python +async def shutdown(loop): + await loop.shutdown_asyncgens() + await loop.shutdown_default_executor() +``` + +### 4. Use context managers for resources + +```python +async def main(): + async with aiofiles.open('file.txt') as f: + content = await f.read() +``` + +### 5. Wait for adapter binding + +When using the bridge with CSP, wait for the adapter to be ready: + +```python +bridge.start() +runner = csp.run_on_thread(my_graph, realtime=True, ...) + +# Wait for CSP graph to start and bind the adapter +bridge.wait_for_adapter(timeout=1.0) + +# Now it's safe to push data +bridge.push(data) +``` + +## Limitations + +The current implementation has some limitations: + +1. **Subprocess support**: `subprocess_exec()` and `subprocess_shell()` are not yet implemented. + +1. **SSL/TLS**: Direct SSL support requires additional implementation. + +1. **Signal handlers**: Signal handling works but has some platform-specific limitations. + +1. 
**Bridge timing**: The bridge uses wall-clock time, not CSP engine time. Use `call_at_offset` to align with CSP start time. + +## Comparison: Choosing the Right Integration + +| Feature | `csp.run(realtime=True)` | `CspEventLoop` | `AsyncioBridge` | +| ------------------ | ------------------------------- | ------------------------------------- | ------------------------------ | +| **Use case** | CSP graph with async operations | Pure asyncio code with CSP scheduling | CSP graph receiving async data | +| **Threading** | Single-threaded (default) | Single-threaded | Multi-threaded | +| **CSP graph** | Yes (main focus) | No (pure asyncio) | Yes (main focus) | +| **Async adapters** | Automatic integration | Need to run in the loop | Need explicit bridge | +| **Complexity** | Low | Low | Medium | +| **When to use** | Async I/O in CSP nodes | Replace asyncio.run() | Push external data to CSP | + +### Quick Decision Guide + +1. **I have a CSP graph with async operations (fetch APIs, async I/O):** + → Use `csp.run(my_graph, realtime=True, ...)` - same-thread asyncio is the default + +1. **I want to use asyncio code and need CSP's scheduler:** + → Use `CspEventLoop` or `csp.event_loop.run()` + +1. **I have a running CSP graph and need to push data from external async sources:** + → Use `AsyncioBridge` or `BidirectionalBridge` + +1. 
**I want async adapters to work without extra configuration:** +   → Just use `realtime=True` - adapters auto-detect the asyncio mode + +## See Also + +- [Async Adapters Reference](Async.md) - Detailed async adapter documentation +- [Python asyncio documentation](https://docs.python.org/3/library/asyncio.html) +- [CSP Documentation](../README.md) +- [uvloop](https://github.com/MagicStack/uvloop) - Similar project for libuv-based event loop +- [Example: CSP Asyncio Integration](https://github.com/Point72/csp/tree/main/examples/06_advanced/e4_csp_asyncio_integration.py) diff --git a/docs/wiki/how-tos/Event-Loop-Integration.md b/docs/wiki/how-tos/Event-Loop-Integration.md new file mode 100644 index 000000000..307379a3f --- /dev/null +++ b/docs/wiki/how-tos/Event-Loop-Integration.md @@ -0,0 +1,796 @@ +# CSP Event Loop Integration + +CSP provides two complementary integration patterns with Python's `asyncio` framework: + +1. **Standalone Event Loop**: Use CSP as the asyncio event loop backend (similar to uvloop) +1. 
**Bridge with Running Graph**: Interleave asyncio operations with a running CSP graph + +## Overview + +The `csp.event_loop` module provides: + +**Standalone Event Loop:** + +- **`CspEventLoop`**: An asyncio-compatible event loop backed by CSP's scheduler +- **`CspEventLoopPolicy`**: An event loop policy for using CSP loops with asyncio +- **`run()`**: A convenience function for running coroutines with CSP's event loop +- **`new_event_loop()`**: Factory function to create a new CSP event loop + +**Bridge with Running Graph:** + +- **`AsyncioBridge`**: Bridge for pushing data from asyncio to CSP graphs +- **`BidirectionalBridge`**: Bridge supporting two-way communication + +## Part 1: Standalone Event Loop + +### Basic Usage + +The simplest way to use CSP's asyncio integration is through the `run()` function: + +```python +import csp.event_loop as csp_event_loop + +async def main(): + print("Hello from CSP asyncio!") + await asyncio.sleep(1) + return "done" + +result = csp_event_loop.run(main()) +print(result) # "done" +``` + +### Using as Event Loop Policy + +You can set CSP as the default event loop for all asyncio operations: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +# Set CSP as the event loop policy +asyncio.set_event_loop_policy(csp_event_loop.EventLoopPolicy()) + +# Now all asyncio operations use CSP's event loop +async def main(): + await asyncio.sleep(0.1) + return "Hello!" + +# This will use CSP's event loop +result = asyncio.run(main()) +``` + +### Creating Event Loops Manually + +For more control, you can create and manage event loops directly: + +```python +from csp.event_loop import new_event_loop + +loop = new_event_loop() +try: + result = loop.run_until_complete(my_coroutine()) +finally: + loop.close() +``` + +## Part 2: Bridge with Running CSP Graph + +The bridge integration allows asyncio operations to interact with a running CSP graph. 
+This is useful when you want to: + +- Push data from asyncio callbacks or coroutines into CSP +- Schedule callbacks using `call_later` and `call_at` that feed data to CSP +- Coordinate asyncio timing with CSP's engine time (`csp.now()`) +- Enable bidirectional communication between asyncio and CSP nodes + +### Quick Start with AsyncioBridge + +```python +import csp +from csp.event_loop import AsyncioBridge +from csp.utils.datetime import utc_now +from datetime import timedelta + +# Create the bridge +bridge = AsyncioBridge(int, "data_feed") + +@csp.node +def process(data: csp.ts[int]) -> csp.ts[str]: + if csp.ticked(data): + return f"Received {data} at {csp.now()}" + +@csp.graph +def my_graph(): + # Wire the bridge's adapter into the graph + data = bridge.adapter.out() + result = process(data) + csp.add_graph_output("result", result) + +# Start the bridge +start_time = utc_now() +bridge.start(start_time) + +# Run the CSP graph in a thread +runner = csp.run_on_thread( + my_graph, + realtime=True, + starttime=start_time, + endtime=timedelta(seconds=5) +) + +# Wait for adapter to be ready +bridge.wait_for_adapter(timeout=1.0) + +# Push data from asyncio +bridge.call_later(0.5, lambda: bridge.push(1)) +bridge.call_later(1.0, lambda: bridge.push(2)) +bridge.call_later(1.5, lambda: bridge.push(3)) + +# Wait for completion +results = runner.join() +bridge.stop() + +print(results["result"]) +``` + +### Scheduling with call_later and call_at + +The bridge provides asyncio-style scheduling methods: + +```python +from datetime import datetime, timedelta + +bridge = AsyncioBridge(str, "events") +bridge.start() + +# Schedule callback after delay +bridge.call_later(1.0, lambda: bridge.push("after 1 second")) + +# Schedule at specific datetime +target_time = datetime.utcnow() + timedelta(seconds=2) +bridge.call_at(target_time, lambda: bridge.push("at specific time")) + +# Schedule at offset from start time (aligned with CSP time) +bridge.call_at_offset( + 
timedelta(milliseconds=500), + lambda: bridge.push("at 500ms from start") +) +``` + +### Running Async Coroutines + +You can run full asyncio coroutines that interact with CSP: + +```python +async def fetch_data_and_push(): + """Coroutine that fetches data and pushes to CSP.""" + for i in range(5): + await asyncio.sleep(0.2) + # Simulate fetching data + data = {"value": i, "timestamp": time.time()} + bridge.push(data) + +# Run the coroutine +future = bridge.run_coroutine(fetch_data_and_push()) + +# Optionally wait for completion +future.result(timeout=10.0) +``` + +### Coordinating with CSP Time (csp.now()) + +When scheduling callbacks, you can align with CSP's engine start time: + +```python +@csp.node +def log_with_time(data: csp.ts[str]) -> csp.ts[str]: + if csp.ticked(data): + # csp.now() shows the engine time + return f"[{csp.now()}] {data}" + +# The bridge uses wall-clock time, but you can align with CSP start +start_time = utc_now() +bridge.start(start_time) + +# This callback fires at start_time + 1 second +# which aligns with csp.now() being approximately 1 second into the run +bridge.call_at_offset(timedelta(seconds=1), lambda: bridge.push("1s mark")) +``` + +### Bidirectional Communication + +For two-way communication, use `BidirectionalBridge`: + +```python +from csp.event_loop import BidirectionalBridge + +bridge = BidirectionalBridge(str, "bidi") + +@csp.node +def process_and_respond(data: csp.ts[str], bridge_ref: object) -> csp.ts[str]: + if csp.ticked(data): + response = f"processed: {data}" + # Emit back to asyncio + bridge_ref.emit({"input": data, "output": response}) + return response + +@csp.graph +def my_graph(): + data = bridge.adapter.out() + result = process_and_respond(data, bridge) + csp.add_graph_output("results", result) + +# Register callback to receive events from CSP +def on_csp_event(event): + print(f"Received from CSP: {event}") + +bridge.on_event(on_csp_event) + +# Start everything +bridge.start() +runner = 
csp.run_on_thread(my_graph, realtime=True, ...) + +# Push to CSP +bridge.push("hello") + +# Later, the on_csp_event callback receives: +# {"input": "hello", "output": "processed: hello"} +``` + +### Combining CSP Timers with Async Callbacks + +You can run CSP's internal timers alongside async callbacks: + +```python +@csp.node +def combine(timer: csp.ts[bool], async_data: csp.ts[str]) -> csp.ts[str]: + if csp.ticked(timer): + return "timer tick" + if csp.ticked(async_data): + return f"async: {async_data}" + +@csp.graph +def my_graph(): + # CSP's timer fires every 100ms + timer = csp.timer(timedelta(milliseconds=100)) + # Async data comes from the bridge + async_data = bridge.adapter.out() + + result = combine(timer, async_data) + csp.add_graph_output("events", result) + +# Schedule async callbacks at different intervals +bridge.call_later(0.05, lambda: bridge.push("early")) +bridge.call_later(0.15, lambda: bridge.push("middle")) +bridge.call_later(0.25, lambda: bridge.push("late")) + +# The graph receives interleaved timer and async events +``` + +## Features + +### Full Asyncio Compatibility + +The CSP event loop is compatible with standard asyncio primitives: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def producer(queue): + for i in range(5): + await asyncio.sleep(0.1) + await queue.put(i) + await queue.put(None) # Signal end + +async def consumer(queue): + results = [] + while True: + item = await queue.get() + if item is None: + break + results.append(item) + return results + +async def main(): + queue = asyncio.Queue() + + # Run producer and consumer concurrently + producer_task = asyncio.create_task(producer(queue)) + consumer_task = asyncio.create_task(consumer(queue)) + + await producer_task + results = await consumer_task + + return results + +results = csp_event_loop.run(main()) +print(results) # [0, 1, 2, 3, 4] +``` + +### Synchronization Primitives + +All asyncio synchronization primitives work with CSP's event loop: 
+ +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def main(): + # Locks + lock = asyncio.Lock() + async with lock: + print("Holding lock") + + # Events + event = asyncio.Event() + event.set() + await event.wait() + + # Semaphores + sem = asyncio.Semaphore(3) + async with sem: + print("Acquired semaphore") + + # Conditions + condition = asyncio.Condition() + async with condition: + condition.notify_all() + +csp_event_loop.run(main()) +``` + +### Concurrent Operations + +Use `asyncio.gather()`, `asyncio.wait()`, and other concurrent operations: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def fetch_data(url): + await asyncio.sleep(0.1) # Simulate network delay + return f"data from {url}" + +async def main(): + urls = ["url1", "url2", "url3"] + + # Gather results concurrently + results = await asyncio.gather(*[fetch_data(url) for url in urls]) + return results + +results = csp_event_loop.run(main()) +``` + +### Timeouts + +Use `asyncio.wait_for()` for timeout operations: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +async def slow_operation(): + await asyncio.sleep(10) + return "done" + +async def main(): + try: + result = await asyncio.wait_for(slow_operation(), timeout=1.0) + except asyncio.TimeoutError: + result = "timeout" + return result + +result = csp_event_loop.run(main()) # "timeout" +``` + +### Thread Pool Executor + +Run blocking operations in a thread pool: + +```python +import asyncio +import csp.event_loop as csp_event_loop +import time + +def blocking_io(): + time.sleep(0.1) + return "data" + +async def main(): + loop = asyncio.get_running_loop() + + # Run in default executor + result = await loop.run_in_executor(None, blocking_io) + return result + +result = csp_event_loop.run(main()) +``` + +### I/O Operations + +Socket and file descriptor operations: + +```python +import asyncio +import socket +import csp.event_loop as csp_event_loop + +async def main(): + loop 
= asyncio.get_running_loop() + + # Create a socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setblocking(False) + + # Connect asynchronously + try: + await loop.sock_connect(sock, ('example.com', 80)) + + # Send data + await loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n') + + # Receive data + data = await loop.sock_recv(sock, 1024) + return data[:50] + finally: + sock.close() + +# result = csp_event_loop.run(main()) +``` + +### Callback Scheduling + +Schedule callbacks directly on the event loop: + +```python +import csp.event_loop as csp_event_loop + +loop = csp_event_loop.new_event_loop() + +results = [] + +def my_callback(value): + results.append(value) + +# Schedule callbacks +loop.call_soon(my_callback, "immediate") +loop.call_later(0.1, my_callback, "delayed") +loop.call_soon(loop.stop) + +loop.run_forever() +loop.close() + +print(results) # ["immediate"] +``` + +### Simulation Mode + +The CSP event loop supports running in **simulation mode** for historical replay and testing. 
+In simulation mode, time is driven by CSP's scheduler rather than wall-clock time: + +```python +from datetime import datetime, timedelta +from csp.event_loop import CspEventLoop + +# Create event loop in simulation mode +loop = CspEventLoop(realtime=False) + +# Configure simulation time range +loop.set_simulation_time_range( + start=datetime(2020, 1, 1, 9, 30, 0), # Market open + end=datetime(2020, 1, 1, 16, 0, 0) # Market close +) + +async def test_historical(): + # loop.time() returns simulated time (seconds since epoch) + start_time = loop.time() + print(f"Start: {datetime.fromtimestamp(start_time)}") + + await asyncio.sleep(0) # Yields but doesn't wait + # Time jumps instantly to next event + + return "done" + +result = loop.run_until_complete(test_historical()) +loop.close() +``` + +**Key differences between realtime and simulation modes:** + +| Aspect | Realtime Mode (`realtime=True`) | Simulation Mode (`realtime=False`) | +| ----------------- | ------------------------------- | ---------------------------------- | +| `loop.time()` | Wall clock (monotonic) | CSP simulated time | +| `asyncio.sleep()` | Actually waits | Returns immediately | +| I/O waiting | Blocks on selectors | Polls only (no waiting) | +| Time progression | Follows wall clock | Jumps to next event | +| Use case | Live applications | Backtesting, testing | + +**Example: Fast-forwarding through simulated events:** + +```python +from csp.event_loop import CspEventLoop +from datetime import datetime +import asyncio + +loop = CspEventLoop(realtime=False) +loop.set_simulation_time_range( + start=datetime(2020, 1, 1), + end=datetime(2020, 12, 31) +) + +times = [] + +async def record_timestamps(): + for i in range(5): + times.append(loop.time()) + await asyncio.sleep(0) # Yield without waiting + return times + +import time +wall_start = time.monotonic() +result = loop.run_until_complete(record_timestamps()) +wall_elapsed = time.monotonic() - wall_start + +print(f"Processed 5 events in 
{wall_elapsed:.3f}s wall time") +# Output: Processed 5 events in 0.001s wall time +loop.close() +``` + +### Exception Handling + +Custom exception handlers: + +```python +import asyncio +import csp.event_loop as csp_event_loop + +def my_exception_handler(loop, context): + exception = context.get("exception") + message = context.get("message") + print(f"Caught exception: {exception}, message: {message}") + +async def main(): + loop = asyncio.get_running_loop() + loop.set_exception_handler(my_exception_handler) + + # This exception will be caught by our handler + async def bad_task(): + raise ValueError("oops") + + task = asyncio.create_task(bad_task()) + await asyncio.sleep(0.1) # Let the task run + +csp_event_loop.run(main()) +``` + +### Debug Mode + +Enable debug mode for development: + +```python +import csp.event_loop as csp_event_loop + +async def main(): + loop = asyncio.get_running_loop() + print(f"Debug mode: {loop.get_debug()}") + +# Enable debug mode +csp_event_loop.run(main(), debug=True) + +# Or set via environment variable +# PYTHONASYNCIODEBUG=1 python script.py +``` + +## API Reference + +### `csp.event_loop.run(main, *, loop_factory=None, debug=None)` + +Run a coroutine using the CSP event loop. + +**Parameters:** + +- `main`: The coroutine to run +- `loop_factory`: Optional factory function to create the event loop. Defaults to `new_event_loop`. +- `debug`: If True, run in debug mode. + +**Returns:** The result of the coroutine. + +### `csp.event_loop.new_event_loop()` + +Create and return a new CSP event loop. + +**Returns:** A new `CspEventLoop` instance. + +### `csp.event_loop.CspEventLoop` + +An asyncio-compatible event loop backed by CSP's scheduler. + +**Constructor:** + +```python +CspEventLoop(realtime: bool = True) +``` + +- `realtime`: If True (default), run in realtime mode with wall-clock timing. + If False, run in simulation mode where time is driven by CSP's scheduler. 
+ +The loop implements the full `asyncio.AbstractEventLoop` interface, including: + +- `run_until_complete(future)`: Run until a future completes +- `run_forever()`: Run until `stop()` is called +- `stop()`: Stop the loop +- `close()`: Close the loop +- `is_running()`: Check if the loop is running +- `is_closed()`: Check if the loop is closed +- `call_soon(callback, *args)`: Schedule a callback +- `call_later(delay, callback, *args)`: Schedule a delayed callback +- `call_at(when, callback, *args)`: Schedule a callback at absolute time +- `call_soon_threadsafe(callback, *args)`: Thread-safe callback scheduling +- `create_future()`: Create a new Future +- `create_task(coro)`: Create a new Task +- `run_in_executor(executor, func, *args)`: Run in thread pool +- `add_reader(fd, callback, *args)`: Add file descriptor reader +- `remove_reader(fd)`: Remove file descriptor reader +- `add_writer(fd, callback, *args)`: Add file descriptor writer +- `remove_writer(fd)`: Remove file descriptor writer +- `time()`: Get current loop time (wall clock in realtime, simulated in simulation mode) +- `get_debug()`: Get debug mode status +- `set_debug(enabled)`: Set debug mode + +**Simulation Mode Methods:** + +- `set_simulation_time_range(start, end)`: Configure the time range for simulation mode. + Must be called before `run_forever()` or `run_until_complete()`. + - `start`: Start datetime for simulation (default: Unix epoch) + - `end`: End datetime for simulation (default: 100 years from start) + +### `csp.event_loop.CspEventLoopPolicy` + +Event loop policy for CSP-backed asyncio. + +Methods: + +- `get_event_loop()`: Get the event loop for the current context +- `set_event_loop(loop)`: Set the event loop for the current context +- `new_event_loop()`: Create a new event loop + +Alias: `csp.event_loop.EventLoopPolicy` + +### `csp.event_loop.AsyncioBridge` + +Bridge between asyncio and running CSP graphs. 
+ +**Constructor:** + +```python +AsyncioBridge(adapter_type: type = object, name: str = "asyncio_bridge") +``` + +**Parameters:** + +- `adapter_type`: The type of data to push through the adapter +- `name`: Name for the push adapter (for debugging) + +**Properties:** + +- `adapter`: The `GenericPushAdapter` to wire into your CSP graph +- `is_running`: Whether the bridge is currently running +- `loop`: The underlying asyncio event loop (if started) + +**Methods:** + +- `start(start_time=None)`: Start the asyncio event loop in a background thread +- `stop(timeout=5.0)`: Stop the asyncio event loop +- `push(value)`: Push a value to the CSP graph +- `call_soon(callback, *args)`: Schedule a callback immediately +- `call_later(delay, callback, *args)`: Schedule a callback after delay seconds +- `call_at(when, callback, *args)`: Schedule a callback at a specific datetime +- `call_at_offset(offset, callback, *args)`: Schedule at offset from start time +- `run_coroutine(coro)`: Run an asyncio coroutine +- `wait_for_adapter(timeout=None)`: Wait for adapter to be bound to graph +- `time()`: Get current time in seconds since epoch +- `elapsed_since_start()`: Get time elapsed since start + +### `csp.event_loop.BidirectionalBridge` + +Extended bridge supporting two-way communication. + +Inherits all methods from `AsyncioBridge`, plus: + +**Additional Methods:** + +- `on_event(callback)`: Register a callback to receive events from CSP +- `off_event(callback)`: Unregister an event callback +- `emit(value)`: Emit an event from CSP to asyncio (call from CSP nodes) + +## Best Practices + +### 1. Use `run()` for simple scripts + +For simple scripts and applications, use `csp.event_loop.run()`: + +```python +import csp.event_loop as csp_event_loop + +async def main(): + # Your async code here + pass + +csp_event_loop.run(main()) +``` + +### 2. 
Always close loops and stop bridges + +When creating loops manually, always close them: + +```python +loop = csp_event_loop.new_event_loop() +try: + loop.run_until_complete(main()) +finally: + loop.close() +``` + +When using bridges, always stop them: + +```python +bridge = AsyncioBridge(int, "data") +bridge.start() +try: + # ... run your graph ... +finally: + bridge.stop() +``` + +### 3. Handle shutdown gracefully + +Shutdown async generators and executors: + +```python +async def shutdown(loop): + await loop.shutdown_asyncgens() + await loop.shutdown_default_executor() +``` + +### 4. Use context managers for resources + +```python +async def main(): + async with aiofiles.open('file.txt') as f: + content = await f.read() +``` + +### 5. Wait for adapter binding + +When using the bridge with CSP, wait for the adapter to be ready: + +```python +bridge.start() +runner = csp.run_on_thread(my_graph, realtime=True, ...) + +# Wait for CSP graph to start and bind the adapter +bridge.wait_for_adapter(timeout=1.0) + +# Now it's safe to push data +bridge.push(data) +``` + +## Limitations + +The current implementation has some limitations: + +1. **Subprocess support**: `subprocess_exec()` and `subprocess_shell()` are not yet implemented. + +1. **SSL/TLS**: Direct SSL support requires additional implementation. + +1. **Signal handlers**: Signal handling works but has some platform-specific limitations. + +1. **Bridge timing**: The bridge uses wall-clock time, not CSP engine time. Use `call_at_offset` to align with CSP start time. 
+
+## See Also
+
+- [Python asyncio documentation](https://docs.python.org/3/library/asyncio.html)
+- [CSP Documentation](../README.md)
+- [uvloop](https://github.com/MagicStack/uvloop) - Similar project for libuv-based event loop
+- [Example: CSP Asyncio Integration](https://github.com/Point72/csp/tree/main/examples/06_advanced/e3_asyncio_integration.py)
diff --git a/examples/06_advanced/e3_asyncio_integration.py b/examples/06_advanced/e3_asyncio_integration.py
new file mode 100644
index 000000000..307c85d2f
--- /dev/null
+++ b/examples/06_advanced/e3_asyncio_integration.py
@@ -0,0 +1,354 @@
+"""
+CSP Event Loop Integration Examples
+
+This module demonstrates how to use CSP's event loop integration to run
+asyncio coroutines with CSP's event loop, similar to uvloop.
+"""
+
+import asyncio
+import time
+
+import csp.event_loop as csp_event_loop
+
+# Example 1: Basic Usage
+
+
+async def hello_world():
+    """Simple async function."""
+    print("Hello from CSP asyncio!")
+    await asyncio.sleep(0.1)
+    return "done"
+
+
+def example_basic():
+    """Basic usage of csp.event_loop.run()."""
+    print("=" * 50)
+    print("Example 1: Basic Usage")
+    print("=" * 50)
+
+    result = csp_event_loop.run(hello_world())
+    print(f"Result: {result}\n")
+
+
+# Example 2: Callback Scheduling
+
+
+def example_callbacks():
+    """Demonstrate callback scheduling methods."""
+    print("=" * 50)
+    print("Example 2: Callback Scheduling")
+    print("=" * 50)
+
+    results = []
+
+    async def callback_demo():
+        loop = asyncio.get_running_loop()
+
+        def callback(value):
+            results.append(value)
+            print(f"Callback called with: {value}")
+
+        # Schedule callbacks
+        loop.call_soon(callback, "immediate")
+        loop.call_later(0.1, callback, "delayed_100ms")
+        loop.call_later(0.05, callback, "delayed_50ms")
+
+        # Wait for all callbacks to complete
+        await asyncio.sleep(0.2)
+
+    csp_event_loop.run(callback_demo())
+
+    print(f"Callback order: {results}\n")
+
+
+# Example 3: Concurrent Tasks
+
+
+async def 
fetch_data(name, delay): + """Simulate fetching data with a delay.""" + print(f"Fetching {name}...") + await asyncio.sleep(delay) + print(f"Got {name}!") + return f"data from {name}" + + +async def concurrent_tasks(): + """Run multiple tasks concurrently.""" + # Create tasks + tasks = [ + asyncio.create_task(fetch_data("source1", 0.1)), + asyncio.create_task(fetch_data("source2", 0.15)), + asyncio.create_task(fetch_data("source3", 0.05)), + ] + + # Wait for all tasks + results = await asyncio.gather(*tasks) + return results + + +def example_concurrent(): + """Demonstrate concurrent task execution.""" + print("=" * 50) + print("Example 3: Concurrent Tasks") + print("=" * 50) + + results = csp_event_loop.run(concurrent_tasks()) + print(f"Results: {results}\n") + + +# Example 4: Synchronization Primitives + + +async def worker_with_lock(lock, name, shared_data): + """Worker that uses a lock to access shared data.""" + async with lock: + print(f"{name}: Acquired lock") + shared_data.append(f"{name} start") + await asyncio.sleep(0.05) + shared_data.append(f"{name} end") + print(f"{name}: Releasing lock") + + +async def synchronization_example(): + """Demonstrate asyncio synchronization primitives.""" + lock = asyncio.Lock() + shared_data = [] + + await asyncio.gather( + worker_with_lock(lock, "Worker1", shared_data), + worker_with_lock(lock, "Worker2", shared_data), + worker_with_lock(lock, "Worker3", shared_data), + ) + + return shared_data + + +def example_synchronization(): + """Demonstrate synchronization primitives.""" + print("=" * 50) + print("Example 4: Synchronization Primitives") + print("=" * 50) + + results = csp_event_loop.run(synchronization_example()) + print(f"Execution order: {results}\n") + + +# Example 5: Producer-Consumer + + +async def producer(queue, items): + """Produce items and put them in a queue.""" + for item in items: + await queue.put(item) + print(f"Produced: {item}") + await asyncio.sleep(0.02) + await queue.put(None) # Signal end + + 
+async def consumer(queue): + """Consume items from a queue.""" + results = [] + while True: + item = await queue.get() + if item is None: + break + print(f"Consumed: {item}") + results.append(item) + return results + + +async def producer_consumer(): + """Producer-consumer pattern.""" + queue = asyncio.Queue() + + # Run producer and consumer concurrently + producer_task = asyncio.create_task(producer(queue, [1, 2, 3, 4, 5])) + consumer_task = asyncio.create_task(consumer(queue)) + + await producer_task + results = await consumer_task + + return results + + +def example_producer_consumer(): + """Demonstrate producer-consumer pattern.""" + print("=" * 50) + print("Example 5: Producer-Consumer Pattern") + print("=" * 50) + + results = csp_event_loop.run(producer_consumer()) + print(f"All consumed items: {results}\n") + + +# Example 6: Timeout Handling + + +async def slow_operation(): + """A slow operation that might need to be cancelled.""" + await asyncio.sleep(10) + return "completed" + + +async def timeout_example(): + """Demonstrate timeout handling.""" + try: + result = await asyncio.wait_for(slow_operation(), timeout=0.1) + return result + except asyncio.TimeoutError: + return "operation timed out" + + +def example_timeout(): + """Demonstrate timeout handling.""" + print("=" * 50) + print("Example 6: Timeout Handling") + print("=" * 50) + + result = csp_event_loop.run(timeout_example()) + print(f"Result: {result}\n") + + +# Example 7: Event Loop Policy + + +def example_policy(): + """Demonstrate using CSP as the asyncio event loop policy.""" + print("=" * 50) + print("Example 7: Event Loop Policy") + print("=" * 50) + + # Save old policy + old_policy = asyncio.get_event_loop_policy() + + try: + # Set CSP as the event loop policy + asyncio.set_event_loop_policy(csp_event_loop.EventLoopPolicy()) + + # Now asyncio.run() uses CSP's event loop + async def check_loop(): + loop = asyncio.get_running_loop() + return type(loop).__name__ + + loop_name = 
asyncio.run(check_loop()) + print(f"Using event loop: {loop_name}\n") + finally: + # Restore old policy + asyncio.set_event_loop_policy(old_policy) + + +# Example 8: Exception Handling + + +async def faulty_operation(): + """An operation that raises an exception.""" + await asyncio.sleep(0.01) + raise ValueError("Something went wrong!") + + +async def exception_example(): + """Demonstrate exception handling in async code.""" + try: + await faulty_operation() + except ValueError as e: + return f"Caught exception: {e}" + + +def example_exception_handling(): + """Demonstrate exception handling.""" + print("=" * 50) + print("Example 8: Exception Handling") + print("=" * 50) + + result = csp_event_loop.run(exception_example()) + print(f"Result: {result}\n") + + +# Example 9: Executor + + +def blocking_io_operation(x, y): + """A blocking I/O operation.""" + time.sleep(0.1) # Simulate blocking I/O + return x * y + + +async def executor_example(): + """Run blocking code in an executor.""" + loop = asyncio.get_running_loop() + + # Run blocking operations in the default executor with timeout + try: + result1 = await asyncio.wait_for(loop.run_in_executor(None, blocking_io_operation, 5, 10), timeout=5.0) + result2 = await asyncio.wait_for(loop.run_in_executor(None, blocking_io_operation, 3, 7), timeout=5.0) + except asyncio.TimeoutError: + return "executor timed out", None + + return result1, result2 + + +def example_executor(): + """Demonstrate running blocking code in executor.""" + print("=" * 50) + print("Example 9: Running Blocking Code in Executor") + print("=" * 50) + + results = csp_event_loop.run(executor_example()) + print(f"Results: {results}\n") + + +def _check_event_loop_available(): + """Check if CSP event loop is functional by checking required methods exist.""" + try: + loop = csp_event_loop.new_event_loop() + # Check that the CSP engine has the required start method + if not hasattr(loop, "_csp_engine"): + loop.close() + return False + if not 
hasattr(loop._csp_engine, "start"): + loop.close() + return False + loop.close() + return True + except Exception: + return False + + +def main(): + """Run all examples.""" + print("\n" + "=" * 50) + print("CSP ASYNCIO INTEGRATION EXAMPLES") + print("=" * 50 + "\n") + + # Check if event loop is functional + if not _check_event_loop_available(): + print("CSP event loop is not available or functional.") + print("Skipping asyncio integration examples.") + print("=" * 50) + return + + examples = [ + ("basic", example_basic), + ("callbacks", example_callbacks), + ("concurrent", example_concurrent), + ("synchronization", example_synchronization), + ("producer_consumer", example_producer_consumer), + ("timeout", example_timeout), + ("policy", example_policy), + ("exception_handling", example_exception_handling), + ("executor", example_executor), + ] + + for name, func in examples: + try: + func() + except Exception as e: + print(f"Example {name} failed: {e}\n") + + print("=" * 50) + print("All examples completed!") + print("=" * 50) + + +if __name__ == "__main__": + main() diff --git a/examples/06_advanced/e4_csp_asyncio_integration.py b/examples/06_advanced/e4_csp_asyncio_integration.py new file mode 100644 index 000000000..d0b26d8c9 --- /dev/null +++ b/examples/06_advanced/e4_csp_asyncio_integration.py @@ -0,0 +1,611 @@ +""" +CSP Event Loop Integration with Running Graph + +This example demonstrates how to integrate asyncio event loop operations +with a running CSP graph using the AsyncioBridge and BidirectionalBridge +classes from csp.event_loop. + +Examples show how to: +1. Run asyncio coroutines alongside a CSP graph in realtime mode +2. Use push adapters to feed data from asyncio callbacks into CSP +3. Interleave operations with the CSP engine and its clock (csp.now()) +4. 
Use call_later and call_at to schedule callbacks that interact with CSP +""" + +import asyncio +import time +from datetime import datetime, timedelta +from typing import Any, Callable, List, Optional + +import csp +from csp import ts +from csp.event_loop import AsyncioBridge, BidirectionalBridge +from csp.utils.datetime import utc_now + +# Example 1: Basic Push from Asyncio to CSP + + +def example_basic_push(): + """Demonstrate pushing data from asyncio callbacks to CSP graph.""" + print("=" * 60) + print("Example 1: Basic Push from Asyncio to CSP") + print("=" * 60) + + bridge = AsyncioBridge(int, "counter") + + @csp.node + def collect(data: ts[int]) -> ts[int]: + if csp.ticked(data): + print(f" CSP received at {csp.now()}: {data}") + return data + + @csp.graph + def g(): + data = bridge.adapter.out() + collected = collect(data) + csp.add_graph_output("data", collected) + + # Start bridge first + start_time = utc_now() + bridge.start(start_time) + + # Give bridge time to start + time.sleep(0.1) + + # Run CSP graph in thread + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=2)) + + # Wait for adapter to bind + bridge.adapter.wait_for_start(timeout=1.0) + + # Push data from asyncio + counter = 0 + + def push_counter(): + nonlocal counter + if bridge._running and runner.is_alive(): + bridge.push(counter) + counter += 1 + if counter < 5: + bridge.call_later(0.3, push_counter) + + bridge.call_soon(push_counter) + + # Wait for completion + results = runner.join() + bridge.stop() + + print(f" Collected: {[v for _, v in results.get('data', [])]}\n") + + +# Example 2: Scheduled Callbacks with call_later + + +def example_call_later(): + """Demonstrate call_later scheduling with CSP.""" + print("=" * 60) + print("Example 2: Scheduled Callbacks with call_later") + print("=" * 60) + + bridge = AsyncioBridge(str, "messages") + + @csp.node + def log_events(msg: ts[str]) -> ts[str]: + if csp.ticked(msg): + now = csp.now() + print(f" 
[{now}] CSP: {msg}") + return msg + + @csp.graph + def g(): + data = bridge.adapter.out() + logged = log_events(data) + csp.add_graph_output("events", logged) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=3)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Schedule messages at different times + bridge.call_later(0.1, lambda: bridge.push("Message at 0.1s")) + bridge.call_later(0.5, lambda: bridge.push("Message at 0.5s")) + bridge.call_later(1.0, lambda: bridge.push("Message at 1.0s")) + bridge.call_later(1.5, lambda: bridge.push("Message at 1.5s")) + bridge.call_later(2.0, lambda: bridge.push("Message at 2.0s")) + + results = runner.join() + bridge.stop() + + print(f" Total messages: {len(results.get('events', []))}\n") + + +# Example 3: Call at Specific Times + + +def example_call_at(): + """Demonstrate call_at scheduling at specific datetimes.""" + print("=" * 60) + print("Example 3: Call at Specific Datetimes") + print("=" * 60) + + bridge = AsyncioBridge(str, "timed_events") + + @csp.node + def process(msg: ts[str]) -> ts[str]: + if csp.ticked(msg): + print(f" [{csp.now().strftime('%H:%M:%S.%f')}] {msg}") + return msg + + @csp.graph + def g(): + data = bridge.adapter.out() + processed = process(data) + csp.add_graph_output("events", processed) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=3)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Schedule at specific times + t1 = start_time + timedelta(seconds=0.5) + t2 = start_time + timedelta(seconds=1.0) + t3 = start_time + timedelta(seconds=1.5) + + bridge.call_at(t1, lambda: bridge.push(f"Event scheduled for {t1.strftime('%H:%M:%S.%f')}")) + bridge.call_at(t2, lambda: bridge.push(f"Event scheduled for {t2.strftime('%H:%M:%S.%f')}")) + bridge.call_at(t3, lambda: 
bridge.push(f"Event scheduled for {t3.strftime('%H:%M:%S.%f')}")) + + results = runner.join() + bridge.stop() + + print(f" Total events: {len(results.get('events', []))}\n") + + +# Example 4: Running Async Coroutines + + +def example_async_coroutines(): + """Demonstrate running asyncio coroutines that push to CSP.""" + print("=" * 60) + print("Example 4: Running Async Coroutines") + print("=" * 60) + + bridge = AsyncioBridge(dict, "async_data") + + @csp.node + def process_data(data: ts[dict]) -> ts[str]: + if csp.ticked(data): + result = f"Processed: {data}" + print(f" CSP [{csp.now()}]: {result}") + return result + + @csp.graph + def g(): + data = bridge.adapter.out() + processed = process_data(data) + csp.add_graph_output("results", processed) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=3)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Define an async coroutine that fetches data + async def fetch_data(source: str, delay: float) -> dict: + await asyncio.sleep(delay) + return {"source": source, "value": delay * 100, "timestamp": time.time()} + + # Run multiple coroutines and push results to CSP + async def fetch_and_push(): + for i in range(3): + data = await fetch_data(f"source_{i}", 0.3 + i * 0.2) + bridge.push(data) + + bridge.run_coroutine(fetch_and_push()) + + results = runner.join() + bridge.stop() + + print(f" Total results: {len(results.get('results', []))}\n") + + +# Example 5: Bidirectional Communication + + +def example_bidirectional(): + """Demonstrate bidirectional communication between asyncio and CSP.""" + print("=" * 60) + print("Example 5: Bidirectional Communication") + print("=" * 60) + + bridge = BidirectionalBridge() + received_in_async = [] + + @csp.node + def process_and_respond(data: ts[object], bridge_ref: object) -> ts[str]: + if csp.ticked(data): + print(f" CSP: Processing {data}") + # Emit response back to 
asyncio + bridge_ref.emit({"original": data, "processed_at": str(csp.now())}) + return f"CSP processed: {data}" + + @csp.graph + def g(): + data = bridge.adapter.out() + result = process_and_respond(data, bridge) + csp.add_graph_output("results", result) + + # Register callback to receive events from CSP + def on_event(event): + print(f" Asyncio received: {event}") + received_in_async.append(event) + + bridge.on_event(on_event) + + start_time = utc_now() + bridge.start() + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=2)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Push data to CSP + for i in range(3): + time.sleep(0.3) + bridge.push(f"message_{i}") + + results = runner.join() + bridge.stop() + + print(f" CSP outputs: {len(results.get('results', []))}") + print(f" Asyncio received: {len(received_in_async)} events\n") + + +# Example 6: Periodic Tasks with CSP Timer Integration + + +def example_periodic_tasks(): + """Demonstrate periodic async tasks alongside CSP timers.""" + print("=" * 60) + print("Example 6: Periodic Tasks with CSP Timer Integration") + print("=" * 60) + + bridge = AsyncioBridge(str, "async_ticks") + + @csp.node + def csp_timer_node(timer: ts[bool], async_data: ts[str]) -> ts[str]: + """Node that processes both CSP timer ticks and async data.""" + with csp.state(): + s_count = 0 + + if csp.ticked(timer): + s_count += 1 + msg = f"CSP timer tick #{s_count} at {csp.now()}" + print(f" {msg}") + return msg + + if csp.ticked(async_data): + msg = f"Async data: {async_data} at {csp.now()}" + print(f" {msg}") + return msg + + @csp.graph + def g(): + # CSP's own timer + timer = csp.timer(timedelta(milliseconds=400)) + # Async data coming in + async_data = bridge.adapter.out() + # Process both + result = csp_timer_node(timer, async_data) + csp.add_graph_output("events", result) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, 
realtime=True, starttime=start_time, endtime=timedelta(seconds=2)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Set up periodic async task + tick_count = [0] + + def periodic_tick(): + if bridge._running and runner.is_alive(): + tick_count[0] += 1 + bridge.push(f"async_tick_{tick_count[0]}") + if tick_count[0] < 5: + bridge.call_later(0.3, periodic_tick) + + bridge.call_later(0.15, periodic_tick) # Offset from CSP timer + + results = runner.join() + bridge.stop() + + print(f" Total events: {len(results.get('events', []))}\n") + + +# Example 7: Using csp.now() Time for Scheduling + + +class CspAwareScheduler: + """ + A scheduler that can coordinate with CSP's internal time. + + This demonstrates how to use CSP's time (csp.now()) as a reference + for scheduling asyncio callbacks. + """ + + def __init__(self): + self.bridge = AsyncioBridge(dict, "scheduler_events") + self._csp_start_time: Optional[datetime] = None + self._pending_schedules: List[tuple] = [] + + def set_csp_start_time(self, start_time: datetime) -> None: + """Set the CSP engine start time for time calculations.""" + self._csp_start_time = start_time + self.bridge.start(start_time) + + def schedule_at_csp_offset(self, offset: timedelta, callback: Callable[..., Any], *args: Any) -> None: + """ + Schedule a callback at a specific offset from CSP start time. + + This ensures the callback fires at a time aligned with csp.now(). 
+ """ + if self._csp_start_time is None: + self._pending_schedules.append((offset, callback, args)) + return + + target_time = self._csp_start_time + offset + self.bridge.call_at(target_time, callback, *args) + + def push_with_metadata(self, event_type: str, data: Any) -> None: + """Push data with timing metadata.""" + self.bridge.push( + { + "type": event_type, + "data": data, + "wall_time": datetime.utcnow().isoformat(), + } + ) + + +def example_csp_time_scheduling(): + """Demonstrate scheduling aligned with CSP time.""" + print("=" * 60) + print("Example 7: Scheduling Aligned with CSP Time") + print("=" * 60) + + scheduler = CspAwareScheduler() + + @csp.node + def process(event: ts[dict]) -> ts[str]: + if csp.ticked(event): + csp_time = csp.now() + wall_time = event.get("wall_time", "unknown") + event_type = event.get("type", "unknown") + data = event.get("data", None) + result = f"[{csp_time}] {event_type}: {data} (wall: {wall_time[-12:]})" + print(f" {result}") + return result + + @csp.graph + def g(): + data = scheduler.bridge.adapter.out() + processed = process(data) + csp.add_graph_output("events", processed) + + start_time = utc_now() + scheduler.set_csp_start_time(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=3)) + + scheduler.bridge.adapter.wait_for_start(timeout=1.0) + + # Schedule events at specific CSP time offsets + scheduler.schedule_at_csp_offset( + timedelta(milliseconds=200), + lambda: scheduler.push_with_metadata("tick", "event_1"), + ) + scheduler.schedule_at_csp_offset( + timedelta(milliseconds=600), + lambda: scheduler.push_with_metadata("tick", "event_2"), + ) + scheduler.schedule_at_csp_offset( + timedelta(seconds=1), + lambda: scheduler.push_with_metadata("milestone", "1 second mark"), + ) + scheduler.schedule_at_csp_offset( + timedelta(seconds=1, milliseconds=500), + lambda: scheduler.push_with_metadata("tick", "event_3"), + ) + + results = runner.join() + 
scheduler.bridge.stop() + + print(f" Total events: {len(results.get('events', []))}\n") + + +# Example 8: Error Handling + + +def example_error_handling(): + """Demonstrate error handling in asyncio-CSP integration.""" + print("=" * 60) + print("Example 8: Error Handling") + print("=" * 60) + + bridge = AsyncioBridge(str, "error_demo") + errors_caught = [] + + @csp.node + def process(data: ts[str]) -> ts[str]: + if csp.ticked(data): + if "error" in data.lower(): + raise ValueError(f"Error processing: {data}") + print(f" CSP processed: {data}") + return data + + @csp.graph + def g(): + data = bridge.adapter.out() + # Note: In production, you'd want proper error handling here + processed = process(data) + csp.add_graph_output("results", processed) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=2)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Push normal data + bridge.call_later(0.1, lambda: bridge.push("normal message 1")) + bridge.call_later(0.3, lambda: bridge.push("normal message 2")) + bridge.call_later(0.5, lambda: bridge.push("normal message 3")) + + # Demonstrate async error handling + async def async_operation_with_error(): + try: + await asyncio.sleep(0.2) + raise RuntimeError("Simulated async error") + except RuntimeError as e: + print(f" Caught async error: {e}") + errors_caught.append(str(e)) + bridge.push("recovered from error") + + bridge.run_coroutine(async_operation_with_error()) + + try: + results = runner.join() + except Exception as e: + print(f" Graph error (expected): {e}") + results = {} + + bridge.stop() + + print(f" Errors caught in async: {len(errors_caught)}") + print(f" Results: {len(results.get('results', []))}\n") + + +# Example 9: Integration with Standard Asyncio Libraries + + +def example_asyncio_libraries(): + """Demonstrate using standard asyncio patterns with CSP.""" + print("=" * 60) + print("Example 9: 
Integration with Standard Asyncio Libraries") + print("=" * 60) + + bridge = AsyncioBridge(dict, "asyncio_events") + + @csp.node + def aggregate(events: ts[dict]) -> ts[dict]: + """Aggregate events from asyncio.""" + with csp.state(): + s_count = 0 + + if csp.ticked(events): + s_count += 1 + events["count"] = s_count + events["csp_time"] = str(csp.now()) + print(f" CSP: Event #{s_count} - {events.get('type')}") + return events + + @csp.graph + def g(): + data = bridge.adapter.out() + agg = aggregate(data) + csp.add_graph_output("aggregated", agg) + + start_time = utc_now() + bridge.start(start_time) + time.sleep(0.1) + + runner = csp.run_on_thread(g, realtime=True, starttime=start_time, endtime=timedelta(seconds=3)) + + bridge.adapter.wait_for_start(timeout=1.0) + + # Use standard asyncio patterns + async def producer_consumer_pattern(): + """Demonstrate asyncio Queue with CSP.""" + queue = asyncio.Queue() + + async def producer(): + for i in range(5): + await asyncio.sleep(0.2) + await queue.put({"type": "produced", "value": i}) + + async def consumer(): + while True: + try: + item = await asyncio.wait_for(queue.get(), timeout=1.0) + bridge.push(item) + except asyncio.TimeoutError: + break + + await asyncio.gather(producer(), consumer()) + + async def gather_pattern(): + """Demonstrate asyncio.gather with CSP.""" + + async def task(name, delay): + await asyncio.sleep(delay) + return {"type": "completed", "task": name, "delay": delay} + + results = await asyncio.gather( + task("fast", 0.1), + task("medium", 0.2), + task("slow", 0.3), + ) + for result in results: + bridge.push(result) + + # Run both patterns + bridge.run_coroutine(producer_consumer_pattern()) + bridge.run_coroutine(gather_pattern()) + + results = runner.join() + bridge.stop() + + print(f" Total aggregated events: {len(results.get('aggregated', []))}\n") + + +def main(): + """Run all examples.""" + print("\n" + "=" * 60) + print("CSP ASYNCIO INTEGRATION WITH RUNNING GRAPH") + print("=" * 60 + "\n") + 
+ example_basic_push() + example_call_later() + example_call_at() + example_async_coroutines() + example_bidirectional() + example_periodic_tasks() + example_csp_time_scheduling() + example_error_handling() + example_asyncio_libraries() + + print("=" * 60) + print("All examples completed!") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/examples/98_just_for_fun/e2_csp_fastapi.py b/examples/98_just_for_fun/e2_csp_fastapi.py new file mode 100644 index 000000000..543e75a3d --- /dev/null +++ b/examples/98_just_for_fun/e2_csp_fastapi.py @@ -0,0 +1,301 @@ +""" +FastAPI with CSP Event Loop Integration + +This example demonstrates two ways to integrate CSP with FastAPI: + +1. Using CSP's asyncio bridge to push data from FastAPI to a CSP graph +2. Running CSP graphs that feed data to FastAPI endpoints + +Note: CSP's event loop doesn't yet implement create_server(), so we can't +replace uvicorn's event loop entirely. Instead, we show how to bridge +asyncio web frameworks with CSP graphs running in realtime mode. 
+
+To run this example:
+    pip install fastapi uvicorn
+    python e2_csp_fastapi.py
+
+Then visit (check console output for actual port):
+    http://localhost:<port>/ - Hello world
+    http://localhost:<port>/docs - Swagger UI
+    http://localhost:<port>/prices - Real-time prices from CSP graph
+    http://localhost:<port>/order - Submit an order to the CSP graph (POST)
+"""
+
+import random
+import socket
+import time
+from datetime import datetime, timedelta
+from threading import Lock
+from typing import Dict, Optional
+
+# Check for FastAPI/uvicorn availability
+try:
+    import uvicorn
+    from fastapi import FastAPI
+    from pydantic import BaseModel
+except ImportError:
+    print("This example requires FastAPI and uvicorn.")
+    print("Install with: pip install fastapi uvicorn")
+    exit(1)
+
+import csp
+from csp import ts
+from csp.event_loop import BidirectionalBridge
+from csp.utils.datetime import utc_now
+
+# Utilities
+
+
+def find_free_port(start_port: int = 8000, max_attempts: int = 100) -> int:
+    """Find an available port starting from start_port."""
+    for port in range(start_port, start_port + max_attempts):
+        try:
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                s.bind(("0.0.0.0", port))
+                return port
+        except OSError:
+            continue
+    raise RuntimeError(f"Could not find a free port in range {start_port}-{start_port + max_attempts}")
+
+
+# Latest values from CSP graph (updated by CSP, read by FastAPI)
+_latest_prices: Dict[str, dict] = {}
+_prices_lock = Lock()
+
+# Bridge for sending data from FastAPI to CSP
+_to_csp_bridge: Optional[BidirectionalBridge] = None
+_csp_runner = None
+
+
+@csp.node
+def generate_prices(trigger: ts[bool], symbols: list, bridge: object) -> ts[dict]:
+    """Generate simulated market prices and emit to FastAPI."""
+    with csp.state():
+        s_prices = {}
+
+    if csp.ticked(trigger):
+        for sym in symbols:
+            if sym not in s_prices:
+                s_prices[sym] = 100.0
+
+            # Random walk
+            change = random.uniform(-0.5, 0.5)
+            s_prices[sym] = max(0.01, s_prices[sym] + change)
+
+            price_data = {
+ "symbol": sym, + "price": round(s_prices[sym], 2), + "timestamp": str(csp.now()), + } + + # Emit to FastAPI via bridge + if bridge is not None: + bridge.emit(price_data) + + return {"prices": s_prices.copy(), "time": str(csp.now())} + + +@csp.node +def process_orders( + orders: ts[dict], +) -> ts[dict]: + """Process orders received from FastAPI.""" + if csp.ticked(orders): + order = orders + result = { + "order_id": order.get("id"), + "symbol": order.get("symbol"), + "quantity": order.get("quantity"), + "status": "FILLED", + "fill_price": round(random.uniform(99, 101), 2), + "processed_at": str(csp.now()), + } + print(f" CSP processed order: {result}") + return result + + +@csp.graph +def market_data_graph( + symbols: list, + price_bridge: object, + order_bridge: object, +): + """Graph that generates prices and processes orders.""" + # Generate prices every 500ms + timer = csp.timer(timedelta(milliseconds=500)) + prices = generate_prices(timer, symbols, price_bridge) + + # Process orders from FastAPI + if order_bridge is not None: + orders = order_bridge.adapter.out() + results = process_orders(orders) + csp.add_graph_output("fills", results) + + csp.add_graph_output("prices", prices) + + +app = FastAPI( + title="CSP + FastAPI Integration", + description="FastAPI app integrated with a CSP graph for real-time data", + version="1.0.0", +) + + +class Order(BaseModel): + """Order request model.""" + + symbol: str + quantity: int + side: str = "BUY" + + +@app.get("/") +async def root(): + """Hello world endpoint.""" + return { + "message": "FastAPI + CSP Integration Example", + "description": "This app runs FastAPI alongside a CSP graph", + "csp_running": _csp_runner is not None and _csp_runner.is_alive(), + } + + +@app.get("/prices") +async def get_prices(): + """Get latest prices from CSP graph.""" + with _prices_lock: + return { + "prices": dict(_latest_prices), + "source": "CSP real-time graph", + } + + +@app.get("/price/{symbol}") +async def get_price(symbol: 
str): + """Get price for a specific symbol.""" + symbol = symbol.upper() + with _prices_lock: + if symbol in _latest_prices: + return _latest_prices[symbol] + return {"error": f"Symbol {symbol} not found", "available": list(_latest_prices.keys())} + + +@app.post("/order") +async def submit_order(order: Order): + """Submit an order to CSP for processing.""" + if _to_csp_bridge is None: + return {"error": "CSP graph not running"} + + order_data = { + "id": f"ORD-{int(time.time() * 1000)}", + "symbol": order.symbol.upper(), + "quantity": order.quantity, + "side": order.side, + "submitted_at": datetime.utcnow().isoformat(), + } + + # Push to CSP graph via bridge + _to_csp_bridge.push(order_data) + + return { + "status": "submitted", + "order": order_data, + "note": "Order sent to CSP for processing", + } + + +@app.get("/status") +async def status(): + """Get system status.""" + return { + "fastapi": "running", + "csp_graph": "running" if _csp_runner and _csp_runner.is_alive() else "stopped", + "symbols_tracked": list(_latest_prices.keys()), + "server_time": datetime.utcnow().isoformat(), + } + + +def on_price_update(price_data: dict): + """Callback when CSP emits a price update.""" + symbol = price_data.get("symbol") + if symbol: + with _prices_lock: + _latest_prices[symbol] = price_data + + +def start_csp_graph(): + """Start the CSP graph in a background thread.""" + global _to_csp_bridge, _csp_runner + + # Create bridges + price_bridge = BidirectionalBridge(dict, "prices") + _to_csp_bridge = BidirectionalBridge(dict, "orders") + + # Register callback for price updates + price_bridge.on_event(on_price_update) + + # Start bridges + price_bridge.start() + _to_csp_bridge.start() + + # Define symbols to track + symbols = ["AAPL", "GOOGL", "MSFT", "AMZN", "META"] + + # Run CSP graph in background thread + start_time = utc_now() + _csp_runner = csp.run_on_thread( + market_data_graph, + symbols, + price_bridge, + _to_csp_bridge, + realtime=True, + starttime=start_time, + 
endtime=timedelta(hours=1), # Run for 1 hour + ) + + print(f"CSP graph started, tracking: {symbols}") + return price_bridge, _to_csp_bridge + + +def main(): + """Run the FastAPI app with CSP integration.""" + print("=" * 60) + print("FastAPI + CSP Integration Example") + print("=" * 60) + print() + + # Start CSP graph + print("Starting CSP graph...") + price_bridge, order_bridge = start_csp_graph() + time.sleep(0.5) # Let CSP start + + # Find an available port + port = find_free_port() + + print(f"Starting FastAPI server on http://localhost:{port}") + print() + print("Endpoints:") + print(" GET / - Status") + print(" GET /prices - All latest prices from CSP") + print(" GET /price/AAPL - Price for specific symbol") + print(" POST /order - Submit order to CSP") + print(" GET /status - System status") + print(" GET /docs - Swagger UI") + print() + print("Press Ctrl+C to stop") + print("=" * 60) + + try: + uvicorn.run( + app, + host="0.0.0.0", + port=port, + log_level="info", + ) + finally: + print("\nStopping CSP graph...") + price_bridge.stop() + order_bridge.stop() + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 759c95048..736da0637 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,10 +71,12 @@ develop = [ "mdformat-tables>=1,<1.1", "ruff>=0.9,<0.15", # test + "fastapi", "pytest", "pytest-asyncio", "pytest-cov", "pytest-sugar", + "uvicorn", # showgraph "graphviz", "pillow",