diff options
Diffstat (limited to '')
-rw-r--r-- | libraries/luaproc/sched.c | 314 |
1 files changed, 314 insertions, 0 deletions
diff --git a/libraries/luaproc/sched.c b/libraries/luaproc/sched.c new file mode 100644 index 0000000..f19beeb --- /dev/null +++ b/libraries/luaproc/sched.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /*************************************************** | ||
2 | |||
3 | Copyright 2008 Alexandre Skyrme, Noemi Rodriguez, Roberto Ierusalimschy | ||
4 | |||
5 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
6 | of this software and associated documentation files (the "Software"), to deal | ||
7 | in the Software without restriction, including without limitation the rights | ||
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
9 | copies of the Software, and to permit persons to whom the Software is | ||
10 | furnished to do so, subject to the following conditions: | ||
11 | |||
12 | The above copyright notice and this permission notice shall be included in | ||
13 | all copies or substantial portions of the Software. | ||
14 | |||
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
21 | THE SOFTWARE. | ||
22 | |||
23 | ***************************************************** | ||
24 | |||
25 | [sched.c] | ||
26 | |||
27 | ****************************************************/ | ||
28 | |||
29 | #include <pthread.h> | ||
30 | #include <stdio.h> | ||
31 | #include <stdlib.h> | ||
32 | #include <string.h> | ||
33 | #include <unistd.h> | ||
34 | #include <arpa/inet.h> | ||
35 | #include <sys/select.h> | ||
36 | #include <sys/socket.h> | ||
37 | #include <sys/stat.h> | ||
38 | #include <lua.h> | ||
39 | #include <lauxlib.h> | ||
40 | #include <lualib.h> | ||
41 | |||
42 | #include "list.h" | ||
43 | #include "sched.h" | ||
44 | #include "luaproc.h" | ||
45 | #include "channel.h" | ||
46 | |||
/* boolean convenience values used throughout the scheduler */
#define TRUE 1
#define FALSE 0

/*********
* globals
*********/

/* ready process list: queue of lua processes waiting for a worker.
   protected by mutex_queue_access; created in sched_init_local() */
list lpready = NULL;

/* ready process queue access mutex — guards lpready and no_more_processes */
pthread_mutex_t mutex_queue_access = PTHREAD_MUTEX_INITIALIZER;

/* wake worker up conditional variable — signaled when work is queued
   (sched_queue_proc) or when shutdown is requested (sched_join_workerthreads);
   waited on under mutex_queue_access */
pthread_cond_t cond_wakeup_worker = PTHREAD_COND_INITIALIZER;

/* active luaproc count access mutex — guards lpcount */
pthread_mutex_t mutex_lp_count = PTHREAD_MUTEX_INITIALIZER;

/* no active luaproc conditional variable — signaled by sched_lpcount_dec()
   when lpcount reaches zero; waited on in sched_join_workerthreads() */
pthread_cond_t cond_no_active_lp = PTHREAD_COND_INITIALIZER;

/* number of active (created but not yet finished) luaprocs */
int lpcount = 0;

/* no more lua processes flag: set to TRUE at shutdown so idle workers
   stop waiting and exit instead of sleeping forever */
int no_more_processes = FALSE;
74 | |||
75 | /* worker thread main function */ | ||
/* worker thread main function: repeatedly pops a lua process from the ready
   queue, resumes it, and dispatches on the resume status (finished / yielded
   on a channel / yielded voluntarily / error). runs until the queue is empty
   and no_more_processes is set, or until a process marks the worker for
   destruction. `args` is unused. */
void *workermain( void *args ) {

  node n;
  luaproc lp;
  int procstat;
  int destroyworker;

  /* detach thread so resources are freed as soon as thread exits (no further joining) */
  pthread_detach( pthread_self( ));

  /* main worker loop */
  while ( 1 ) {

    /* get exclusive access to the ready process queue */
    pthread_mutex_lock( &mutex_queue_access );

    /* wait until instructed to wake up (because there's work to do or because its time to finish) */
    while (( list_node_count( lpready ) == 0 ) && ( no_more_processes == FALSE )) {
      pthread_cond_wait( &cond_wakeup_worker, &mutex_queue_access );
    }

    /* pop the first node from the ready process queue */
    n = list_pop_head( lpready );

    /* a NULL pop means the queue is empty, i.e. we were woken for shutdown */
    if ( n != NULL ) {
      /* get the popped node's data content (ie, the lua process struct) */
      lp = (luaproc )list_data( n );
    }
    else {
      /* free access to the process ready queue */
      pthread_mutex_unlock( &mutex_queue_access );
      /* finished thread */
      pthread_exit( NULL );
    }

    /* free access to the process ready queue */
    pthread_mutex_unlock( &mutex_queue_access );

    /* execute the lua code specified in the lua process struct
       (Lua 5.1 two-argument lua_resume: state + number of args) */
    procstat = lua_resume( luaproc_get_state( lp ), luaproc_get_args( lp ));

    /* reset the process argument count */
    luaproc_set_args( lp, 0 );

    /* check if process finished its whole execution */
    if ( procstat == 0 ) {

      /* destroy the corresponding list node */
      list_destroy_node( n );

      /* read the flag BEFORE the process may be recycled or closed below */
      destroyworker = luaproc_get_destroyworker( lp );

      /* set process status to finished */
      luaproc_set_status( lp, LUAPROC_STAT_FINISHED );

      /* check if lua process should be recycled and, if not, destroy it */
      if ( luaproc_recycle_push( lp ) == FALSE ) {
        lua_close( luaproc_get_state( lp ));
      }

      /* decrease active lua process count */
      sched_lpcount_dec();

      /* check if thread should be finished after lua process conclusion */
      if ( destroyworker ) {
        /* if so, finish thread */
        pthread_exit( NULL );
      }
    }

    /* check if process yielded */
    else if ( procstat == LUA_YIELD ) {

      /* if so, further check if yield originated from an unmatched send/recv operation */
      if ( luaproc_get_status( lp ) == LUAPROC_STAT_BLOCKED_SEND ) {
        /* queue blocked lua process on corresponding channel */
        luaproc_queue_sender( lp );
        /* unlock channel access (locked by the send operation inside the process) */
        luaproc_unlock_channel( luaproc_get_channel( lp ));
        /* destroy node (but not the associated Lua process, which now lives
           on the channel's wait queue) */
        list_destroy_node( n );
      }

      else if ( luaproc_get_status( lp ) == LUAPROC_STAT_BLOCKED_RECV ) {
        /* queue blocked lua process on corresponding channel */
        luaproc_queue_receiver( lp );
        /* unlock channel access (locked by the recv operation inside the process) */
        luaproc_unlock_channel( luaproc_get_channel( lp ));
        /* destroy node (but not the associated Lua process, which now lives
           on the channel's wait queue) */
        list_destroy_node( n );
      }

      /* or if yield resulted from an explicit call to coroutine.yield in the lua code being executed */
      else {
        /* get exclusive access to the ready process queue */
        pthread_mutex_lock( &mutex_queue_access );
        /* re-insert the job at the end of the ready process queue (node is reused) */
        list_add( lpready, n );
        /* free access to the process ready queue */
        pthread_mutex_unlock( &mutex_queue_access );
      }
    }

    /* check if there was any execution error (LUA_ERRRUN, LUA_ERRSYNTAX, LUA_ERRMEM or LUA_ERRERR) */
    else {
      /* destroy the corresponding node */
      list_destroy_node( n );
      /* print error message
         NOTE(review): luaL_checkstring raises a Lua error itself if the value
         at -1 is not a string — lua_tostring would be safer here; confirm the
         error value is always a string before changing */
      fprintf( stderr, "close lua_State (error: %s)\n", luaL_checkstring( luaproc_get_state( lp ), -1 ));
      /* close lua state */
      lua_close( luaproc_get_state( lp ));
      /* decrease active lua process count */
      sched_lpcount_dec();
    }
  }
}
194 | |||
195 | /* local scheduler initialization */ | ||
196 | int sched_init_local( int numworkers ) { | ||
197 | |||
198 | int tid; | ||
199 | int workercount = 0; | ||
200 | pthread_t worker; | ||
201 | |||
202 | /* initialize ready process list */ | ||
203 | lpready = list_new(); | ||
204 | |||
205 | /* initialize channels */ | ||
206 | channel_init(); | ||
207 | |||
208 | /* create initial worker threads */ | ||
209 | for ( tid = 0; tid < numworkers; tid++ ) { | ||
210 | if ( pthread_create( &worker, NULL, workermain, NULL ) == 0 ) { | ||
211 | workercount++; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | if ( workercount != numworkers ) { | ||
216 | return LUAPROC_SCHED_INIT_ERROR; | ||
217 | } | ||
218 | |||
219 | return LUAPROC_SCHED_OK; | ||
220 | } | ||
221 | |||
222 | /* exit scheduler */ | ||
223 | void sched_exit( void ) { | ||
224 | |||
225 | /* get exclusive access to the ready process queue */ | ||
226 | pthread_mutex_lock( &mutex_queue_access ); | ||
227 | /* destroy the ready process list */ | ||
228 | list_destroy( lpready ); | ||
229 | /* free access to the process ready queue */ | ||
230 | pthread_mutex_unlock( &mutex_queue_access ); | ||
231 | } | ||
232 | |||
233 | /* move process to ready queue (ie, schedule process) */ | ||
234 | int sched_queue_proc( luaproc lp ) { | ||
235 | |||
236 | /* get exclusive access to the ready process queue */ | ||
237 | pthread_mutex_lock( &mutex_queue_access ); | ||
238 | |||
239 | /* add process to ready queue */ | ||
240 | if ( list_add( lpready, list_new_node( lp )) != NULL ) { | ||
241 | |||
242 | /* set process status to ready */ | ||
243 | luaproc_set_status( lp, LUAPROC_STAT_READY ); | ||
244 | |||
245 | /* wake worker up */ | ||
246 | pthread_cond_signal( &cond_wakeup_worker ); | ||
247 | /* free access to the process ready queue */ | ||
248 | pthread_mutex_unlock( &mutex_queue_access ); | ||
249 | |||
250 | return LUAPROC_SCHED_QUEUE_PROC_OK; | ||
251 | } | ||
252 | else { | ||
253 | /* free access to the process ready queue */ | ||
254 | pthread_mutex_unlock( &mutex_queue_access ); | ||
255 | |||
256 | return LUAPROC_SCHED_QUEUE_PROC_ERR; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | /* synchronize worker threads */ | ||
261 | void sched_join_workerthreads( void ) { | ||
262 | |||
263 | pthread_mutex_lock( &mutex_lp_count ); | ||
264 | |||
265 | /* wait until there is no more active lua processes */ | ||
266 | while( lpcount != 0 ) { | ||
267 | pthread_cond_wait( &cond_no_active_lp, &mutex_lp_count ); | ||
268 | } | ||
269 | /* get exclusive access to the ready process queue */ | ||
270 | pthread_mutex_lock( &mutex_queue_access ); | ||
271 | /* set the no more active lua processes flag to true */ | ||
272 | no_more_processes = TRUE; | ||
273 | /* wake ALL workers up */ | ||
274 | pthread_cond_broadcast( &cond_wakeup_worker ); | ||
275 | /* free access to the process ready queue */ | ||
276 | pthread_mutex_unlock( &mutex_queue_access ); | ||
277 | /* wait for (join) worker threads */ | ||
278 | pthread_exit( NULL ); | ||
279 | |||
280 | pthread_mutex_unlock( &mutex_lp_count ); | ||
281 | |||
282 | } | ||
283 | |||
284 | /* increase active lua process count */ | ||
285 | void sched_lpcount_inc( void ) { | ||
286 | pthread_mutex_lock( &mutex_lp_count ); | ||
287 | lpcount++; | ||
288 | pthread_mutex_unlock( &mutex_lp_count ); | ||
289 | } | ||
290 | |||
291 | /* decrease active lua process count */ | ||
292 | void sched_lpcount_dec( void ) { | ||
293 | pthread_mutex_lock( &mutex_lp_count ); | ||
294 | lpcount--; | ||
295 | /* if count reaches zero, signal there are no more active processes */ | ||
296 | if ( lpcount == 0 ) { | ||
297 | pthread_cond_signal( &cond_no_active_lp ); | ||
298 | } | ||
299 | pthread_mutex_unlock( &mutex_lp_count ); | ||
300 | } | ||
301 | |||
302 | /* create a new worker pthread */ | ||
303 | int sched_create_worker( void ) { | ||
304 | |||
305 | pthread_t worker; | ||
306 | |||
307 | /* create a new pthread */ | ||
308 | if ( pthread_create( &worker, NULL, workermain, NULL ) != 0 ) { | ||
309 | return LUAPROC_SCHED_PTHREAD_ERROR; | ||
310 | } | ||
311 | |||
312 | return LUAPROC_SCHED_OK; | ||
313 | } | ||
314 | |||