diff options
author | David Walter Seikel | 2012-02-25 00:09:31 +1000 |
---|---|---|
committer | David Walter Seikel | 2012-02-25 00:09:31 +1000 |
commit | eff5b6e0fc5d7106a1a38967c3bbcbf73fdd6e44 (patch) | |
tree | ea1acf1bbd9fdd1f9905a7327183f0fe2bd87468 /libraries/luaproc/sched.c | |
parent | Time to fork luaproc and turn it into an EFL based thing, with tighter integr... (diff) | |
download | SledjHamr-eff5b6e0fc5d7106a1a38967c3bbcbf73fdd6e44.zip SledjHamr-eff5b6e0fc5d7106a1a38967c3bbcbf73fdd6e44.tar.gz SledjHamr-eff5b6e0fc5d7106a1a38967c3bbcbf73fdd6e44.tar.bz2 SledjHamr-eff5b6e0fc5d7106a1a38967c3bbcbf73fdd6e44.tar.xz |
Moved luaproc into LuaSL, merged it all into its own file, and replaced the luaproc list with Eina_Clist.
From this point on, luaproc is officially forked.
Diffstat (limited to '')
-rw-r--r-- | libraries/luaproc/sched.c | 356 |
1 files changed, 0 insertions, 356 deletions
diff --git a/libraries/luaproc/sched.c b/libraries/luaproc/sched.c deleted file mode 100644 index 474c82b..0000000 --- a/libraries/luaproc/sched.c +++ /dev/null | |||
@@ -1,356 +0,0 @@ | |||
1 | /*************************************************** | ||
2 | |||
3 | Copyright 2008 Alexandre Skyrme, Noemi Rodriguez, Roberto Ierusalimschy | ||
4 | |||
5 | Permission is hereby granted, free of charge, to any person obtaining a copy | ||
6 | of this software and associated documentation files (the "Software"), to deal | ||
7 | in the Software without restriction, including without limitation the rights | ||
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
9 | copies of the Software, and to permit persons to whom the Software is | ||
10 | furnished to do so, subject to the following conditions: | ||
11 | |||
12 | The above copyright notice and this permission notice shall be included in | ||
13 | all copies or substantial portions of the Software. | ||
14 | |||
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
21 | THE SOFTWARE. | ||
22 | |||
23 | ***************************************************** | ||
24 | |||
25 | [sched.c] | ||
26 | |||
27 | ****************************************************/ | ||
28 | |||
29 | #include <pthread.h> | ||
30 | #include <stdio.h> | ||
31 | #include <stdlib.h> | ||
32 | #include <string.h> | ||
33 | #include <unistd.h> | ||
34 | #include <arpa/inet.h> | ||
35 | #include <sys/select.h> | ||
36 | #include <sys/socket.h> | ||
37 | #include <sys/stat.h> | ||
38 | #include <lua.h> | ||
39 | #include <lauxlib.h> | ||
40 | #include <lualib.h> | ||
41 | |||
42 | #include "list.h" | ||
43 | #include "sched.h" | ||
44 | #include "luaproc.h" | ||
45 | #include "channel.h" | ||
46 | |||
#define TRUE 1
#define FALSE 0

/*********
* globals
*********/

/* list of lua processes that are ready to run; consumed by the workers */
list lpready = NULL;

/* mutex protecting every access to the ready process list */
pthread_mutex_t mutex_queue_access = PTHREAD_MUTEX_INITIALIZER;

/* condition variable (paired with mutex_queue_access) used to wake a
   worker when work is queued, or all workers when shutdown is requested */
pthread_cond_t cond_wakeup_worker = PTHREAD_COND_INITIALIZER;

/* mutex protecting lpcount */
pthread_mutex_t mutex_lp_count = PTHREAD_MUTEX_INITIALIZER;

/* condition variable signalled (under mutex_lp_count) when lpcount
   drops to zero, i.e. no lua process remains active */
pthread_cond_t cond_no_active_lp = PTHREAD_COND_INITIALIZER;

/* number of active lua processes (guarded by mutex_lp_count) */
int lpcount = 0;

/* set to TRUE during shutdown to make the worker threads exit */
int no_more_processes = FALSE;
75 | /* worker thread main function */ | ||
76 | void *workermain( void *args ) { | ||
77 | |||
78 | node n; | ||
79 | luaproc lp; | ||
80 | int procstat; | ||
81 | int destroyworker; | ||
82 | |||
83 | /* detach thread so resources are freed as soon as thread exits (no further joining) */ | ||
84 | pthread_detach( pthread_self( )); | ||
85 | |||
86 | //printf("NEW WORKER\n"); | ||
87 | /* main worker loop */ | ||
88 | while ( 1 ) { | ||
89 | |||
90 | //printf("a\n"); | ||
91 | /* get exclusive access to the ready process queue */ | ||
92 | pthread_mutex_lock( &mutex_queue_access ); | ||
93 | |||
94 | /* wait until instructed to wake up (because there's work to do or because its time to finish) */ | ||
95 | while (( list_node_count( lpready ) == 0 ) && ( no_more_processes == FALSE )) { | ||
96 | pthread_cond_wait( &cond_wakeup_worker, &mutex_queue_access ); | ||
97 | } | ||
98 | |||
99 | ////printf("b\n"); | ||
100 | /* pop the first node from the ready process queue */ | ||
101 | n = list_pop_head( lpready ); | ||
102 | |||
103 | ////printf("c\n"); | ||
104 | /* ensure list pop succeeded before proceeding */ | ||
105 | if ( n != NULL ) { | ||
106 | //printf("c.0\n"); | ||
107 | /* get the popped node's data content (ie, the lua process struct) */ | ||
108 | lp = (luaproc )list_data( n ); | ||
109 | } | ||
110 | else { | ||
111 | ////printf("c.1\n"); | ||
112 | /* free access to the process ready queue */ | ||
113 | pthread_mutex_unlock( &mutex_queue_access ); | ||
114 | /* finished thread */ | ||
115 | //printf("c.2 pthread_exit\n"); | ||
116 | pthread_exit( NULL ); | ||
117 | //printf("c.3\n"); | ||
118 | } | ||
119 | |||
120 | ////printf("d\n"); | ||
121 | /* free access to the process ready queue */ | ||
122 | pthread_mutex_unlock( &mutex_queue_access ); | ||
123 | |||
124 | //printf("e lua_resum\n"); | ||
125 | /* execute the lua code specified in the lua process struct */ | ||
126 | procstat = lua_resume( luaproc_get_state( lp ), luaproc_get_args( lp )); | ||
127 | |||
128 | //printf("f\n"); | ||
129 | /* reset the process argument count */ | ||
130 | luaproc_set_args( lp, 0 ); | ||
131 | |||
132 | ////printf("g\n"); | ||
133 | /* check if process finished its whole execution */ | ||
134 | if ( procstat == 0 ) { | ||
135 | |||
136 | //printf("g.0\n"); | ||
137 | /* destroy the corresponding list node */ | ||
138 | list_destroy_node( n ); | ||
139 | |||
140 | ////printf("g.1\n"); | ||
141 | /* check if worker thread should be destroyed */ | ||
142 | destroyworker = luaproc_get_destroyworker( lp ); | ||
143 | |||
144 | ////printf("g.2 proc finished\n"); | ||
145 | /* set process status to finished */ | ||
146 | luaproc_set_status( lp, LUAPROC_STAT_FINISHED ); | ||
147 | |||
148 | ////printf("g.3\n"); | ||
149 | /* check if lua process should be recycled and, if not, destroy it */ | ||
150 | if ( luaproc_recycle_push( lp ) == FALSE ) { | ||
151 | //printf("g.3.0 lua_close\n"); | ||
152 | lua_close( luaproc_get_state( lp )); | ||
153 | } | ||
154 | |||
155 | ////printf("g.4\n"); | ||
156 | /* decrease active lua process count */ | ||
157 | sched_lpcount_dec(); | ||
158 | |||
159 | ////printf("g.5\n"); | ||
160 | /* check if thread should be finished after lua process conclusion */ | ||
161 | if ( destroyworker ) { | ||
162 | //printf("g.5.0 pthread_exit\n"); | ||
163 | /* if so, finish thread */ | ||
164 | pthread_exit( NULL ); | ||
165 | } | ||
166 | //printf("g.6\n"); | ||
167 | } | ||
168 | |||
169 | /* check if process yielded */ | ||
170 | else if ( procstat == LUA_YIELD ) { | ||
171 | |||
172 | //printf("??????????????h.0\n"); | ||
173 | /* if so, further check if yield originated from an unmatched send/recv operation */ | ||
174 | if ( luaproc_get_status( lp ) == LUAPROC_STAT_BLOCKED_SEND ) { | ||
175 | //printf("??????????????h.1\n"); | ||
176 | /* queue blocked lua process on corresponding channel */ | ||
177 | luaproc_queue_sender( lp ); | ||
178 | /* unlock channel access */ | ||
179 | luaproc_unlock_channel( luaproc_get_channel( lp )); | ||
180 | /* destroy node (but not the associated Lua process) */ | ||
181 | list_destroy_node( n ); | ||
182 | } | ||
183 | |||
184 | else if ( luaproc_get_status( lp ) == LUAPROC_STAT_BLOCKED_RECV ) { | ||
185 | //printf("??????????????h.2\n"); | ||
186 | /* queue blocked lua process on corresponding channel */ | ||
187 | luaproc_queue_receiver( lp ); | ||
188 | /* unlock channel access */ | ||
189 | luaproc_unlock_channel( luaproc_get_channel( lp )); | ||
190 | /* destroy node (but not the associated Lua process) */ | ||
191 | list_destroy_node( n ); | ||
192 | } | ||
193 | |||
194 | /* or if yield resulted from an explicit call to coroutine.yield in the lua code being executed */ | ||
195 | else { | ||
196 | //printf("??????????????h.3\n"); | ||
197 | /* get exclusive access to the ready process queue */ | ||
198 | pthread_mutex_lock( &mutex_queue_access ); | ||
199 | /* re-insert the job at the end of the ready process queue */ | ||
200 | list_add( lpready, n ); | ||
201 | /* free access to the process ready queue */ | ||
202 | pthread_mutex_unlock( &mutex_queue_access ); | ||
203 | } | ||
204 | } | ||
205 | |||
206 | /* check if there was any execution error (LUA_ERRRUN, LUA_ERRSYNTAX, LUA_ERRMEM or LUA_ERRERR) */ | ||
207 | else { | ||
208 | //printf("??????????????i.0\n"); | ||
209 | /* destroy the corresponding node */ | ||
210 | list_destroy_node( n ); | ||
211 | /* print error message */ | ||
212 | fprintf( stderr, "close lua_State (error: %s)\n", luaL_checkstring( luaproc_get_state( lp ), -1 )); | ||
213 | /* close lua state */ | ||
214 | lua_close( luaproc_get_state( lp )); | ||
215 | /* decrease active lua process count */ | ||
216 | sched_lpcount_dec(); | ||
217 | } | ||
218 | //printf("END\n"); | ||
219 | } | ||
220 | } | ||
221 | |||
222 | /* local scheduler initialization */ | ||
223 | int sched_init_local( int numworkers ) { | ||
224 | |||
225 | int tid; | ||
226 | int workercount = 0; | ||
227 | pthread_t worker; | ||
228 | |||
229 | /* initialize ready process list */ | ||
230 | lpready = list_new(); | ||
231 | |||
232 | /* initialize channels */ | ||
233 | channel_init(); | ||
234 | |||
235 | /* create initial worker threads */ | ||
236 | for ( tid = 0; tid < numworkers; tid++ ) { | ||
237 | if ( pthread_create( &worker, NULL, workermain, NULL ) == 0 ) { | ||
238 | workercount++; | ||
239 | } | ||
240 | } | ||
241 | |||
242 | if ( workercount != numworkers ) { | ||
243 | return LUAPROC_SCHED_INIT_ERROR; | ||
244 | } | ||
245 | |||
246 | return LUAPROC_SCHED_OK; | ||
247 | } | ||
248 | |||
249 | /* exit scheduler */ | ||
250 | void sched_exit( void ) { | ||
251 | |||
252 | /* get exclusive access to the ready process queue */ | ||
253 | pthread_mutex_lock( &mutex_queue_access ); | ||
254 | /* destroy the ready process list */ | ||
255 | list_destroy( lpready ); | ||
256 | /* free access to the process ready queue */ | ||
257 | pthread_mutex_unlock( &mutex_queue_access ); | ||
258 | } | ||
259 | |||
260 | /* move process to ready queue (ie, schedule process) */ | ||
261 | int sched_queue_proc( luaproc lp ) { | ||
262 | |||
263 | /* get exclusive access to the ready process queue */ | ||
264 | pthread_mutex_lock( &mutex_queue_access ); | ||
265 | |||
266 | /* add process to ready queue */ | ||
267 | if ( list_add( lpready, list_new_node( lp )) != NULL ) { | ||
268 | |||
269 | /* set process status to ready */ | ||
270 | luaproc_set_status( lp, LUAPROC_STAT_READY ); | ||
271 | |||
272 | /* wake worker up */ | ||
273 | pthread_cond_signal( &cond_wakeup_worker ); | ||
274 | /* free access to the process ready queue */ | ||
275 | pthread_mutex_unlock( &mutex_queue_access ); | ||
276 | |||
277 | return LUAPROC_SCHED_QUEUE_PROC_OK; | ||
278 | } | ||
279 | else { | ||
280 | /* free access to the process ready queue */ | ||
281 | pthread_mutex_unlock( &mutex_queue_access ); | ||
282 | |||
283 | return LUAPROC_SCHED_QUEUE_PROC_ERR; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | /* synchronize worker threads */ | ||
288 | void sched_join_workerthreads( void ) { | ||
289 | |||
290 | ////printf(" 0\n"); | ||
291 | pthread_mutex_lock( &mutex_lp_count ); | ||
292 | |||
293 | //printf(" 1 wait for procs to end\n"); | ||
294 | /* wait until there is no more active lua processes */ | ||
295 | while( lpcount != 0 ) { | ||
296 | //printf(" 1.0\n"); | ||
297 | pthread_cond_wait( &cond_no_active_lp, &mutex_lp_count ); | ||
298 | } | ||
299 | /* get exclusive access to the ready process queue */ | ||
300 | ////printf(" 2\n"); | ||
301 | pthread_mutex_lock( &mutex_queue_access ); | ||
302 | /* set the no more active lua processes flag to true */ | ||
303 | ////printf(" 3\n"); | ||
304 | no_more_processes = TRUE; | ||
305 | /* wake ALL workers up */ | ||
306 | //printf(" 4 wake up all workers.\n"); | ||
307 | pthread_cond_broadcast( &cond_wakeup_worker ); | ||
308 | /* free access to the process ready queue */ | ||
309 | ////printf(" 5\n"); | ||
310 | pthread_mutex_unlock( &mutex_queue_access ); | ||
311 | |||
312 | // We don't need this, as we only get here during shutdown. Linking this to EFL results in a hang otherwise anyway. | ||
313 | /* wait for (join) worker threads */ | ||
314 | //printf(" 6 pthread_exit, waiting for workers to end\n"); | ||
315 | // pthread_exit( NULL ); | ||
316 | |||
317 | //printf("7\n"); | ||
318 | pthread_mutex_unlock( &mutex_lp_count ); | ||
319 | |||
320 | //printf("8\n"); | ||
321 | } | ||
322 | |||
323 | /* increase active lua process count */ | ||
324 | void sched_lpcount_inc( void ) { | ||
325 | //printf("inc procs++++++++++++++++++++++++++++++++++++++++\n"); | ||
326 | pthread_mutex_lock( &mutex_lp_count ); | ||
327 | lpcount++; | ||
328 | pthread_mutex_unlock( &mutex_lp_count ); | ||
329 | } | ||
330 | |||
331 | /* decrease active lua process count */ | ||
332 | void sched_lpcount_dec( void ) { | ||
333 | //printf("dec procs----------------------------------------\n"); | ||
334 | pthread_mutex_lock( &mutex_lp_count ); | ||
335 | lpcount--; | ||
336 | /* if count reaches zero, signal there are no more active processes */ | ||
337 | if ( lpcount == 0 ) { | ||
338 | //printf("dec procs AND NONE LEFT000000000000000000000000000\n"); | ||
339 | pthread_cond_signal( &cond_no_active_lp ); | ||
340 | } | ||
341 | pthread_mutex_unlock( &mutex_lp_count ); | ||
342 | } | ||
343 | |||
344 | /* create a new worker pthread */ | ||
345 | int sched_create_worker( void ) { | ||
346 | |||
347 | pthread_t worker; | ||
348 | |||
349 | /* create a new pthread */ | ||
350 | if ( pthread_create( &worker, NULL, workermain, NULL ) != 0 ) { | ||
351 | return LUAPROC_SCHED_PTHREAD_ERROR; | ||
352 | } | ||
353 | |||
354 | return LUAPROC_SCHED_OK; | ||
355 | } | ||
356 | |||