/*
 * T2EX / T-Kernel kernel-mutex emulation layer.
 *
 * NOTE(review): the original file header (copyright/license comment)
 * was garbled during extraction and has been replaced by this
 * placeholder — restore the project's standard header from VCS.
 */
52: #include <tk/tkernel.h>
53: #include "rominfo.h"
54:
55: #include <sys/param.h>
56: #include <sys/types.h>
57: #include <sys/systm.h>
58: #include <sys/mutex.h>
59: #include <sys/_queue.h>
60: #include <sys/kmem.h>
61: #ifndef T2EX
62: #include <sys/atomic.h>
63: #else
64: #include <sys/_atomic.h>
65: #endif
66: #include <sys/file.h>
67: #include <sys/filedesc.h>
68: #include <sys/malloc.h>
69:
70: #include <sys/tkn_intr.h>
71:
72: #include <tk/util.h>
73: #include <tk/dbgspt.h>
74:
75: #include "tkn.h"
76:
77:
/* Fallback pool size if no sizing information is available. */
#define DEFAULT_MAX_COUNT ((UINT)128)
/* Bits used for the slot number within a MutexMLock (32 slots -> 5 bits). */
#define INDEX_OFFSET 5

/*
 * A mutex id (mtxid) packs (pool-entry index, slot number 0..31).
 * The index is stored biased by +1 so that mtxid == 0 can be reserved
 * to mean "uninitialized".
 */
#define INDEX(x) (((x) >> INDEX_OFFSET) - 1)
#define NO(x) ((x) & 0x1fU)
#define ID(x, y) ((((x) + 1) << INDEX_OFFSET) | (y))
84:
/*
 * One pool entry: multiplexes up to 32 emulated kernel mutexes onto a
 * single T-Kernel FastMLock (one bitmap bit / owner slot per mutex).
 */
typedef struct {
	FastMLock lock;		/* backing T-Kernel multi-lock (created lazily) */
	UW used_bitmap;		/* bit n set => slot n is allocated */
	INT owner_tid[32];	/* task id holding slot n, 0 = not held */
} MutexMLock;
90:
91:
/* Pool of MutexMLock entries backing all emulated mutexes. */
LOCAL UINT max_mutex_count;	/* total number of mutex slots in the pool */
LOCAL UINT lock_count;		/* number of pool entries (slots / 32, rounded up) */
LOCAL MutexMLock* mutex_mlock;	/* array[lock_count], allocated in tkn_mutex_initialize() */

/*
 * Spin-mutex bookkeeping: depth of nested spin-mutex acquisitions and
 * the interrupt level saved by the outermost one (restored when the
 * count returns to zero).
 */
INT mtx_oldspl = 0;
INT mtx_count = 0;
98:
99:
100: EXPORT ER tkn_mutex_initialize(void)
101: {
102: LockTKN();
103:
104: 105: 106: 107: 108: 109:
110: max_mutex_count = 57 + (maxfiles - NDFDFILE) * 2;
111: lock_count = (max_mutex_count + 31) / 32;
112:
113: mutex_mlock = malloc(sizeof(MutexMLock) * lock_count, M_KMEM, M_NOWAIT | M_ZERO);
114:
115: mtx_oldspl = 0;
116: mtx_count = 0;
117:
118: UnlockTKN();
119:
120: return (mutex_mlock == NULL) ? E_NOMEM : E_OK;
121: }
122:
123: EXPORT ER tkn_mutex_finish(void)
124: {
125: int index;
126: ER ercd = E_OK;
127:
128: LockTKN();
129:
130: for(index = 0; index < lock_count; index++) {
131: if ( mutex_mlock[index].used_bitmap != 0 ) {
132: ercd = DeleteMLock(&mutex_mlock[index].lock);
133: if ( ercd < E_OK ) {
134: break;
135: }
136: }
137: }
138:
139: if ( ercd == E_OK ) {
140: free(mutex_mlock, M_KMEM);
141: }
142:
143: UnlockTKN();
144:
145: return ercd;
146: }
147:
/*
 * Allocate a slot from the pool and initialize 'mtx'.
 *
 * MUTEX_DEFAULT/MUTEX_DRIVER are resolved to MUTEX_ADAPTIVE for
 * IPL_NONE and software-interrupt levels, otherwise MUTEX_SPIN.  The
 * first pool entry with a free bit is used, creating its FastMLock
 * lazily on first use; the chosen (entry, bit) pair is encoded into
 * mtx->mtxid.
 *
 * Returns 0 on success, ENOMEM if the pool is exhausted or the
 * FastMLock cannot be created, ENOENT if no free bit is found.
 * Caller must hold LockTKN().
 */
LOCAL int init_mutex(__volatile kmutex_t* mtx, kmutex_type_t type, int ipl)
{
	int index;
	int no;

	/* Map the requested type onto the two implemented kinds. */
	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}
	mtx->type = type;

	/* Find the first pool entry with at least one free slot. */
	for(index = 0; index < lock_count; index++) {
		if ( mutex_mlock[index].used_bitmap != 0xffffffffU ) {
			break;
		}
	}

	if ( index >= lock_count ) {
		return ENOMEM;	/* every slot of every entry is taken */
	}

	/* First slot of this entry: create the backing FastMLock lazily. */
	if ( mutex_mlock[index].used_bitmap == 0 ) {
		ER ercd = CreateMLock(&mutex_mlock[index].lock, (CONST UB*)"Nmtx");
		if ( ercd < E_OK ) {
			return ENOMEM;
		}
	}

	/* Pick the lowest free bit within the chosen entry. */
	for(no = 0; no < sizeof(UW)*8; no++) {
		if ( (mutex_mlock[index].used_bitmap & (1U << no)) == 0 ) {
			break;
		}
	}

	if ( no == sizeof(UW)*8 ) {
		return ENOENT;
	}

	mutex_mlock[index].used_bitmap |= 1U << no;

	/* Encode (entry, bit); id 0 is reserved for "uninitialized". */
	mtx->mtxid = ID(index, no);
	mtx->ipl = ipl;

	return 0;
}
206:
/*
 * Acquire the FastMLock slot backing 'mtx', waiting up to 'tmo'.
 *
 * Caller must hold LockTKN(); it is dropped while blocking on the lock
 * and re-acquired before ownership is recorded.  For an infinite wait
 * the lock is polled first (TMO_POL); only on contention is the
 * interrupt level lowered to IPL_NONE for the blocking wait, so the
 * task never sleeps with interrupts masked.
 *
 * Returns the MLockTmo result; on success the calling task is recorded
 * as owner of the slot.
 */
ER lock_mutex(__volatile kmutex_t* mtx, TMO tmo)
{
	int index = INDEX(mtx->mtxid);
	int no = NO(mtx->mtxid);
	ER ercd;
	int s;

	UnlockTKN();

	if ( tmo == TMO_FEVR ) {
		/* Fast path: try to take the lock without blocking. */
		ercd = MLockTmo(&mutex_mlock[index].lock, no, TMO_POL);
		if ( ercd == E_TMOUT ) {
			/* Contended: drop to IPL_NONE while sleeping on the lock. */
			s = tkn_spl_unlock(IPL_NONE);
			ercd = MLockTmo(&mutex_mlock[index].lock, no, tmo);
			tkn_spl_lock(s);
		}
	} else {
		ercd = MLockTmo(&mutex_mlock[index].lock, no, tmo);
	}

	LockTKN();
	if ( ercd >= E_OK ) {
		mutex_mlock[index].owner_tid[no] = tk_get_tid();
	}

	return ercd;
}
234:
235: ER unlock_mutex(__volatile kmutex_t* mtx)
236: {
237: int index = INDEX(mtx->mtxid);
238: int no = NO(mtx->mtxid);
239:
240: if ( mutex_mlock[index].owner_tid[no] != tk_get_tid() ) {
241: return E_ILUSE;
242: }
243:
244: mutex_mlock[index].owner_tid[no] = 0;
245: ER ercd = MUnlock(&mutex_mlock[index].lock, no);
246:
247: return ercd;
248: }
249:
/*
 * Return a mutex slot to the pool (helper for tkn_mutex_destroy).
 *
 * A slot still held by the calling task is unlocked first, including
 * undoing the spin-mutex spl/nesting bookkeeping; a slot held by a
 * different task makes unlock_mutex() fail and triggers a panic.
 * When the last slot of a pool entry is released, its FastMLock is
 * deleted, mirroring the lazy creation in init_mutex().
 * Caller must hold LockTKN().
 */
LOCAL void release_mutex(__volatile kmutex_t* mtx)
{
	int index = INDEX(mtx->mtxid);
	int no = NO(mtx->mtxid);
	int tid = mutex_mlock[index].owner_tid[no];

	if ( tid != 0 ) {
		/* Slot is still locked: release it before freeing. */
		ER ercd = unlock_mutex(mtx);
		if ( ercd < E_OK ) {
			panic("release_mutex: %d\n", MERCD(ercd));
		}
		if ( mtx->type == MUTEX_SPIN ) {
			/* Leaving the outermost spin mutex restores the saved spl. */
			if ( --mtx_count == 0 ) {
				int s = mtx_oldspl;
				mtx_oldspl = 0;
				tkn_spl_unlock(s);
			}
		}
	}

	mutex_mlock[index].used_bitmap &= ~(1U << no);

	/* Last slot of this entry released: delete the backing FastMLock. */
	if ( mutex_mlock[index].used_bitmap == 0 ) {
		ER ercd = DeleteMLock(&mutex_mlock[index].lock);
		if ( ercd < E_OK ) {
			panic("release_mutex: %d\n", MERCD(ercd));
		}
	}

	mtx->mtxid = 0;	/* mark as uninitialized */
}
281:
282:
283:
284: int tkn_mutex_init(__volatile kmutex_t *mtx, kmutex_type_t type, int ipl)
285: {
286: int error;
287: (void)type;
288:
289: LockTKN();
290:
291: #ifdef DEBUG
292: unsigned long old_id = mtx->mtxid;
293: #endif
294: error = init_mutex(mtx, type, ipl);
295: #ifdef DEBUG
296: printf("mutex init %lu(at %p) by %d, old id = %lu\n", mtx->mtxid, mtx, tk_get_tid(), old_id);
297: #endif
298:
299: UnlockTKN();
300:
301: return error;
302: }
303:
304: void tkn_mutex_destroy(__volatile kmutex_t *mtx)
305: {
306: LockTKN();
307:
308: if ( mtx->mtxid != 0 ) {
309: #ifdef DEBUG
310: printf("mutex destroy %lu(at %p) by %d\n", mtx->mtxid, mtx, tk_get_tid());
311: #endif
312: release_mutex(mtx);
313: } else {
314: #ifdef DEBUG
315: printf("mutex destroy uninitialized %lu by %d\n", mtx->mtxid, tk_get_tid());
316: #endif
317: }
318:
319: UnlockTKN();
320: }
321:
/*
 * Acquire a spin-type mutex: raise the interrupt level to mtx->ipl,
 * then take the backing lock.  The spl value saved by the outermost
 * nested spin-mutex acquisition is kept in mtx_oldspl and restored
 * when the nesting count drops to zero in tkn_mutex_spin_exit().
 * Panics on any acquisition failure.
 */
void tkn_mutex_spin_enter( __volatile kmutex_t *mtx )
{
	LockTKN();
	if ( mtx->mtxid == 0 ) {
		panic("tkn_mutex_enter: not initialized.\n");
	}
	UnlockTKN();

	/* Raise spl before taking the lock; LockTKN must not be held here. */
	int s = tkn_spl_lock(mtx->ipl);

	LockTKN();
	if ( mtx_count++ == 0 ) {
		mtx_oldspl = s;	/* remember spl of the outermost acquisition */
	}

	ER ercd = lock_mutex(mtx, TMO_FEVR);
	if ( ercd < E_OK ) {
		ID id = tk_get_tid();
		if ( ercd == E_ID ) {
			panic("tkn_mutex_enter: task#%d, error=%d, ID = %lu\n", id, MERCD(ercd), mtx->mtxid);
		} else {
			panic("tkn_mutex_enter: task#%d, error=%d\n", id, MERCD(ercd));
		}
	}
	UnlockTKN();
}
348:
349: void tkn_mutex_adaptive_enter( __volatile kmutex_t *mtx )
350: {
351: LockTKN();
352: if ( mtx->mtxid == 0 ) {
353: panic("tkn_mutex_enter: not initialized.\n");
354: }
355:
356: ER ercd = lock_mutex(mtx, TMO_FEVR);
357: if ( ercd < E_OK ) {
358: ID id = tk_get_tid();
359: if ( ercd == E_ID ) {
360: panic("tkn_mutex_enter: task#%d, error=%d, ID = %lu\n", id, MERCD(ercd), mtx->mtxid);
361: } else {
362: panic("tkn_mutex_enter: task#%d, error=%d\n", id, MERCD(ercd));
363: }
364: }
365: UnlockTKN();
366: }
367:
368: void tkn_mutex_enter( __volatile kmutex_t *mtx )
369: {
370: if ( mtx->type == MUTEX_SPIN ) {
371: tkn_mutex_spin_enter(mtx);
372: } else {
373: tkn_mutex_adaptive_enter(mtx);
374: }
375: }
376:
/*
 * Release a spin-type mutex and, when this was the outermost nested
 * spin mutex (mtx_count reaches zero), restore the interrupt level
 * saved in mtx_oldspl.  Panics if the mutex is uninitialized or the
 * unlock fails (e.g. the caller is not the owner).
 */
void tkn_mutex_spin_exit( __volatile kmutex_t *mtx )
{
	LockTKN();
	if ( mtx->mtxid == 0 ) {
		panic("tkn_mutex_exit: not initialized.\n");
	}

	ER ercd = unlock_mutex(mtx);
	if ( ercd < E_OK ) {
		panic("tkn_mutex_exit: %d\n", MERCD(ercd));
	}

	/* Outermost spin mutex released: drop back to the saved spl. */
	if ( --mtx_count == 0 ) {
		int s = mtx_oldspl;
		mtx_oldspl = 0;
		tkn_spl_unlock(s);
	}
	UnlockTKN();
}
396:
397: void tkn_mutex_adaptive_exit( __volatile kmutex_t *mtx )
398: {
399: LockTKN();
400: if ( mtx->mtxid == 0 ) {
401: panic("tkn_mutex_exit: not initialized.\n");
402: }
403:
404: ER ercd = unlock_mutex(mtx);
405: if ( ercd < E_OK ) {
406: panic("tkn_mutex_exit: %d\n", MERCD(ercd));
407: }
408:
409: UnlockTKN();
410: }
411:
412: void tkn_mutex_exit( __volatile kmutex_t *mtx )
413: {
414: if ( mtx->type == MUTEX_SPIN ) {
415: tkn_mutex_spin_exit(mtx);
416: } else {
417: tkn_mutex_adaptive_exit(mtx);
418: }
419: }
420:
421: int tkn_mutex_tryenter( __volatile kmutex_t *mtx )
422: {
423: int lock = 0;
424:
425: LockTKN();
426: if ( mtx->mtxid == 0 ) {
427: panic("tkn_mutex_tryenter: not initialized.\n");
428: }
429: UnlockTKN();
430:
431: if ( mtx->type == MUTEX_SPIN ) {
432: int s = tkn_spl_lock(mtx->ipl);
433: LockTKN();
434: if ( mtx_count++ == 0 ) {
435: mtx_oldspl = s;
436: }
437: } else {
438: LockTKN();
439: }
440:
441: ER ercd = lock_mutex(mtx, TMO_POL);
442:
443: lock = (ercd == E_TMOUT) ? 0 : 1;
444:
445: UnlockTKN();
446:
447: return lock;
448: }
449:
450: int tkn_mutex_owned(__volatile kmutex_t *mtx)
451: {
452: int owned = 0;
453: int index;
454: int no;
455:
456: LockTKN();
457:
458: if ( mtx->mtxid == 0 ) {
459: panic("tkn_mutex_owned: not initialized.\n");
460: }
461:
462: index = INDEX(mtx->mtxid);
463: no = NO(mtx->mtxid);
464:
465: owned = (mutex_mlock[index].owner_tid[no] == tk_get_tid()) ? 1 : 0;
466:
467: UnlockTKN();
468:
469: return owned;
470: }
471:
/*
 * Reference-counted mutex wrapper for tkn_mutex_obj_*().
 * mo_lock must stay the first member so a kmutex_t* can alias the
 * whole object.
 */
struct kmutexobj {
	kmutex_t mo_lock;	/* the actual mutex; must be first */
	unsigned int mo_refcnt;	/* reference count, updated atomically */
};
476:
477: kmutex_t* tkn_mutex_obj_alloc(kmutex_type_t type, int ipl)
478: {
479: struct kmutexobj *mo;
480: int error;
481:
482: mo = kmem_alloc(sizeof *mo, KM_SLEEP);
483: if ( mo == NULL ) {
484: return NULL;
485: }
486: bzero(mo, sizeof *mo);
487: error = tkn_mutex_init(&mo->mo_lock, type, ipl);
488: if ( error != 0 ) {
489: kmem_free(mo, sizeof(*mo));
490: return NULL;
491: }
492: mo->mo_refcnt = 1;
493:
494: return (kmutex_t *)mo;
495: }
496:
497: void tkn_mutex_obj_hold(__volatile kmutex_t *lock)
498: {
499: struct kmutexobj *mo = (struct kmutexobj *)lock;
500:
501: atomic_inc_uint(&mo->mo_refcnt);
502: }
503:
504: bool tkn_mutex_obj_free(__volatile kmutex_t *lock)
505: {
506: struct kmutexobj *mo = (struct kmutexobj *)lock;
507:
508: if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
509: return false;
510: }
511: tkn_mutex_destroy(&mo->mo_lock);
512: kmem_free(mo, sizeof(*mo));
513: return true;
514: }