/*
 *	Variable-size memory pool management
 */

#include "kernel.h"
#include "task.h"
#include "wait.h"
#include "check.h"
#include "limits.h"
#include <sys/rominfo.h>

#ifdef NUM_MPLID

EXPORT ID	max_mplid;	/* Maximum memory pool ID */

/*
 * Memory pool control block
 *	'areaque' links every memory area in address order.
 *	'freeque' links only the free areas, in ascending size order.
 */
typedef struct memorypool_control_block {
	QUEUE	wait_queue;	/* Memory pool wait queue */
	ID	mplid;		/* Memory pool ID */
	void	*exinf;		/* Extended information */
	ATR	mplatr;		/* Memory pool attribute */
	INT	mplsz;		/* Usable size of the memory pool */
	QUEUE	areaque;	/* Queue linking all memory areas */
	QUEUE	freeque;	/* Queue linking free memory areas */
#if USE_OBJECT_NAME
	UB	name[OBJECT_NAME_LENGTH];	/* Object name */
#endif
} MPLCB;

LOCAL MPLCB	*mplcb_table;	/* Memory pool control block table */
LOCAL QUEUE	free_mplcb;	/* Queue of unused control blocks */

#define get_mplcb(id)	( &mplcb_table[INDEX_MPL(id)] )


/*
 * Initialization of memory pool control block
 */
EXPORT ER memorypool_initialize( void )
{
	MPLCB	*mplcb, *end;
	W	n;

	/* Get the maximum memory pool ID from the system configuration */
	n = _tk_get_cfn(SCTAG_TMAXMPLID, &max_mplid, 1);
	if ( n < 1 || NUM_MPLID < 1 ) {
		return E_SYS;
	}

	/* Allocate the memory pool control block table */
	mplcb_table = Imalloc((UINT)NUM_MPLID * sizeof(MPLCB));
	if ( mplcb_table == NULL ) {
		return E_NOMEM;
	}

	/* Register every control block onto the free list */
	QueInit(&free_mplcb);
	end = mplcb_table + NUM_MPLID;
	for ( mplcb = mplcb_table; mplcb < end; mplcb++ ) {
		mplcb->mplid = 0;
		QueInsert(&mplcb->wait_queue, &free_mplcb);
	}

	return E_OK;
}


/*
 * Memory allocation management
 *	Block sizes are rounded up to multiples of ROUNDSZ so that every
 *	areaque node stays QUEUE-aligned and the low-order bit of its
 *	'prev' pointer can carry the area flag.
 */
#define ROUNDSZ		( sizeof(QUEUE) )	/* Size round-up unit */

/*
 * Minimum fragment size
 *	(a free area must be able to hold its two FreeQue entries)
 */
#define MIN_FRAGMENT	( sizeof(QUEUE) * 2 )

/*
 * Maximum allocatable size, rounded down to a multiple of ROUNDSZ
 */
#define MAX_ALLOCATE	( INT_MAX & ~(ROUNDSZ-1) )

/*
 * Round a requested size up to an allocatable block size
 */
Inline INT roundSize( INT sz )
{
	if ( sz < (INT)MIN_FRAGMENT ) {
		sz = (INT)MIN_FRAGMENT;
	}
	return (INT)(((UINT)sz + (ROUNDSZ-1)) & ~(ROUNDSZ-1));
}

/*
 * Area flag, kept in the low-order bit of an areaque 'prev' pointer
 */
#define AREA_USE	0x00000001U	/* Area is in use */
#define AREA_MASK	0x00000001U

#define setAreaFlag(q, f)	( (q)->prev = (QUEUE*)((UINT)(q)->prev | (UINT)(f)) )
#define clrAreaFlag(q, f)	( (q)->prev = (QUEUE*)((UINT)(q)->prev & ~(UINT)(f)) )
#define chkAreaFlag(q, f)	( ((UINT)(q)->prev & (UINT)(f)) != 0 )

#define Mask(x)		( (QUEUE*)((UINT)(x) & ~AREA_MASK) )
#define Assign(x, y)	( (x) = (QUEUE*)(((UINT)(x) & AREA_MASK) | (UINT)(y)) )

/*
 * Size of a memory area / of a free area
 */
#define AreaSize(aq)	( (VB*)(aq)->next - (VB*)((aq) + 1) )
#define FreeSize(fq)	( (VB*)((fq) - 1)->next - (VB*)(fq) )

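/*
 * Layout of a memory area:
 *	aq	: areaque node (QUEUE); the low-order bit of 'prev'
 *		  carries the AREA_USE flag
 *	aq + 1	: start of the user data for an allocated area.
 *		  For a free area, aq + 1 is its FreeQue node and
 *		  aq + 2 heads the queue of other free areas of the
 *		  same size (hence MIN_FRAGMENT = 2 QUEUEs).
 *
 * AreaSize(aq) and FreeSize(aq + 1) both give the usable size of the
 * area, i.e. the distance from aq + 1 to the next areaque node.
 */
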
/*
 * Largest free area size (the FreeQue is kept in ascending size order)
 */
Inline INT MaxFreeSize( MPLCB *mplcb )
{
	if ( isQueEmpty(&mplcb->freeque) ) {
		return 0;
	}
	return FreeSize(mplcb->freeque.prev);
}

/*
 * FreeQue search
 *	Return the smallest free area whose size is at least 'blksz'.
 *	If there is none, return &mplcb->freeque.
 */
LOCAL QUEUE* searchFreeArea( MPLCB *mplcb, INT blksz )
{
	QUEUE *q = &mplcb->freeque;

	/* Search from the tail for large requests, from the head for small ones */
	if ( blksz > mplcb->mplsz / 4 ) {
		/* Search from the largest area downwards */
		INT fsz = 0;
		while ( (q = q->prev) != &mplcb->freeque ) {
			fsz = FreeSize(q);
			if ( fsz <= blksz ) {
				return ( fsz < blksz )? q->next: q;
			}
		}
		return ( fsz >= blksz )? q->next: q;
	} else {
		/* Search from the smallest area upwards */
		while ( (q = q->next) != &mplcb->freeque ) {
			if ( FreeSize(q) >= blksz ) {
				break;
			}
		}
		return q;
	}
}
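
/*
 * Note: the FreeQue is kept in ascending size order (see appendFreeArea),
 * so requests larger than mplsz/4 are searched from the tail (large
 * areas) and smaller requests from the head.  Either way the result is
 * the smallest free area of at least 'blksz' bytes, or &mplcb->freeque
 * if no such area exists.
 */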

/*
 * Register a free area onto the FreeQue
 *	The FreeQue has two levels: the main queue links areas of
 *	different sizes in ascending size order, and each entry on it
 *	heads a second queue (at aq + 2) linking the other free areas
 *	of exactly the same size.
 *
 *	freeque
 *	 |
 *	 +--> [ small area ] --> [ larger area ] --> ...
 *	          |                   |
 *	     [ same size ]       [ same size ]
 *	          ...                 ...
 */
LOCAL void appendFreeArea( MPLCB *mplcb, QUEUE *aq )
{
	QUEUE	*fq;
	INT	size = AreaSize(aq);

	/* Find the insertion position: the smallest free area that is
	   the same size as, or larger than, this one */
	fq = searchFreeArea(mplcb, size);

	/* Register the area */
	clrAreaFlag(aq, AREA_USE);
	if ( fq != &mplcb->freeque && FreeSize(fq) == size ) {
		QueInsert(aq + 1, fq + 1);	/* Chain onto the same-size queue */
	} else {
		QueInsert(aq + 1, fq);		/* Insert into the main queue */
	}
	QueInit(aq + 2);
}

/*
 * Remove a free area from the FreeQue
 */
LOCAL void removeFreeQue( QUEUE *fq )
{
	if ( !isQueEmpty(fq + 1) ) {
		/* Promote the next area of the same size */
		QUEUE *nq = (fq + 1)->next;

		QueRemove(fq + 1);
		QueInsert(nq + 1, nq);
		QueRemove(nq);
		QueInsert(nq, fq);
	}

	QueRemove(fq);
}
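
/*
 * Note: if the removed entry is the representative of its size on the
 * main FreeQue and other areas of the same size exist, the next
 * same-size area 'nq' is promoted: it takes over the position on the
 * main queue and nq + 1 becomes the head of the remaining same-size
 * queue.  Otherwise a plain QueRemove() is sufficient.
 */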

/*
 * Insert 'ent' into the areaque just after 'que' (address order)
 */
LOCAL void insertAreaQue( QUEUE *que, QUEUE *ent )
{
	ent->prev = que;
	ent->next = que->next;
	Assign(que->next->prev, ent);
	que->next = ent;
}

/*
 * Remove 'aq' from the areaque
 */
LOCAL void removeAreaQue( QUEUE *aq )
{
	Mask(aq->prev)->next = aq->next;
	Assign(aq->next->prev, Mask(aq->prev));
}
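
/*
 * Note: because the low-order bit of each areaque 'prev' pointer holds
 * the area flag, Mask() must strip the flag before the pointer is
 * followed, and Assign() updates a 'prev' pointer while preserving the
 * flag bit already stored in it.
 */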

/*
 * Get a memory block
 *	'blksz' must already be rounded by roundSize().
 *	Returns NULL if no free area is large enough.
 */
LOCAL void* get_blk( MPLCB *mplcb, INT blksz )
{
	QUEUE	*q, *aq;

	/* Find a free area */
	q = searchFreeArea(mplcb, blksz);
	if ( q == &mplcb->freeque ) {
		return NULL;
	}

	/* Remove it from the FreeQue */
	removeFreeQue(q);
	aq = q - 1;

	/* If the area is large enough, split it and
	   return the remainder to the FreeQue */
	if ( AreaSize(aq) - (UINT)blksz >= MIN_FRAGMENT + sizeof(QUEUE) ) {
		/* Create a new area boundary behind the allocated block */
		q = (QUEUE*)((VB*)(aq + 1) + blksz);
		insertAreaQue(aq, q);

		/* Register the remainder as a free area */
		appendFreeArea(mplcb, q);
	}
	setAreaFlag(aq, AREA_USE);

	return (void*)(aq + 1);
}
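
/*
 * Note: the area found is split only when the remainder could still
 * form a valid free area (one QUEUE for the new areaque node plus
 * MIN_FRAGMENT of payload); otherwise the whole area is handed out.
 * The address returned to the caller is aq + 1, just past the areaque
 * node of the allocated area.
 */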

/*
 * Release a memory block
 */
LOCAL ER rel_blk( MPLCB *mplcb, void *blk )
{
	QUEUE	*aq;

	aq = (QUEUE*)blk - 1;

#if CHK_PAR
	if ( !chkAreaFlag(aq, AREA_USE) ) {
		return E_PAR;
	}
#endif
	clrAreaFlag(aq, AREA_USE);

	if ( !chkAreaFlag(aq->next, AREA_USE) ) {
		/* Merge with the following free area */
		removeFreeQue(aq->next + 1);
		removeAreaQue(aq->next);
	}
	if ( !chkAreaFlag(aq->prev, AREA_USE) ) {
		/* Merge with the preceding free area */
		aq = aq->prev;
		removeFreeQue(aq + 1);
		removeAreaQue(aq->next);
	}

	/* Register the (possibly merged) area onto the FreeQue */
	appendFreeArea(mplcb, aq);

	return E_OK;
}
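
/*
 * Note: a released block is coalesced with a free neighbour on either
 * side before being registered: the following area is merged first,
 * and if the preceding area is also free, 'aq' moves back to it so the
 * final appendFreeArea() registers the fully merged area.
 */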

/*
 * Initialize the memory area of a memory pool
 */
LOCAL void init_mempool( MPLCB *mplcb, void *mempool, INT mempsz )
{
	QUEUE	*tp, *ep;

	QueInit(&mplcb->areaque);
	QueInit(&mplcb->freeque);

	/* Register the whole pool as one area, with an end marker */
	tp = (QUEUE*)mempool;
	ep = (QUEUE*)((VB*)mempool + mempsz) - 1;
	insertAreaQue(&mplcb->areaque, ep);
	insertAreaQue(&mplcb->areaque, tp);

	/* Mark the control block node and the end marker as 'in use' */
	setAreaFlag(&mplcb->areaque, AREA_USE);
	setAreaFlag(ep, AREA_USE);

	/* Register the whole area as free */
	appendFreeArea(mplcb, tp);
}
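
/*
 * Resulting areaque layout (address order):
 *
 *	mplcb->areaque --> tp (free area of 'mplsz' bytes) --> ep
 *	(marked in use)                       (end marker, marked in use)
 *
 * Marking the control block node and 'ep' as in use keeps rel_blk()
 * from merging past either end of the pool.  The extra 2 * sizeof(QUEUE)
 * allocated by _tk_cre_mpl() pays for the 'tp' node and the 'ep' marker.
 */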


/*
 * Wake up tasks waiting on the memory pool,
 * as long as their requests can now be satisfied
 */
LOCAL void mpl_wakeup( MPLCB *mplcb )
{
	TCB	*top;
	void	*blk;
	INT	blksz;

	while ( !isQueEmpty(&mplcb->wait_queue) ) {
		top = (TCB*)mplcb->wait_queue.next;
		blksz = top->winfo.mpl.blksz;

		/* Stop if the first waiter's request cannot be satisfied */
		if ( blksz > MaxFreeSize(mplcb) ) {
			break;
		}

		/* Allocate the block and hand it to the waiting task */
		blk = get_blk(mplcb, blksz);
		*top->winfo.mpl.p_blk = blk;

		/* Release the task from its wait */
		wait_release_ok(top);
	}
}
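
/*
 * Note: the wait queue is served strictly from the head, so a large
 * request at the head blocks smaller requests queued behind it.
 */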

/*
 * Processing when the priority of a waiting task changes
 *	Reorder the wait queue if necessary, then retry allocation,
 *	since a different task may now be at the head of the queue.
 */
LOCAL void mpl_chg_pri( TCB *tcb, INT oldpri )
{
	MPLCB	*mplcb;

	mplcb = get_mplcb(tcb->wid);
	if ( oldpri >= 0 ) {
		/* Reorder the task within the wait queue */
		gcb_change_priority((GCB*)mplcb, tcb);
	}

	/* The head of the wait queue may have changed: retry allocation */
	mpl_wakeup(mplcb);
}

/*
 * Processing when a task is released from waiting
 */
LOCAL void mpl_rel_wai( TCB *tcb )
{
	mpl_chg_pri(tcb, -1);
}

/*
 * Definition of memory pool wait specification
 */
LOCAL CONST WSPEC wspec_mpl_tfifo = { TTW_MPL, NULL, mpl_rel_wai };
LOCAL CONST WSPEC wspec_mpl_tpri = { TTW_MPL, mpl_chg_pri, mpl_rel_wai };


/*
 * Create memory pool
 */
SYSCALL ID _tk_cre_mpl( CONST T_CMPL *pk_cmpl )
{
#if CHK_RSATR
	const ATR VALID_MPLATR = {
		 TA_TPRI
		|TA_RNG3
		|TA_NODISWAI
#if USE_OBJECT_NAME
		|TA_DSNAME
#endif
	};
#endif
	MPLCB	*mplcb;
	ID	mplid;
	INT	mplsz;
	void	*mempool;
	ER	ercd;

	CHECK_RSATR(pk_cmpl->mplatr, VALID_MPLATR);
	CHECK_PAR(pk_cmpl->mplsz > 0 && pk_cmpl->mplsz <= MAX_ALLOCATE);
	CHECK_DISPATCH();

	mplsz = roundSize(pk_cmpl->mplsz);

	/* Allocate the pool memory
	   (including the area node and the end marker) */
	mempool = IAmalloc((UINT)mplsz + sizeof(QUEUE)*2, pk_cmpl->mplatr);
	if ( mempool == NULL ) {
		return E_NOMEM;
	}

	BEGIN_CRITICAL_SECTION;
	/* Get a control block from the free list */
	mplcb = (MPLCB*)QueRemoveNext(&free_mplcb);
	if ( mplcb == NULL ) {
		ercd = E_LIMIT;
	} else {
		mplid = ID_MPL(mplcb - mplcb_table);

		/* Initialize the control block */
		QueInit(&mplcb->wait_queue);
		mplcb->mplid = mplid;
		mplcb->exinf = pk_cmpl->exinf;
		mplcb->mplatr = pk_cmpl->mplatr;
		mplcb->mplsz = mplsz;
#if USE_OBJECT_NAME
		if ( (pk_cmpl->mplatr & TA_DSNAME) != 0 ) {
			STRNCPY((char*)mplcb->name, (char*)pk_cmpl->dsname, OBJECT_NAME_LENGTH);
		}
#endif
		/* Initialize the memory area */
		init_mempool(mplcb, mempool, mplsz + (INT)sizeof(QUEUE)*2);

		ercd = mplid;
	}
	END_CRITICAL_SECTION;

	if ( ercd < E_OK ) {
		IAfree(mempool, pk_cmpl->mplatr);
	}

	return ercd;
}

/*
 * Delete memory pool
 */
SYSCALL ER _tk_del_mpl( ID mplid )
{
	MPLCB	*mplcb;
	void	*mempool = NULL;
	ATR	memattr = 0;
	ER	ercd = E_OK;

	CHECK_MPLID(mplid);
	CHECK_DISPATCH();

	mplcb = get_mplcb(mplid);

	BEGIN_CRITICAL_SECTION;
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
	} else {
		mempool = mplcb->areaque.next;	/* Start address of the pool memory */
		memattr = mplcb->mplatr;

		/* Release all waiting tasks */
		wait_delete(&mplcb->wait_queue);

		/* Return the control block to the free list */
		QueInsert(&mplcb->wait_queue, &free_mplcb);
		mplcb->mplid = 0;
	}
	END_CRITICAL_SECTION;

	if ( ercd == E_OK ) {
		IAfree(mempool, memattr);
	}

	return ercd;
}

/*
 * Get variable-size memory block
 */
SYSCALL ER _tk_get_mpl( ID mplid, INT blksz, void **p_blk, TMO tmout )
{
	return _tk_get_mpl_u(mplid, blksz, p_blk, to_usec_tmo(tmout));
}

SYSCALL ER _tk_get_mpl_u( ID mplid, INT blksz, void **p_blk, TMO_U tmout )
{
	MPLCB	*mplcb;
	void	*blk = NULL;
	ER	ercd = E_OK;

	CHECK_MPLID(mplid);
	CHECK_PAR(blksz > 0 && blksz <= MAX_ALLOCATE);
	CHECK_TMOUT(tmout);
	CHECK_DISPATCH();

	mplcb = get_mplcb(mplid);
	blksz = roundSize(blksz);

	BEGIN_CRITICAL_SECTION;
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
		goto error_exit;
	}

#if CHK_PAR
	if ( blksz > mplcb->mplsz ) {
		ercd = E_PAR;
		goto error_exit;
	}
#endif

	/* Check whether waiting is disabled for this object */
	if ( is_diswai((GCB*)mplcb, ctxtsk, TTW_MPL) ) {
		ercd = E_DISWAI;
		goto error_exit;
	}

	if ( gcb_top_of_wait_queue((GCB*)mplcb, ctxtsk) == ctxtsk
	  && (blk = get_blk(mplcb, blksz)) != NULL ) {
		/* Allocation succeeded immediately */
		*p_blk = blk;
	} else {
		/* Wait for a memory block to become available */
		ctxtsk->wspec = ( (mplcb->mplatr & TA_TPRI) != 0 )?
					&wspec_mpl_tpri: &wspec_mpl_tfifo;
		ctxtsk->wercd = &ercd;
		ctxtsk->winfo.mpl.blksz = blksz;
		ctxtsk->winfo.mpl.p_blk = p_blk;
		gcb_make_wait_with_diswai((GCB*)mplcb, tmout);
	}

    error_exit:
	END_CRITICAL_SECTION;

	return ercd;
}

/*
 * Return variable-size memory block
 */
SYSCALL ER _tk_rel_mpl( ID mplid, void *blk )
{
	MPLCB	*mplcb;
	ER	ercd = E_OK;

	CHECK_MPLID(mplid);
	CHECK_DISPATCH();

	mplcb = get_mplcb(mplid);

	BEGIN_CRITICAL_SECTION;
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
		goto error_exit;
	}
#if CHK_PAR
	if ( (B*)blk < (B*)mplcb->areaque.next || (B*)blk > (B*)mplcb->areaque.prev ) {
		ercd = E_PAR;	/* Address is outside the memory pool */
		goto error_exit;
	}
#endif

	/* Release the memory block */
	ercd = rel_blk(mplcb, blk);
	if ( ercd < E_OK ) {
		goto error_exit;
	}

	/* Assign the freed memory to any waiting tasks */
	mpl_wakeup(mplcb);

    error_exit:
	END_CRITICAL_SECTION;

	return ercd;
}

/*
 * Refer memory pool state
 */
SYSCALL ER _tk_ref_mpl( ID mplid, T_RMPL *pk_rmpl )
{
	MPLCB	*mplcb;
	QUEUE	*fq, *q;
	INT	frsz, blksz;
	ER	ercd = E_OK;

	CHECK_MPLID(mplid);
	CHECK_DISPATCH();

	mplcb = get_mplcb(mplid);

	BEGIN_CRITICAL_SECTION;
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
	} else {
		pk_rmpl->exinf = mplcb->exinf;
		pk_rmpl->wtsk = wait_tskid(&mplcb->wait_queue);

		/* Total free size: each main FreeQue entry contributes its own
		   size, plus once more per same-size area chained below it */
		frsz = 0;
		for ( fq = mplcb->freeque.next; fq != &mplcb->freeque; fq = fq->next ) {
			blksz = FreeSize(fq);
			frsz += blksz;
			for ( q = (fq+1)->next; q != (fq+1); q = q->next ) {
				frsz += blksz;
			}
		}
		pk_rmpl->frsz = frsz;
		pk_rmpl->maxsz = MaxFreeSize(mplcb);
	}
	END_CRITICAL_SECTION;

	return ercd;
}


/*
 * Debugger support functions
 */
#if USE_DBGSPT

/*
 * Get object name from control block
 */
#if USE_OBJECT_NAME
EXPORT ER memorypool_getname(ID id, UB **name)
{
	MPLCB	*mplcb;
	ER	ercd = E_OK;

	CHECK_MPLID(id);

	BEGIN_DISABLE_INTERRUPT;
	mplcb = get_mplcb(id);
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
		goto error_exit;
	}
	if ( (mplcb->mplatr & TA_DSNAME) == 0 ) {
		ercd = E_OBJ;
		goto error_exit;
	}
	*name = mplcb->name;

    error_exit:
	END_DISABLE_INTERRUPT;

	return ercd;
}
#endif /* USE_OBJECT_NAME */

/*
 * Refer memory pool object ID list
 */
SYSCALL INT _td_lst_mpl( ID list[], INT nent )
{
	MPLCB	*mplcb, *end;
	INT	n = 0;

	BEGIN_DISABLE_INTERRUPT;
	end = mplcb_table + NUM_MPLID;
	for ( mplcb = mplcb_table; mplcb < end; mplcb++ ) {
		/* Skip unregistered control blocks */
		if ( mplcb->mplid == 0 ) {
			continue;
		}

		if ( n++ < nent ) {
			*list++ = ID_MPL(mplcb - mplcb_table);
		}
	}
	END_DISABLE_INTERRUPT;

	return n;
}

/*
 * Refer memory pool state
 */
SYSCALL ER _td_ref_mpl( ID mplid, TD_RMPL *pk_rmpl )
{
	MPLCB	*mplcb;
	QUEUE	*fq, *q;
	INT	frsz, blksz;
	ER	ercd = E_OK;

	CHECK_MPLID(mplid);

	mplcb = get_mplcb(mplid);

	BEGIN_DISABLE_INTERRUPT;
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
	} else {
		pk_rmpl->exinf = mplcb->exinf;
		pk_rmpl->wtsk = wait_tskid(&mplcb->wait_queue);

		/* Total free size, counting same-size areas as in _tk_ref_mpl() */
		frsz = 0;
		for ( fq = mplcb->freeque.next; fq != &mplcb->freeque; fq = fq->next ) {
			blksz = FreeSize(fq);
			frsz += blksz;
			for ( q = (fq+1)->next; q != (fq+1); q = q->next ) {
				frsz += blksz;
			}
		}
		pk_rmpl->frsz = frsz;
		pk_rmpl->maxsz = MaxFreeSize(mplcb);
	}
	END_DISABLE_INTERRUPT;

	return ercd;
}

/*
 * Refer memory pool wait queue
 */
SYSCALL INT _td_mpl_que( ID mplid, ID list[], INT nent )
{
	MPLCB	*mplcb;
	QUEUE	*q;
	ER	ercd = E_OK;

	CHECK_MPLID(mplid);

	mplcb = get_mplcb(mplid);

	BEGIN_DISABLE_INTERRUPT;
	if ( mplcb->mplid == 0 ) {
		ercd = E_NOEXS;
	} else {
		INT n = 0;
		for ( q = mplcb->wait_queue.next; q != &mplcb->wait_queue; q = q->next ) {
			if ( n++ < nent ) {
				*list++ = ((TCB*)q)->tskid;
			}
		}
		ercd = n;	/* Number of waiting tasks */
	}
	END_DISABLE_INTERRUPT;

	return ercd;
}

#endif /* USE_DBGSPT */
#endif /* NUM_MPLID */