gonzui


Format: Advanced Search

mtkernel_3/kernel/tkernel/mempool.c — bare source | permlink (0.04 seconds)

Search this content:

    1: /*
    2:  *----------------------------------------------------------------------
    3:  *    micro T-Kernel 3.00.00
    4:  *
    5:  *    Copyright (C) 2006-2019 by Ken Sakamura.
    6:  *    This software is distributed under the T-License 2.1.
    7:  *----------------------------------------------------------------------
    8:  *
    9:  *    Released by TRON Forum(http://www.tron.org) at 2019/12/11.
   10:  *
   11:  *----------------------------------------------------------------------
   12:  */
   13: 
   14: /*
   15:  *      mempool.c
   16:  *      Variable Size Memory Pool
   17:  */
   18: 
   19: #include "kernel.h"
   20: #include "wait.h"
   21: #include "check.h"
   22: #include "memory.h"
   23: #include "mempool.h"
   24: 
   25: #if USE_MEMORYPOOL
   26: 
   27: 
   28: Noinit(EXPORT MPLCB knl_mplcb_table[NUM_MPLID]);        /* Variable size memory pool control block */
   29: Noinit(EXPORT QUEUE knl_free_mplcb);    /* FreeQue */
   30: 
   31: 
   32: /*
   33:  * Initialization of variable size memory pool control block
   34:  */
   35: EXPORT ER knl_memorypool_initialize( void )
   36: {
   37:         MPLCB  *mplcb, *end;
   38: 
   39:         if ( NUM_MPLID < 1 ) {
   40:                 return E_SYS;
   41:         }
   42: 
   43:         /* Register all control blocks onto FreeQue */
   44:         QueInit(&knl_free_mplcb);
   45:         end = knl_mplcb_table + NUM_MPLID;
   46:         for ( mplcb = knl_mplcb_table; mplcb < end; mplcb++ ) {
   47:                 mplcb->mplid = 0;
   48:                 QueInsert(&mplcb->wait_queue, &knl_free_mplcb);
   49:         }
   50: 
   51:         return E_OK;
   52: }
   53: 
   54: /* ------------------------------------------------------------------------ */
   55: 
/*
 * Registration of free area on FreeQue
 *   Specialized version for merging with top/end area
 *
 *   'aq' is an AreaQue entry whose free space touches the top or the
 *   end of the pool.  The first and last areas have no physical header
 *   inside the pool memory (their AreaQue entries are the MPLCB members
 *   'areaque'/'areaque_end'), so the usable range [top, end) must be
 *   derived specially instead of the plain aq+1 .. aq->next.
 */
LOCAL void knl_appendFreeAreaBound( MPLCB *mplcb, QUEUE *aq )
{
        /* The MPLCB is laid out so that &mplcb->mplsz can be viewed as an
           IMACB by the generic knl_*FreeArea helpers. */
        IMACB  *imacb = (IMACB*)&(mplcb->mplsz);
        QUEUE  *fq, *top, *end;
        W      size;

        /* Start of free space: the pool top itself when 'aq' is the
           control-block sentinel, otherwise just past the area header. */
        if ( aq == &(mplcb->areaque) ) {
                top = (QUEUE*)mplcb->mempool;
        } else {
                top = aq + 1;
        }

        /* End of free space: the pool end when the successor is the end
           sentinel, otherwise the next area header. */
        if ( aq->next == &(mplcb->areaque_end) ) {
                end = (QUEUE*)((VB*)mplcb->mempool + mplcb->mplsz);
        } else {
                end = aq->next;
        }

        size = (W)((VB*)end - (VB*)top);

        /* Registration position search */
        /*  Search the free area whose size is equal to 'blksz',
         *  or larger than 'blksz' but closest.
         *  If it does not exist, return '&imacb->freeque'.
         */
        fq = knl_searchFreeArea(imacb, size);

        /* Register */
        clrAreaFlag(aq, AREA_USE);
        if ( fq != &imacb->freeque && FreeSize(fq) == size ) {
                /* FreeQue Same size: chain 'top' into the sublist of
                   same-sized areas hanging off 'fq' (the second QUEUE of
                   a free block heads that sublist). */
                (top + 1)->next = (fq + 1)->next;
                (fq  + 1)->next = top + 1;
                (top + 1)->prev = fq + 1;
                if( (top + 1)->next != NULL ) {
                        (top + 1)->next->prev = top + 1;
                }
                /* 'top' is not linked into the size-ordered FreeQue itself */
                top->next = NULL;
        } else {
                /* FreeQue Size order: insert before 'fq'; the second QUEUE
                   records the block size in 'prev' (read back by FreeSize). */
                QueInsert(top, fq);
                (top + 1)->next = NULL;
                (top + 1)->prev = (QUEUE*)size;
        }
}
  105: 
/*
 * Get memory block 
 *      'blksz' must be larger than minimum fragment size
 *      and adjusted by ROUNDSZ unit.
 *      Returns a pointer to the allocated block, or NULL when no
 *      sufficiently large free area exists.
 */
LOCAL void *knl_get_blk( MPLCB *mplcb, W blksz )
{
        QUEUE  *q, *aq, *aq2;
        IMACB* imacb = (IMACB*)&(mplcb->mplsz);

        /* Search FreeQue for a free area of at least 'blksz' bytes */
        q = knl_searchFreeArea(imacb, blksz);
        if ( q == &(imacb->freeque) ) {
                return NULL;    /* no free area large enough */
        }

        /* remove free area from FreeQue */
        knl_removeFreeQue(q);
        /* The AreaQue header precedes the block, except for the pool-top
           block whose header is the MPLCB sentinel 'areaque'. */
        aq = ((void *)q == mplcb->mempool) ? &(mplcb->areaque) :  q - 1;

        /* If there is a fragment smaller than the minimum fragment size,
           allocate them together */
        if ( FreeSize(q) - (UW)blksz >= MIN_FRAGMENT + sizeof(QUEUE) ) {

                /* Divide the area into 2. */
                aq2 = (QUEUE*)((VB*)q + blksz);
                knl_insertAreaQue(aq, aq2);

                /* Register the remaining area onto FreeQue */
                if ( aq2->next == &(mplcb->areaque_end) ) {
                        /* remainder touches the pool end: bound special case */
                        knl_appendFreeAreaBound(mplcb, aq2);
                } else {
                        knl_appendFreeArea(imacb, aq2);
                }
        }
        setAreaFlag(aq, AREA_USE);

        return (void *)q;
}
  145: 
/*
 * Free memory block 
 *      Returns the block to the pool, coalescing it with adjacent free
 *      areas on both sides.  Returns E_PAR (when CHK_PAR) if 'blk' is
 *      not currently an allocated block, E_OK otherwise.
 */
LOCAL ER knl_rel_blk( MPLCB *mplcb, void *blk )
{
        QUEUE  *aq;
        IMACB* imacb = (IMACB*)&(mplcb->mplsz);

        /* The pool-top block has no in-pool header; its AreaQue entry is
           the MPLCB sentinel 'areaque'. */
        aq = (blk == mplcb->mempool) ? &(mplcb->areaque) : (QUEUE*)blk - 1;

#if CHK_PAR
        if ( !chkAreaFlag(aq, AREA_USE) ) {
                return E_PAR;   /* not an in-use block (e.g. double free) */
        }
#endif
        clrAreaFlag(aq, AREA_USE);

        if ( !chkAreaFlag(aq->next, AREA_USE) ) {
                /* Merge to the next area */
                knl_removeFreeQue(aq->next + 1);
                knl_removeAreaQue(aq->next);
        }
        if ( !chkAreaFlag(aq->prev, AREA_USE) ) {
                /* Merge to the previous area */
                QUEUE *fq;
                aq = aq->prev;
                /* FreeQue entry of the previous area: the pool top itself
                   when 'aq' is the sentinel, otherwise just past its header */
                fq = (aq == &(mplcb->areaque)) ? (QUEUE*)(mplcb->mempool) : aq + 1;

                knl_removeFreeQue(fq);
                knl_removeAreaQue(aq->next);
        }

        /* Register free area onto FreeQue */
        if ( aq == &(mplcb->areaque) || aq->next == &(mplcb->areaque_end) ) {
                /* merged area touches pool top or end: bound special case */
                knl_appendFreeAreaBound(mplcb, aq);
        } else {
                knl_appendFreeArea(imacb, aq);
        }

        return E_OK;
}
  187: 
  188: /* ------------------------------------------------------------------------ */
  189: 
  190: /*
  191:  * Allocate memory and release wait task,
  192:  * as long as there are enough free memory.
  193:  */
  194: EXPORT void knl_mpl_wakeup( MPLCB *mplcb )
  195: {
  196:         TCB    *top;
  197:         void   *blk;
  198:         W      blksz;
  199: 
  200:         while ( !isQueEmpty(&mplcb->wait_queue) ) {
  201:                 top = (TCB*)mplcb->wait_queue.next;
  202:                 blksz = top->winfo.mpl.blksz;
  203: 
  204:                 /* Check free space */
  205:                 if ( blksz > knl_MaxFreeSize(mplcb) ) {
  206:                         break;
  207:                 }
  208: 
  209:                 /* Get memory block */
  210:                 blk = knl_get_blk(mplcb, blksz);
  211:                 *top->winfo.mpl.p_blk = blk;
  212: 
  213:                 /* Release wait task */
  214:                 knl_wait_release_ok(top);
  215:         }
  216: }
  217: 
  218: 
/*
 * Memory pool initial setting
 *      Turns the whole buffer into a single free area.
 *      Call order matters: queues must exist before the area is
 *      inserted, and the end sentinel must be marked in-use before
 *      the free area is registered.
 */
LOCAL void init_mempool( MPLCB *mplcb )
{
        QueInit(&mplcb->areaque);
        QueInit(&mplcb->freeque);

        /* Register onto AreaQue */
        knl_insertAreaQue(&mplcb->areaque, &mplcb->areaque_end);

        /* Set AREA_USE for locations that must not be free area */
        setAreaFlag(&mplcb->areaque_end, AREA_USE);

        /* Register onto FreeQue (whole pool is one bound free area) */
        knl_appendFreeAreaBound(mplcb, &mplcb->areaque);
}
  236: 
/*
 * Create variable size memory pool 
 *      Returns the new pool ID on success, or a negative error code:
 *      E_PAR (bad size/attribute), E_NOMEM (allocation failed),
 *      E_LIMIT (no free control block).
 */
SYSCALL ID tk_cre_mpl( CONST T_CMPL *pk_cmpl )
{
#if CHK_RSATR
        /* Set of attribute bits accepted by this implementation */
        const ATR VALID_MPLATR = {
                 TA_TPRI
                |TA_RNG3
                |TA_USERBUF
#if USE_OBJECT_NAME
                |TA_DSNAME
#endif
        };
#endif
        MPLCB  *mplcb;
        ID     mplid;
        W      mplsz;
        void   *mempool;
        ER     ercd;

        CHECK_RSATR(pk_cmpl->mplatr, VALID_MPLATR);
        CHECK_PAR(pk_cmpl->mplsz > 0 && pk_cmpl->mplsz <= MAX_ALLOCATE);
#if !USE_IMALLOC
        /* TA_USERBUF must be specified if configured in no Imalloc */
        CHECK_PAR((pk_cmpl->mplatr & TA_USERBUF) != 0);
#endif
        CHECK_DISPATCH();

        mplsz = roundSize(pk_cmpl->mplsz);

#if USE_IMALLOC
        if ( (pk_cmpl->mplatr & TA_USERBUF) != 0 ) {
                /* Size of user buffer must be multiples of sizeof(QUEUE)
                        and larger than sizeof(QUEUE)*2 */
                if ( mplsz != pk_cmpl->mplsz ) {
                        return E_PAR;
                }
                /* Use user buffer */
                mempool = pk_cmpl->bufptr;
        } else {
                /* Allocate memory for memory pool */
                mempool = knl_Imalloc((UW)mplsz);
                if ( mempool == NULL ) {
                        return E_NOMEM;
                }
        }
#else
        /* Size of user buffer must be multiples of sizeof(QUEUE)
                and larger than sizeof(QUEUE)*2 */
        if ( mplsz != pk_cmpl->mplsz ) {
                return E_PAR;
        }
        /* Use user buffer */
        mempool = pk_cmpl->bufptr;
#endif

        BEGIN_CRITICAL_SECTION;
        /* Get control block from FreeQue */
        mplcb = (MPLCB*)QueRemoveNext(&knl_free_mplcb);
        if ( mplcb == NULL ) {
                ercd = E_LIMIT;
        } else {
                mplid = ID_MPL(mplcb - knl_mplcb_table);

                /* Initialize control block */
                QueInit(&mplcb->wait_queue);
                mplcb->mplid  = mplid;
                mplcb->exinf  = pk_cmpl->exinf;
                mplcb->mplatr = pk_cmpl->mplatr;
                mplcb->mplsz  = mplsz;
#if USE_OBJECT_NAME
                if ( (pk_cmpl->mplatr & TA_DSNAME) != 0 ) {
                        knl_strncpy((char*)mplcb->name, (char*)pk_cmpl->dsname, OBJECT_NAME_LENGTH);
                }
#endif

                mplcb->mempool = mempool;

                /* Initialize memory pool */
                init_mempool(mplcb);

                /* ID doubles as the success return value */
                ercd = mplid;
        }
        END_CRITICAL_SECTION;

#if USE_IMALLOC
        /* On failure, release the kernel-allocated buffer (user buffers
           are owned by the caller and must not be freed here) */
        if ( (ercd < E_OK) && ((pk_cmpl->mplatr & TA_USERBUF) == 0) ) {
                knl_Ifree(mempool);
        }
#endif

        return ercd;
}
  331: 
  332: #ifdef USE_FUNC_TK_DEL_MPL
/*
 * Delete variable size memory pool 
 *      Releases all waiting tasks with E_DLT and returns the control
 *      block to the FreeQue.  Returns E_NOEXS if the pool does not
 *      exist, E_OK otherwise.
 */
SYSCALL ER tk_del_mpl( ID mplid )
{
        MPLCB  *mplcb;
        void   *mempool = NULL;
        ATR    memattr = 0;
        ER     ercd = E_OK;

        CHECK_MPLID(mplid);
        CHECK_DISPATCH();

        mplcb = get_mplcb(mplid);

        BEGIN_CRITICAL_SECTION;
        if ( mplcb->mplid == 0 ) {
                ercd = E_NOEXS;
        } else {
                /* Remember buffer/attributes for the deferred free below */
                mempool = mplcb->mempool;
                memattr = mplcb->mplatr;

                /* Free wait state of task (E_DLT) */
                knl_wait_delete(&mplcb->wait_queue);

                /* Return to FreeQue */
                QueInsert(&mplcb->wait_queue, &knl_free_mplcb);
                mplcb->mplid = 0;
        }
        END_CRITICAL_SECTION;

#if USE_IMALLOC
        /* Free the buffer outside the critical section; user-supplied
           buffers (TA_USERBUF) are owned by the caller */
        if ( (ercd == E_OK) && ((memattr & TA_USERBUF) == 0) ) {
                knl_Ifree(mempool);
        }
#endif

        return ercd;
}
  372: #endif /* USE_FUNC_TK_DEL_MPL */
  373: 
  374: /*
  375:  * Processing if the priority of wait task changes.
  376:  *      You need to execute with interrupt disable.
  377:  */
  378: LOCAL void mpl_chg_pri( TCB *tcb, INT oldpri )
  379: {
  380:         MPLCB  *mplcb;
  381: 
  382:         mplcb = get_mplcb(tcb->wid);
  383:         if ( oldpri >= 0 ) {
  384:                 /* Reorder wait line */
  385:                 knl_gcb_change_priority((GCB*)mplcb, tcb);
  386:         }
  387: 
  388:         /* From the new top task of a wait queue, free the assign
  389:            wait of memory blocks as much as possible. */
  390:         knl_mpl_wakeup(mplcb);
  391: }
  392: 
/*
 * Processing if the wait task is freed
 *      Delegates to mpl_chg_pri with oldpri = -1, which skips the
 *      queue reordering and only re-runs the wakeup processing.
 */
LOCAL void mpl_rel_wai( TCB *tcb )
{
        mpl_chg_pri(tcb, -1);
}
  400: 
/*
 * Definition of variable size memory pool wait specification
 *      Fields: { wait factor, priority-change hook, release-wait hook }.
 *      The FIFO variant has no priority-change hook.
 */
LOCAL CONST WSPEC knl_wspec_mpl_tfifo = { TTW_MPL, NULL,        mpl_rel_wai };
LOCAL CONST WSPEC knl_wspec_mpl_tpri  = { TTW_MPL, mpl_chg_pri, mpl_rel_wai };
  406: 
/*
 * Get variable size memory block 
 *      On success the block pointer is stored in *p_blk.  If no block
 *      is available (or a higher-priority task is already waiting),
 *      the caller waits up to 'tmout'.  Errors: E_NOEXS, E_PAR,
 *      plus wait-related codes delivered through wercd.
 */
SYSCALL ER tk_get_mpl( ID mplid, SZ blksz, void **p_blk, TMO tmout )
{
        MPLCB  *mplcb;
        void   *blk = NULL;
        ER     ercd = E_OK;

        CHECK_MPLID(mplid);
        CHECK_PAR(blksz > 0 && blksz <= MAX_ALLOCATE);
        CHECK_TMOUT(tmout);
        CHECK_DISPATCH();

        mplcb = get_mplcb(mplid);
        blksz = roundSize(blksz);

        BEGIN_CRITICAL_SECTION;
        if ( mplcb->mplid == 0 ) {
                ercd = E_NOEXS;
                goto error_exit;
        }

#if CHK_PAR
        /* A request larger than the whole pool can never succeed */
        if ( blksz > mplcb->mplsz ) {
                ercd = E_PAR;
                goto error_exit;
        }
#endif

        /* Allocate immediately only if this task would be at the head of
           the wait queue (preserves queueing order) AND a block is free */
        if ( knl_gcb_top_of_wait_queue((GCB*)mplcb, knl_ctxtsk) == knl_ctxtsk
          && (blk = knl_get_blk(mplcb, blksz)) != NULL ) {
                /* Get memory block */
                *p_blk = blk;
        } else {
                /* Ready for wait */
                knl_ctxtsk->wspec = ( (mplcb->mplatr & TA_TPRI) != 0 )?
                                        &knl_wspec_mpl_tpri: &knl_wspec_mpl_tfifo;
                knl_ctxtsk->wercd = &ercd;
                knl_ctxtsk->winfo.mpl.blksz = blksz;
                knl_ctxtsk->winfo.mpl.p_blk = p_blk;
                knl_gcb_make_wait((GCB*)mplcb, tmout);
        }

    error_exit:
        END_CRITICAL_SECTION;

        return ercd;
}
  456: 
/*
 * Return variable size memory block 
 *      Frees 'blk' back to the pool, then assigns freed space to
 *      waiting tasks.  Errors: E_NOEXS, E_PAR (pointer outside the
 *      pool or not an allocated block).
 */
SYSCALL ER tk_rel_mpl( ID mplid, void *blk )
{
        MPLCB  *mplcb;
        ER     ercd = E_OK;

        CHECK_MPLID(mplid);
        CHECK_DISPATCH();

        mplcb = get_mplcb(mplid);

        BEGIN_CRITICAL_SECTION;
        if ( mplcb->mplid == 0 ) {
                ercd = E_NOEXS;
                goto error_exit;
        }
#if CHK_PAR
        /* Reject pointers outside the pool buffer range */
        if ( (B*)blk < (B*)mplcb->mempool || (B*)blk > (B*)mplcb->mempool + mplcb->mplsz ) {
                ercd = E_PAR;
                goto error_exit;
        }
#endif

        /* Free memory block */
        ercd = knl_rel_blk(mplcb, blk);
        if ( ercd < E_OK ) {
                goto error_exit;
        }

        /* Assign memory block to waiting task */
        knl_mpl_wakeup(mplcb);

    error_exit:
        END_CRITICAL_SECTION;

        return ercd;
}
  496: 
  497: #ifdef USE_FUNC_TK_REF_MPL
/*
 * Refer variable size memory pool state
 *      Fills *pk_rmpl with exinf, first waiting task ID, total free
 *      size, and largest allocatable size.  Returns E_NOEXS if the
 *      pool does not exist.
 */
SYSCALL ER tk_ref_mpl( ID mplid, T_RMPL *pk_rmpl )
{
        MPLCB  *mplcb;
        QUEUE  *fq, *q;
        W      frsz, blksz;
        ER     ercd = E_OK;

        CHECK_MPLID(mplid);
        CHECK_DISPATCH();

        mplcb = get_mplcb(mplid);

        BEGIN_CRITICAL_SECTION;
        if ( mplcb->mplid == 0 ) {
                ercd = E_NOEXS;
        } else {
                pk_rmpl->exinf = mplcb->exinf;
                pk_rmpl->wtsk  = knl_wait_tskid(&mplcb->wait_queue);
                /* Sum free space: walk the size-ordered FreeQue, and for
                   each entry also count the sublist of same-sized areas
                   chained through the second QUEUE (each adds 'blksz'). */
                frsz = 0;
                for ( fq = mplcb->freeque.next; fq != &mplcb->freeque; fq = fq->next ) {
                        blksz = FreeSize(fq);
                        frsz += blksz;
                        for ( q = (fq+1)->next; q != NULL; q = q->next ) {
                                frsz += blksz;
                        }
                }
                pk_rmpl->frsz  = frsz;
                pk_rmpl->maxsz = knl_MaxFreeSize(mplcb);
        }
        END_CRITICAL_SECTION;

        return ercd;
}
  534: #endif /* USE_FUNC_TK_REF_MPL */
  535: 
  536: /* ------------------------------------------------------------------------ */
  537: /*
  538:  *      Debugger support function
  539:  */
  540: #if USE_DBGSPT
  541: 
  542: #if USE_OBJECT_NAME
  543: /*
  544:  * Get object name from control block
  545:  */
  546: EXPORT ER knl_memorypool_getname(ID id, UB **name)
  547: {
  548:         MPLCB  *mplcb;
  549:         ER     ercd = E_OK;
  550: 
  551:         CHECK_MPLID(id);
  552: 
  553:         BEGIN_DISABLE_INTERRUPT;
  554:         mplcb = get_mplcb(id);
  555:         if ( mplcb->mplid == 0 ) {
  556:                 ercd = E_NOEXS;
  557:                 goto error_exit;
  558:         }
  559:         if ( (mplcb->mplatr & TA_DSNAME) == 0 ) {
  560:                 ercd = E_OBJ;
  561:                 goto error_exit;
  562:         }
  563:         *name = mplcb->name;
  564: 
  565:     error_exit:
  566:         END_DISABLE_INTERRUPT;
  567: 
  568:         return ercd;
  569: }
  570: #endif /* USE_OBJECT_NAME */
  571: 
  572: #ifdef USE_FUNC_TD_LST_MPL
  573: /*
  574:  * Refer variable size memory pool usage state
  575:  */
  576: SYSCALL INT td_lst_mpl( ID list[], INT nent )
  577: {
  578:         MPLCB  *mplcb, *end;
  579:         INT    n = 0;
  580: 
  581:         BEGIN_DISABLE_INTERRUPT;
  582:         end = knl_mplcb_table + NUM_MPLID;
  583:         for ( mplcb = knl_mplcb_table; mplcb < end; mplcb++ ) {
  584:                 if ( mplcb->mplid == 0 ) {
  585:                         continue;
  586:                 }
  587: 
  588:                 if ( n++ < nent ) {
  589:                         *list++ = ID_MPL(mplcb - knl_mplcb_table);
  590:                 }
  591:         }
  592:         END_DISABLE_INTERRUPT;
  593: 
  594:         return n;
  595: }
  596: #endif /* USE_FUNC_TD_LST_MPL */
  597: 
  598: #ifdef USE_FUNC_TD_REF_MPL
/*
 * Refer variable size memory pool state
 *      Debugger-support variant of tk_ref_mpl: fills *pk_rmpl with
 *      exinf, first waiting task ID, total free size, and largest
 *      allocatable size.  Returns E_NOEXS if the pool does not exist.
 */
SYSCALL ER td_ref_mpl( ID mplid, TD_RMPL *pk_rmpl )
{
        MPLCB  *mplcb;
        QUEUE  *fq, *q;
        W      frsz, blksz;
        ER     ercd = E_OK;

        CHECK_MPLID(mplid);

        mplcb = get_mplcb(mplid);

        BEGIN_DISABLE_INTERRUPT;
        if ( mplcb->mplid == 0 ) {
                ercd = E_NOEXS;
        } else {
                pk_rmpl->exinf = mplcb->exinf;
                pk_rmpl->wtsk  = knl_wait_tskid(&mplcb->wait_queue);
                /* Sum free space: walk the size-ordered FreeQue, and for
                   each entry also count the sublist of same-sized areas
                   chained through the second QUEUE (each adds 'blksz'). */
                frsz = 0;
                for ( fq = mplcb->freeque.next; fq != &mplcb->freeque; fq = fq->next ) {
                        blksz = FreeSize(fq);
                        frsz += blksz;
                        for ( q = (fq+1)->next; q != NULL; q = q->next ) {
                                frsz += blksz;
                        }
                }
                pk_rmpl->frsz  = frsz;
                pk_rmpl->maxsz = knl_MaxFreeSize(mplcb);
        }
        END_DISABLE_INTERRUPT;

        return ercd;
}
  634: #endif /* USE_FUNC_TD_REF_MPL */
  635: 
  636: #ifdef USE_FUNC_TD_MPL_QUE
  637: /*
  638:  * Refer variable size memory pool wait queue 
  639:  */
  640: SYSCALL INT td_mpl_que( ID mplid, ID list[], INT nent )
  641: {
  642:         MPLCB  *mplcb;
  643:         QUEUE  *q;
  644:         ER     ercd = E_OK;
  645: 
  646:         CHECK_MPLID(mplid);
  647: 
  648:         mplcb = get_mplcb(mplid);
  649: 
  650:         BEGIN_DISABLE_INTERRUPT;
  651:         if ( mplcb->mplid == 0 ) {
  652:                 ercd = E_NOEXS;
  653:         } else {
  654:                 INT n = 0;
  655:                 for ( q = mplcb->wait_queue.next; q != &mplcb->wait_queue; q = q->next ) {
  656:                         if ( n++ < nent ) {
  657:                                 *list++ = ((TCB*)q)->tskid;
  658:                         }
  659:                 }
  660:                 ercd = n;
  661:         }
  662:         END_DISABLE_INTERRUPT;
  663: 
  664:         return ercd;
  665: }
  666: #endif /* USE_FUNC_TD_MPL_QUE */
  667: 
  668: #endif /* USE_DBGSPT */
  669: #endif /* USE_MEMORYPOOL */