/*
 *	imalloc.c (sysmgr)
 *	Kernel memory allocation
 */

#include "sysmgr.h"
#include <sys/imalloc.h>
#include <sys/queue.h>

/*
 * Memory allocation control block (IMACB)
 */
typedef struct {
	const QUEUE nouse;	/* Never accessed: slack so that AlignIMACB()
				   stays inside the structure */
	QUEUE areaque;		/* Queue connecting all memory areas */
	QUEUE freeque;		/* Queue connecting only free areas */
	UINT mematr;		/* Memory attribute */
} IMACB;

/*
 * Align an IMACB so that its queue members fall on an 8-byte boundary
 *	(the low 3 bits of AreaQue pointers are used as flags)
 */
#define AlignIMACB(imacb) ( (IMACB*)((UW)(imacb) & ~0x00000007U) )

LOCAL UINT pagesz;	/* System memory page size */

/*
 * Minimum unit of subdivision
 *	Sizes are rounded up to sizeof(QUEUE) so that every AreaQue entry
 *	stays aligned and the low bits of its address remain free for the
 *	area flags.
 */
#define ROUNDSZ ( sizeof(QUEUE) )
#define ROUND(sz) ( ((sz) + (ROUNDSZ-1)) & ~(ROUNDSZ-1) )

/* Minimum fragment size */
#define MIN_FRAGMENT ( sizeof(QUEUE) * 2 )

/*
 * Area flags
 *	Kept in the low 3 bits of the 'prev' pointer of each AreaQue entry.
 */
#define AREA_USE 0x00000001U	/* Area is in use */
#define AREA_TOP 0x00000002U	/* First entry of an allocated page block */
#define AREA_END 0x00000004U	/* Last entry of an allocated page block */
#define AREA_MASK 0x00000007U

#define setAreaFlag(q, f) ( (q)->prev = (QUEUE*)((UW)(q)->prev | (f)) )
#define clrAreaFlag(q, f) ( (q)->prev = (QUEUE*)((UW)(q)->prev & ~(f)) )
#define chkAreaFlag(q, f) ( ((UW)(q)->prev & (f)) != 0 )

#define Mask(x) ( (QUEUE*)((UW)(x) & ~AREA_MASK) )
#define Assign(x, y) ( (x) = (QUEUE*)(((UW)(x) & AREA_MASK) | (UW)(y)) )

/*
 * Usable size of an area
 *	AreaSize(aq): computed from the AreaQue entry 'aq'
 *	FreeSize(fq): computed from the FreeQue entry 'fq' (= aq + 1)
 */
#define AreaSize(aq) ((size_t)( (VB*)(aq)->next - (VB*)((aq) + 1) ))
#define FreeSize(fq) ((size_t)( (VB*)((fq) - 1)->next - (VB*)(fq) ))

/*
 * Number of pages required to hold 'size' bytes
 */
Inline size_t PageCount( size_t size )
{
	return (size + (pagesz-1)) / pagesz;
}
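
/*
 * Example (assuming pagesz == 4096):
 *	PageCount(1) == 1, PageCount(4096) == 1, PageCount(4097) == 2
 */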

/*
 * FreeQue search
 *	Find the smallest free area that is not smaller than 'blksz'.
 *	If no such area exists, &imacb->freeque is returned.
 */
LOCAL QUEUE* searchFreeArea( size_t blksz, IMACB *imacb )
{
	QUEUE *q = &imacb->freeque;

	/* FreeQue is kept in ascending size order: search from the largest
	   entry for large requests, otherwise from the smallest */
	if ( blksz > pagesz / 4 ) {
		/* Search from the largest area */
		size_t fsz = 0;
		while ( (q = q->prev) != &imacb->freeque ) {
			fsz = FreeSize(q);
			if ( fsz <= blksz ) {
				return ( fsz < blksz )? q->next: q;
			}
		}
		return ( fsz >= blksz )? q->next: q;
	} else {
		/* Search from the smallest area */
		while ( (q = q->next) != &imacb->freeque ) {
			if ( FreeSize(q) >= blksz ) {
				break;
			}
		}
		return q;
	}
}

/*
 * Registration of a free area onto FreeQue
 *	FreeQue is a two-level structure: the main queue links one
 *	representative area per size, in ascending size order, and each
 *	representative carries a sub-queue (at entry + 1) linking the
 *	other free areas of the same size.
 */
LOCAL void appendFreeArea( QUEUE *aq, IMACB *imacb )
{
	QUEUE *fq;
	size_t size = AreaSize(aq);

	/* Find the insertion position: the first free area whose size is
	   equal to or larger than 'size' */
	fq = searchFreeArea(size, imacb);

	/* Register onto FreeQue */
	clrAreaFlag(aq, AREA_USE);
	if ( fq != &imacb->freeque && FreeSize(fq) == size ) {
		/* Same size already registered: link into its sub-queue */
		QueInsert(aq + 1, fq + 1);
	} else {
		/* New size: insert into the main queue before 'fq' */
		QueInsert(aq + 1, fq);
	}
	QueInit(aq + 2);	/* Own same-size sub-queue starts empty */
}

/*
 * Remove a free area from FreeQue
 *	If the area heads a same-size sub-queue, one of its members is
 *	promoted to take its place on the main queue.
 */
LOCAL void removeFreeQue( QUEUE *fq )
{
	if ( !isQueEmpty(fq + 1) ) {
		QUEUE *nq = (fq + 1)->next;

		/* Promote 'nq': it inherits the sub-queue and replaces
		   'fq' on the main queue */
		QueRemove(fq + 1);
		QueInsert(nq + 1, nq);
		QueRemove(nq);
		QueInsert(nq, fq);
	}

	QueRemove(fq);
}

/*
 * Insert 'ent' into AreaQue directly after 'que'
 *	The flag bits kept in the neighbouring 'prev' pointers are preserved.
 */
LOCAL void insertAreaQue( QUEUE *que, QUEUE *ent )
{
	ent->prev = que;
	ent->next = que->next;
	Assign(que->next->prev, ent);
	que->next = ent;
}

/*
 * Remove 'aq' from AreaQue (preserving the neighbours' flag bits)
 */
LOCAL void removeAreaQue( QUEUE *aq )
{
	Mask(aq->prev)->next = aq->next;
	Assign(aq->next->prev, Mask(aq->prev));
}

/*
 * Allocate memory from the area 'aq'
 *	If the remainder is large enough to form another area, split it
 *	off and register it as a free area.
 */
Inline void* mem_alloc( QUEUE *aq, size_t blksz, IMACB *imacb )
{
	QUEUE *q;

	if ( AreaSize(aq) - blksz >= MIN_FRAGMENT + sizeof(QUEUE) ) {
		/* Split the area: 'q' becomes the AreaQue entry of the
		   remaining free part */
		q = (QUEUE*)((VB*)(aq + 1) + blksz);
		insertAreaQue(aq, q);

		/* Register the remainder onto FreeQue */
		appendFreeArea(q, imacb);
	}
	setAreaFlag(aq, AREA_USE);

	return (void*)(aq + 1);
}
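
/*
 * Example (illustrative, assuming sizeof(QUEUE) == 8): allocating
 * blksz == 40 from an area with AreaSize(aq) == 104 leaves a remainder
 * of 64 >= MIN_FRAGMENT + sizeof(QUEUE) == 24, so the area is split and
 * a 56-byte free fragment (64 minus the new AreaQue entry) is returned
 * to FreeQue.
 */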

/*
 * Memory allocation
 */
LOCAL void* imalloc( size_t size, IMACB *imacb )
{
	QUEUE *q;
	void *mem;
	UW imask;

	/* Round up to the minimum fragment size */
	if ( size < MIN_FRAGMENT ) {
		size = MIN_FRAGMENT;
	}
	size = ROUND(size);

	DI(imask);

	/* Search for a free area */
	q = searchFreeArea(size, imacb);
	if ( q != &imacb->freeque ) {
		/* Free area found: allocate from it */
		removeFreeQue(q);

		q = q - 1;
	} else {
		/* No suitable free area: allocate new pages */
		QUEUE *e;
		size_t n;

		/* Get pages from the system (interrupts enabled) */
		EI(imask);
		n = PageCount(size + sizeof(QUEUE) * 2);
		q = GetSysMemBlk(n, imacb->mematr);
		if ( q == NULL ) {
			goto err_ret;		/* Insufficient memory */
		}
		DI(imask);

		/* Register the new block onto AreaQue */
		e = (QUEUE*)((VB*)q + n * pagesz) - 1;
		insertAreaQue(&imacb->areaque, e);
		insertAreaQue(&imacb->areaque, q);
		setAreaFlag(q, AREA_TOP);
		setAreaFlag(e, AREA_END);
	}

	/* Allocate memory from the area 'q' */
	mem = mem_alloc(q, size, imacb);

	EI(imask);
	return mem;

err_ret:
	TM_DEBUG_PRINT(("imalloc error\n"));
	return NULL;
}

/*
 * Memory allocation with clearing to zero
 */
LOCAL void* icalloc( size_t nmemb, size_t size, IMACB *imacb )
{
	size_t sz = nmemb * size;
	void *mem;

	mem = imalloc(sz, imacb);
	if ( mem == NULL ) {
		return NULL;
	}

	MEMSET(mem, 0, sz);

	return mem;
}

/*
 * Memory deallocation
 *	Merge the freed area with adjacent free areas; if a whole page
 *	block becomes free, return it to the system.
 */
LOCAL void ifree( void *ptr, IMACB *imacb )
{
	QUEUE *aq;
	UW imask;

	DI(imask);

	aq = (QUEUE*)ptr - 1;
	clrAreaFlag(aq, AREA_USE);

	if ( !chkAreaFlag(aq->next, AREA_END|AREA_USE) ) {
		/* Merge with the following free area */
		removeFreeQue(aq->next + 1);
		removeAreaQue(aq->next);
	}

	if ( !chkAreaFlag(aq, AREA_TOP) && !chkAreaFlag(aq->prev, AREA_USE) ) {
		/* Merge with the preceding free area */
		aq = aq->prev;
		removeFreeQue(aq + 1);
		removeAreaQue(aq->next);
	}

	/* If the whole page block is now free, release it.  The block is
	   not released when interrupts were disabled on entry, because
	   RelSysMemBlk() is called with interrupts enabled. */
	if ( !isDI(imask) && chkAreaFlag(aq, AREA_TOP) && chkAreaFlag(aq->next, AREA_END) ) {
		/* Release the pages */
		removeAreaQue(aq->next);
		removeAreaQue(aq);
		EI(imask);
		RelSysMemBlk(aq);
		DI(imask);
	} else {
		/* Register the (merged) free area onto FreeQue */
		appendFreeArea(aq, imacb);
	}

	EI(imask);
}

/*
 * Memory allocation with the memory attribute specified
 *	A control block is selected by the protection ring and the
 *	residency of 'attr'.
 */
LOCAL IMACB Imacb[2][2];

#define RING(attr) ( ( ((attr) & TA_RNG3) == TA_RNG3 )? 1: 0 )
#define RESIDENT(attr) ( ( ((attr) & TA_NORESIDENT) == 0 )? 1: 0 )

#define SelIMACB(attr) ( AlignIMACB(&Imacb[RING(attr)][RESIDENT(attr)]) )

EXPORT void* IAmalloc( size_t size, UINT attr )
{
	return imalloc(size, SelIMACB(attr));
}

EXPORT void* IAcalloc( size_t nmemb, size_t size, UINT attr )
{
	return icalloc(nmemb, size, SelIMACB(attr));
}

EXPORT void IAfree( void *ptr, UINT attr )
{
	ifree(ptr, SelIMACB(attr));
}
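
/*
 * Usage sketch (illustrative only, not part of this module):
 *
 *	void *buf = IAmalloc(256, TA_RNG0);
 *	if ( buf != NULL ) {
 *		IAfree(buf, TA_RNG0);
 *	}
 *
 * The attribute passed to IAfree() must select the same IMACB (same
 * ring class and residency) as the one used for the allocation.
 */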

/*
 * Memory allocation at the protection ring given by 'svc_call_limit'
 */
IMPORT INT svc_call_limit;

#define TA_RNG ( (UINT)svc_call_limit << 8 )

EXPORT void* Imalloc( size_t size )
{
	return IAmalloc(size, TA_RNG);
}

EXPORT void* Icalloc( size_t nmemb, size_t size )
{
	return IAcalloc(nmemb, size, TA_RNG);
}

EXPORT void Ifree( void *ptr )
{
	IAfree(ptr, TA_RNG);
}

/*
 * Initialize an IMACB
 */
LOCAL void initIMACB( UINT attr )
{
	IMACB *imacb = SelIMACB(attr);

	QueInit(&imacb->areaque);
	QueInit(&imacb->freeque);
	imacb->mematr = attr;
}

/*
 * Imalloc initialization
 */
EXPORT ER init_Imalloc( void )
{
	T_RSMB rsmb;
	ER ercd;

	ercd = RefSysMemInfo(&rsmb);
	if ( ercd < E_OK ) {
		goto err_ret;
	}

	pagesz = (UINT)rsmb.blksz;	/* Page size = system memory block size */

	initIMACB(TA_RNG0);
	initIMACB(TA_RNG0|TA_NORESIDENT);
	initIMACB(TA_RNG3);
	initIMACB(TA_RNG3|TA_NORESIDENT);

	return E_OK;

err_ret:
	TM_DEBUG_PRINT(("init_Imalloc ercd = %d\n", ercd));
	return ercd;
}