/*
 * NOTE(review): the original top-of-file comment block (source lines
 * 1-51, presumably the license/description header) was garbled into
 * bare line numbers during extraction. Restore it from the upstream
 * source tree.
 */
52: #ifndef __SYS_ATOMIC_DEPEND_H__
53: #define __SYS_ATOMIC_DEPEND_H__
54:
55: #define ATOMIC_INC_USER_MODE 1
56: Inline UINT atomic_inc(volatile UINT* addr)
57: {
58: UINT excl, re;
59:
60: do {
61: Asm(" ldrex %2, [%3]\n"
62: " add %2, %2, #1\n"
63: " strex %1, %2, [%3]"
64: : "+m"(*addr), "=r"(excl), "=r"(re)
65: : "r"(addr)
66: : "cc", "memory");
67: } while (excl);
68:
69: return re;
70: }
71:
72: #define ATOMIC_DEC_USER_MODE 1
73: Inline UINT atomic_dec(volatile UINT* addr)
74: {
75: UINT excl, re;
76:
77: do {
78: Asm(" ldrex %2, [%3]\n"
79: " sub %2, %2, #1\n"
80: " strex %1, %2, [%3]"
81: : "+m"(*addr), "=r"(excl), "=r"(re)
82: : "r"(addr)
83: : "cc", "memory");
84: } while (excl);
85:
86: return re;
87: }
88:
89: #define ATOMIC_ADD_USER_MODE 1
90: Inline UINT atomic_add(volatile UINT* addr, UINT val)
91: {
92: UINT excl, re;
93:
94: do {
95: Asm(" ldrex %2, [%3]\n"
96: " add %2, %2, %4\n"
97: " strex %1, %2, [%3]"
98: : "+m"(*addr), "=r"(excl), "=r"(re)
99: : "r"(addr), "r"(val)
100: : "cc", "memory");
101: } while (excl);
102:
103: return re;
104: }
105:
106: #define ATOMIC_SUB_USER_MODE 1
107: Inline UINT atomic_sub(volatile UINT* addr, UINT val)
108: {
109: UINT excl, re;
110:
111: do {
112: Asm(" ldrex %2, [%3]\n"
113: " sub %2, %2, %4\n"
114: " strex %1, %2, [%3]"
115: : "+m"(*addr), "=r"(excl), "=r"(re)
116: : "r"(addr), "r"(val)
117: : "cc", "memory");
118: } while (excl);
119:
120: return re;
121: }
122:
123:
124: #define ATOMIC_XCHG_USER_MODE 1
125: Inline UINT atomic_xchg(volatile UINT* addr, UINT val)
126: {
127: UINT excl, re;
128:
129: do {
130: Asm(" ldrex %2, [%3]\n"
131: " strex %1, %4, [%3]"
132: : "+m"(*addr), "=r"(excl), "=r"(re)
133: : "r"(addr), "r"(val)
134: : "cc", "memory");
135: } while (excl);
136:
137: return re;
138: }
139:
140:
141: #define ATOMIC_CMPXCHG_USER_MODE 1
142: Inline UINT atomic_cmpxchg(volatile UINT* addr, UINT val, UINT cmp)
143: {
144: UINT excl, re;
145:
146: do {
147: Asm(" mov %1, #0\n"
148: " ldrex %2, [%3]\n"
149: " cmp %2, %5\n"
150: " strexeq %1, %4, [%3]\n"
151: " clrex"
152: : "+m"(*addr), "=r"(excl), "=r"(re)
153: : "r"(addr), "r"(val), "r"(cmp)
154: : "cc", "memory");
155: } while (excl);
156:
157: return re;
158: }
159:
160: #define ATOMIC_BITSET_USER_MODE 1
161: Inline UINT atomic_bitset(volatile UINT* addr, UINT setptn)
162: {
163: UINT excl, re, tmp;
164:
165: do {
166: Asm(" ldrex %2, [%4]\n"
167: " orr %3, %2, %5\n"
168: " strex %1, %3, [%4]"
169: : "+m"(*addr), "=r"(excl), "=r"(re), "=r"(tmp)
170: : "r"(addr), "r"(setptn)
171: : "cc", "memory");
172: } while (excl);
173:
174: return re;
175: }
176:
177: #define ATOMIC_BITCLR_USER_MODE 1
178: Inline UINT atomic_bitclr(volatile UINT* addr, UINT clrptn)
179: {
180: UINT excl, re, tmp;
181:
182: do {
183: Asm(" ldrex %2, [%4]\n"
184: " and %3, %2, %5\n"
185: " strex %1, %3, [%4]"
186: : "+m"(*addr), "=r"(excl), "=r"(re), "=r"(tmp)
187: : "r"(addr), "r"(clrptn)
188: : "cc", "memory");
189: } while (excl);
190:
191: return re;
192: }
193:
194: #endif