00001
00002
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028 #ifndef __L4UTIL__INCLUDE__ARCH_X86__ATOMIC_ARCH_H__
00029 #define __L4UTIL__INCLUDE__ARCH_X86__ATOMIC_ARCH_H__
00030
00031
00032
00033
00034
00035 EXTERN_C_BEGIN
00036
00037
#define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG64
/**
 * Atomic 64-bit compare-and-exchange.
 *
 * Atomically compares *dest with cmp_val and, if they are equal,
 * stores new_val into *dest.
 *
 * \param dest     pointer to the 64-bit destination operand
 * \param cmp_val  expected old value of *dest
 * \param new_val  value to store if *dest == cmp_val
 * \return 1 if the exchange was performed, 0 otherwise.
 *
 * NOTE(review): no "lock" prefix is emitted, so the operation is only
 * atomic w.r.t. the local CPU (e.g. against interrupts), not across
 * SMP CPUs — presumably intentional for this configuration; confirm
 * against the build's SMP assumptions.
 */
L4_INLINE int
l4util_cmpxchg64(volatile l4_uint64_t * dest,
                 l4_uint64_t cmp_val, l4_uint64_t new_val)
{
  unsigned char ret;
  l4_umword_t dummy;

  __asm__ __volatile__
    (
#ifdef __PIC__
     /* Under PIC, %ebx holds the GOT pointer and must not be clobbered
        by asm; keep the low half of new_val in %esi and swap it into
        %ebx only around the instruction itself. */
     "xchg %%esi,%%ebx\n\t"
#endif
     /* cmpxchg8b: compare %edx:%eax with the m64 operand; on match
        store %ecx:%ebx there, else load the current value into
        %edx:%eax.  ZF reports the outcome. */
     "cmpxchg8b %5\n\t"
     "sete %0\n\t"
#ifdef __PIC__
     "xchg %%esi,%%ebx\n\t"   /* restore the GOT pointer in %ebx */
#endif
     :
     "=a" (ret),              /* sete writes %al */
     "=d" (dummy)             /* %edx is overwritten on mismatch */
     :
     "A" (cmp_val),           /* cmp_val preloaded into %edx:%eax */
     "c" ((unsigned int)(new_val>>32ULL)),   /* high half of new_val */
#ifdef __PIC__
     "S"
#else
     "b"
#endif
     ((unsigned int)new_val), /* low half of new_val */
     "m" (*dest)
     :
     "memory", "cc"
    );

  return ret;
}
00075
00076
#define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG32
/**
 * Atomic 32-bit compare-and-exchange.
 *
 * Atomically compares *dest with cmp_val and, if they are equal,
 * stores new_val into *dest.
 *
 * \param dest     pointer to the 32-bit destination operand
 * \param cmp_val  expected old value of *dest
 * \param new_val  value to store if *dest == cmp_val
 * \return 1 if the exchange was performed, 0 otherwise.
 *
 * NOTE(review): no "lock" prefix — atomic only w.r.t. the local CPU;
 * confirm against the build's SMP assumptions.
 */
L4_INLINE int
l4util_cmpxchg32(volatile l4_uint32_t * dest,
                 l4_uint32_t cmp_val, l4_uint32_t new_val)
{
  l4_uint32_t tmp;

  __asm__ __volatile__
    (
     "cmpxchgl %1, %3 \n\t"
     :
     "=a" (tmp)          /* cmpxchg leaves the old *dest in %eax */
     :
     "r" (new_val),
     "0" (cmp_val),      /* cmp_val preloaded into %eax */
     "m" (*dest)
     :
     "memory", "cc"
    );

  /* On success the old value equals cmp_val. */
  return tmp == cmp_val;
}
00099
00100
00101 #define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG16
00102 L4_INLINE int
00103 l4util_cmpxchg16(volatile l4_uint16_t * dest,
00104 l4_uint16_t cmp_val, l4_uint16_t new_val)
00105 {
00106 l4_uint16_t tmp;
00107
00108 __asm__ __volatile__
00109 (
00110 "cmpxchgw %1, %3 \n\t"
00111 :
00112 "=a" (tmp)
00113 :
00114 "c" (new_val),
00115 "0" (cmp_val),
00116 "m" (*dest)
00117 :
00118 "memory", "cc"
00119 );
00120
00121 return tmp == cmp_val;
00122 }
00123
00124
00125 #define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG8
00126 L4_INLINE int
00127 l4util_cmpxchg8(volatile l4_uint8_t * dest,
00128 l4_uint8_t cmp_val, l4_uint8_t new_val)
00129 {
00130 l4_uint8_t tmp;
00131
00132 __asm__ __volatile__
00133 (
00134 "cmpxchgb %1, %3 \n\t"
00135 :
00136 "=a" (tmp)
00137 :
00138 "c" (new_val),
00139 "0" (cmp_val),
00140 "m" (*dest)
00141 :
00142 "memory", "cc"
00143 );
00144
00145 return tmp == cmp_val;
00146 }
00147
00148 #define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG
00149 L4_INLINE int
00150 l4util_cmpxchg(volatile l4_umword_t * dest,
00151 l4_umword_t cmp_val, l4_umword_t new_val)
00152 {
00153 return l4util_cmpxchg32((volatile l4_uint32_t *)dest,
00154 (l4_uint32_t)cmp_val, (l4_uint32_t)new_val);
00155 }
00156
00157
#define __L4UTIL_ATOMIC_HAVE_ARCH_XCHG32
/**
 * Atomic 32-bit exchange.
 *
 * Atomically swaps val with *dest.
 *
 * \param dest  pointer to the 32-bit destination operand
 * \param val   new value to store
 * \return the previous value of *dest.
 *
 * Note: xchg with a memory operand asserts the bus lock implicitly on
 * x86, so this is atomic even without a "lock" prefix.
 */
L4_INLINE l4_uint32_t
l4util_xchg32(volatile l4_uint32_t * dest, l4_uint32_t val)
{
  __asm__ __volatile__
    (
     "xchg %0, %1 \n\t"
     :
     "=r" (val)                 /* old *dest swapped back into val */
     :
     "m" (*dest), "0" (val)
     :
     "memory"
    );

  return val;
}
00175
00176
#define __L4UTIL_ATOMIC_HAVE_ARCH_XCHG16
/**
 * Atomic 16-bit exchange.
 *
 * Atomically swaps val with *dest.
 *
 * \param dest  pointer to the 16-bit destination operand
 * \param val   new value to store
 * \return the previous value of *dest.
 *
 * Note: xchg with a memory operand asserts the bus lock implicitly on
 * x86, so this is atomic even without a "lock" prefix.
 */
L4_INLINE l4_uint16_t
l4util_xchg16(volatile l4_uint16_t * dest, l4_uint16_t val)
{
  __asm__ __volatile__
    (
     "xchg %w0, %1 \n\t"        /* %w0 forces the 16-bit register name */
     :
     "=r" (val)                 /* old *dest swapped back into val */
     :
     "m" (*dest), "0" (val)
     :
     "memory"
    );

  return val;
}
00194
00195
#define __L4UTIL_ATOMIC_HAVE_ARCH_XCHG8
/**
 * Atomic 8-bit exchange.
 *
 * Atomically swaps val with *dest.
 *
 * \param dest  pointer to the 8-bit destination operand
 * \param val   new value to store
 * \return the previous value of *dest.
 *
 * Note: xchg with a memory operand asserts the bus lock implicitly on
 * x86, so this is atomic even without a "lock" prefix.
 */
L4_INLINE l4_uint8_t
l4util_xchg8(volatile l4_uint8_t * dest, l4_uint8_t val)
{
  __asm__ __volatile__
    (
     "xchg %b0, %1 \n\t"        /* %b0 forces the 8-bit register name */
     :
     "=r" (val)                 /* old *dest swapped back into val */
     :
     "m" (*dest), "0" (val)
     :
     "memory"
    );

  return val;
}
00213
00214
00215 #define __L4UTIL_ATOMIC_HAVE_ARCH_XCHG
00216 L4_INLINE l4_umword_t
00217 l4util_xchg(volatile l4_umword_t * dest, l4_umword_t val)
00218 {
00219 return l4util_xchg32((volatile l4_uint32_t *)dest, (l4_uint32_t)val);
00220 }
00221
/* Expand l4util_genop once per operand width (8/16/32 bit).  The second
   argument is the x86 instruction-size suffix (b/w/l), the third an
   optional extra operand prefix (unused, empty), and "args" carries the
   per-instantiation parameters forwarded to l4util_genop, which is
   redefined before each group of instantiations below. */
#define l4util_gen_allop(args...) \
  l4util_genop( 8,"b", "", args) \
  l4util_genop(16,"w", "", args) \
  l4util_genop(32,"l", "", args)
00226
#define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG32_RES

/* Generate l4util_cmpxchg{8,16,32}_res(): compare-and-exchange variants
   that return the PREVIOUS value of *dest instead of a success flag
   (the caller detects success by comparing the result with cmp_val).
   NOTE(review): as with the plain cmpxchg variants above, no "lock"
   prefix is emitted. */
#undef l4util_genop
#define l4util_genop(bit, mod, op1, opname, opchar) \
L4_INLINE l4_uint##bit##_t \
l4util_##opname##bit##_res(volatile l4_uint##bit##_t* dest, \
                           l4_uint##bit##_t cmp_val, \
                           l4_uint##bit##_t new_val) \
{ \
  l4_uint##bit##_t old_val; \
  __asm__ __volatile__ \
    ( \
     #opname mod " %"op1"1,%3 \n\t" \
     : \
     "=a"(old_val) /* cmpxchg leaves the previous *dest in %eax/%ax/%al */ \
     : \
     "r"(new_val), "a"(cmp_val), "m" (*dest) \
     : \
     "memory" \
    ); \
  return old_val; \
}
l4util_gen_allop(cmpxchg,cmpxchg)
00252
00253 #define __L4UTIL_ATOMIC_HAVE_ARCH_CMPXCHG_RES
00254 L4_INLINE l4_umword_t
00255 l4util_cmpxchg_res(volatile l4_umword_t *dest,
00256 l4_umword_t cmp_val, l4_umword_t new_val)
00257 {
00258 return l4util_cmpxchg32_res((volatile l4_uint32_t *)dest,
00259 (l4_uint32_t)cmp_val, (l4_uint32_t)new_val);
00260 }
00261
#define __L4UTIL_ATOMIC_HAVE_ARCH_ADD8
#define __L4UTIL_ATOMIC_HAVE_ARCH_ADD16
#define __L4UTIL_ATOMIC_HAVE_ARCH_ADD32
#define __L4UTIL_ATOMIC_HAVE_ARCH_SUB8
#define __L4UTIL_ATOMIC_HAVE_ARCH_SUB16
#define __L4UTIL_ATOMIC_HAVE_ARCH_SUB32
#define __L4UTIL_ATOMIC_HAVE_ARCH_AND8
#define __L4UTIL_ATOMIC_HAVE_ARCH_AND16
#define __L4UTIL_ATOMIC_HAVE_ARCH_AND32
#define __L4UTIL_ATOMIC_HAVE_ARCH_OR8
#define __L4UTIL_ATOMIC_HAVE_ARCH_OR16
#define __L4UTIL_ATOMIC_HAVE_ARCH_OR32

/* Generate l4util_{add,sub,and,or}{8,16,32}(): apply the x86
   read-modify-write instruction <opname> directly to *dest with the
   immediate-or-register operand val.  No result is returned.
   NOTE(review): no "lock" prefix — the RMW is atomic only w.r.t. the
   local CPU; confirm against the build's SMP assumptions. */
#undef l4util_genop
#define l4util_genop(bit, mod, op1, opname) \
L4_INLINE void \
l4util_##opname##bit(volatile l4_uint##bit##_t* dest, l4_uint##bit##_t val) \
{ \
  __asm__ __volatile__ \
    ( \
     #opname mod " %1,%0 \n\t" \
     : \
     : \
     "m" (*dest), "ir" (val) /* *dest write is covered by the memory clobber */ \
     : \
     "memory" \
    ); \
}
l4util_gen_allop(add)
l4util_gen_allop(sub)
l4util_gen_allop(and)
l4util_gen_allop(or)
00294
#define __L4UTIL_ATOMIC_HAVE_ARCH_ADD8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_ADD16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_ADD32_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_SUB8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_SUB16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_SUB32_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_AND8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_AND16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_AND32_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_OR8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_OR16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_OR32_RES

/* Generate l4util_{add,sub,and,or}{8,16,32}_res(): atomically apply
   <opchar> (+, -, &, |) to *dest and return the NEW value.  Implemented
   as a compare-and-swap loop on the corresponding l4util_cmpxchg<bit>:
   snapshot, compute, and retry if *dest changed in between. */
#undef l4util_genop
#define l4util_genop(bit, mod, op1, opname, opchar) \
L4_INLINE l4_uint##bit##_t \
l4util_##opname##bit##_res(volatile l4_uint##bit##_t* dest, \
                           l4_uint##bit##_t val) \
{ \
  l4_uint##bit##_t res, old; \
\
  do \
    { \
      old = *dest; /* snapshot the current value */ \
      res = old opchar val; /* candidate new value */ \
    } \
  while (!l4util_cmpxchg##bit(dest, old, res)); \
\
  return res; \
}
l4util_gen_allop(add, +)
l4util_gen_allop(sub, -)
l4util_gen_allop(and, &)
l4util_gen_allop(or, |)
00329
#define __L4UTIL_ATOMIC_HAVE_ARCH_INC8
#define __L4UTIL_ATOMIC_HAVE_ARCH_INC16
#define __L4UTIL_ATOMIC_HAVE_ARCH_INC32
#define __L4UTIL_ATOMIC_HAVE_ARCH_DEC8
#define __L4UTIL_ATOMIC_HAVE_ARCH_DEC16
#define __L4UTIL_ATOMIC_HAVE_ARCH_DEC32

/* Generate l4util_{inc,dec}{8,16,32}(): apply the x86 single-operand
   inc/dec instruction directly to *dest.  No result is returned.
   NOTE(review): no "lock" prefix — atomic only w.r.t. the local CPU;
   confirm against the build's SMP assumptions. */
#undef l4util_genop
#define l4util_genop(bit, mod, op1, opname) \
L4_INLINE void \
l4util_##opname##bit(volatile l4_uint##bit##_t* dest) \
{ \
  __asm__ __volatile__ \
    ( \
     #opname mod " %0 \n\t" \
     : \
     : \
     "m" (*dest) /* *dest write is covered by the memory clobber */ \
     : \
     "memory" \
    ); \
}
l4util_gen_allop(inc)
l4util_gen_allop(dec)
00354
#define __L4UTIL_ATOMIC_HAVE_ARCH_INC8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_INC16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_INC32_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_DEC8_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_DEC16_RES
#define __L4UTIL_ATOMIC_HAVE_ARCH_DEC32_RES

/* Generate l4util_{inc,dec}{8,16,32}_res(): atomically increment or
   decrement *dest and return the NEW value, via a compare-and-swap
   loop on l4util_cmpxchg<bit>. */
#undef l4util_genop
#define l4util_genop(bit, mod, op1, opname, opchar) \
L4_INLINE l4_uint##bit##_t \
l4util_##opname##bit##_res(volatile l4_uint##bit##_t* dest) \
{ \
  l4_uint##bit##_t res, old; \
\
  do \
    { \
      res = *dest; /* snapshot the current value */ \
      old = res opchar; /* postfix ++/--: old = snapshot, res = new value */ \
    } \
  while (!l4util_cmpxchg##bit(dest, old, res)); \
\
  return res; \
}
l4util_gen_allop(inc, ++)
l4util_gen_allop(dec, --)

/* The generator machinery is internal to this header; clean up. */
#undef l4util_genop
#undef l4util_gen_allop
00383
00384
00385 #define __L4UTIL_ATOMIC_HAVE_ARCH_ADD
00386 L4_INLINE void
00387 l4util_atomic_add(volatile long *dest, long val)
00388 {
00389 __asm__ __volatile__("addl %1, %0 \n"
00390 : "=m" (*dest)
00391 : "ri" (val), "m" (*dest));
00392 }
00393
00394 #define __L4UTIL_ATOMIC_HAVE_ARCH_INC
00395 L4_INLINE void
00396 l4util_atomic_inc(volatile long *dest)
00397 {
00398 __asm__ __volatile__("incl %0"
00399 : "=m" (*dest)
00400 : "m" (*dest));
00401 }
00402
00403 EXTERN_C_END
00404
00405 #endif