#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/atomic.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 */
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
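/*
 * Usage sketch (illustrative only; "nr_pending" is a made-up name):
 *
 *	static atomic_t nr_pending = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_pending, 5);
 *	int n = atomic_read(&nr_pending);
 */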
#include <asm/system.h>
/*
 * This port does not support SMP, so the operations below simply
 * disable local interrupts around a plain read-modify-write, which
 * is sufficient to make them atomic on a uniprocessor.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
}
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
}
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}
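/*
 * Example: the *_return() variants yield the post-operation value, so
 * a caller can, say, hand out unique sequence numbers (illustrative
 * sketch; "seq" is a made-up counter):
 *
 *	static atomic_t seq = ATOMIC_INIT(0);
 *
 *	int next = atomic_add_return(1, &seq);
 */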
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))
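/*
 * Example: a typical reference count built from these primitives.
 * Illustrative sketch only; "obj" and "free_obj" are made-up names.
 * atomic_inc() takes a reference, and atomic_dec_and_test() drops one,
 * returning true for the final put so the caller can free:
 *
 *	atomic_inc(&obj->refcnt);
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */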
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
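/*
 * Example: the usual atomic_cmpxchg() retry loop, here incrementing a
 * counter only while it stays below a cap (illustrative sketch;
 * "inc_below" is a made-up helper):
 *
 *	static inline int inc_below(atomic_t *v, int cap)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= cap)
 *				return 0;
 *			new = old + 1;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return 1;
 *	}
 */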
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
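/*
 * Example: atomic_inc_not_zero() is the usual way to take a reference
 * only if the object is still live, i.e. its count has not already
 * reached zero (illustrative sketch; the names are made up):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */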
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
}
static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
}
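/*
 * Example: the mask operations treat the counter as a plain bit field,
 * e.g. for a small set of status flags (illustrative sketch; the flag
 * names and the "state" variable are made up):
 *
 *	#define FLAG_BUSY	0x01
 *	#define FLAG_ERROR	0x02
 *
 *	atomic_set_mask(FLAG_BUSY, &state);
 *	atomic_clear_mask(FLAG_BUSY | FLAG_ERROR, &state);
 */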
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#include <asm-generic/atomic.h>

#endif /* __ASM_SH64_ATOMIC_H */