#include "icq_spinlock.h" #include "icq_task.h" #include "icq_task_report.h" uint32_t _unique_id = 1; void icq_spinlock_init(icq_spinlock_t* sp){ #ifdef LEON4_IN_PROGRESS sp->mutex = 0; #endif #ifdef RTEMS rtems_status_code status; /* Create/verify synchronisation semaphore */ status = rtems_semaphore_create( rtems_build_name ('S', 'E', 'M', '2'), 1, RTEMS_GLOBAL | RTEMS_BINARY_SEMAPHORE | RTEMS_FIFO, 1, &(sp->mutex) ); if(status!=RTEMS_SUCCESSFUL){ locked_printf("\n**rtems_semaphore_create not sucessfull**\n"); exit(1); } #endif sp->_id = _unique_id; _unique_id = _unique_id + 1; } int icq_spinlock_compareAndSwap(uint32_t* mutex){ #ifdef LEON4_IN_PROGRESS uint32_t* mem = mutex; register __typeof (*(mem)) __acev_newval __asm__ ("%g6"); register __typeof (mem) __acev_mem __asm__ ("%g1") = (mem); register __typeof (*(mem)) __acev_oldval __asm__ ("%g5"); __acev_oldval = 0; __acev_newval = 1; /** * From https://cr.yp.to/2005-590/sparcv9.pdf * * Compare-and-swap is an atomic operation which compares a value in a processor register to a * value in memory, and, if and only if they are equal, swaps the value in memory with the value in a * second processor register. Both 32-bit (CASA) and 64-bit (CASXA) operations are provided. The * compare-and-swap operation is atomic in the sense that once begun, no other processor can access * the memory location specified until the compare has completed and the swap (if any) has also * completed and is potentially visible to all other processors in the system. * * Note: .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, * because as will then mark the object file as V8+ arch. */ __asm__ __volatile__ ( ".word 0xcde05005" : "+r"(__acev_newval), "=m" (*__acev_mem) : "r" (__acev_oldval), "m" (*__acev_mem), "r" (__acev_mem) : "memory"); return __acev_newval; #endif #ifdef RTEMS return 0; #endif } #ifdef LEON4_IN_PROGRESS int icq_spinlock_lockMutex(volatile uint32_t* mutexRegister){ // spin until the icq_spinlock is released while(icq_spinlock_compareAndSwap((uint32_t*)&mutexRegister) == 1) { } return 0; } int icq_spinlock_unlockMutex(volatile uint32_t* mutexRegister){ // unlock mutex (*mutexRegister) = 0U; return 0; } #endif #ifdef RTEMS int icq_spinlock_lockMutex(icq_spinlock_t sp){ rtems_status_code status; status = rtems_semaphore_obtain( sp.mutex, RTEMS_NO_WAIT, 0 ); while (status != RTEMS_SUCCESSFUL ){ status = rtems_semaphore_obtain( sp.mutex, RTEMS_NO_WAIT, 0 ); } return 0; } int icq_spinlock_unlockMutex(icq_spinlock_t sp){ return rtems_semaphore_release( sp.mutex ); } #endif int icq_spinlock_lock(icq_spinlock_t sp){ icq_task_t* locker_task = icq_task_get_by_id(rtems_task_self()); icq_task_report_add_event_with_param(locker_task->task_report, SPIN_LOCK_START, sp._id); #ifdef LEON4_IN_PROGRESS icq_spinlock_lockMutex(&(sp.mutex)); #endif #ifdef RTEMS icq_spinlock_lockMutex(sp); #endif icq_task_report_add_event_with_param(locker_task->task_report, SPIN_LOCK_END, sp._id); return 0; } int icq_spinlock_unlock(icq_spinlock_t sp){ icq_task_t* locker_task = icq_task_get_by_id(rtems_task_self()); #ifdef LEON4_IN_PROGRESS icq_spinlock_unlockMutex(&(sp.mutex)); #endif #ifdef RTEMS icq_spinlock_unlockMutex(sp); #endif icq_task_report_add_event_with_param(locker_task->task_report, SPIN_UNLOCK, sp._id); return 0; }