mutex implementation for m3 without rtx lib

Hello,
Is there a mutex implementation available for use that does not require the rtx lib?
Thanks,
JG

Parents
  • Andrew,
    Thanks for the reply.

    I cannot disable the interrupts.

    I had read both references you provided.

    I am not an assembly programmer, so regarding the first reference: there is no complementary "unlock" function, and no example of how to invoke the lock (I assume I must pass a volatile int as an arg).

    Regarding the 2nd reference, the last line of text is "However, it does not work".
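
    For reference, a minimal try-lock/unlock pair for the M3 could be built on the CMSIS exclusive-access intrinsics (__LDREXW, __STREXW, __CLREX, __DMB) instead of hand-written assembly. The sketch below is untested; on a single-core part the caller should yield or block when the try fails, rather than spin.

    #include <stdint.h>
    #include "core_cm3.h" /* CMSIS-Core intrinsics */

    /* returns 1 if the lock was taken, 0 if it is held elsewhere */
    static inline int try_lock(volatile uint32_t *a_lock)
    {
            if (__LDREXW(a_lock) == 0)              /* read the lock word exclusively */
            {
                    if (__STREXW(1, a_lock) == 0)   /* claim it; 0 means the store-exclusive succeeded */
                    {
                            __DMB() ;               /* the critical section must not start earlier */
                            return 1 ;
                    }
            }
            __CLREX() ;                             /* drop the exclusive monitor on failure */
            return 0 ;
    }

    static inline void unlock(volatile uint32_t *a_lock)
    {
            __DMB() ;       /* finish the critical section's accesses first */
            *a_lock = 0 ;   /* a plain store releases the lock */
    }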

Children
  • 
    // interrupts cannot wait, sleep, or meddle with the task scheduler, as they must be as fast as possible.
    // the function refuse_interrupt_context() raises a software error if this (or any other) system call is
    // invoked from interrupt context
    int32s rtos_mutex_lock(int32s a_id)
    {
            int32u  l_scheduler_state ;
            int32s  l_result = NO_ERROR ;
    
            refuse_interrupt_context(__FILE__, __LINE__) ;
    
            // first check that a_id refers to a valid mutex slot.
            if ( (a_id >=0) && (a_id < MAX_MUTEXES) )
            {
                    t_mutex *lp_mutex ;
                    tcb_t   *lp_task ;
    
                    // the kernel is not reentrant; protect static/global data from ISRs
                    scheduler_disable(&l_scheduler_state) ;
    
                    lp_task = *(g_tcb + g_current_task_index) ;
                    lp_mutex = s_mutexes + a_id ;
    
                    // loop here and reschedule if a lock attempt fails
                    while (lp_mutex->owner != g_current_task_index)
                    {
                            // attempt to lock the mutex
                            if (lp_mutex->state == e_mutex_unlocked) // note: the lock is never relinquished while a task is waiting for the mutex, in order to prevent a race
                            // condition (another task might lock it before the dequeued task - taken from the mutex's queue - gets a chance to lock it). after the next owner of
                            // the lock is scheduled (because it is being put in the running queue), it resumes inside this function (after the call to 'scheduler_reschedule',
                            // see below - that is the new return address of the task) and locks the mutex. a mutex is freed only if there is no task waiting for it.
                            {
                                    lp_mutex->owner = g_current_task_index ;
                                    lp_mutex->state = e_mutex_locked ;
                                    ++lp_task->mutex_lock_reference_counters[a_id] ; // increment the reference counter. a task that has locked a mutex 'x' times must release it 'x' times
                            }
                            else if ( (lp_mutex->state == e_mutex_locked) && (lp_mutex->owner == g_current_task_index) ) // check if the running task attempts to lock a mutex that it already owns
                            {
                                    // task is already owner - nothing to do
                                    ++lp_task->mutex_lock_reference_counters[a_id] ; // increment the reference counter. a task that has locked a mutex 'x' times must release it 'x' times
                            }
                            else // if the mutex is locked, and the calling task is not the owner, it will have to wait
                            {
                                    int32s l_insert_result ; // renamed so it does not shadow the function's l_result

                                    // move the task to the waiting tasks queue of the mutex. this task does not get any CPU time unless moved back to the running queue.
                                    // remember that g_current_task_index is absent from the running queue (because it was dequeued)

                                    if ( (l_insert_result = priority_queue_insert(&lp_mutex->blocked_tasks, (*(g_tcb + g_current_task_index))->priority, g_current_task_index)) != NO_ERROR)
                                    {
                                            software_error("%d %s %d", l_insert_result, __FILE__, __LINE__) ;
                                    }
    
                                    lp_task->status = e_task_blocked_mutex ;
                                    lp_task->blocked_on_primitive = a_id ;
    
                                    scheduler_restore(e_scheduler_enabled) ;
                                    scheduler_reschedule() ;
                                    scheduler_disable(&l_scheduler_state) ; // after a lock attempt has failed, execution resumes here -
                                                                            // which means the scheduler is enabled again
                            }
                    }
    
                    scheduler_restore(l_scheduler_state) ;
            }
            else // report an error
            {
                    software_warning("%d %s %d (%d)\n", ERR_MUTEX_NOT_FOUND, __FILE__, __LINE__, a_id ) ;
    
                    l_result = ERR_MUTEX_NOT_FOUND ;
            }
    
            return l_result ;
    }
    

  • // never use from within interrupt context. see comments for rtos_mutex_lock.
    int32s rtos_mutex_unlock(int32s a_id)
    {
            refuse_interrupt_context(__FILE__, __LINE__) ;
    
            if ( (a_id >=0) && (a_id < MAX_MUTEXES) )
            {
                    int32u   l_scheduler_state ;
                    t_mutex *lp_mutex ;
                    tcb_t   *lp_task ;
    
                    // the kernel is not reentrant; protect static/global data from ISRs
                    scheduler_disable(&l_scheduler_state) ;
    
                    lp_task = *(g_tcb + g_current_task_index) ;
                    lp_mutex = s_mutexes + a_id ;
    
                    if (lp_task->mutex_lock_reference_counters[a_id] > 0)
                    {
                            lp_task->mutex_lock_reference_counters[a_id]-- ; // always decrement the reference counter, no matter what task unlocks the mutex
                    }
    
                    // if the mutex is locked by the calling task and that task has relinquished all its
                    // references to the mutex (a task can lock a mutex multiple times if it is the owner)
                    // - allow other tasks to try to lock it
                    if ( (lp_mutex->state == e_mutex_locked) &&
                             (lp_mutex->owner == g_current_task_index) &&
                             (lp_task->mutex_lock_reference_counters[a_id] == 0) )
                    {
                            int32s l_unblocked_task ;
    
                            // release the mutex, then move one blocked task (if any) back to the running queue,
                            // so that the scheduler can select it

                            lp_mutex->owner = ERR_MUTEX_NOT_FOUND ; // sentinel: the mutex has no owner now
                            lp_mutex->state = e_mutex_unlocked ;
    
                            if (priority_queue_extract_minimum_data(&lp_mutex->blocked_tasks, &l_unblocked_task ) != ERR_QUEUE_EMPTY)
                            {
                                    // WAR STORY
                                    // this is the fairest approach: if a task has been waiting and the lock has become available,
                                    // select the next task from the blocked queue and put it in the right slot in the ready
                                    // list, for later selection. However, this introduces a possible starvation problem - the
                                    // task needs the lock again, but it might be locked by another task once it is scheduled
                                    // again. different approaches, such as granting the CPU to the extracted task in the next
                                    // task switch, are doomed to fail, because they might introduce a priority inversion: if
                                    // the unblocked task was a low priority task that plans to keep the lock for a while, it
                                    // might be preempted by a high priority task which will have to wait.
                                    scheduler_declare_task_ready(l_unblocked_task, g_tcb[l_unblocked_task]->priority) ;
    
                                    scheduler_restore(e_scheduler_enabled) ;
                                    scheduler_reschedule() ;
                            }
                    }
    
                    scheduler_restore(l_scheduler_state) ;
            }
            else
            {
                    software_warning("%d %s %d (%d)\n", ERR_MUTEX_NOT_FOUND, __FILE__, __LINE__, a_id) ;

                    return ERR_MUTEX_NOT_FOUND ; // mirror rtos_mutex_lock: report an invalid mutex id to the caller
            }

            return NO_ERROR ;
    }
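
    A hypothetical caller would pair the two functions like this (rtos_mutex_create is an invented allocator name - the posted code does not show how mutex ids are handed out):

    static int32s s_mutex_id ; /* obtained once, e.g. s_mutex_id = rtos_mutex_create() ; */

    void worker_task(void)
    {
            if (rtos_mutex_lock(s_mutex_id) == NO_ERROR)
            {
                    rtos_mutex_lock(s_mutex_id) ;   /* recursive lock by the owner: reference counter becomes 2 */

                    /* ... critical section ... */

                    rtos_mutex_unlock(s_mutex_id) ; /* counter back to 1 - the mutex is still owned */
                    rtos_mutex_unlock(s_mutex_id) ; /* counter 0 - a blocked task (if any) is made ready */
            }
    }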
    

  • typedef enum
    {
            e_mutex_unlocked = 0,
            e_mutex_locked
    } mutex_state ;
    
    typedef struct
    {
            mutex_state                     state ; // the status of the synchronization element
            int32s                          owner ; // a mutex has one owning task. other tasks must wait for it to be released
            priority_queue_t        blocked_tasks ; // a queue of task ids that are waiting for the mutex to be released
    } t_mutex ;
    
    static t_mutex                     s_mutexes[MAX_MUTEXES] ; // system mutexes
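
    A one-time initialization pass might look like the following sketch (priority_queue_init is an assumed counterpart of the queue calls used above):

    void rtos_mutexes_init(void)
    {
            int32s l_index ;

            for (l_index = 0 ; l_index < MAX_MUTEXES ; ++l_index)
            {
                    s_mutexes[l_index].state = e_mutex_unlocked ;
                    s_mutexes[l_index].owner = ERR_MUTEX_NOT_FOUND ;          /* same 'no owner' sentinel used by rtos_mutex_unlock */
                    priority_queue_init(&s_mutexes[l_index].blocked_tasks) ;  /* assumed init function for priority_queue_t */
            }
    }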
    

  • You would, of course, need your _own_ kernel to use anything like this...