merge default into stable for release v1.01
dimonomid committed Oct 9, 2014
2 parents 97f808e + c4b3e44 commit af9f16e
Showing 18 changed files with 531 additions and 154 deletions.
12 changes: 6 additions & 6 deletions README.md
@@ -5,17 +5,17 @@ TNeoKernel is a compact and fast real-time kernel for embedded 32/16-bit
microprocessors. It performs preemptive priority-based scheduling and
round-robin scheduling for tasks with identical priority.

TNeoKernel was born as a thorough review and re-implementation of TNKernel. The new kernel has well-formed code; inherited bugs are fixed, new features are added, and it is well documented and carefully tested with unit tests.
TNeoKernel was born as a thorough review and re-implementation of [TNKernel](http://tnkernel.com) v2.7. The new kernel has well-formed code; inherited bugs are fixed, new features are added, and it is well documented and carefully tested with unit tests.

Currently it is available for PIC32 only, but will probably be ported to other architectures. Tested on PIC32MX.

Comprehensive documentation is available in two forms:
Comprehensive documentation is available in two forms: html and pdf.

* [Latest stable TNeoKernel (html)](http://goo.gl/bwyAxZ)
* [Latest stable TNeoKernel (pdf)](http://goo.gl/d9W9HE)
* Latest stable TNeoKernel: [html](http://goo.gl/bwyAxZ), [pdf](http://goo.gl/d9W9HE)
* Current development TNeoKernel BETA: [html](http://goo.gl/6S6Lv6), [pdf](http://goo.gl/c2Fp6e)
* [Changelog](http://goo.gl/N9v65n)


Index of all docs available can be found [here](http://goo.gl/HJFOqe).
Index of all available docs can be found [here](http://goo.gl/HJFOqe).

Documentation is generated by means of [doxygen](http://goo.gl/RQHRYr).

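Most of this commit reworks the internals of the data-queue, event-group, and fixed-size memory pool subsystems shown below. For orientation, here is a minimal, hedged sketch of how the data-queue API is used from application code. `tn_queue_create()`, `tn_queue_send()`, and `tn_queue_receive()` are real kernel calls, but the header name, the timeout type, and the exact parameter order are quoted from memory and may differ in this release, so check the documentation linked above; the task-side objects here are hypothetical.

//-- Hedged sketch, not part of this commit: one task sends pointers into a
//   data queue, another receives them, each blocking up to 50 system ticks.
#include "tn.h"   //-- assumed umbrella kernel header

#define QUEUE_LEN  4
static void            *queue_fifo[QUEUE_LEN];
static struct TN_DQueue my_queue;

void queue_init(void)
{
   //-- parameter order assumed: queue object, FIFO buffer, item count
   tn_queue_create(&my_queue, queue_fifo, QUEUE_LEN);
}

void producer_step(void *p_msg)
{
   //-- wait up to 50 ticks for room in the queue
   if (tn_queue_send(&my_queue, p_msg, 50) != TN_RC_OK){
      //-- queue stayed full: drop the message or retry
   }
}

void consumer_step(void)
{
   void *p_msg = NULL;

   //-- wait up to 50 ticks for an item; as of this commit, p_msg is
   //   written only when the result is TN_RC_OK
   if (tn_queue_receive(&my_queue, &p_msg, 50) == TN_RC_OK){
      //-- process p_msg
   }
}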
147 changes: 86 additions & 61 deletions src/core/tn_dqueue.c
@@ -166,28 +166,59 @@ static enum TN_RCode _fifo_read(struct TN_DQueue *dque, void **pp_data)
}
// }}}

static void _cb_before_task_wait_complete__send(
struct TN_Task *task,
void *user_data_1,
void *user_data_2
)
{
//-- before task is woken up, set data that it is waiting for
task->subsys_wait.dqueue.data_elem = user_data_1;
}

static void _cb_before_task_wait_complete__receive_ok(
struct TN_Task *task,
void *user_data_1,
void *user_data_2
)
{
struct TN_DQueue *dque = (struct TN_DQueue *)user_data_1;

//-- put to data FIFO
enum TN_RCode rc = _fifo_write(dque, task->subsys_wait.dqueue.data_elem);
if (rc != TN_RC_OK){
_TN_FATAL_ERROR("rc should always be TN_RC_OK here");
}
}

static void _cb_before_task_wait_complete__receive_timeout(
struct TN_Task *task,
void *user_data_1,
void *user_data_2
)
{
//-- this callback is called when the data FIFO is empty but some task
// waits to send (that might happen only if dque->items_cnt is 0)

void **pp_data = (void **)user_data_1;

*pp_data = task->subsys_wait.dqueue.data_elem; //-- Return to caller
}


static enum TN_RCode _queue_send(
struct TN_DQueue *dque,
void *p_data
)
{
enum TN_RCode rc = TN_RC_OK;

if (!tn_is_list_empty(&(dque->wait_receive_list))){
struct TN_Task *task;
//-- there are tasks waiting for message,
// so, wake up first one

//-- get first task from wait_receive_list
task = tn_list_first_entry(
&(dque->wait_receive_list), typeof(*task), task_queue
);

task->subsys_wait.dqueue.data_elem = p_data;

_tn_task_wait_complete(task, TN_RC_OK);
} else {
//-- the data queue's wait_receive list is empty
if ( !_tn_task_first_wait_complete(
&dque->wait_receive_list, TN_RC_OK,
_cb_before_task_wait_complete__send, p_data, NULL
)
)
{
//-- the data queue's wait_receive list is empty
rc = _fifo_write(dque, p_data);
}

@@ -205,58 +236,45 @@ static enum TN_RCode _queue_receive(

switch (rc){
case TN_RC_OK:
//-- data is successfully read from the queue.
// Let's check whether there is some task that
// wants to write more data, and waits for room
if (!tn_is_list_empty(&(dque->wait_send_list))){
struct TN_Task *task;
//-- there are tasks that want to write data

task = tn_list_first_entry(
&(dque->wait_send_list),
typeof(*task),
task_queue
);

rc = _fifo_write(dque, task->subsys_wait.dqueue.data_elem); //-- Put to data FIFO
if (rc != TN_RC_OK){
_TN_FATAL_ERROR("rc should always be TN_RC_OK here");
}

_tn_task_wait_complete(task, TN_RC_OK);
}
//-- successfully read item from the queue.
// if there are tasks that wait to send data to the queue,
// wake the first one up, since there is room now.
_tn_task_first_wait_complete(
&dque->wait_send_list, TN_RC_OK,
_cb_before_task_wait_complete__receive_ok, dque, NULL
);
break;

case TN_RC_TIMEOUT:
//-- data FIFO is empty, there's nothing to read.
// let's check if some task waits to write
//-- nothing to read from the queue.
// Let's check whether some task wants to send data
// (that might happen only if dque->items_cnt is 0)
if (!tn_is_list_empty(&(dque->wait_send_list))){
struct TN_Task *task;
//-- there are tasks that want to write data
// (that might happen only if dque->items_cnt is 0)

task = tn_list_first_entry(
&(dque->wait_send_list),
typeof(*task),
task_queue
);

*pp_data = task->subsys_wait.dqueue.data_elem; //-- Return to caller
_tn_task_wait_complete(task, TN_RC_OK);

if ( _tn_task_first_wait_complete(
&dque->wait_send_list, TN_RC_OK,
_cb_before_task_wait_complete__receive_timeout, pp_data, NULL
)
)
{
//-- that might happen only if dque->items_cnt is 0:
// data was read to `pp_data` in the
// `_cb_before_task_wait_complete__receive_timeout()`
rc = TN_RC_OK;
} else {
//-- wait_send_list is empty.
}
break;
default:
//-- there's some abnormal error, we should leave return code as is

case TN_RC_WPARAM:
//-- do nothing, just return this error
break;

default:
_TN_FATAL_ERROR(
"rc should be TN_RC_OK, TN_RC_TIMEOUT or TN_RC_WPARAM here"
);
break;
}

return rc;
}
}


static enum TN_RCode _dqueue_job_perform(
@@ -326,15 +344,22 @@ static enum TN_RCode _dqueue_job_perform(
TN_INT_RESTORE();
_tn_switch_context_if_needed();
if (waited){

//-- get wait result
rc = tn_curr_run_task->task_wait_rc;

switch (job_type){
case _JOB_TYPE__SEND:
rc = tn_curr_run_task->task_wait_rc;
//-- do nothing special
break;
case _JOB_TYPE__RECEIVE:
//-- dqueue.data_elem should contain valid value now,
// return it to caller
*pp_data = tn_curr_run_task->subsys_wait.dqueue.data_elem;
rc = tn_curr_run_task->task_wait_rc;
//-- if wait result is TN_RC_OK, copy received pointer to the
// user's location
if (rc == TN_RC_OK){
//-- dqueue.data_elem should contain valid value now,
// return it to caller
*pp_data = tn_curr_run_task->subsys_wait.dqueue.data_elem;
}
break;
}
}
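The new code above replaces the open-coded wake-up sequence (check the wait list, take the first waiting task, store its data, call `_tn_task_wait_complete()`) with a single kernel helper, `_tn_task_first_wait_complete()`, plus a small per-case callback; the same refactor appears in src/core/tn_fmem.c below. The helper itself is not part of this diff, so the following is only a sketch of its presumable behavior, reconstructed from the code it replaces here; the callback typedef name, the function's return type, and its exact body are assumptions.

//-- Illustrative sketch only; reconstructed from the removed code above,
//   not the kernel's actual implementation.
typedef void (_TN_CBBeforeTaskWaitComplete)(
      struct TN_Task *task,
      void           *user_data_1,
      void           *user_data_2
      );

static int _tn_task_first_wait_complete_sketch(
      struct TN_ListItem            *wait_queue,
      enum TN_RCode                  wait_rc,
      _TN_CBBeforeTaskWaitComplete  *callback,
      void                          *user_data_1,
      void                          *user_data_2
      )
{
   int woken_up = 0;

   if (!tn_is_list_empty(wait_queue)){
      struct TN_Task *task = tn_list_first_entry(
            wait_queue, typeof(*task), task_queue
            );

      //-- let the caller store task-specific data (queue item, memory
      //   block pointer, etc.) before the task is actually woken up
      if (callback != NULL){
         callback(task, user_data_1, user_data_2);
      }

      _tn_task_wait_complete(task, wait_rc);
      woken_up = 1;
   }

   //-- non-zero if some waiting task was woken up; the callers above fall
   //   back to the FIFO / free list when this returns zero
   return woken_up;
}

Centralizing this sequence removes several copies of the same list-handling code and keeps the "store data before waking" step in one clearly named callback per subsystem.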
10 changes: 6 additions & 4 deletions src/core/tn_eventgrp.c
@@ -356,13 +356,15 @@ enum TN_RCode tn_eventgrp_wait(
TN_INT_RESTORE();
_tn_switch_context_if_needed();
if (waited_for_event){
if ( tn_curr_run_task->task_wait_rc == TN_RC_OK
&& p_flags_pattern != NULL )
{
//-- get wait result
rc = tn_curr_run_task->task_wait_rc;

//-- if wait result is TN_RC_OK, and p_flags_pattern is provided,
// copy actual_pattern there
if (rc == TN_RC_OK && p_flags_pattern != NULL ){
*p_flags_pattern =
tn_curr_run_task->subsys_wait.eventgrp.actual_pattern;
}
rc = tn_curr_run_task->task_wait_rc;
}

out:
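For callers, the change above means `*p_flags_pattern` is written only when the wait result is `TN_RC_OK`. A minimal, hedged usage sketch follows; the wait-mode constant and the exact parameter types are quoted from memory and may differ slightly in this release, and `my_eventgrp` and the flag values are hypothetical (kernel headers as in the sketch after the README above).

//-- Hedged sketch, not part of this commit.
static struct TN_EventGrp my_eventgrp;

void wait_for_flags(void)
{
   TN_UWord pattern = 0;

   //-- wait up to 100 system ticks until flag 0 or flag 1 gets set
   enum TN_RCode rc = tn_eventgrp_wait(
         &my_eventgrp,
         (1 << 0) | (1 << 1),
         TN_EVENTGRP_WMODE_OR,      //-- wake on any of the requested flags
         &pattern,
         100
         );

   if (rc == TN_RC_OK){
      //-- pattern now holds the flags that actually fired
   } else {
      //-- timeout or error: pattern was left untouched
   }
}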
49 changes: 30 additions & 19 deletions src/core/tn_fmem.c
@@ -113,6 +113,15 @@ static inline enum TN_RCode _check_param_job_perform(
#endif
// }}}

static void _cb_before_task_wait_complete(
struct TN_Task *task,
void *user_data_1,
void *user_data_2
)
{
task->subsys_wait.fmem.data_elem = user_data_1;
}

static inline enum TN_RCode _fmem_get(struct TN_FMem *fmem, void **p_data)
{
enum TN_RCode rc;
@@ -122,9 +131,7 @@ static inline enum TN_RCode _fmem_get(struct TN_FMem *fmem, void **p_data)
ptr = fmem->free_list;
fmem->free_list = *(void **)fmem->free_list; //-- ptr - to new free list
fmem->free_blocks_cnt--;
}

if (ptr != NULL){
*p_data = ptr;
rc = TN_RC_OK;
} else {
@@ -136,22 +143,14 @@ static inline enum TN_RCode _fmem_get(struct TN_FMem *fmem, void **p_data)

static inline enum TN_RCode _fmem_release(struct TN_FMem *fmem, void *p_data)
{
struct TN_Task *task;

enum TN_RCode rc = TN_RC_OK;

if (!tn_is_list_empty(&(fmem->wait_queue))){
//-- there are task(s) waiting for a free memory block,
// so, pass given memory block to the first task in the queue.

task = tn_list_first_entry(
&(fmem->wait_queue), typeof(*task), task_queue
);

task->subsys_wait.fmem.data_elem = p_data;

_tn_task_wait_complete(task, TN_RC_OK);
} else {
if ( !_tn_task_first_wait_complete(
&fmem->wait_queue, TN_RC_OK,
_cb_before_task_wait_complete, p_data, NULL
)
)
{
//-- no task is waiting for a free memory block, so
// insert it into the memory pool

@@ -161,6 +160,13 @@ static inline enum TN_RCode _fmem_release(struct TN_FMem *fmem, void *p_data)
fmem->free_list = p_data;
fmem->free_blocks_cnt++;
} else {
#if TN_DEBUG
if (fmem->free_blocks_cnt > fmem->blocks_cnt){
_TN_FATAL_ERROR(
"free_blocks_cnt should never be more than blocks_cnt"
);
}
#endif
//-- the memory pool already has all the blocks free
rc = TN_RC_OVERFLOW;
}
@@ -332,11 +338,16 @@ enum TN_RCode tn_fmem_get(
TN_INT_RESTORE();
_tn_switch_context_if_needed();
if (waited_for_data){
//-- now, fmem.data_elem field should contain valid value, so,
// return it to caller.
*p_data = tn_curr_run_task->subsys_wait.fmem.data_elem;

//-- get wait result
rc = tn_curr_run_task->task_wait_rc;

//-- if wait result is TN_RC_OK, copy memory block pointer to the
// user's location
if (rc == TN_RC_OK){
*p_data = tn_curr_run_task->subsys_wait.fmem.data_elem;
}

}

out:
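The same rule now applies to `tn_fmem_get()` with a timeout: the block pointer is copied to the caller only when the result is `TN_RC_OK`. A hedged usage sketch of a fixed-size memory pool follows; the `tn_fmem_create()` parameter order is quoted from memory and may differ in this release, and the message struct and pool objects are hypothetical. Real code should also make sure the block size is aligned to the machine word (the kernel provides a helper macro for that; see the fixed-memory docs).

//-- Hedged sketch, not part of this commit.
struct MyMsg {
   int   code;
   void *payload;
};

//-- block storage for 8 blocks; word-aligned here because of the struct layout
static struct MyMsg  my_fmem_buf[8];
static struct TN_FMem my_fmem;

void pool_init(void)
{
   //-- parameter order assumed: pool object, buffer, block size, block count
   tn_fmem_create(&my_fmem, my_fmem_buf, sizeof(struct MyMsg), 8);
}

void pool_use(void)
{
   void *p_block = NULL;

   //-- wait up to 10 system ticks for a free block; as of this commit,
   //   p_block is written only when the result is TN_RC_OK
   if (tn_fmem_get(&my_fmem, &p_block, 10) == TN_RC_OK){
      struct MyMsg *msg = (struct MyMsg *)p_block;
      msg->code = 1;
      //-- ... use the block, then give it back to the pool
      tn_fmem_release(&my_fmem, p_block);
   } else {
      //-- timeout: p_block was not modified
   }
}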