funbase_ip_library/trunk/TUT/ip.swp.api/openmcapi/1.0/libmcapi/shm/shm.c (rev 145)
/*
 * Copyright (c) 2010, Mentor Graphics Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. Neither the name of the <ORGANIZATION> nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


//#define   MCAPI_SM_DBG_SUPPORT

#include <mcapi.h>
#include <openmcapi.h>
#include <atomic.h>
#include "shm.h"
#include "shm_os.h"

#ifdef MCAPI_SM_DBG_SUPPORT
#include <stdio.h>
#endif

#define LOCK   1
#define UNLOCK 0


SHM_MGMT_BLOCK*  SHM_Mgmt_Blk = MCAPI_NULL;
SHM_BUFFER*      SHM_Buff_Array_Ptr;
MCAPI_INTERFACE* SHM_Current_Interface_Ptr;


extern MCAPI_BUF_QUEUE MCAPI_RX_Queue[MCAPI_PRIO_COUNT];
extern mcapi_node_t MCAPI_Node_ID;


static void shm_acquire_lock(shm_lock* plock)
{
        const int lockVal = LOCK;
        unsigned int retVal;

        do {
                retVal = xchg(plock, lockVal);
        } while (retVal == lockVal);
}

static void shm_release_lock(shm_lock* plock)
{
        mb();

        *plock = UNLOCK;
}
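
/* Usage sketch (illustrative only): shm_acquire_lock() spins until the
 * atomic exchange reports that this caller flipped the word from UNLOCK
 * to LOCK, and shm_release_lock() issues a memory barrier before
 * clearing it so prior stores become visible first.  A critical section
 * over shared driver state therefore looks like:
 *
 *     shm_acquire_lock(&SHM_Mgmt_Blk->shm_buff_mgmt_blk.lock);
 *     ... touch shared buffer management state ...
 *     shm_release_lock(&SHM_Mgmt_Blk->shm_buff_mgmt_blk.lock);
 */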

static mcapi_uint32_t get_first_zero_bit(mcapi_int_t value)
{
        mcapi_uint32_t idx;
        mcapi_uint32_t tmp32;

        /* Invert value */
        value = ~value;

        /* (~value) & (2's complement of ~value) isolates the lowest set
         * bit of the inverted word, i.e. the lowest zero bit of the
         * original value; subtracting 1 turns it into a mask of the
         * bits below that position */
        value = (value & (-value)) - 1;

        /* log2 of the isolated bit via a population count of the mask */
        tmp32 = value - ((value >> 1) & 033333333333) - ((value >> 2) & 011111111111);

        idx = ((tmp32 + (tmp32 >> 3)) & 030707070707) % 63;

        /* Obtain index (compiler optimized) */
        //GET_IDX(idx,value);

        return idx;
}
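
/* Worked example (illustrative): for value = 0xB (binary 1011) the first
 * zero bit is bit 2.  After inversion, (~value & -(~value)) isolates that
 * bit as 0x4, and subtracting 1 leaves the mask 0x3.  The octal-constant
 * arithmetic above is a classic 32-bit population count (sum bits in
 * 3-bit groups, fold adjacent groups, reduce mod 63), so popcount(0x3)
 * yields the index 2.  For an all-ones word the mask is 0xFFFFFFFF and
 * the result is 32, which callers reject as >= BITMASK_WORD_SIZE. */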

static mcapi_status_t get_sm_buff_index(mcapi_uint32_t* p_idx)
{
        mcapi_uint32_t i, tmp32;
        mcapi_status_t status = MCAPI_ERR_TRANSMISSION;

        /* Find the first available buffer */
        for (i = 0; i < BITMASK_WORD_COUNT; i++)
        {
                tmp32 = get_first_zero_bit(SHM_Mgmt_Blk->shm_buff_mgmt_blk.buff_bit_mask[i]);

                if (tmp32 < BITMASK_WORD_SIZE)
                {
                        /* Calculate the absolute index of the available buffer */
                        *p_idx = tmp32 + (i * BITMASK_WORD_SIZE);

                        /* Mark the buffer taken */
                        SHM_Mgmt_Blk->shm_buff_mgmt_blk.buff_bit_mask[i] |= 1 << tmp32;

                        status = MCAPI_SUCCESS;

                        break;
                }
        }

        return status;
}

static void clear_sm_buff_index(mcapi_uint32_t idx)
{
        mcapi_uint32_t *word;
        mcapi_uint32_t bit_msk_idx = idx / BITMASK_WORD_SIZE;
        mcapi_uint8_t  bit_idx = idx % BITMASK_WORD_SIZE;

        /* Mark the buffer available */
        word = &SHM_Mgmt_Blk->shm_buff_mgmt_blk.buff_bit_mask[bit_msk_idx];
        *word ^= 1 << bit_idx;
}
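
/* Mapping example (illustrative, taking BITMASK_WORD_SIZE as 32): buffer
 * index 37 lives in mask word 37 / 32 = 1 at bit 37 % 32 = 5.  The XOR
 * clears the bit only if it is currently set, which holds as long as
 * every clear_sm_buff_index() call is paired with a prior successful
 * get_sm_buff_index() for the same index. */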

/*************************************************************************
*
*   FUNCTION
*
*       shm_get_buffer
*
*   DESCRIPTION
*
*       Obtain a shared memory driver buffer.
*
*   INPUTS
*
*       mcapi_node_t        Destination node ID
*       size_t              Requested buffer size
*       mcapi_uint32_t      Priority of the request
*
*   OUTPUTS
*
*       MCAPI_BUFFER*       Pointer to the allocated MCAPI buffer,
*                           or MCAPI_NULL if none is available
*
*************************************************************************/
static MCAPI_BUFFER* shm_get_buffer(mcapi_node_t node_id, size_t size,
                                    mcapi_uint32_t priority)
{
        mcapi_uint32_t  idx;
        mcapi_uint32_t  buf_idx;
        SHM_BUFFER*     p_sm_buff = MCAPI_NULL;
        MCAPI_BUFFER*   p_mcapi_buff = MCAPI_NULL;
        mcapi_status_t  status = MCAPI_SUCCESS;

        /* Acquire lock of the SM buffer management block */
        shm_acquire_lock(&SHM_Mgmt_Blk->shm_buff_mgmt_blk.lock);

        /* Serve high priority (SHM_PRIO_0) requests unconditionally;
         * lower priorities are served only while the used-buffer count
         * stays within the low priority limit */
        if ((priority == SHM_PRIO_0) ||
                (SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_count <= SHM_LOW_PRI_BUF_CONT))
        {
                for (idx = 0; idx < CONFIG_SHM_NR_NODES; idx++)
                {
                        if (SHM_Mgmt_Blk->shm_routes[idx].node_id == node_id)
                        {
                                /* Obtain the index of the first available SM buffer.
                                 * A separate variable is used so the route loop
                                 * index is not clobbered. */
                                status = get_sm_buff_index(&buf_idx);

#ifdef MCAPI_SM_DBG_SUPPORT
                                printf("Get buffer - priority = %d \r\n", priority);
                                printf("Get buffer - obtained buffer index = %d \r\n", buf_idx);
#endif

                                if (status == MCAPI_SUCCESS)
                                {
                                        /* Obtain the address of the SM buffer for the index */
                                        p_sm_buff = (SHM_BUFFER*)OFFSET_TO_ADDRESS(SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_base_offset + (sizeof(SHM_BUFFER) * buf_idx));

                                        /* Obtain pointer to the MCAPI buffer */
                                        p_mcapi_buff = &p_sm_buff->mcapi_buff;

                                        /* Increment the used buffer count */
                                        SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_count++;
                                }

                                break;
                        }
                }
        }

        /* Release lock of the SM buffer management block */
        shm_release_lock(&SHM_Mgmt_Blk->shm_buff_mgmt_blk.lock);

        /* Return an MCAPI buffer to the caller */
        return p_mcapi_buff;
}
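
/* Allocation sketch (illustrative): a caller on the TX path would
 * typically do
 *
 *     MCAPI_BUFFER *buf = shm_get_buffer(dest_node, len, SHM_PRIO_0);
 *     if (buf == MCAPI_NULL)
 *         ... back off: unknown route, or no free shared buffer ...
 *
 * The priority gate above effectively reserves the buffers beyond
 * SHM_LOW_PRI_BUF_CONT for SHM_PRIO_0 traffic. */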

/*************************************************************************
*
*   FUNCTION
*
*       shm_free_buffer
*
*   DESCRIPTION
*
*       Free a shared memory buffer.
*
*   INPUTS
*
*       MCAPI_BUFFER*       Pointer to the MCAPI buffer to free
*
*   OUTPUTS
*
*       None
*
*************************************************************************/
static void shm_free_buffer(MCAPI_BUFFER* buff)
{
        mcapi_uint32_t idx;

        /* Obtain the index of the buffer from the word stored
         * immediately ahead of it */
        idx = *((mcapi_uint32_t*)((mcapi_uint32_t)buff - (sizeof(mcapi_uint32_t))));

        shm_acquire_lock(&SHM_Mgmt_Blk->shm_buff_mgmt_blk.lock);

        /* Mark the buffer available */
        clear_sm_buff_index(idx);

        /* Decrement the used buffer count */
        SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_count--;

        shm_release_lock(&SHM_Mgmt_Blk->shm_buff_mgmt_blk.lock);

#ifdef MCAPI_SM_DBG_SUPPORT
        printf("Free buffer - freed index = %d \r\n", idx);
#endif
}
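
/* Layout note (inferred from the pointer arithmetic above, not from
 * shm.h): recovering idx this way assumes SHM_BUFFER keeps its idx field
 * in the word immediately preceding the embedded mcapi_buff member,
 * roughly
 *
 *     typedef struct
 *     {
 *         ...
 *         mcapi_uint32_t idx;
 *         MCAPI_BUFFER   mcapi_buff;
 *     } SHM_BUFFER;
 *
 * so that (char*)&mcapi_buff - sizeof(mcapi_uint32_t) lands on idx. */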

/*************************************************************************
*
*   FUNCTION
*
*       get_sm_ring_q
*
*   DESCRIPTION
*
*       Obtain the SM packet descriptor ring queue for the requested
*       node ID.
*
*   INPUTS
*
*       mcapi_uint32_t      Destination node ID
*       mcapi_uint32_t*     Pointer to unit ID for the destination node
*
*   OUTPUTS
*
*       SHM_BUFF_DESC_Q*    Pointer to the SM ring queue for the
*                           requested node ID, or MCAPI_NULL if the
*                           node is unknown
*
*************************************************************************/
static SHM_BUFF_DESC_Q* get_sm_ring_q(mcapi_uint32_t node_id,
                                      mcapi_uint32_t *p_unit_id)
{
        int idx;
        mcapi_uint32_t unit_id;
        SHM_BUFF_DESC_Q* p_sm_ring_queue = MCAPI_NULL;

        /* Look up routes for the requested node ID
         * and obtain the corresponding unit ID and SM ring queue */
        for (idx = 0; idx < CONFIG_SHM_NR_NODES; idx++)
        {
                if (SHM_Mgmt_Blk->shm_routes[idx].node_id == node_id)
                {
                        unit_id = SHM_Mgmt_Blk->shm_routes[idx].unit_id;

                        /* Load unit ID for the caller */
                        *p_unit_id = unit_id;

                        /* Obtain pointer to the ring queue; queues are
                         * indexed directly by node ID */
                        p_sm_ring_queue = &SHM_Mgmt_Blk->shm_queues[node_id];

                        break;
                }
        }

        /* Return pointer to the SM ring queue for the node identified */
        return p_sm_ring_queue;
}
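
/* Example (illustrative): in a two-node system whose route table maps
 * node 1 to scheduling unit 0, get_sm_ring_q(1, &unit) returns
 * &SHM_Mgmt_Blk->shm_queues[1] and sets unit to 0.  A node ID with no
 * route entry yields MCAPI_NULL, which shm_tx() below reports as
 * MCAPI_ERR_NODE_NOTINIT. */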

/*************************************************************************
*
*   FUNCTION
*
*       enqueue_sm_ring_q
*
*   DESCRIPTION
*
*       Enqueue a transmission request on the SM descriptor ring queue.
*
*   INPUTS
*
*       SHM_BUFF_DESC_Q*    Pointer to the SM packet descriptor queue
*       mcapi_node_t        Destination node ID
*       MCAPI_BUFFER*       Pointer to the MCAPI buffer (payload)
*       mcapi_priority_t    Priority of the request
*       size_t              Size of the buffer (payload)
*       mcapi_uint8_t       Message type
*
*   OUTPUTS
*
*       mcapi_status_t      Status of the attempt to enqueue
*
*************************************************************************/
static mcapi_status_t enqueue_sm_ring_q(SHM_BUFF_DESC_Q *shm_des_q,
                                        mcapi_node_t node_id,
                                        MCAPI_BUFFER *buff,
                                        mcapi_priority_t priority,
                                        size_t buff_size, mcapi_uint8_t type)
{
        mcapi_uint32_t idx;
        mcapi_status_t status = MCAPI_SUCCESS;
        SHM_BUFF_DESC* shm_desc;

        /* Acquire lock of the SM packet descriptor queue */
        shm_acquire_lock(&shm_des_q->lock);

        /* Obtain the put index into the queue */
        idx = shm_des_q->put_idx;

        if (shm_des_q->count == SHM_BUFF_DESC_Q_SIZE)
        {
                /* Queue is full; fail the enqueue operation */
                status = MCAPI_ERR_TRANSMISSION;
        }
        else
        {
                /* Load the packet descriptor */
                shm_desc = &shm_des_q->pkt_desc_q[idx];
                shm_desc->priority = priority;
                shm_desc->type = type;
                shm_desc->value = ADDRESS_TO_OFFSET(buff);

                shm_des_q->put_idx = (shm_des_q->put_idx + 1) % SHM_BUFF_DESC_Q_SIZE;
                shm_des_q->count++;

                /* Enqueue operation completed successfully */
                status = MCAPI_SUCCESS;
        }

        /* Release lock of the SM packet descriptor queue */
        shm_release_lock(&shm_des_q->lock);

        return status;
}
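
/* Ring behaviour (illustrative trace, assuming SHM_BUFF_DESC_Q_SIZE is
 * 4): from put_idx = 3, count = 3, one enqueue fills pkt_desc_q[3],
 * wraps put_idx to (3 + 1) % 4 = 0 and raises count to 4; the next
 * enqueue then fails with MCAPI_ERR_TRANSMISSION until the receiver's
 * shm_desc_consume() advances get_idx and lowers count.  Descriptors
 * carry shared memory offsets (ADDRESS_TO_OFFSET) rather than raw
 * pointers, so each node can resolve them in its own address space. */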

/*************************************************************************
*
*   FUNCTION
*
*       shm_tx
*
*   DESCRIPTION
*
*       Transmit data using the SM driver.
*
*   INPUTS
*
*       MCAPI_BUFFER*       Pointer to the buffer to transmit
*       size_t              Size of the buffer
*       mcapi_priority_t    Priority of the transmission
*       _mcapi_endpoint*    Pointer to the transmitting endpoint
*
*   OUTPUTS
*
*       mcapi_status_t      Status of the transmission
*
*************************************************************************/
static mcapi_status_t shm_tx(MCAPI_BUFFER *buffer, size_t buffer_size,
                             mcapi_priority_t priority,
                             struct _mcapi_endpoint *tx_endpoint)
{
        SHM_BUFF_DESC_Q* shm_q;
        mcapi_uint32_t  unit_id;
        mcapi_uint32_t  node_id;
        mcapi_status_t  status = MCAPI_SUCCESS;

#ifdef MCAPI_SM_DBG_SUPPORT
        mcapi_uint32_t  add = (mcapi_uint32_t)buffer;
        printf("TX buffer - transmitting buffer address  = %x \r\n", add);
        printf("TX buffer - transmitting buffer size     = %d \r\n", buffer_size);
        printf("TX buffer - transmitting buffer priority = %d \r\n", priority);
#endif

        /* Obtain the SM ring queue for the destination node ID */
        node_id = tx_endpoint->mcapi_foreign_node_id;
        shm_q = get_sm_ring_q(node_id, &unit_id);

        if (shm_q)
        {
                /* Enqueue a request to transmit the data */
                status = enqueue_sm_ring_q(shm_q, node_id, buffer, priority,
                        buffer_size, tx_endpoint->mcapi_chan_type);

                /* Resume tasks suspended on TX */
                mcapi_check_resume(MCAPI_REQ_TX_FIN, tx_endpoint->mcapi_endp_handle,
                                                   MCAPI_NULL, (buffer->buf_size - MCAPI_HEADER_LEN), status);

                /* Start the data transmission */
                if (status == MCAPI_SUCCESS)
                {
                        status = openmcapi_shm_notify(unit_id, node_id);

#ifdef MCAPI_SM_DBG_SUPPORT
                        printf("TX buffer - TX success \r\n");
#endif
                }
        }
        else
        {
                /* TX request to an unrecognized node ID */
                status = MCAPI_ERR_NODE_NOTINIT;

#ifdef MCAPI_SM_DBG_SUPPORT
                printf("TX buffer - TX Failed \r\n");
#endif
        }

        return status;
}
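
/* TX path summary (illustrative): the core reaches this routine through
 * int_ptr->mcapi_tx_output, wired up in openmcapi_shm_init() below, so a
 * send amounts to
 *
 *     buf = shm_get_buffer(node, len, prio);     allocate shared buffer
 *     ... fill buf with header and payload ...
 *     shm_tx(buf, len, prio, endpoint);          enqueue and notify
 *
 * openmcapi_shm_notify() then raises the OS-specific signal that causes
 * the destination unit to run shm_poll(). */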

/*************************************************************************
*
*   FUNCTION
*
*       shm_finalize
*
*   DESCRIPTION
*
*       Finalization sequence for the SM driver.
*
*   INPUTS
*
*       mcapi_node_t        Local node ID
*       SHM_MGMT_BLOCK*     Pointer to the SM driver management
*                           structure
*
*   OUTPUTS
*
*       mcapi_status_t      Finalization status
*
*************************************************************************/
static mcapi_status_t shm_finalize(mcapi_node_t node_id,
                                   SHM_MGMT_BLOCK *SHM_Mgmt_Blk)
{
        int i;
        mcapi_status_t status = MCAPI_ERR_NODE_INITFAILED;

        /* Remove the local node's entry from the route table */
        for (i = 0; i < CONFIG_SHM_NR_NODES; i++)
        {
                if (SHM_Mgmt_Blk->shm_routes[i].node_id == node_id)
                {
                        status = MCAPI_SUCCESS;

                        SHM_Mgmt_Blk->shm_routes[i].node_id = SHM_INVALID_NODE;

                        SHM_Mgmt_Blk->shm_routes[i].unit_id = SHM_INVALID_SCH_UNIT;

                        break;
                }
        }

        /* Return finalization status */
        return status;
}

/*************************************************************************
*
*   FUNCTION
*
*       shm_ioctl
*
*   DESCRIPTION
*
*       IOCTL routine for the shared memory driver interface.
*
*   INPUTS
*
*       optname                 The name of the IOCTL option.
*       *option                 A pointer to memory that will be
*                               filled in if this is a GET option
*                               or the new value if this is a SET
*                               option.
*       optlen                  The length of the memory at option.
*
*   OUTPUTS
*
*       MCAPI_SUCCESS           The call was successful.
*       MCAPI_ERR_ATTR_NUM      Unrecognized option.
*       MCAPI_ERR_ATTR_SIZE     The size of option is invalid.
*
*************************************************************************/
static mcapi_status_t shm_ioctl(mcapi_uint_t optname, void *option,
                                size_t optlen)
{
        mcapi_status_t status = MCAPI_SUCCESS;

        switch (optname)
        {
                /* The total number of buffers in the system. */
                case MCAPI_ATTR_NO_BUFFERS:

                        /* Ensure the buffer can hold the value. */
                        if (optlen >= sizeof(mcapi_uint32_t))
                                *(mcapi_uint32_t *)option = SHM_BUFF_COUNT;
                        else
                                status = MCAPI_ERR_ATTR_SIZE;

                        break;

                /* The maximum size of an interface buffer. */
                case MCAPI_ATTR_BUFFER_SIZE:

                        /* Ensure the buffer can hold the value. */
                        if (optlen >= sizeof(mcapi_uint32_t))
                                *(mcapi_uint32_t *)option = MCAPI_MAX_DATA_LEN;
                        else
                                status = MCAPI_ERR_ATTR_SIZE;

                        break;

                /* The number of buffers available for receiving data. */
                case MCAPI_ATTR_RECV_BUFFERS_AVAILABLE:

                        /* Ensure the buffer can hold the value. */
                        if (optlen >= sizeof(mcapi_uint32_t))
                                *(mcapi_uint32_t *)option = SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_count;
                        else
                                status = MCAPI_ERR_ATTR_SIZE;

                        break;

                /* The number of priorities supported by the interface. */
                case MCAPI_ATTR_NO_PRIORITIES:

                        /* Ensure the buffer can hold the value. */
                        if (optlen >= sizeof(mcapi_uint32_t))
                                *(mcapi_uint32_t *)option = SHM_NUM_PRIORITIES;
                        else
                                status = MCAPI_ERR_ATTR_SIZE;

                        break;

                /* Shut the driver down. */
                case MCAPI_FINALIZE_DRIVER:

                        /* Finalize the OS layer */
                        status = openmcapi_shm_os_finalize();

                        if (status == MCAPI_SUCCESS)
                        {
                                /* Finalize the SM driver */
                                status = shm_finalize(MCAPI_Node_ID, SHM_Mgmt_Blk);
                        }

                        if (status == MCAPI_SUCCESS)
                        {
                                /* Unmap the SM device */
                                openmcapi_shm_unmap((void*)SHM_Mgmt_Blk);
                        }

                        break;

                default:

                        status = MCAPI_ERR_ATTR_NUM;
                        break;
        }

        return status;
}
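
/* Query sketch (illustrative): the core calls this routine through
 * int_ptr->mcapi_ioctl, e.g.
 *
 *     mcapi_uint32_t nbufs;
 *     status = shm_ioctl(MCAPI_ATTR_NO_BUFFERS, &nbufs, sizeof(nbufs));
 *
 * which stores SHM_BUFF_COUNT into nbufs, or fails with
 * MCAPI_ERR_ATTR_SIZE if the supplied buffer is narrower than 32 bits. */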

/*************************************************************************
*
*   FUNCTION
*
*       shm_master_node_init
*
*   DESCRIPTION
*
*       Initialize the shared memory driver as the master node.
*
*   INPUTS
*
*       mcapi_node_t        Local node ID
*       SHM_MGMT_BLOCK*     Pointer to the SM driver management
*                           structure
*
*   OUTPUTS
*
*       mcapi_status_t      Initialization status
*
*************************************************************************/
static mcapi_status_t shm_master_node_init(mcapi_node_t node_id,
                                           SHM_MGMT_BLOCK* SHM_Mgmt_Blk)
{
        int i;
        mcapi_status_t status = MCAPI_SUCCESS;

        /* The current node is the first node executing in the system.
         * Initialize the SM driver as the master node. */

        /* Initialize routes and SM buffer queue data structures */
        for (i = 0; i < CONFIG_SHM_NR_NODES; i++)
        {
                /* Initialize routes */
                SHM_Mgmt_Blk->shm_routes[i].node_id = SHM_INVALID_NODE;
                SHM_Mgmt_Blk->shm_routes[i].unit_id = SHM_INVALID_SCH_UNIT;
        }

        /* Initialize the base of the shared memory buffers: the first
         * 4K-aligned address above the management block */
        SHM_Buff_Array_Ptr = (SHM_BUFFER*)((((mcapi_uint32_t)SHM_Mgmt_Blk & (~(SHM_4K_ALIGN_SIZE - 1))) + SHM_4K_ALIGN_SIZE));

        /* Obtain the offset of the SM buffer space */
        SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_base_offset = ADDRESS_TO_OFFSET(SHM_Buff_Array_Ptr);

        /* Initialize all SM buffers */
        for (i = 0; i < SHM_BUFF_COUNT; i++)
        {
                /* Initialize index and offset */
                SHM_Buff_Array_Ptr[i].idx = i;
        }

        /* Make all SM buffers available */
        for (i = 0; i < BITMASK_WORD_COUNT; i++)
        {
                SHM_Mgmt_Blk->shm_buff_mgmt_blk.buff_bit_mask[i] = 0;
        }

        /* Initialize the used buffer count */
        SHM_Mgmt_Blk->shm_buff_mgmt_blk.shm_buff_count = 0;

        /* Load the route for the current node */
        SHM_Mgmt_Blk->shm_routes[0].node_id = node_id;
        SHM_Mgmt_Blk->shm_routes[0].unit_id = openmcapi_shm_schedunitid();

        /* Load the shared memory initialization complete key */
        SHM_Mgmt_Blk->shm_init_field = SHM_INIT_COMPLETE_KEY;

        /* Return master node initialization status */
        return status;
}
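
/* Shared memory layout implied by the code above (illustrative):
 *
 *     mapping base:      SHM_MGMT_BLOCK  (init key, routes, queues,
 *                                         buffer management block)
 *     next 4K boundary:  SHM_BUFFER[0] .. SHM_BUFFER[SHM_BUFF_COUNT - 1]
 *
 * Cross-node references are stored as offsets from the mapping base
 * (ADDRESS_TO_OFFSET / OFFSET_TO_ADDRESS), so nodes may map the region
 * at different virtual addresses. */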

/*************************************************************************
*
*   FUNCTION
*
*       shm_slave_node_init
*
*   DESCRIPTION
*
*       Initialization sequence for the SM driver on a slave node.
*
*   INPUTS
*
*       mcapi_node_t        Local node ID
*       SHM_MGMT_BLOCK*     Pointer to the SM driver management
*                           structure
*
*   OUTPUTS
*
*       mcapi_status_t      Initialization status
*
*************************************************************************/
static mcapi_status_t shm_slave_node_init(mcapi_node_t node_id,
                                          SHM_MGMT_BLOCK* SHM_Mgmt_Blk)
{
        int i;
        mcapi_status_t status = MCAPI_SUCCESS;

        /* The SM driver has already been initialized by the master node;
         * perform the slave portion of the initialization. */

        /* Make sure the current node has not already been initialized */
        for (i = 0; i < CONFIG_SHM_NR_NODES; i++)
        {
                if (SHM_Mgmt_Blk->shm_routes[i].node_id == node_id)
                {
                        status = MCAPI_ERR_NODE_INITFAILED;

                        break;
                }
        }

        if (status == MCAPI_SUCCESS)
        {
                /* Load the route for the current node into the first
                 * free route entry */
                for (i = 0; i < CONFIG_SHM_NR_NODES; i++)
                {
                        if (SHM_Mgmt_Blk->shm_routes[i].node_id == SHM_INVALID_NODE)
                        {
                                SHM_Mgmt_Blk->shm_routes[i].node_id = node_id;

                                SHM_Mgmt_Blk->shm_routes[i].unit_id = openmcapi_shm_schedunitid();

                                break;
                        }
                }
        }

        /* Return slave node initialization status */
        return status;
}

/*************************************************************************
*
*   FUNCTION
*
*       openmcapi_shm_init
*
*   DESCRIPTION
*
*       Initialize the shared memory driver interface.
*
*   INPUTS
*
*       mcapi_node_t        Local node ID
*       MCAPI_INTERFACE*    Pointer to the interface control block
*
*   OUTPUTS
*
*       mcapi_status_t      Return status of initialization
*
*************************************************************************/
mcapi_status_t openmcapi_shm_init(mcapi_node_t node_id,
                                  MCAPI_INTERFACE* int_ptr)
{
        mcapi_status_t status = MCAPI_SUCCESS;

        if (node_id >= CONFIG_SHM_NR_NODES)
                return MCAPI_ERR_NODE_INVALID;

        /* Store the name of this interface. */
        memcpy(int_ptr->mcapi_int_name, OPENMCAPI_SHM_NAME, MCAPI_INT_NAME_LEN);

        /* Set the maximum buffer size for incoming / outgoing data. */
        int_ptr->mcapi_max_buf_size = MCAPI_MAX_DATA_LEN;

        /* Set up function pointers for sending data, reserving an outgoing
         * driver buffer, returning the buffer to the free list, and
         * issuing ioctl commands.
         */
        int_ptr->mcapi_tx_output = shm_tx;
        int_ptr->mcapi_get_buffer = shm_get_buffer;
        int_ptr->mcapi_recover_buffer = shm_free_buffer;
        int_ptr->mcapi_ioctl = shm_ioctl;

        /* Obtain the shared memory base address */
        SHM_Mgmt_Blk = openmcapi_shm_map();

        if (SHM_Mgmt_Blk != MCAPI_NULL)
        {
                /* Initialize the OS specific component */
                openmcapi_shm_os_init();

                /* Obtain the SM driver initialization lock */
                shm_acquire_lock(&SHM_Mgmt_Blk->shm_init_lock);

                /* Has another node already completed SM driver initialization? */
                if (SHM_Mgmt_Blk->shm_init_field != SHM_INIT_COMPLETE_KEY)
                {
                        /* Initialize the SM driver as the master node */
                        status = shm_master_node_init(node_id, SHM_Mgmt_Blk);
                }
                else
                {
                        /* Initialize the SM driver as a slave node */
                        status = shm_slave_node_init(node_id, SHM_Mgmt_Blk);
                }

                /* Release the SM driver initialization lock */
                shm_release_lock(&SHM_Mgmt_Blk->shm_init_lock);
        }
        else
        {
                status = MCAPI_ERR_GENERAL;
        }

        /* Store a pointer to the local interface */
        SHM_Current_Interface_Ptr = int_ptr;

        /* Return status to the caller */
        return status;
}
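
/* Init sketch (illustrative): the OpenMCAPI core invokes this entry
 * point while bringing up a node, roughly
 *
 *     MCAPI_INTERFACE iface;
 *     mcapi_status_t  status = openmcapi_shm_init(node_id, &iface);
 *
 * The first node to take shm_init_lock finds shm_init_field unset and
 * becomes the master, laying out buffers and routes; later nodes see
 * SHM_INIT_COMPLETE_KEY and only register their own route. */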

/* Return the first pending descriptor, or NULL. */
static SHM_BUFF_DESC* shm_desc_get_next(SHM_BUFF_DESC_Q* shm_des_q)
{
        if (shm_des_q->count)
                return &shm_des_q->pkt_desc_q[shm_des_q->get_idx];

        return MCAPI_NULL;
}

/* Make the first pending descriptor available to producers again. */
static void shm_desc_consume(SHM_BUFF_DESC_Q* shm_des_q)
{
        shm_acquire_lock(&shm_des_q->lock);

        /* Update index and count */
        shm_des_q->get_idx = (shm_des_q->get_idx + 1) % SHM_BUFF_DESC_Q_SIZE;
        shm_des_q->count--;

        shm_release_lock(&shm_des_q->lock);
}

/*************************************************************************
*
*   FUNCTION
*
*       shm_poll
*
*   DESCRIPTION
*
*       RX HISR for the shared memory driver.
*
*   INPUTS
*
*       None
*
*   OUTPUTS
*
*       None
*
*************************************************************************/
void shm_poll(void)
{
        SHM_BUFF_DESC_Q* shm_des_q;
        SHM_BUFF_DESC*   shm_des;
        MCAPI_BUFFER*    rcvd_pkt;
        int              got_data = 0;

#ifdef MCAPI_SM_DBG_SUPPORT
        mcapi_uint32_t  add;
        printf("Received data\r\n");
#endif

        /* Obtain the SM ring queue for the current node ID */
        shm_des_q = &SHM_Mgmt_Blk->shm_queues[MCAPI_Node_ID];

        /* Enqueue all available data packets for this node */
        for (;;)
        {
                /* Get the next available SM buffer descriptor */
                shm_des = shm_desc_get_next(shm_des_q);

                if (shm_des != MCAPI_NULL)
                {
                        if (shm_des->priority < SHM_NUM_PRIORITIES)
                        {
                                /* Check the packet type */
                                if ((shm_des->type == MCAPI_MSG_TYPE) || \
                                        (shm_des->type == MCAPI_CHAN_PKT_TYPE) || \
                                        (shm_des->type == MCAPI_CHAN_SCAL_TYPE))
                                {
                                        /* Packet buffer handling */
                                        rcvd_pkt = (MCAPI_BUFFER*)OFFSET_TO_ADDRESS(shm_des->value);

                                        /* Load the current SM interface pointer */
                                        rcvd_pkt->mcapi_dev_ptr = (MCAPI_POINTER)SHM_Current_Interface_Ptr;

                                        /* Enqueue the received packet on the global queue */
                                        mcapi_enqueue(&MCAPI_RX_Queue[shm_des->priority], rcvd_pkt);

#ifdef MCAPI_SM_DBG_SUPPORT
                                        /* Debug prints are kept inside this branch so
                                         * rcvd_pkt is always valid when referenced */
                                        add = (mcapi_uint32_t)rcvd_pkt;
                                        printf(" RX HISR - received buffer address = %x \r\n", add);
                                        add = rcvd_pkt->buf_size;
                                        printf(" RX HISR - received buffer size = %d \r\n", add);
                                        printf(" RX HISR - received buffer priority = %d \r\n", shm_des->priority);
#endif
                                }
                                else
                                {
                                        /* Scalar packet handling (not implemented) */
                                }

                                got_data = 1;
                        }

                        /* Consume the current SM buffer descriptor */
                        shm_desc_consume(shm_des_q);
                }
                else
                {
                        break;
                }
        }

        /* Set the notification event */
        if (got_data)
                MCAPI_Set_RX_Event();
}
