sheepy

build system (sheepy) and package manager (spm) for C
git clone https://spartatek.se/git/sheepy.git
Log | Files | Refs | README | LICENSE

TokenQueue.i (14861B)


      1 /* -*- C++ -*- vim: set syntax=cpp: 
      2  * (C) 2004-2009 Frank-Rene Schaefer                               */
      3 #ifndef __QUEX_INCLUDE_GUARD__TOKEN__TOKEN_QUEUE_I
      4 #define __QUEX_INCLUDE_GUARD__TOKEN__TOKEN_QUEUE_I
      5 
      6 #include "definitions"
      7 #include "asserts"
      8 #include "MemoryManager"
      9 
     10 /* NOTE: QUEX_TYPE_TOKEN must be defined at this place! */
     11 
     12 
     13 QUEX_NAMESPACE_MAIN_OPEN
     14 
     15     QUEX_INLINE void
     16     QUEX_NAME(TokenQueue_construct)(QUEX_NAME(TokenQueue)* me, 
     17                                     QUEX_TYPE_TOKEN*       Memory, 
     18                                     const size_t           N)
     19     /* me:     The token queue.
     20      * Memory: Pointer to memory of token queue, 0x0 --> no initial memory.
     21      * N:      Number of token objects that the array can carry.               */
     22     {
     23 #       if ! defined(QUEX_OPTION_USER_MANAGED_TOKEN_MEMORY)
     24         QUEX_TYPE_TOKEN* iterator   = 0x0;
     25 #       endif
     26         QUEX_TYPE_TOKEN* memory_end = &Memory[N];
     27 
     28         __quex_assert(Memory != 0x0);
     29         __quex_assert(N > (size_t)QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER);
     30 
     31 #       if ! defined(QUEX_OPTION_USER_MANAGED_TOKEN_MEMORY)
     32         /* Call placement new (plain constructor) for all tokens in chunk. */
     33         for(iterator = Memory; iterator != memory_end; ++iterator) {
     34             QUEX_NAME_TOKEN(construct)(iterator);
     35         }
     36 #       endif
     37         QUEX_NAME(TokenQueue_init)(me, Memory, memory_end); 
     38     }
     39 
     40     QUEX_INLINE void
     41     QUEX_NAME(TokenQueue_reset)(QUEX_NAME(TokenQueue)* me) 
     42     {                                                    
     43         me->read_iterator  = (QUEX_TYPE_TOKEN*)me->begin; 
     44         me->write_iterator = (QUEX_TYPE_TOKEN*)me->begin; 
     45 
     46         __quex_assert(  me->end - me->begin 
     47                       > (ptrdiff_t)QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER);
     48         __quex_assert(   me->end_minus_safety_border 
     49                       == me->end - (ptrdiff_t)QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER);         
     50     }
     51 
     52     QUEX_INLINE void
     53     QUEX_NAME(TokenQueue_init)(QUEX_NAME(TokenQueue)* me, 
     54                                QUEX_TYPE_TOKEN* Memory, 
     55                                QUEX_TYPE_TOKEN* MemoryEnd) 
     56     {
     57         __quex_assert(  MemoryEnd - Memory 
     58                       > (ptrdiff_t)QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER);
     59         me->begin                   = Memory;                           
     60         me->end                     = MemoryEnd;                        
     61         me->end_minus_safety_border = MemoryEnd - QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER;         
     62         QUEX_NAME(TokenQueue_reset)(me);                                
     63     }
     64 
     65 #   if defined(QUEX_OPTION_USER_MANAGED_TOKEN_MEMORY)
     66     QUEX_INLINE void
     67     QUEX_NAME(TokenQueue_disfunctionality_set)(QUEX_NAME(TokenQueue)* me) 
     68     {
     69         me->begin                   = (QUEX_TYPE_TOKEN*)0;                           
     70         me->end                     = (QUEX_TYPE_TOKEN*)0;
     71         me->read_iterator           = (QUEX_TYPE_TOKEN*)0; 
     72         me->write_iterator          = (QUEX_TYPE_TOKEN*)0; 
     73         me->end_minus_safety_border = (QUEX_TYPE_TOKEN*)0;
     74     }
     75 
     76     QUEX_INLINE bool
     77     QUEX_NAME(TokenQueue_disfunctionality_check)(QUEX_NAME(TokenQueue)* me) 
     78     {
     79         return    me->begin                   == (QUEX_TYPE_TOKEN*)0                           
     80                && me->end                     == (QUEX_TYPE_TOKEN*)0
     81                && me->read_iterator           == (QUEX_TYPE_TOKEN*)0 
     82                && me->write_iterator          == (QUEX_TYPE_TOKEN*)0
     83                && me->end_minus_safety_border == (QUEX_TYPE_TOKEN*)0;
     84     }
     85 #   endif
     86 
     87     QUEX_INLINE void
     88     QUEX_NAME(TokenQueue_destruct)(QUEX_NAME(TokenQueue)* me)
     89     {
     90 #       if ! defined(QUEX_OPTION_USER_MANAGED_TOKEN_MEMORY)
     91         QUEX_TYPE_TOKEN* iterator = 0x0;
     92         /* Call explicit destructors for all tokens in array */
     93         for(iterator = me->begin; iterator != me->end; ++iterator) {
     94             QUEX_NAME_TOKEN(destruct)(iterator);
     95         }
     96         /* The memory chunk for the token queue itself is located 
     97          * inside the analyzer object. Thus, no explicit free is
     98          * necessary. In case of user managed token queue memory
     99          * the user takes care of the deletion.                   */
    100 #       endif
    101     }
    102 
    103     QUEX_INLINE void   
    104     QUEX_NAME(TokenQueue_remainder_get)(QUEX_NAME(TokenQueue)* me,
    105                                         QUEX_TYPE_TOKEN**      begin,
    106                                         QUEX_TYPE_TOKEN**      end)
    107     {
    108         *begin = me->read_iterator;
    109         *end   = me->write_iterator;
    110         QUEX_NAME(TokenQueue_reset)(me);
    111     }
    112 
    113     QUEX_INLINE void 
    114     QUEX_NAME(TokenQueue_memory_get)(QUEX_NAME(TokenQueue)* me,
    115                                      QUEX_TYPE_TOKEN**      memory,
    116                                      size_t*                n)
    117     {
    118         *memory = me->begin;
    119         *n      = (size_t)(me->end - me->begin);
    120     }
    121 
    122     QUEX_INLINE QUEX_TYPE_TOKEN* 
    123     QUEX_NAME(TokenQueue_access_write_p)(QUEX_NAME(TokenQueue)* me) 
    124     { 
    125 #       if defined(QUEX_OPTION_USER_MANAGED_TOKEN_MEMORY)
    126         __quex_assert( ! QUEX_NAME(TokenQueue_disfunctionality_check)(me) );
    127         __quex_assert( me->write_iterator < me->end_minus_safety_border );
    128         if( QUEX_NAME(TokenQueue_disfunctionality_check)(me) ) {
    129             return (QUEX_TYPE_TOKEN*)0;
    130         }
    131         else if( me->write_iterator < me->end_minus_safety_border ) {
    132             return (QUEX_TYPE_TOKEN*)0;
    133         }
    134 #       endif
    135         return me->write_iterator; 
    136     }
    137 
    138     QUEX_INLINE bool QUEX_NAME(TokenQueue_is_full)(QUEX_NAME(TokenQueue)* me) 
    139     { return me->write_iterator >= me->end_minus_safety_border; }
    140     
    141     QUEX_INLINE bool QUEX_NAME(TokenQueue_is_empty)(QUEX_NAME(TokenQueue)* me)
    142     { return me->read_iterator == me->write_iterator; }
    143 
    144     QUEX_INLINE QUEX_TYPE_TOKEN* QUEX_NAME(TokenQueue_pop)(QUEX_NAME(TokenQueue)* me)
    145     { return me->read_iterator++; }
    146 
    147     QUEX_INLINE QUEX_TYPE_TOKEN* QUEX_NAME(TokenQueue_begin)(QUEX_NAME(TokenQueue)* me)
    148     { return me->begin; }
    149 
    150     QUEX_INLINE QUEX_TYPE_TOKEN* QUEX_NAME(TokenQueue_back)(QUEX_NAME(TokenQueue)* me)
    151     { return me->end - 1; }
    152 
    153     QUEX_INLINE size_t QUEX_NAME(TokenQueue_available_n)(QUEX_NAME(TokenQueue)* me) 
    154     { return (size_t)(me->end - me->write_iterator); }
    155 
    156 #ifdef QUEX_OPTION_ASSERTS
    QUEX_INLINE void  
    QUEX_ASSERT_TOKEN_QUEUE_AFTER_WRITE(QUEX_NAME(TokenQueue)* me)
    /* Sanity check of queue invariants after a token write. Tolerates the
     * write iterator sitting inside the safety border; aborts only if it
     * passed the physical end of the array.                                   */
    {
        __quex_assert(me->begin != 0x0);
        __quex_assert(me->read_iterator  >= me->begin);
        __quex_assert(me->write_iterator >= me->read_iterator);
        /* If the following breaks, it means that the given queue size was too small */
        /* NOTE(review): 'me->begin == 0x0' is dead here -- asserted non-zero above. */
        __quex_assert(me->begin == 0x0 || me->end_minus_safety_border >= me->begin + 1);
        if( me->write_iterator > me->end ) { 
            QUEX_ERROR_EXIT("Error: Token queue overflow. This happens if too many tokens are sent\n"
                            "       as a reaction to one single pattern match. Use quex's command line\n"
                            "       option --token-queue-safety-border, or define the macro\n"
                            "       QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER with a greater value.\n"); 
        }
    }
    QUEX_INLINE void  
    QUEX_TOKEN_QUEUE_ASSERT(QUEX_NAME(TokenQueue)* me)
    /* Stricter variant: in addition to the after-write checks, abort as soon
     * as the write iterator touches the physical end of the array.            */
    {
        QUEX_ASSERT_TOKEN_QUEUE_AFTER_WRITE(me);
        if( me->write_iterator == me->end ) { 
            QUEX_ERROR_EXIT("Error: Token queue overflow. This happens if too many tokens are sent\n"
                            "       as a reaction to one single pattern match. Use quex's command line\n"
                            "       option --token-queue-safety-border, or define the macro\n"
                            "       QUEX_SETTING_TOKEN_QUEUE_SAFETY_BORDER with a greater value.\n"); 
        }
    }
    183 #else
    184 #   define QUEX_TOKEN_QUEUE_ASSERT(me)             /* empty */
    185 #   define QUEX_ASSERT_TOKEN_QUEUE_AFTER_WRITE(me) /* empty */
    186 #endif
    187 
    188 #if 1
QUEX_INLINE void
QUEX_NAME(TokenQueueRemainder_save)(QUEX_NAME(TokenQueueRemainder)* me, QUEX_NAME(TokenQueue)* token_queue)
/* Save the unread tokens of 'token_queue' into a freshly allocated plain
 * memory chunk ('me->token_list', 'me->size' elements) and reset the queue.
 * The in-queue originals are re-constructed afterwards so that their
 * referred objects are owned by the saved copies only (see below).           */
{
    QUEX_TOKEN_QUEUE_ASSERT(token_queue);
    /* State of the token queue at the entry of this function:
     *
     *                           [A0] [B0]   [A1] [B1]   [A2] [B2]
     *                            |    |      |    |      |   | 
     *    |    |      |    |      |    |      |    |      |   | 
     *  [ token 1  ][ token 2  ][ token 3  ][ token 4  ][ token 5  ][ ....
     *                          |                                   |
     *                          read_iterator                       write_iterator
     *                                          
     *
     * 1. Step: Allocate a plain chunk of memory, to carry the remaining tokens:
     *
     *                           [ store 1  ][ store 2  ][ store 3  ]
     *
     *          The elements of this plain chunk of memory are neither subject
     *          to constructor nor destructor calls.
     *
     * 2. Step: Make a plain copy of the tokens of the remainder (from read_iterator
     *          to write_iterator).
     *
     *                        [A0] [B0]   [A1] [B1]   [A2] [B2]
     *                         |    |      |    |      |   | 
     *                         |    |      |    |      |   | 
     *                       [ store 1  ][ store 2  ][ store 3  ]
     *
     *      As a consequence, the objects to which the original tokens referred
     *      are now referred by the stored tokens. However, at the time of 'restore'
     *      the content is copied at the beginning of the queue. 
     *
     *      !! Thus, the following scenario is conceivable:
     *      !!
     *      !!    [A0] [B0]   [A1] [B1]   [A2] [B2]   [A1] [B1]   [A2] [B2]
     *      !!     |    |      |    |      |    |      |    |      |   | 
     *      !!     |    |      |    |      |    |      |    |      |   | 
     *      !!   [ token 1  ][ token 2  ][ token 3  ][ token 4  ][ token 5  ][ ....
     *      !!   |                                   |
     *      !!   begin                               |
     *      !!   |<--------- store size ------------>|
     *      !!
     *      !! If this was happening, then the destructor for the objects A1, B1, ... 
     *      !! would be called twice at the destruction time of the token queue!       
     *      !!
     *      !! PREVENTION: See next step.
     *
     * 3. Step: Calling placement new on token objects that are saved:
     *          Resulting original token queue:
     *
     *                           [X1] [X1]   [Y2] [Y2]   [Z0] [Z0]   
     *                            |    |      |    |      |   | 
     *    |    |      |    |      |    |      |    |      |   | 
     *  [ token 1  ][ token 2  ][ token 3  ][ token 4  ][ token 5  ][ ....
     *                          |                                   |
     *                          read_iterator                       write_iterator
     *
     *  Note that token 3 was originally not subject to 'double occurrence'. However,
     *  it may be overwritten by filling the queue, and then references to objects
     *  would get lost.                                                         */
    me->size = (size_t)(token_queue->write_iterator - token_queue->read_iterator);
    if( me->size != 0 ) {
        QUEX_TYPE_TOKEN* iterator = 0x0;
        
        /* Step 1: allocate plain chunk of memory.                              */
        me->token_list = (QUEX_TYPE_TOKEN*)QUEXED(MemoryManager_allocate)(sizeof(QUEX_TYPE_TOKEN) * me->size, 
                                                                          E_MemoryObjectType_TOKEN_ARRAY);
        if( me->token_list == 0x0 ) {
            QUEX_ERROR_EXIT("Memory allocation error on request for token array.");
        }

        /* Step 2: copy plain chunk of memory                                   */
        __QUEX_STD_memcpy((void*)me->token_list, (void*)token_queue->read_iterator, sizeof(QUEX_TYPE_TOKEN) * me->size);

        /* Step 3: Call cleaning placement new on objects which are subject to
         *         potential double deletion.                                   */
        for(iterator = token_queue->read_iterator; 
            iterator != token_queue->write_iterator; ++iterator) {
            QUEX_NAME_TOKEN(construct)(iterator);
        }
    }
    QUEX_NAME(TokenQueue_reset)(token_queue);

    QUEX_TOKEN_QUEUE_ASSERT(token_queue);
}
    275 
QUEX_INLINE void
QUEX_NAME(TokenQueueRemainder_restore)(QUEX_NAME(TokenQueueRemainder)* me, QUEX_NAME(TokenQueue)* token_queue)
/* Copy the previously saved tokens back to the front of 'token_queue' and
 * release the save-buffer. Requires the queue to be empty on entry; aborts
 * otherwise.                                                                 */
{
    QUEX_TYPE_TOKEN* iterator = 0x0;
    /* NOTE: When a token queue remainder is restored, this happens as a result
     *       of 'return from included file'. The return from an included file
     *       is triggered by a TERMINATION token. By definition, the TERMINATION
     *       is the last token to be sent. When the user detects a TERMINATION
     *       token, the read_iterator == write_iterator, which means that the
     *       token queue is empty. => Thus, the 'refill' can start from the beginning.  
     *       THIS IS TRUE WHEN THE INCLUDE_PUSH, INCLUDE_POP HAPPENS FROM OUTSIDE
     *       THE LEXICAL ANALYZER ENGINE.                                            */
    if( ! QUEX_NAME(TokenQueue_is_empty)(token_queue) ) {
        QUEX_ERROR_EXIT("Token queue not empty on return from included file. This can\n"
                        "only happen if include handling was done from inside analyzer\n"
                        "actions. Please, consider directory demo/005 for an example to\n"
                        "handle file inclusion.\n");
    }
    QUEX_TOKEN_QUEUE_ASSERT(token_queue);

    if( me->size != 0 ) {
        /* Step 1: Call explicit destructors for token objects that are overwritten  */
        for(iterator = token_queue->begin; 
            iterator != token_queue->begin + me->size; ++iterator) {
            QUEX_NAME_TOKEN(destruct)(iterator);
        }
        /* Step 2: Plain copy of objects stored in the 'remainder store'             */
        __QUEX_STD_memcpy((void*)token_queue->begin, (void*)me->token_list, sizeof(QUEX_TYPE_TOKEN) * me->size);

        /* Step 3: De-Allocate the remainder objects                                 
         *         NO explicit destructor calls, since the referred objects are now
         *         referred from inside the 'real' token queue.                      */
        QUEXED(MemoryManager_free)(me->token_list, E_MemoryObjectType_TOKEN_ARRAY);
    }
    /* Reset the read and write iterators */
    token_queue->read_iterator  = token_queue->begin;
    token_queue->write_iterator = token_queue->begin + me->size;

    QUEX_TOKEN_QUEUE_ASSERT(token_queue);
}
    316 #endif
    317 
    318 QUEX_NAMESPACE_MAIN_CLOSE
    319 
    320 #endif /* __QUEX_INCLUDE_GUARD__TOKEN__TOKEN_QUEUE_I */