LCOV - code coverage report
Current view: top level - lib/generic - queue.c Hit Total Coverage
Test: Knot Resolver 3.2.1-POSIX coverage report Lines: 59 65 90.8 %
Date: 2019-03-12 03:31:59
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*  Copyright (C) 2018 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
       2             : 
       3             :     This program is free software: you can redistribute it and/or modify
       4             :     it under the terms of the GNU General Public License as published by
       5             :     the Free Software Foundation, either version 3 of the License, or
       6             :     (at your option) any later version.
       7             : 
       8             :     This program is distributed in the hope that it will be useful,
       9             :     but WITHOUT ANY WARRANTY; without even the implied warranty of
      10             :     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      11             :     GNU General Public License for more details.
      12             : 
      13             :     You should have received a copy of the GNU General Public License
      14             :     along with this program.  If not, see <http://www.gnu.org/licenses/>.
      15             :  */
      16             : 
      17             : #include "lib/generic/queue.h"
      18             : #include <string.h>
      19             : 
      20       15656 : KR_EXPORT void queue_init_impl(struct queue *q, size_t item_size)
      21             : {
      22       15656 :         q->len = 0;
      23       15656 :         q->item_size = item_size;
      24       15656 :         q->head = q->tail = NULL;
      25             :         /* Take 128 B (two x86 cache lines), except a small margin
      26             :          * that the allocator can use for its overhead.
      27             :          * Normally (64-bit pointers) this means 16 B header + 13*8 B data. */
      28       15656 :         q->chunk_cap = (128 - offsetof(struct queue_chunk, data)
      29             :                         - sizeof(size_t)
      30       15656 :                         ) / item_size;
      31       15656 :         if (!q->chunk_cap) q->chunk_cap = 1; /* item_size big enough by itself */
      32       15656 : }
      33             : 
      34       15614 : KR_EXPORT void queue_deinit_impl(struct queue *q)
      35             : {
      36       15614 :         assert(q);
      37       15614 :         struct queue_chunk *p = q->head;
      38       15634 :         while (p != NULL) {
      39           0 :                 struct queue_chunk *pf = p;
      40          17 :                 p = p->next;
      41          17 :                 free(pf);
      42             :         }
      43             : #ifndef NDEBUG
      44       15614 :         memset(q, 0, sizeof(*q));
      45             : #endif
      46       15614 : }
      47             : 
      48         119 : static struct queue_chunk * queue_chunk_new(const struct queue *q)
      49             : {
      50         119 :         struct queue_chunk *c = malloc(offsetof(struct queue_chunk, data)
      51         119 :                                         + q->chunk_cap * q->item_size);
      52         119 :         if (unlikely(!c)) abort(); // simplify stuff
      53         119 :         memset(c, 0, offsetof(struct queue_chunk, data));
      54         119 :         c->cap = q->chunk_cap;
      55             :         /* ->begin and ->end are zero, i.e. we optimize for _push
      56             :          * and not _push_head, by default. */
      57         119 :         return c;
      58             : }
      59             : 
      60             : /* Return pointer to the space for the new element. */
      61         438 : KR_EXPORT void * queue_push_impl(struct queue *q)
      62             : {
      63         438 :         assert(q);
      64         438 :         struct queue_chunk *t = q->tail; // shorthand
      65         438 :         if (unlikely(!t)) {
      66         120 :                 assert(!q->head && !q->len);
      67         120 :                 q->head = q->tail = t = queue_chunk_new(q);
      68             :         } else
      69         318 :         if (t->end == t->cap) {
      70          25 :                 if (t->begin * 2 >= t->cap) {
      71             :                         /* Utilization is below 50%, so let's shift (no overlap). */
      72          10 :                         memcpy(t->data, t->data + t->begin * q->item_size,
      73          10 :                                 (t->end - t->begin) * q->item_size);
      74          10 :                         t->end -= t->begin;
      75          10 :                         t->begin = 0;
      76             :                 } else {
      77             :                         /* Let's grow the tail by another chunk. */
      78          15 :                         assert(!t->next);
      79          15 :                         t->next = queue_chunk_new(q);
      80          15 :                         t = q->tail = t->next;
      81             :                 }
      82             :         }
      83         438 :         assert(t->end < t->cap);
      84         438 :         ++(q->len);
      85         438 :         ++(t->end);
      86         438 :         return t->data + q->item_size * (t->end - 1);
      87             : }
      88             : 
      89             : /* Return pointer to the space for the new element. */
/* Return pointer to the space for the new element (prepended at the head). */
KR_EXPORT void * queue_push_head_impl(struct queue *q)
{
	/* When we have choice, we optimize for further _push_head,
	 * i.e. when shifting or allocating a chunk,
	 * we store items on the tail-end of the chunk. */
	assert(q);
	struct queue_chunk *h = q->head; // shorthand
	if (unlikely(!h)) {
		/* Empty queue: allocate the first chunk and park the (empty)
		 * item window at its very end, leaving room to grow headward. */
		assert(!q->tail && !q->len);
		h = q->head = q->tail = queue_chunk_new(q);
		h->begin = h->end = h->cap;
	} else
	if (h->begin == 0) {
		/* Head chunk has no free space in front of the items. */
		if (h->end * 2 <= h->cap) {
			/* Utilization is below 50%, so let's shift (no overlap).
			 * Computations here are simplified due to h->begin == 0. */
			const int cnt = h->end;
			memcpy(h->data + (h->cap - cnt) * q->item_size, h->data,
				cnt * q->item_size);
			h->begin = h->cap - cnt;
			h->end = h->cap;
		} else {
			/* Let's grow the head by another chunk,
			 * again positioned tail-end for headward growth. */
			h = queue_chunk_new(q);
			h->next = q->head;
			q->head = h;
			h->begin = h->end = h->cap;
		}
	}
	/* All paths above guarantee at least one free slot before h->begin. */
	assert(h->begin > 0);
	--(h->begin);
	++(q->len);
	return h->data + q->item_size * h->begin;
}
     124             : 

Generated by: LCOV version 1.13