]>
glassweightruler.freedombox.rocks Git - Ventoy.git/blob - SQUASHFS/squashfs-tools-4.4/squashfs-tools/caches-queues-lists.c
/*
 * Create a squashfs filesystem.  This is a highly compressed read only
 * filesystem.
 *
 * Copyright (c) 2013, 2014, 2019
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * caches-queues-lists.c
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#include "caches-queues-lists.h"
#include "error.h"

extern int add_overflow(int, int);
extern int multiply_overflow(int, int);
39 struct queue
*queue_init(int size
)
41 struct queue
*queue
= malloc(sizeof(struct queue
));
46 if(add_overflow(size
, 1) ||
47 multiply_overflow(size
+ 1, sizeof(void *)))
48 BAD_ERROR("Size too large in queue_init\n");
50 queue
->data
= malloc(sizeof(void *) * (size
+ 1));
51 if(queue
->data
== NULL
)
54 queue
->size
= size
+ 1;
55 queue
->readp
= queue
->writep
= 0;
56 pthread_mutex_init(&queue
->mutex
, NULL
);
57 pthread_cond_init(&queue
->empty
, NULL
);
58 pthread_cond_init(&queue
->full
, NULL
);
64 void queue_put(struct queue
*queue
, void *data
)
68 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
69 pthread_mutex_lock(&queue
->mutex
);
71 while((nextp
= (queue
->writep
+ 1) % queue
->size
) == queue
->readp
)
72 pthread_cond_wait(&queue
->full
, &queue
->mutex
);
74 queue
->data
[queue
->writep
] = data
;
75 queue
->writep
= nextp
;
76 pthread_cond_signal(&queue
->empty
);
77 pthread_cleanup_pop(1);
81 void *queue_get(struct queue
*queue
)
85 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
86 pthread_mutex_lock(&queue
->mutex
);
88 while(queue
->readp
== queue
->writep
)
89 pthread_cond_wait(&queue
->empty
, &queue
->mutex
);
91 data
= queue
->data
[queue
->readp
];
92 queue
->readp
= (queue
->readp
+ 1) % queue
->size
;
93 pthread_cond_signal(&queue
->full
);
94 pthread_cleanup_pop(1);
100 int queue_empty(struct queue
*queue
)
104 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
105 pthread_mutex_lock(&queue
->mutex
);
107 empty
= queue
->readp
== queue
->writep
;
109 pthread_cleanup_pop(1);
115 void queue_flush(struct queue
*queue
)
117 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
118 pthread_mutex_lock(&queue
->mutex
);
120 queue
->readp
= queue
->writep
;
122 pthread_cleanup_pop(1);
126 void dump_queue(struct queue
*queue
)
128 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
129 pthread_mutex_lock(&queue
->mutex
);
131 printf("\tMax size %d, size %d%s\n", queue
->size
- 1,
132 queue
->readp
<= queue
->writep
? queue
->writep
- queue
->readp
:
133 queue
->size
- queue
->readp
+ queue
->writep
,
134 queue
->readp
== queue
->writep
? " (EMPTY)" :
135 ((queue
->writep
+ 1) % queue
->size
) == queue
->readp
?
138 pthread_cleanup_pop(1);
142 /* define seq queue hash tables */
143 #define CALCULATE_SEQ_HASH(N) CALCULATE_HASH(N)
145 /* Called with the seq queue mutex held */
146 INSERT_HASH_TABLE(seq
, struct seq_queue
, CALCULATE_SEQ_HASH
, sequence
, seq
)
148 /* Called with the cache mutex held */
149 REMOVE_HASH_TABLE(seq
, struct seq_queue
, CALCULATE_SEQ_HASH
, sequence
, seq
);
152 struct seq_queue
*seq_queue_init()
154 struct seq_queue
*queue
= malloc(sizeof(struct seq_queue
));
158 memset(queue
, 0, sizeof(struct seq_queue
));
160 pthread_mutex_init(&queue
->mutex
, NULL
);
161 pthread_cond_init(&queue
->wait
, NULL
);
167 void seq_queue_put(struct seq_queue
*queue
, struct file_buffer
*entry
)
169 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
170 pthread_mutex_lock(&queue
->mutex
);
172 insert_seq_hash_table(queue
, entry
);
175 queue
->fragment_count
++;
177 queue
->block_count
++;
179 if(entry
->sequence
== queue
->sequence
)
180 pthread_cond_signal(&queue
->wait
);
182 pthread_cleanup_pop(1);
186 struct file_buffer
*seq_queue_get(struct seq_queue
*queue
)
189 * Return next buffer from queue in sequence order (queue->sequence). If
190 * found return it, otherwise wait for it to arrive.
192 int hash
= CALCULATE_SEQ_HASH(queue
->sequence
);
193 struct file_buffer
*entry
;
195 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
196 pthread_mutex_lock(&queue
->mutex
);
199 for(entry
= queue
->hash_table
[hash
]; entry
;
200 entry
= entry
->seq_next
)
201 if(entry
->sequence
== queue
->sequence
)
206 * found the buffer in the queue, decrement the
207 * appropriate count, and remove from hash list
210 queue
->fragment_count
--;
212 queue
->block_count
--;
214 remove_seq_hash_table(queue
, entry
);
221 /* entry not found, wait for it to arrive */
222 pthread_cond_wait(&queue
->wait
, &queue
->mutex
);
225 pthread_cleanup_pop(1);
231 void seq_queue_flush(struct seq_queue
*queue
)
235 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
236 pthread_mutex_lock(&queue
->mutex
);
238 for(i
= 0; i
< HASH_SIZE
; i
++)
239 queue
->hash_table
[i
] = NULL
;
241 queue
->fragment_count
= queue
->block_count
= 0;
243 pthread_cleanup_pop(1);
247 void dump_seq_queue(struct seq_queue
*queue
, int fragment_queue
)
251 pthread_cleanup_push((void *) pthread_mutex_unlock
, &queue
->mutex
);
252 pthread_mutex_lock(&queue
->mutex
);
254 size
= fragment_queue
? queue
->fragment_count
: queue
->block_count
;
256 printf("\tMax size unlimited, size %d%s\n", size
,
257 size
== 0 ? " (EMPTY)" : "");
259 pthread_cleanup_pop(1);
263 /* define cache hash tables */
264 #define CALCULATE_CACHE_HASH(N) CALCULATE_HASH(llabs(N))
266 /* Called with the cache mutex held */
267 INSERT_HASH_TABLE(cache
, struct cache
, CALCULATE_CACHE_HASH
, index
, hash
)
269 /* Called with the cache mutex held */
270 REMOVE_HASH_TABLE(cache
, struct cache
, CALCULATE_CACHE_HASH
, index
, hash
);
272 /* define cache free list */
274 /* Called with the cache mutex held */
275 INSERT_LIST(free
, struct file_buffer
)
277 /* Called with the cache mutex held */
278 REMOVE_LIST(free
, struct file_buffer
)
281 struct cache
*cache_init(int buffer_size
, int max_buffers
, int noshrink_lookup
,
284 struct cache
*cache
= malloc(sizeof(struct cache
));
289 cache
->max_buffers
= max_buffers
;
290 cache
->buffer_size
= buffer_size
;
293 cache
->free_list
= NULL
;
296 * The cache will grow up to max_buffers in size in response to
297 * an increase in readhead/number of buffers in flight. But
298 * once the outstanding buffers gets returned, we can either elect
299 * to shrink the cache, or to put the freed blocks onto a free list.
301 * For the caches where we want to do lookup (fragment/writer),
302 * a don't shrink policy is best, for the reader cache it
303 * makes no sense to keep buffers around longer than necessary as
304 * we don't do any lookup on those blocks.
306 cache
->noshrink_lookup
= noshrink_lookup
;
309 * The default use freelist before growing cache policy behaves
310 * poorly with appending - with many duplicates the caches
311 * do not grow due to the fact that large queues of outstanding
312 * fragments/writer blocks do not occur, leading to small caches
313 * and un-uncessary performance loss to frequent cache
314 * replacement in the small caches. Therefore with appending
315 * change the policy to grow the caches before reusing blocks
318 cache
->first_freelist
= first_freelist
;
320 memset(cache
->hash_table
, 0, sizeof(struct file_buffer
*) * 65536);
321 pthread_mutex_init(&cache
->mutex
, NULL
);
322 pthread_cond_init(&cache
->wait_for_free
, NULL
);
323 pthread_cond_init(&cache
->wait_for_unlock
, NULL
);
329 struct file_buffer
*cache_lookup(struct cache
*cache
, long long index
)
331 /* Lookup block in the cache, if found return with usage count
332 * incremented, if not found return NULL */
333 int hash
= CALCULATE_CACHE_HASH(index
);
334 struct file_buffer
*entry
;
336 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
337 pthread_mutex_lock(&cache
->mutex
);
339 for(entry
= cache
->hash_table
[hash
]; entry
; entry
= entry
->hash_next
)
340 if(entry
->index
== index
)
344 /* found the block in the cache, increment used count and
345 * if necessary remove from free list so it won't disappear
347 if(entry
->used
== 0) {
348 remove_free_list(&cache
->free_list
, entry
);
354 pthread_cleanup_pop(1);
360 static struct file_buffer
*cache_freelist(struct cache
*cache
)
362 struct file_buffer
*entry
= cache
->free_list
;
364 remove_free_list(&cache
->free_list
, entry
);
366 /* a block on the free_list is hashed */
367 remove_cache_hash_table(cache
, entry
);
374 static struct file_buffer
*cache_alloc(struct cache
*cache
)
376 struct file_buffer
*entry
= malloc(sizeof(struct file_buffer
) +
381 entry
->cache
= cache
;
382 entry
->free_prev
= entry
->free_next
= NULL
;
388 static struct file_buffer
*_cache_get(struct cache
*cache
, long long index
,
391 /* Get a free block out of the cache indexed on index. */
392 struct file_buffer
*entry
= NULL
;
394 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
395 pthread_mutex_lock(&cache
->mutex
);
398 if(cache
->noshrink_lookup
) {
399 /* first try to get a block from the free list */
400 if(cache
->first_freelist
&& cache
->free_list
)
401 entry
= cache_freelist(cache
);
402 else if(cache
->count
< cache
->max_buffers
) {
403 entry
= cache_alloc(cache
);
405 } else if(!cache
->first_freelist
&& cache
->free_list
)
406 entry
= cache_freelist(cache
);
407 } else { /* shrinking non-lookup cache */
408 if(cache
->count
< cache
->max_buffers
) {
409 entry
= cache_alloc(cache
);
410 if(cache
->count
> cache
->max_count
)
411 cache
->max_count
= cache
->count
;
418 /* wait for a block */
419 pthread_cond_wait(&cache
->wait_for_free
, &cache
->mutex
);
422 /* initialise block and if hash is set insert into the hash table */
424 entry
->locked
= FALSE
;
425 entry
->wait_on_unlock
= FALSE
;
426 entry
->error
= FALSE
;
428 entry
->index
= index
;
429 insert_cache_hash_table(cache
, entry
);
432 pthread_cleanup_pop(1);
/* Get a block from the cache, hashed on index (blocking). */
struct file_buffer *cache_get(struct cache *cache, long long index)
{
	return _cache_get(cache, index, 1);
}
/* Get an anonymous (un-hashed) block from the cache (blocking). */
struct file_buffer *cache_get_nohash(struct cache *cache)
{
	return _cache_get(cache, 0, 0);
}
450 void cache_hash(struct file_buffer
*entry
, long long index
)
452 struct cache
*cache
= entry
->cache
;
454 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
455 pthread_mutex_lock(&cache
->mutex
);
457 entry
->index
= index
;
458 insert_cache_hash_table(cache
, entry
);
460 pthread_cleanup_pop(1);
464 void cache_block_put(struct file_buffer
*entry
)
469 * Finished with this cache entry, once the usage count reaches zero it
472 * If noshrink_lookup is set, put the block onto the free list.
473 * As blocks remain accessible via the hash table they can be found
474 * getting a new lease of life before they are reused.
476 * if noshrink_lookup is not set then shrink the cache.
482 cache
= entry
->cache
;
484 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
485 pthread_mutex_lock(&cache
->mutex
);
488 if(entry
->used
== 0) {
489 if(cache
->noshrink_lookup
) {
490 insert_free_list(&cache
->free_list
, entry
);
497 /* One or more threads may be waiting on this block */
498 pthread_cond_signal(&cache
->wait_for_free
);
501 pthread_cleanup_pop(1);
505 void dump_cache(struct cache
*cache
)
507 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
508 pthread_mutex_lock(&cache
->mutex
);
510 if(cache
->noshrink_lookup
)
511 printf("\tMax buffers %d, Current size %d, Used %d, %s\n",
512 cache
->max_buffers
, cache
->count
, cache
->used
,
513 cache
->free_list
? "Free buffers" : "No free buffers");
515 printf("\tMax buffers %d, Current size %d, Maximum historical "
516 "size %d\n", cache
->max_buffers
, cache
->count
,
519 pthread_cleanup_pop(1);
523 struct file_buffer
*cache_get_nowait(struct cache
*cache
, long long index
)
525 struct file_buffer
*entry
= NULL
;
527 * block doesn't exist, create it, but return it with the
528 * locked flag set, so nothing tries to use it while it doesn't
531 * If there's no space in the cache then return NULL.
534 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
535 pthread_mutex_lock(&cache
->mutex
);
537 /* first try to get a block from the free list */
538 if(cache
->first_freelist
&& cache
->free_list
)
539 entry
= cache_freelist(cache
);
540 else if(cache
->count
< cache
->max_buffers
) {
541 entry
= cache_alloc(cache
);
543 } else if(!cache
->first_freelist
&& cache
->free_list
)
544 entry
= cache_freelist(cache
);
547 /* initialise block and insert into the hash table */
549 entry
->locked
= TRUE
;
550 entry
->wait_on_unlock
= FALSE
;
551 entry
->error
= FALSE
;
552 entry
->index
= index
;
553 insert_cache_hash_table(cache
, entry
);
556 pthread_cleanup_pop(1);
562 struct file_buffer
*cache_lookup_nowait(struct cache
*cache
, long long index
,
566 * Lookup block in the cache, if found return it with the locked flag
567 * indicating whether it is currently locked. In both cases increment
570 * If it doesn't exist in the cache return NULL;
572 int hash
= CALCULATE_CACHE_HASH(index
);
573 struct file_buffer
*entry
;
575 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
576 pthread_mutex_lock(&cache
->mutex
);
578 /* first check if the entry already exists */
579 for(entry
= cache
->hash_table
[hash
]; entry
; entry
= entry
->hash_next
)
580 if(entry
->index
== index
)
584 if(entry
->used
== 0) {
585 remove_free_list(&cache
->free_list
, entry
);
589 *locked
= entry
->locked
;
592 pthread_cleanup_pop(1);
598 void cache_wait_unlock(struct file_buffer
*buffer
)
600 struct cache
*cache
= buffer
->cache
;
602 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
603 pthread_mutex_lock(&cache
->mutex
);
605 while(buffer
->locked
) {
607 * another thread is filling this in, wait until it
608 * becomes unlocked. Used has been incremented to ensure it
609 * doesn't get reused. By definition a block can't be
610 * locked and unused, and so we don't need to worry
611 * about it being on the freelist now, but, it may
612 * become unused when unlocked unless used is
615 buffer
->wait_on_unlock
= TRUE
;
616 pthread_cond_wait(&cache
->wait_for_unlock
, &cache
->mutex
);
619 pthread_cleanup_pop(1);
623 void cache_unlock(struct file_buffer
*entry
)
625 struct cache
*cache
= entry
->cache
;
628 * Unlock this locked cache entry. If anything is waiting for this
629 * to become unlocked, wake it up.
631 pthread_cleanup_push((void *) pthread_mutex_unlock
, &cache
->mutex
);
632 pthread_mutex_lock(&cache
->mutex
);
634 entry
->locked
= FALSE
;
636 if(entry
->wait_on_unlock
) {
637 entry
->wait_on_unlock
= FALSE
;
638 pthread_cond_broadcast(&cache
->wait_for_unlock
);
641 pthread_cleanup_pop(1);