]> glassweightruler.freedombox.rocks Git - Ventoy.git/blob - SQUASHFS/squashfs-tools-4.4/squashfs-tools/caches-queues-lists.c
add IMG to no iso tip (#480)
[Ventoy.git] / SQUASHFS / squashfs-tools-4.4 / squashfs-tools / caches-queues-lists.c
1 /*
2 * Create a squashfs filesystem. This is a highly compressed read only
3 * filesystem.
4 *
5 * Copyright (c) 2013, 2014, 2019
6 * Phillip Lougher <phillip@squashfs.org.uk>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2,
11 * or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * caches-queues-lists.c
23 */
24
25 #include <pthread.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <stdio.h>
29
30 #include "error.h"
31 #include "caches-queues-lists.h"
32
33 extern int add_overflow(int, int);
34 extern int multiply_overflow(int, int);
35
36 #define TRUE 1
37 #define FALSE 0
38
39 struct queue *queue_init(int size)
40 {
41 struct queue *queue = malloc(sizeof(struct queue));
42
43 if(queue == NULL)
44 MEM_ERROR();
45
46 if(add_overflow(size, 1) ||
47 multiply_overflow(size + 1, sizeof(void *)))
48 BAD_ERROR("Size too large in queue_init\n");
49
50 queue->data = malloc(sizeof(void *) * (size + 1));
51 if(queue->data == NULL)
52 MEM_ERROR();
53
54 queue->size = size + 1;
55 queue->readp = queue->writep = 0;
56 pthread_mutex_init(&queue->mutex, NULL);
57 pthread_cond_init(&queue->empty, NULL);
58 pthread_cond_init(&queue->full, NULL);
59
60 return queue;
61 }
62
63
64 void queue_put(struct queue *queue, void *data)
65 {
66 int nextp;
67
68 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
69 pthread_mutex_lock(&queue->mutex);
70
71 while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
72 pthread_cond_wait(&queue->full, &queue->mutex);
73
74 queue->data[queue->writep] = data;
75 queue->writep = nextp;
76 pthread_cond_signal(&queue->empty);
77 pthread_cleanup_pop(1);
78 }
79
80
81 void *queue_get(struct queue *queue)
82 {
83 void *data;
84
85 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
86 pthread_mutex_lock(&queue->mutex);
87
88 while(queue->readp == queue->writep)
89 pthread_cond_wait(&queue->empty, &queue->mutex);
90
91 data = queue->data[queue->readp];
92 queue->readp = (queue->readp + 1) % queue->size;
93 pthread_cond_signal(&queue->full);
94 pthread_cleanup_pop(1);
95
96 return data;
97 }
98
99
100 int queue_empty(struct queue *queue)
101 {
102 int empty;
103
104 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
105 pthread_mutex_lock(&queue->mutex);
106
107 empty = queue->readp == queue->writep;
108
109 pthread_cleanup_pop(1);
110
111 return empty;
112 }
113
114
/*
 * Discard all queued entries by advancing the read pointer to the write
 * pointer.  The queued pointers themselves are not freed - ownership of
 * whatever was queued remains with the producer/consumer protocol.
 */
void queue_flush(struct queue *queue)
{
	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	queue->readp = queue->writep;

	pthread_cleanup_pop(1);
}
124
125
126 void dump_queue(struct queue *queue)
127 {
128 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
129 pthread_mutex_lock(&queue->mutex);
130
131 printf("\tMax size %d, size %d%s\n", queue->size - 1,
132 queue->readp <= queue->writep ? queue->writep - queue->readp :
133 queue->size - queue->readp + queue->writep,
134 queue->readp == queue->writep ? " (EMPTY)" :
135 ((queue->writep + 1) % queue->size) == queue->readp ?
136 " (FULL)" : "");
137
138 pthread_cleanup_pop(1);
139 }
140
141
/*
 * define seq queue hash tables - these macro instantiations generate
 * insert_seq_hash_table() and remove_seq_hash_table(), which hash
 * file_buffers on their sequence number via the seq_next/seq_prev links
 */
#define CALCULATE_SEQ_HASH(N) CALCULATE_HASH(N)

/* Called with the seq queue mutex held */
INSERT_HASH_TABLE(seq, struct seq_queue, CALCULATE_SEQ_HASH, sequence, seq)

/* Called with the seq queue mutex held */
REMOVE_HASH_TABLE(seq, struct seq_queue, CALCULATE_SEQ_HASH, sequence, seq);
150
151
152 struct seq_queue *seq_queue_init()
153 {
154 struct seq_queue *queue = malloc(sizeof(struct seq_queue));
155 if(queue == NULL)
156 MEM_ERROR();
157
158 memset(queue, 0, sizeof(struct seq_queue));
159
160 pthread_mutex_init(&queue->mutex, NULL);
161 pthread_cond_init(&queue->wait, NULL);
162
163 return queue;
164 }
165
166
167 void seq_queue_put(struct seq_queue *queue, struct file_buffer *entry)
168 {
169 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
170 pthread_mutex_lock(&queue->mutex);
171
172 insert_seq_hash_table(queue, entry);
173
174 if(entry->fragment)
175 queue->fragment_count ++;
176 else
177 queue->block_count ++;
178
179 if(entry->sequence == queue->sequence)
180 pthread_cond_signal(&queue->wait);
181
182 pthread_cleanup_pop(1);
183 }
184
185
186 struct file_buffer *seq_queue_get(struct seq_queue *queue)
187 {
188 /*
189 * Return next buffer from queue in sequence order (queue->sequence). If
190 * found return it, otherwise wait for it to arrive.
191 */
192 int hash = CALCULATE_SEQ_HASH(queue->sequence);
193 struct file_buffer *entry;
194
195 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
196 pthread_mutex_lock(&queue->mutex);
197
198 while(1) {
199 for(entry = queue->hash_table[hash]; entry;
200 entry = entry->seq_next)
201 if(entry->sequence == queue->sequence)
202 break;
203
204 if(entry) {
205 /*
206 * found the buffer in the queue, decrement the
207 * appropriate count, and remove from hash list
208 */
209 if(entry->fragment)
210 queue->fragment_count --;
211 else
212 queue->block_count --;
213
214 remove_seq_hash_table(queue, entry);
215
216 queue->sequence ++;
217
218 break;
219 }
220
221 /* entry not found, wait for it to arrive */
222 pthread_cond_wait(&queue->wait, &queue->mutex);
223 }
224
225 pthread_cleanup_pop(1);
226
227 return entry;
228 }
229
230
231 void seq_queue_flush(struct seq_queue *queue)
232 {
233 int i;
234
235 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
236 pthread_mutex_lock(&queue->mutex);
237
238 for(i = 0; i < HASH_SIZE; i++)
239 queue->hash_table[i] = NULL;
240
241 queue->fragment_count = queue->block_count = 0;
242
243 pthread_cleanup_pop(1);
244 }
245
246
247 void dump_seq_queue(struct seq_queue *queue, int fragment_queue)
248 {
249 int size;
250
251 pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
252 pthread_mutex_lock(&queue->mutex);
253
254 size = fragment_queue ? queue->fragment_count : queue->block_count;
255
256 printf("\tMax size unlimited, size %d%s\n", size,
257 size == 0 ? " (EMPTY)" : "");
258
259 pthread_cleanup_pop(1);
260 }
261
262
/*
 * define cache hash tables - these macro instantiations generate
 * insert_cache_hash_table() and remove_cache_hash_table(), which hash
 * file_buffers on llabs(index) via the hash_next/hash_prev links
 */
#define CALCULATE_CACHE_HASH(N) CALCULATE_HASH(llabs(N))

/* Called with the cache mutex held */
INSERT_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash)

/* Called with the cache mutex held */
REMOVE_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash);

/*
 * define cache free list - generates insert_free_list() and
 * remove_free_list(), operating on the free_next/free_prev links
 */

/* Called with the cache mutex held */
INSERT_LIST(free, struct file_buffer)

/* Called with the cache mutex held */
REMOVE_LIST(free, struct file_buffer)
279
280
281 struct cache *cache_init(int buffer_size, int max_buffers, int noshrink_lookup,
282 int first_freelist)
283 {
284 struct cache *cache = malloc(sizeof(struct cache));
285
286 if(cache == NULL)
287 MEM_ERROR();
288
289 cache->max_buffers = max_buffers;
290 cache->buffer_size = buffer_size;
291 cache->count = 0;
292 cache->used = 0;
293 cache->free_list = NULL;
294
295 /*
296 * The cache will grow up to max_buffers in size in response to
297 * an increase in readhead/number of buffers in flight. But
298 * once the outstanding buffers gets returned, we can either elect
299 * to shrink the cache, or to put the freed blocks onto a free list.
300 *
301 * For the caches where we want to do lookup (fragment/writer),
302 * a don't shrink policy is best, for the reader cache it
303 * makes no sense to keep buffers around longer than necessary as
304 * we don't do any lookup on those blocks.
305 */
306 cache->noshrink_lookup = noshrink_lookup;
307
308 /*
309 * The default use freelist before growing cache policy behaves
310 * poorly with appending - with many duplicates the caches
311 * do not grow due to the fact that large queues of outstanding
312 * fragments/writer blocks do not occur, leading to small caches
313 * and un-uncessary performance loss to frequent cache
314 * replacement in the small caches. Therefore with appending
315 * change the policy to grow the caches before reusing blocks
316 * from the freelist
317 */
318 cache->first_freelist = first_freelist;
319
320 memset(cache->hash_table, 0, sizeof(struct file_buffer *) * 65536);
321 pthread_mutex_init(&cache->mutex, NULL);
322 pthread_cond_init(&cache->wait_for_free, NULL);
323 pthread_cond_init(&cache->wait_for_unlock, NULL);
324
325 return cache;
326 }
327
328
329 struct file_buffer *cache_lookup(struct cache *cache, long long index)
330 {
331 /* Lookup block in the cache, if found return with usage count
332 * incremented, if not found return NULL */
333 int hash = CALCULATE_CACHE_HASH(index);
334 struct file_buffer *entry;
335
336 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
337 pthread_mutex_lock(&cache->mutex);
338
339 for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
340 if(entry->index == index)
341 break;
342
343 if(entry) {
344 /* found the block in the cache, increment used count and
345 * if necessary remove from free list so it won't disappear
346 */
347 if(entry->used == 0) {
348 remove_free_list(&cache->free_list, entry);
349 cache->used ++;
350 }
351 entry->used ++;
352 }
353
354 pthread_cleanup_pop(1);
355
356 return entry;
357 }
358
359
360 static struct file_buffer *cache_freelist(struct cache *cache)
361 {
362 struct file_buffer *entry = cache->free_list;
363
364 remove_free_list(&cache->free_list, entry);
365
366 /* a block on the free_list is hashed */
367 remove_cache_hash_table(cache, entry);
368
369 cache->used ++;
370 return entry;
371 }
372
373
374 static struct file_buffer *cache_alloc(struct cache *cache)
375 {
376 struct file_buffer *entry = malloc(sizeof(struct file_buffer) +
377 cache->buffer_size);
378 if(entry == NULL)
379 MEM_ERROR();
380
381 entry->cache = cache;
382 entry->free_prev = entry->free_next = NULL;
383 cache->count ++;
384 return entry;
385 }
386
387
388 static struct file_buffer *_cache_get(struct cache *cache, long long index,
389 int hash)
390 {
391 /* Get a free block out of the cache indexed on index. */
392 struct file_buffer *entry = NULL;
393
394 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
395 pthread_mutex_lock(&cache->mutex);
396
397 while(1) {
398 if(cache->noshrink_lookup) {
399 /* first try to get a block from the free list */
400 if(cache->first_freelist && cache->free_list)
401 entry = cache_freelist(cache);
402 else if(cache->count < cache->max_buffers) {
403 entry = cache_alloc(cache);
404 cache->used ++;
405 } else if(!cache->first_freelist && cache->free_list)
406 entry = cache_freelist(cache);
407 } else { /* shrinking non-lookup cache */
408 if(cache->count < cache->max_buffers) {
409 entry = cache_alloc(cache);
410 if(cache->count > cache->max_count)
411 cache->max_count = cache->count;
412 }
413 }
414
415 if(entry)
416 break;
417
418 /* wait for a block */
419 pthread_cond_wait(&cache->wait_for_free, &cache->mutex);
420 }
421
422 /* initialise block and if hash is set insert into the hash table */
423 entry->used = 1;
424 entry->locked = FALSE;
425 entry->wait_on_unlock = FALSE;
426 entry->error = FALSE;
427 if(hash) {
428 entry->index = index;
429 insert_cache_hash_table(cache, entry);
430 }
431
432 pthread_cleanup_pop(1);
433
434 return entry;
435 }
436
437
/* Get a free block from the cache, hashed on "index" */
struct file_buffer *cache_get(struct cache *cache, long long index)
{
	return _cache_get(cache, index, 1);
}
442
443
/*
 * Get a free block from the cache without hashing it; it can be
 * indexed later with cache_hash()
 */
struct file_buffer *cache_get_nohash(struct cache *cache)
{
	return _cache_get(cache, 0, 0);
}
448
449
450 void cache_hash(struct file_buffer *entry, long long index)
451 {
452 struct cache *cache = entry->cache;
453
454 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
455 pthread_mutex_lock(&cache->mutex);
456
457 entry->index = index;
458 insert_cache_hash_table(cache, entry);
459
460 pthread_cleanup_pop(1);
461 }
462
463
464 void cache_block_put(struct file_buffer *entry)
465 {
466 struct cache *cache;
467
468 /*
469 * Finished with this cache entry, once the usage count reaches zero it
470 * can be reused.
471 *
472 * If noshrink_lookup is set, put the block onto the free list.
473 * As blocks remain accessible via the hash table they can be found
474 * getting a new lease of life before they are reused.
475 *
476 * if noshrink_lookup is not set then shrink the cache.
477 */
478
479 if(entry == NULL)
480 return;
481
482 cache = entry->cache;
483
484 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
485 pthread_mutex_lock(&cache->mutex);
486
487 entry->used --;
488 if(entry->used == 0) {
489 if(cache->noshrink_lookup) {
490 insert_free_list(&cache->free_list, entry);
491 cache->used --;
492 } else {
493 free(entry);
494 cache->count --;
495 }
496
497 /* One or more threads may be waiting on this block */
498 pthread_cond_signal(&cache->wait_for_free);
499 }
500
501 pthread_cleanup_pop(1);
502 }
503
504
505 void dump_cache(struct cache *cache)
506 {
507 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
508 pthread_mutex_lock(&cache->mutex);
509
510 if(cache->noshrink_lookup)
511 printf("\tMax buffers %d, Current size %d, Used %d, %s\n",
512 cache->max_buffers, cache->count, cache->used,
513 cache->free_list ? "Free buffers" : "No free buffers");
514 else
515 printf("\tMax buffers %d, Current size %d, Maximum historical "
516 "size %d\n", cache->max_buffers, cache->count,
517 cache->max_count);
518
519 pthread_cleanup_pop(1);
520 }
521
522
523 struct file_buffer *cache_get_nowait(struct cache *cache, long long index)
524 {
525 struct file_buffer *entry = NULL;
526 /*
527 * block doesn't exist, create it, but return it with the
528 * locked flag set, so nothing tries to use it while it doesn't
529 * contain data.
530 *
531 * If there's no space in the cache then return NULL.
532 */
533
534 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
535 pthread_mutex_lock(&cache->mutex);
536
537 /* first try to get a block from the free list */
538 if(cache->first_freelist && cache->free_list)
539 entry = cache_freelist(cache);
540 else if(cache->count < cache->max_buffers) {
541 entry = cache_alloc(cache);
542 cache->used ++;
543 } else if(!cache->first_freelist && cache->free_list)
544 entry = cache_freelist(cache);
545
546 if(entry) {
547 /* initialise block and insert into the hash table */
548 entry->used = 1;
549 entry->locked = TRUE;
550 entry->wait_on_unlock = FALSE;
551 entry->error = FALSE;
552 entry->index = index;
553 insert_cache_hash_table(cache, entry);
554 }
555
556 pthread_cleanup_pop(1);
557
558 return entry;
559 }
560
561
562 struct file_buffer *cache_lookup_nowait(struct cache *cache, long long index,
563 char *locked)
564 {
565 /*
566 * Lookup block in the cache, if found return it with the locked flag
567 * indicating whether it is currently locked. In both cases increment
568 * the used count.
569 *
570 * If it doesn't exist in the cache return NULL;
571 */
572 int hash = CALCULATE_CACHE_HASH(index);
573 struct file_buffer *entry;
574
575 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
576 pthread_mutex_lock(&cache->mutex);
577
578 /* first check if the entry already exists */
579 for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
580 if(entry->index == index)
581 break;
582
583 if(entry) {
584 if(entry->used == 0) {
585 remove_free_list(&cache->free_list, entry);
586 cache->used ++;
587 }
588 entry->used ++;
589 *locked = entry->locked;
590 }
591
592 pthread_cleanup_pop(1);
593
594 return entry;
595 }
596
597
598 void cache_wait_unlock(struct file_buffer *buffer)
599 {
600 struct cache *cache = buffer->cache;
601
602 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
603 pthread_mutex_lock(&cache->mutex);
604
605 while(buffer->locked) {
606 /*
607 * another thread is filling this in, wait until it
608 * becomes unlocked. Used has been incremented to ensure it
609 * doesn't get reused. By definition a block can't be
610 * locked and unused, and so we don't need to worry
611 * about it being on the freelist now, but, it may
612 * become unused when unlocked unless used is
613 * incremented
614 */
615 buffer->wait_on_unlock = TRUE;
616 pthread_cond_wait(&cache->wait_for_unlock, &cache->mutex);
617 }
618
619 pthread_cleanup_pop(1);
620 }
621
622
623 void cache_unlock(struct file_buffer *entry)
624 {
625 struct cache *cache = entry->cache;
626
627 /*
628 * Unlock this locked cache entry. If anything is waiting for this
629 * to become unlocked, wake it up.
630 */
631 pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
632 pthread_mutex_lock(&cache->mutex);
633
634 entry->locked = FALSE;
635
636 if(entry->wait_on_unlock) {
637 entry->wait_on_unlock = FALSE;
638 pthread_cond_broadcast(&cache->wait_for_unlock);
639 }
640
641 pthread_cleanup_pop(1);
642 }