/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifdef ENABLE_SHADER_CACHE

#include <ctype.h>
#include <ftw.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <inttypes.h>

#include "zlib.h"

#ifdef HAVE_ZSTD
#include "zstd.h"
#endif

#include "util/crc32.h"
#include "util/debug.h"
#include "util/rand_xor.h"
#include "util/u_atomic.h"
#include "util/u_queue.h"
#include "util/mesa-sha1.h"
#include "util/ralloc.h"
#include "util/compiler.h"

#include "disk_cache.h"
#include "disk_cache_os.h"

/* Number of bits to mask off from a cache key to get an index. */
#define CACHE_INDEX_KEY_BITS 16

/* Mask for computing an index from a key. */
#define CACHE_INDEX_KEY_MASK ((1 << CACHE_INDEX_KEY_BITS) - 1)

/* The number of keys that can be stored in the index. */
#define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)

/* The cache version should be bumped whenever a change is made to the
 * structure of cache entries or the index. This will give any 3rd party
 * applications reading the cache entries a chance to adjust to the changes.
 *
 * - The cache version is checked internally when reading a cache entry. If we
 *   ever have a mismatch we are in big trouble as this means we had a cache
 *   collision. In case of such an event please check the skies for giant
 *   asteroids and that the entire Mesa team hasn't been eaten by wolves.
 *
 * - There is no strict requirement that cache versions be backwards
 *   compatible but effort should be taken to limit disruption where possible.
 */
#define CACHE_VERSION 1

/* 3 is the recommended level, with 22 as the absolute maximum */
#define ZSTD_COMPRESSION_LEVEL 3

struct disk_cache {
   /* The path to the cache directory. */
   char *path;
   bool path_init_failed;

   /* Thread queue for compressing and writing cache entries to disk */
   struct util_queue cache_queue;

   /* Seed for rand, which is used to pick a random directory */
   uint64_t seed_xorshift128plus[2];

   /* A pointer to the mmapped index file within the cache directory. */
   uint8_t *index_mmap;
   size_t index_mmap_size;

   /* Pointer to total size of all objects in cache (within index_mmap) */
   uint64_t *size;

   /* Pointer to stored keys, (within index_mmap). */
   uint8_t *stored_keys;

   /* Maximum size of all cached objects (in bytes). */
   uint64_t max_size;

   /* Driver cache keys. */
   uint8_t *driver_keys_blob;
   size_t driver_keys_blob_size;

   disk_cache_put_cb blob_put_cb;
   disk_cache_get_cb blob_get_cb;
};
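
/* The mmapped index file consists of a single uint64_t holding the total
 * size of all cached objects, followed by CACHE_INDEX_MAX_KEYS slots of
 * CACHE_KEY_SIZE bytes each; see disk_cache_create() below for the sizing.
 */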

struct disk_cache_put_job {
   struct util_queue_fence fence;

   struct disk_cache *cache;

   cache_key key;

   /* Copy of cache data to be compressed and written. */
   void *data;

   /* Size of data to be compressed and written. */
   size_t size;

   struct cache_item_metadata cache_item_metadata;
};

/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                         "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0755);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}

#define DRV_KEY_CPY(_dst, _src, _src_size) \
   do {                                    \
      memcpy(_dst, _src, _src_size);       \
      _dst += _src_size;                   \
   } while (0)

struct disk_cache *
disk_cache_create(const char *gpu_name, const char *driver_id,
                  uint64_t driver_flags)
{
   void *local;
   struct disk_cache *cache = NULL;
   char *max_size_str;
   uint64_t max_size;
   int fd = -1;
   struct stat sb;
   size_t size;

   uint8_t cache_version = CACHE_VERSION;
   size_t cv_size = sizeof(cache_version);

   /* If running as a user other than the real user, disable the cache */
   if (geteuid() != getuid())
      return NULL;

   /* A ralloc context for transient data during this invocation. */
   local = ralloc_context(NULL);
   if (local == NULL)
      goto fail;

   /* At user request, disable shader cache entirely. */
   if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
      goto fail;

   cache = rzalloc(NULL, struct disk_cache);
   if (cache == NULL)
      goto fail;

   /* Assume failure. */
   cache->path_init_failed = true;

   char *path = disk_cache_generate_cache_dir(local);
   if (!path)
      goto path_fail;

   cache->path = ralloc_strdup(cache, path);
   if (cache->path == NULL)
      goto path_fail;

   path = ralloc_asprintf(local, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
      if (ftruncate(fd, size) == -1)
         goto path_fail;
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   cache->size = (uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);

   max_size = 0;

   max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
   if (max_size_str) {
      char *end;
      max_size = strtoul(max_size_str, &end, 10);
      if (end == max_size_str) {
         max_size = 0;
      } else {
         switch (*end) {
         case 'K':
         case 'k':
            max_size *= 1024;
            break;
         case 'M':
         case 'm':
            max_size *= 1024*1024;
            break;
         case '\0':
         case 'G':
         case 'g':
         default:
            max_size *= 1024*1024*1024;
            break;
         }
      }
   }
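
   /* For example, MESA_GLSL_CACHE_MAX_SIZE=512M caps the cache at 512 MiB;
    * a bare number, a 'G'/'g' suffix, or any unrecognized suffix is treated
    * as GiB by the switch above.
    */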

   /* Default to 1GB for maximum cache size. */
   if (max_size == 0) {
      max_size = 1024*1024*1024;
   }

   cache->max_size = max_size;

   /* 4 threads were chosen below because just about all modern CPUs currently
    * available that run Mesa have *at least* 4 cores. For these CPUs allowing
    * more threads can result in the queue being processed faster, thus
    * avoiding excessive memory use due to a backlog of cache entries building
    * up in the queue. Since we set the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY
    * flag this should have little negative impact on low core systems.
    *
    * The queue will resize automatically when it's full, so adding new jobs
    * doesn't stall.
    */
   util_queue_init(&cache->cache_queue, "disk$", 32, 4,
                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
                   UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);

   cache->path_init_failed = false;

 path_fail:
   if (fd != -1)
      close(fd);

   cache->driver_keys_blob_size = cv_size;

   /* Create driver id keys */
   size_t id_size = strlen(driver_id) + 1;
   size_t gpu_name_size = strlen(gpu_name) + 1;
   cache->driver_keys_blob_size += id_size;
   cache->driver_keys_blob_size += gpu_name_size;

   /* We sometimes store entire structs that contain pointers in the cache,
    * so use the pointer size as a key to avoid hard-to-debug issues.
    */
   uint8_t ptr_size = sizeof(void *);
   size_t ptr_size_size = sizeof(ptr_size);
   cache->driver_keys_blob_size += ptr_size_size;

   size_t driver_flags_size = sizeof(driver_flags);
   cache->driver_keys_blob_size += driver_flags_size;

   cache->driver_keys_blob =
      ralloc_size(cache, cache->driver_keys_blob_size);
   if (!cache->driver_keys_blob)
      goto fail;
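
   /* The blob is laid out as: cache version, driver id string (with NUL),
    * GPU name string (with NUL), pointer size, then driver flags.
    */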
   uint8_t *drv_key_blob = cache->driver_keys_blob;
   DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size);
   DRV_KEY_CPY(drv_key_blob, driver_id, id_size);
   DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size);
   DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size);
   DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size);

   /* Seed our rand function */
   s_rand_xorshift128plus(cache->seed_xorshift128plus, true);

   ralloc_free(local);

   return cache;

 fail:
   if (cache)
      ralloc_free(cache);
   ralloc_free(local);

   return NULL;
}

void
disk_cache_destroy(struct disk_cache *cache)
{
   if (cache && !cache->path_init_failed) {
      util_queue_finish(&cache->cache_queue);
      util_queue_destroy(&cache->cache_queue);
      munmap(cache->index_mmap, cache->index_mmap_size);
   }

   ralloc_free(cache);
}

void
disk_cache_wait_for_idle(struct disk_cache *cache)
{
   util_queue_finish(&cache->cache_queue);
}

/* Return a filename within the cache's directory corresponding to 'key'. The
 * returned filename is malloc'ed, and the caller is responsible for freeing
 * it.
 *
 * Returns NULL if out of memory.
 */
static char *
get_cache_file(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   if (cache->path_init_failed)
      return NULL;
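
   /* The key is formatted as a 40-character hex string; the first two hex
    * characters name a subdirectory and the remaining 38 name the file, so
    * a digest beginning "ab12..." maps to "<cache->path>/ab/12...".
    */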
   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}

/* Create the directory that will be needed for the cache file for \key.
 *
 * Obviously, the implementation here must closely match
 * get_cache_file above.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_if_needed(dir);
   free(dir);
}

/* Given a directory path and predicate function, find the entry with
 * the oldest access time in that directory for which the predicate
 * returns true.
 *
 * Returns: A malloc'ed string for the path to the chosen file, (or
 * NULL on any error). The caller should free the string when
 * finished.
 */
static char *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *entry;
   char *filename;
   char *lru_name = NULL;
   time_t lru_atime = 0;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   while (1) {
      entry = readdir(dir);
      if (entry == NULL)
         break;

      struct stat sb;
      if (fstatat(dirfd(dir), entry->d_name, &sb, 0) == 0) {
         if (!lru_atime || (sb.st_atime < lru_atime)) {
            size_t len = strlen(entry->d_name);

            if (!predicate(dir_path, &sb, entry->d_name, len))
               continue;
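
            /* This entry is the least recently used so far; remember its
             * name, reusing the previous allocation when it is large enough.
             */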
            char *tmp = realloc(lru_name, len + 1);
            if (tmp) {
               lru_name = tmp;
               memcpy(lru_name, entry->d_name, len + 1);
               lru_atime = sb.st_atime;
            }
         }
      }
   }

   if (lru_name == NULL) {
      closedir(dir);
      return NULL;
   }

   if (asprintf(&filename, "%s/%s", dir_path, lru_name) < 0)
      filename = NULL;

   free(lru_name);
   closedir(dir);

   return filename;
}

/* Is entry a regular file whose name does not end in ".tmp"? */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
      return false;

   return true;
}

/* Returns the size of the deleted file, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct stat sb;
   char *filename;

   filename = choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (filename == NULL)
      return 0;

   if (stat(filename, &sb) == -1) {
      free(filename);
      return 0;
   }

   unlink(filename);
   free(filename);
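
   /* st_blocks counts 512-byte blocks, so this is the file's on-disk
    * footprint rather than its logical length.
    */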
   return sb.st_blocks * 512;
}

/* Is entry a directory with a two-character name, (and not the
 * special name of ".."). We also return false if the dir is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;
   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty */
   if (subdir_entries <= 2)
      return false;

   return true;
}

static void
evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * Provides pseudo-LRU eviction to reduce checking all cache files.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64, cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      p_atomic_add(cache->size, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   dir_path = choose_lru_file_matching(cache->path,
                                       is_two_character_sub_directory);
   if (dir_path == NULL)
      return;

   size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size)
      p_atomic_add(cache->size, - (uint64_t)size);
}

void
disk_cache_remove(struct disk_cache *cache, const cache_key key)
{
   struct stat sb;

   char *filename = get_cache_file(cache, key);
   if (filename == NULL) {
      return;
   }

   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   if (sb.st_blocks)
      p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
}
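
/* read() and write() may transfer fewer bytes than requested; these helpers
 * loop until the full count has been transferred or an error occurs.
 */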
static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *in = buf;
   ssize_t read_ret;
   size_t done;

   for (done = 0; done < count; done += read_ret) {
      read_ret = read(fd, in + done, count - done);
      if (read_ret == -1 || read_ret == 0)
         return -1;
   }
   return done;
}

static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *out = buf;
   ssize_t written;
   size_t done;

   for (done = 0; done < count; done += written) {
      written = write(fd, out + done, count - done);
      if (written == -1)
         return -1;
   }
   return done;
}

/* From the zlib docs:
 * "If the memory is available, buffers sizes on the order of 128K or 256K
 * bytes should be used."
 */
#define BUFSIZE (256 * 1024)

/**
 * Compresses cache entry in memory and writes it to disk. Returns the size
 * of the data written to disk.
 */
static size_t
deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
                          const char *filename)
{
#ifdef HAVE_ZSTD
   /* from the zstd docs (https://facebook.github.io/zstd/zstd_manual.html):
    * compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
    */
   size_t out_size = ZSTD_compressBound(in_data_size);
   void *out = malloc(out_size);
   if (out == NULL)
      return 0;

   size_t ret = ZSTD_compress(out, out_size, in_data, in_data_size,
                              ZSTD_COMPRESSION_LEVEL);
   if (ZSTD_isError(ret)) {
      free(out);
      return 0;
   }
   ssize_t written = write_all(dest, out, ret);
   if (written == -1) {
      free(out);
      return 0;
   }
   free(out);
   return ret;
#else
   unsigned char *out;

   /* allocate deflate state */
   z_stream strm;
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = (uint8_t *) in_data;
   strm.avail_in = in_data_size;

   int ret = deflateInit(&strm, Z_BEST_COMPRESSION);
   if (ret != Z_OK)
      return 0;

   /* compress until end of in_data */
   size_t compressed_size = 0;
   int flush;

   out = malloc(BUFSIZE * sizeof(unsigned char));
   if (out == NULL) {
      (void)deflateEnd(&strm);
      return 0;
   }

   do {
      int remaining = in_data_size - BUFSIZE;
      flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH;
      in_data_size -= BUFSIZE;
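
      /* Note: on the final chunk remaining is <= 0 and flush becomes
       * Z_FINISH, telling deflate() to finish the stream.
       */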

      /* Run deflate() on input until the output buffer is not full (which
       * means there is no more data to deflate).
       */
      do {
         strm.avail_out = BUFSIZE;
         strm.next_out = out;

         ret = deflate(&strm, flush); /* no bad return value */
         assert(ret != Z_STREAM_ERROR); /* state not clobbered */

         size_t have = BUFSIZE - strm.avail_out;
         compressed_size += have;

         ssize_t written = write_all(dest, out, have);
         if (written == -1) {
            (void)deflateEnd(&strm);
            free(out);
            return 0;
         }
      } while (strm.avail_out == 0);

      /* all input should be used */
      assert(strm.avail_in == 0);

   } while (flush != Z_FINISH);

   /* stream should be complete */
   assert(ret == Z_STREAM_END);

   /* clean up and return */
   (void)deflateEnd(&strm);
   free(out);
   return compressed_size;
#endif
}

static struct disk_cache_put_job *
create_put_job(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
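   /* The job and a copy of the data are carved from a single allocation:
    * the payload lives immediately after the struct (dc_job + 1).
    */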
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
      malloc(sizeof(struct disk_cache_put_job) + size);

   if (dc_job) {
      dc_job->cache = cache;
      memcpy(dc_job->key, key, sizeof(cache_key));
      dc_job->data = dc_job + 1;
      memcpy(dc_job->data, data, size);
      dc_job->size = size;

      /* Copy the cache item metadata */
      if (cache_item_metadata) {
         dc_job->cache_item_metadata.type = cache_item_metadata->type;
         dc_job->cache_item_metadata.keys = NULL;
         if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
            dc_job->cache_item_metadata.num_keys =
               cache_item_metadata->num_keys;
            dc_job->cache_item_metadata.keys = (cache_key *)
               malloc(cache_item_metadata->num_keys * sizeof(cache_key));

            if (!dc_job->cache_item_metadata.keys)
               goto fail;

            memcpy(dc_job->cache_item_metadata.keys,
                   cache_item_metadata->keys,
                   sizeof(cache_key) * cache_item_metadata->num_keys);
         }
      } else {
         dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
         dc_job->cache_item_metadata.keys = NULL;
      }
   }

   return dc_job;

 fail:
   free(dc_job);

   return NULL;
}

static void
destroy_put_job(void *job, int thread_index)
{
   if (job) {
      struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
      free(dc_job->cache_item_metadata.keys);

      free(job);
   }
}

struct cache_entry_file_data {
   uint32_t crc32;
   uint32_t uncompressed_size;
};
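
/* On disk each cache entry is laid out as:
 *
 *   driver_keys_blob | cache item metadata | cache_entry_file_data |
 *   compressed data
 *
 * as written by cache_put() below and parsed back by disk_cache_get().
 */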

static void
cache_put(void *job, int thread_index)
{
   assert(job);

   int fd = -1, fd_final = -1, err, ret;
   unsigned i = 0;
   char *filename = NULL, *filename_tmp = NULL;
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;

   filename = get_cache_file(dc_job->cache, dc_job->key);
   if (filename == NULL)
      goto done;

   /* If the cache is too large, evict something else first. */
   while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
          i < 8) {
      evict_lru_item(dc_job->cache);
      i++;
   }
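
   /* Capping the loop above at eight evictions bounds the cost of a single
    * store on a cache that refuses to shrink below max_size.
    */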

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that file be responsible
    * for writing the file.
    */
#ifdef HAVE_FLOCK
   err = flock(fd, LOCK_EX | LOCK_NB);
#else
   struct flock lock = {
      .l_start = 0,
      .l_len = 0, /* entire file */
      .l_type = F_WRLCK,
      .l_whence = SEEK_SET
   };
   err = fcntl(fd, F_SETLK, &lock);
#endif
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */

   /* Write the driver_keys_blob, this can be used to find information about
    * the mesa version that produced the entry or deal with hash collisions,
    * should that ever become a real problem.
    */
   ret = write_all(fd, dc_job->cache->driver_keys_blob,
                   dc_job->cache->driver_keys_blob_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   ret = write_all(fd, &dc_job->cache_item_metadata.type,
                   sizeof(uint32_t));
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
                      sizeof(uint32_t));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }

      ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
                      dc_job->cache_item_metadata.num_keys *
                      sizeof(cache_key));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }
   }

   /* Create CRC of the data. We will read this when restoring the cache and
    * use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
   cf_data.uncompressed_size = dc_job->size;

   size_t cf_data_size = sizeof(cf_data);
   ret = write_all(fd, &cf_data, cf_data_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   size_t file_size = deflate_and_write_to_disk(dc_job->data, dc_job->size,
                                                fd, filename_tmp);
   if (file_size == 0) {
      unlink(filename_tmp);
      goto done;
   }
   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong; remove the file. */
      unlink(filename);
      goto done;
   }

   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   free(filename);
}

void
disk_cache_put(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
      return;
   }

   if (cache->path_init_failed)
      return;

   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, data, size, cache_item_metadata);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job, dc_job->size);
   }
}

/**
 * Decompresses cache entry, returns true if successful.
 */
static bool
inflate_cache_data(uint8_t *in_data, size_t in_data_size,
                   uint8_t *out_data, size_t out_data_size)
{
#ifdef HAVE_ZSTD
   size_t ret = ZSTD_decompress(out_data, out_data_size, in_data, in_data_size);
   return !ZSTD_isError(ret);
#else
   z_stream strm;

   /* allocate inflate state */
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = in_data;
   strm.avail_in = in_data_size;
   strm.next_out = out_data;
   strm.avail_out = out_data_size;

   int ret = inflateInit(&strm);
   if (ret != Z_OK)
      return false;

   ret = inflate(&strm, Z_NO_FLUSH);
   assert(ret != Z_STREAM_ERROR); /* state not clobbered */

   /* Unless there was an error we should have decompressed everything in one
    * go as we know the uncompressed file size.
    */
   if (ret != Z_STREAM_END) {
      (void)inflateEnd(&strm);
      return false;
   }
   assert(strm.avail_out == 0);

   /* clean up and return */
   (void)inflateEnd(&strm);
   return true;
#endif
}

void *
disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
{
   int fd = -1, ret;
   struct stat sb;
   char *filename = NULL;
   uint8_t *data = NULL;
   uint8_t *uncompressed_data = NULL;
   uint8_t *file_header = NULL;

   if (size)
      *size = 0;

   if (cache->blob_get_cb) {
      /* This is what Android EGL defines as the maxValueSize in egl_cache_t
       * class implementation.
       */
      const signed long max_blob_size = 64 * 1024;
      void *blob = malloc(max_blob_size);
      if (!blob)
         return NULL;

      signed long bytes =
         cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);

      if (!bytes) {
         free(blob);
         return NULL;
      }

      if (size)
         *size = bytes;
      return blob;
   }

   filename = get_cache_file(cache, key);
   if (filename == NULL)
      goto fail;

   fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   size_t ck_size = cache->driver_keys_blob_size;
   file_header = malloc(ck_size);
   if (!file_header)
      goto fail;

   if (sb.st_size < ck_size)
      goto fail;

   ret = read_all(fd, file_header, ck_size);
   if (ret == -1)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   size_t cache_item_md_size = sizeof(uint32_t);
   uint32_t md_type;
   ret = read_all(fd, &md_type, cache_item_md_size);
   if (ret == -1)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys;
      cache_item_md_size += sizeof(uint32_t);
      ret = read_all(fd, &num_keys, sizeof(uint32_t));
      if (ret == -1)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders; it is not used by Mesa itself, so just skip
       * it for now.
       * TODO: pass the metadata back to the caller and do some basic
       * validation.
       */
      cache_item_md_size += num_keys * sizeof(cache_key);
      ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR);
      if (ret == -1)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data cf_data;
   size_t cf_data_size = sizeof(cf_data);
   ret = read_all(fd, &cf_data, cf_data_size);
   if (ret == -1)
      goto fail;

   /* Load the actual cache data. */
   size_t cache_data_size =
      sb.st_size - cf_data_size - ck_size - cache_item_md_size;
   ret = read_all(fd, data, cache_data_size);
   if (ret == -1)
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data.uncompressed_size);
   if (!uncompressed_data)
      goto fail;
   if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
                           cf_data.uncompressed_size))
      goto fail;

   /* Check the data for corruption */
   if (cf_data.crc32 != util_hash_crc32(uncompressed_data,
                                        cf_data.uncompressed_size))
      goto fail;

   free(data);
   free(filename);
   free(file_header);
   close(fd);

   if (size)
      *size = cf_data.uncompressed_size;

   return uncompressed_data;

 fail:
   if (data)
      free(data);
   if (uncompressed_data)
      free(uncompressed_data);
   if (filename)
      free(filename);
   if (file_header)
      free(file_header);
   if (fd != -1)
      close(fd);

   return NULL;
}

void
disk_cache_put_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
      return;
   }

   if (cache->path_init_failed)
      return;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   memcpy(entry, key, CACHE_KEY_SIZE);
}
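
/* The index slot is chosen from the first 32 bits of the key, normalized
 * with CPU_TO_LE32 so the on-disk index layout does not depend on host
 * endianness, then masked down to CACHE_INDEX_KEY_BITS bits.
 */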

/* This function lets us test whether a given key was previously
 * stored in the cache with disk_cache_put_key(). The implementation
 * is efficient by not using syscalls or hitting the disk. It's not
 * race-free, but the races are benign. If we race with someone else
 * calling disk_cache_put_key, then that's just an extra cache miss and an
 * extra recompile.
 */
bool
disk_cache_has_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_get_cb) {
      uint32_t blob;
      return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
   }

   if (cache->path_init_failed)
      return false;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
}

void
disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                       cache_key key)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, cache->driver_keys_blob,
                     cache->driver_keys_blob_size);
   _mesa_sha1_update(&ctx, data, size);
   _mesa_sha1_final(&ctx, key);
}
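
/* A sketch of typical usage of this API (error handling omitted; "gpu",
 * "driver-1.0", blob and blob_size are placeholders):
 *
 *    struct disk_cache *dc = disk_cache_create("gpu", "driver-1.0", 0);
 *    cache_key key;
 *    disk_cache_compute_key(dc, blob, blob_size, key);
 *    if (!disk_cache_has_key(dc, key))
 *       disk_cache_put(dc, key, blob, blob_size, NULL);
 *    size_t size;
 *    void *loaded = disk_cache_get(dc, key, &size);
 *    free(loaded);
 *    disk_cache_destroy(dc);
 *
 * Note that disk_cache_put() queues the write asynchronously;
 * disk_cache_wait_for_idle() drains the queue when needed.
 */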

void
disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
                         disk_cache_get_cb get)
{
   cache->blob_put_cb = put;
   cache->blob_get_cb = get;
}

#endif /* ENABLE_SHADER_CACHE */