2009-11-16 18:56:18 +00:00
|
|
|
/**********************************************************
|
|
|
|
* Copyright 2008-2009 VMware, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person
|
|
|
|
* obtaining a copy of this software and associated documentation
|
|
|
|
* files (the "Software"), to deal in the Software without
|
|
|
|
* restriction, including without limitation the rights to use, copy,
|
|
|
|
* modify, merge, publish, distribute, sublicense, and/or sell copies
|
|
|
|
* of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice shall be
|
|
|
|
* included in all copies or substantial portions of the Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
|
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
|
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*
|
|
|
|
**********************************************************/
|
|
|
|
|
2011-12-07 23:57:11 +00:00
|
|
|
#include "util/u_math.h"
|
2009-11-16 18:56:18 +00:00
|
|
|
#include "util/u_memory.h"
|
2016-11-22 17:28:18 +00:00
|
|
|
#include "util/crc32.h"
|
2009-11-16 18:56:18 +00:00
|
|
|
|
|
|
|
#include "svga_debug.h"
|
2011-12-07 23:57:11 +00:00
|
|
|
#include "svga_format.h"
|
2009-11-16 18:56:18 +00:00
|
|
|
#include "svga_winsys.h"
|
|
|
|
#include "svga_screen.h"
|
|
|
|
#include "svga_screen_cache.h"
|
2017-01-27 02:46:23 +00:00
|
|
|
#include "svga_context.h"
|
2019-05-08 14:50:18 +01:00
|
|
|
#include "svga_cmd.h"
|
2009-11-16 18:56:18 +00:00
|
|
|
|
|
|
|
#define SVGA_SURFACE_CACHE_ENABLED 1
|
|
|
|
|
|
|
|
|
2011-12-07 23:57:11 +00:00
|
|
|
/**
|
|
|
|
* Return the size of the surface described by the key (in bytes).
|
|
|
|
*/
|
2020-05-26 16:59:50 +01:00
|
|
|
unsigned
|
|
|
|
svga_surface_size(const struct svga_host_surface_cache_key *key)
|
2011-12-07 23:57:11 +00:00
|
|
|
{
|
|
|
|
unsigned bw, bh, bpb, total_size, i;
|
|
|
|
|
|
|
|
assert(key->numMipLevels > 0);
|
|
|
|
assert(key->numFaces > 0);
|
2017-06-06 19:52:50 +01:00
|
|
|
assert(key->arraySize > 0);
|
2011-12-07 23:57:11 +00:00
|
|
|
|
|
|
|
if (key->format == SVGA3D_BUFFER) {
|
|
|
|
/* Special case: we don't want to count vertex/index buffers
|
|
|
|
* against the cache size limit, so view them as zero-sized.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
svga_format_size(key->format, &bw, &bh, &bpb);
|
|
|
|
|
|
|
|
total_size = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < key->numMipLevels; i++) {
|
|
|
|
unsigned w = u_minify(key->size.width, i);
|
|
|
|
unsigned h = u_minify(key->size.height, i);
|
|
|
|
unsigned d = u_minify(key->size.depth, i);
|
|
|
|
unsigned img_size = ((w + bw - 1) / bw) * ((h + bh - 1) / bh) * d * bpb;
|
|
|
|
total_size += img_size;
|
|
|
|
}
|
|
|
|
|
2017-08-21 20:08:41 +01:00
|
|
|
total_size *= key->numFaces * key->arraySize * MAX2(1, key->sampleCount);
|
2011-12-07 23:57:11 +00:00
|
|
|
|
|
|
|
return total_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
/**
|
|
|
|
* Compute the bucket for this key.
|
2009-11-16 18:56:18 +00:00
|
|
|
*/
|
2015-07-21 00:58:43 +01:00
|
|
|
static inline unsigned
|
2009-11-16 18:56:18 +00:00
|
|
|
svga_screen_cache_bucket(const struct svga_host_surface_cache_key *key)
|
|
|
|
{
|
2012-08-02 16:40:40 +01:00
|
|
|
return util_hash_crc32(key, sizeof *key) % SVGA_HOST_SURFACE_CACHE_BUCKETS;
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-12-07 23:57:11 +00:00
|
|
|
/**
 * Search the cache for a surface that matches the key.  If a match is
 * found, remove it from the cache and return the surface pointer.
 * Return NULL otherwise.
 *
 * On a hit, the surface handle's reference is transferred to the caller
 * and the vacated cache entry is moved to the 'empty' list for reuse.
 */
static struct svga_winsys_surface *
svga_screen_cache_lookup(struct svga_screen *svgascreen,
                         const struct svga_host_surface_cache_key *key)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry;
   struct svga_winsys_surface *handle = NULL;
   struct list_head *curr, *next;
   unsigned bucket;
   unsigned tries = 0;   /* number of entries probed, for debug output */

   assert(key->cachable);

   bucket = svga_screen_cache_bucket(key);

   mtx_lock(&cache->mutex);

   /* Walk the hash bucket's list; 'next' is saved up front because a
    * matching entry is unlinked from the list inside the loop.
    */
   curr = cache->bucket[bucket].next;
   next = curr->next;
   while (curr != &cache->bucket[bucket]) {
      ++tries;

      entry = list_entry(curr, struct svga_host_surface_cache_entry, bucket_head);

      assert(entry->handle);

      /* If the key matches and the fence is signalled (the surface is no
       * longer needed) the lookup was successful.  We found a surface that
       * can be reused.
       * We unlink the surface from the cache entry and we add the entry to
       * the 'empty' list.
       */
      if (memcmp(&entry->key, key, sizeof *key) == 0 &&
          sws->fence_signalled(sws, entry->fence, 0) == 0) {
         unsigned surf_size;

         assert(sws->surface_is_flushed(sws, entry->handle));

         handle = entry->handle; /* Reference is transfered here. */
         entry->handle = NULL;

         /* Remove from hash table */
         list_del(&entry->bucket_head);

         /* remove from LRU list */
         list_del(&entry->head);

         /* Add the cache entry (but not the surface!) to the empty list */
         list_add(&entry->head, &cache->empty);

         /* update the cache size */
         surf_size = svga_surface_size(&entry->key);
         assert(surf_size <= cache->total_size);
         if (surf_size > cache->total_size)
            cache->total_size = 0; /* should never happen, but be safe */
         else
            cache->total_size -= surf_size;

         break;
      }

      curr = next;
      next = curr->next;
   }

   mtx_unlock(&cache->mutex);

   if (SVGA_DEBUG & DEBUG_DMA)
      debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__,
                   handle ? "hit" : "miss", tries, bucket);

   return handle;
}
|
|
|
|
|
|
|
|
|
2011-12-07 23:57:11 +00:00
|
|
|
/**
 * Free the least recently used entries in the surface cache until the
 * cache size is <= the target size OR there are no unused entries left
 * to discard.  We don't do any flushing to try to free up additional
 * surfaces.
 *
 * Caller must hold cache->mutex (see svga_screen_cache_add).
 * Only non-buffer surfaces are discarded; SVGA3D_BUFFER entries have a
 * zero accounted size, so freeing them would not shrink the cache anyway.
 */
static void
svga_screen_cache_shrink(struct svga_screen *svgascreen,
                         unsigned target_size)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry = NULL, *next_entry;

   /* Walk over the list of unused buffers in reverse order: from oldest
    * to newest.  The _SAFE variant is required because entries are
    * unlinked inside the loop.
    */
   LIST_FOR_EACH_ENTRY_SAFE_REV(entry, next_entry, &cache->unused, head) {
      if (entry->key.format != SVGA3D_BUFFER) {
         /* we don't want to discard vertex/index buffers */

         cache->total_size -= svga_surface_size(&entry->key);

         assert(entry->handle);
         /* Drop the last reference: this actually frees the host surface. */
         sws->surface_reference(sws, &entry->handle, NULL);

         /* Unlink from the hash bucket and LRU list, then recycle the
          * entry onto the empty list.
          */
         list_del(&entry->bucket_head);
         list_del(&entry->head);
         list_add(&entry->head, &cache->empty);

         if (cache->total_size <= target_size) {
            /* all done */
            break;
         }
      }
   }
}
|
|
|
|
|
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
/**
 * Add a surface to the cache.  This is done when the driver deletes
 * the surface.  Note: transfers a handle reference.
 *
 * *p_handle is always NULL'd out; if the surface cannot be cached
 * (too large, cache full and unshrinkable, or no free entry) the
 * reference is dropped instead.
 *
 * \param to_invalidate  with guest-backed objects, put the entry on the
 *        'validated' list so its content is invalidated at the next flush;
 *        otherwise it goes straight onto the 'invalidated' list.
 */
static void
svga_screen_cache_add(struct svga_screen *svgascreen,
                      const struct svga_host_surface_cache_key *key,
                      boolean to_invalidate,
                      struct svga_winsys_surface **p_handle)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry = NULL;
   struct svga_winsys_surface *handle = *p_handle;
   unsigned surf_size;

   assert(key->cachable);

   if (!handle)
      return;

   surf_size = svga_surface_size(key);

   *p_handle = NULL;
   mtx_lock(&cache->mutex);

   if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
      /* this surface is too large to cache, just free it */
      sws->surface_reference(sws, &handle, NULL);
      mtx_unlock(&cache->mutex);
      return;
   }

   if (cache->total_size + surf_size > SVGA_HOST_SURFACE_CACHE_BYTES) {
      /* Adding this surface would exceed the cache size.
       * Try to discard least recently used entries until we hit the
       * new target cache size.
       */
      unsigned target_size = SVGA_HOST_SURFACE_CACHE_BYTES - surf_size;

      svga_screen_cache_shrink(svgascreen, target_size);

      if (cache->total_size > target_size) {
         /* we weren't able to shrink the cache as much as we wanted so
          * just discard this surface.
          */
         sws->surface_reference(sws, &handle, NULL);
         mtx_unlock(&cache->mutex);
         return;
      }
   }

   /* Find a cache entry to hold the surface: prefer a free ('empty')
    * entry, otherwise evict the least-recently-used surface.
    */
   if (!list_is_empty(&cache->empty)) {
      /* An empty entry has no surface associated with it.
       * Use the first empty entry.
       */
      entry = list_entry(cache->empty.next,
                         struct svga_host_surface_cache_entry,
                         head);

      /* Remove from LRU list */
      list_del(&entry->head);
   }
   else if (!list_is_empty(&cache->unused)) {
      /* free the last used buffer and reuse its entry */
      entry = list_entry(cache->unused.prev,
                         struct svga_host_surface_cache_entry,
                         head);
      SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
               "unref sid %p (make space)\n", entry->handle);

      cache->total_size -= svga_surface_size(&entry->key);

      sws->surface_reference(sws, &entry->handle, NULL);

      /* Remove from hash table */
      list_del(&entry->bucket_head);

      /* Remove from LRU list */
      list_del(&entry->head);
   }

   if (entry) {
      assert(entry->handle == NULL);
      entry->handle = handle;
      memcpy(&entry->key, key, sizeof entry->key);

      SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
               "cache sid %p\n", entry->handle);

      /* If we don't have gb objects, we don't need to invalidate. */
      if (sws->have_gb_objects) {
         if (to_invalidate)
            list_add(&entry->head, &cache->validated);
         else
            list_add(&entry->head, &cache->invalidated);
      }
      else
         list_add(&entry->head, &cache->invalidated);

      cache->total_size += surf_size;
   }
   else {
      /* Couldn't cache the buffer -- this really shouldn't happen */
      SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
               "unref sid %p (couldn't find space)\n", handle);
      sws->surface_reference(sws, &handle, NULL);
   }

   mtx_unlock(&cache->mutex);
}
|
|
|
|
|
|
|
|
|
2020-05-26 16:56:42 +01:00
|
|
|
/* Maximum number of invalidate surface commands in a command buffer */
|
|
|
|
# define SVGA_MAX_SURFACE_TO_INVALIDATE 1000
|
|
|
|
|
2009-11-16 18:56:18 +00:00
|
|
|
/**
 * Called during the screen flush to move all buffers not in a validate list
 * into the unused list.
 *
 * Two passes:
 *  1. 'invalidated' entries whose surfaces are flushed get the new fence
 *     and move to 'unused' (and back into their hash bucket) so lookups
 *     can find them.
 *  2. 'validated' entries (guest-backed objects only) get an
 *     SVGA3D_InvalidateGBSurface command emitted, then move to
 *     'invalidated' to be picked up by pass 1 on a later flush.
 */
void
svga_screen_cache_flush(struct svga_screen *svgascreen,
                        struct svga_context *svga,
                        struct pipe_fence_handle *fence)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry;
   struct list_head *curr, *next;
   unsigned bucket;

   mtx_lock(&cache->mutex);

   /* Loop over entries in the invalidated list */
   curr = cache->invalidated.next;
   next = curr->next;   /* saved up front; entries are unlinked in-loop */
   while (curr != &cache->invalidated) {
      entry = list_entry(curr, struct svga_host_surface_cache_entry, head);

      assert(entry->handle);

      if (sws->surface_is_flushed(sws, entry->handle)) {
         /* remove entry from the invalidated list */
         list_del(&entry->head);

         /* Remember the fence of this flush; lookup reuses the surface
          * only once this fence has signalled.
          */
         sws->fence_reference(sws, &entry->fence, fence);

         /* Add entry to the unused list */
         list_add(&entry->head, &cache->unused);

         /* Add entry to the hash table bucket */
         bucket = svga_screen_cache_bucket(&entry->key);
         list_add(&entry->bucket_head, &cache->bucket[bucket]);
      }

      curr = next;
      next = curr->next;
   }

   /* Second pass: invalidate the content of flushed 'validated' surfaces. */
   unsigned nsurf = 0;
   curr = cache->validated.next;
   next = curr->next;
   while (curr != &cache->validated) {
      entry = list_entry(curr, struct svga_host_surface_cache_entry, head);

      assert(entry->handle);
      /* Only gb-object surfaces are ever placed on the validated list. */
      assert(svga_have_gb_objects(svga));

      if (sws->surface_is_flushed(sws, entry->handle)) {
         /* remove entry from the validated list */
         list_del(&entry->head);

         /* It is now safe to invalidate the surface content.
          * It will be done using the current context.
          */
         if (SVGA_TRY(SVGA3D_InvalidateGBSurface(svga->swc, entry->handle))
             != PIPE_OK) {
            ASSERTED enum pipe_error ret;

            /* Even though surface invalidation here is done after the command
             * buffer is flushed, it is still possible that it will
             * fail because there might be just enough of this command that is
             * filling up the command buffer, so in this case we will call
             * the winsys flush directly to flush the buffer.
             * Note, we don't want to call svga_context_flush() here because
             * this function itself is called inside svga_context_flush().
             */
            svga_retry_enter(svga);
            svga->swc->flush(svga->swc, NULL);
            nsurf = 0;   /* command buffer emptied; restart the count */
            ret = SVGA3D_InvalidateGBSurface(svga->swc, entry->handle);
            svga_retry_exit(svga);
            assert(ret == PIPE_OK);
         }

         /* add the entry to the invalidated list */

         list_add(&entry->head, &cache->invalidated);
         nsurf++;
      }

      curr = next;
      next = curr->next;
   }

   mtx_unlock(&cache->mutex);

   /**
    * In some rare cases (when running ARK survival), we hit the max number
    * of surface relocations with invalidated surfaces during context flush.
    * So if the number of invalidated surface exceeds a certain limit (1000),
    * we'll do another winsys flush.
    */
   if (nsurf > SVGA_MAX_SURFACE_TO_INVALIDATE) {
      svga->swc->flush(svga->swc, NULL);
   }
}
|
|
|
|
|
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
/**
|
|
|
|
* Free all the surfaces in the cache.
|
|
|
|
* Called when destroying the svga screen object.
|
|
|
|
*/
|
2009-11-16 18:56:18 +00:00
|
|
|
void
|
|
|
|
svga_screen_cache_cleanup(struct svga_screen *svgascreen)
|
|
|
|
{
|
|
|
|
struct svga_host_surface_cache *cache = &svgascreen->cache;
|
|
|
|
struct svga_winsys_screen *sws = svgascreen->sws;
|
|
|
|
unsigned i;
|
2012-08-02 16:40:40 +01:00
|
|
|
|
|
|
|
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i) {
|
|
|
|
if (cache->entries[i].handle) {
|
2009-11-27 12:18:22 +00:00
|
|
|
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
|
|
|
|
"unref sid %p (shutdown)\n", cache->entries[i].handle);
|
2009-11-16 18:56:18 +00:00
|
|
|
sws->surface_reference(sws, &cache->entries[i].handle, NULL);
|
2011-12-07 23:57:11 +00:00
|
|
|
|
2020-05-26 16:59:50 +01:00
|
|
|
cache->total_size -= svga_surface_size(&cache->entries[i].key);
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
if (cache->entries[i].fence)
|
2016-08-15 18:38:39 +01:00
|
|
|
sws->fence_reference(sws, &cache->entries[i].fence, NULL);
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
2012-08-02 16:40:40 +01:00
|
|
|
|
2017-03-05 01:32:04 +00:00
|
|
|
mtx_destroy(&cache->mutex);
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
enum pipe_error
|
|
|
|
svga_screen_cache_init(struct svga_screen *svgascreen)
|
|
|
|
{
|
|
|
|
struct svga_host_surface_cache *cache = &svgascreen->cache;
|
|
|
|
unsigned i;
|
|
|
|
|
2011-12-07 23:57:11 +00:00
|
|
|
assert(cache->total_size == 0);
|
|
|
|
|
2017-03-05 01:00:15 +00:00
|
|
|
(void) mtx_init(&cache->mutex, mtx_plain);
|
2012-08-02 16:40:40 +01:00
|
|
|
|
|
|
|
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_BUCKETS; ++i)
|
2019-10-27 22:49:39 +00:00
|
|
|
list_inithead(&cache->bucket[i]);
|
2009-11-16 18:56:18 +00:00
|
|
|
|
2019-10-27 22:49:39 +00:00
|
|
|
list_inithead(&cache->unused);
|
2012-08-02 16:40:40 +01:00
|
|
|
|
2019-10-27 22:49:39 +00:00
|
|
|
list_inithead(&cache->validated);
|
2012-08-02 16:40:40 +01:00
|
|
|
|
2019-10-27 22:49:39 +00:00
|
|
|
list_inithead(&cache->invalidated);
|
2016-03-08 19:18:51 +00:00
|
|
|
|
2019-10-27 22:49:39 +00:00
|
|
|
list_inithead(&cache->empty);
|
2012-08-02 16:40:40 +01:00
|
|
|
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
|
2019-10-27 22:58:31 +00:00
|
|
|
list_addtail(&cache->entries[i].head, &cache->empty);
|
2009-11-16 18:56:18 +00:00
|
|
|
|
|
|
|
return PIPE_OK;
|
|
|
|
}
|
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
/**
|
|
|
|
* Allocate a new host-side surface. If the surface is marked as cachable,
|
|
|
|
* first try re-using a surface in the cache of freed surfaces. Otherwise,
|
|
|
|
* allocate a new surface.
|
2015-08-13 19:00:58 +01:00
|
|
|
* \param bind_flags bitmask of PIPE_BIND_x flags
|
|
|
|
* \param usage one of PIPE_USAGE_x values
|
2016-10-27 00:15:23 +01:00
|
|
|
* \param validated return True if the surface is a reused surface
|
2012-08-02 16:40:40 +01:00
|
|
|
*/
|
2009-11-16 18:56:18 +00:00
|
|
|
struct svga_winsys_surface *
|
|
|
|
svga_screen_surface_create(struct svga_screen *svgascreen,
|
2016-05-26 00:13:23 +01:00
|
|
|
unsigned bind_flags, enum pipe_resource_usage usage,
|
2016-10-27 00:15:23 +01:00
|
|
|
boolean *validated,
|
2009-11-16 18:56:18 +00:00
|
|
|
struct svga_host_surface_cache_key *key)
|
|
|
|
{
|
|
|
|
struct svga_winsys_screen *sws = svgascreen->sws;
|
|
|
|
struct svga_winsys_surface *handle = NULL;
|
2009-11-24 21:13:18 +00:00
|
|
|
boolean cachable = SVGA_SURFACE_CACHE_ENABLED && key->cachable;
|
|
|
|
|
2009-11-27 12:18:22 +00:00
|
|
|
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
|
2015-08-13 19:00:58 +01:00
|
|
|
"%s sz %dx%dx%d mips %d faces %d arraySize %d cachable %d\n",
|
2009-11-24 21:13:18 +00:00
|
|
|
__FUNCTION__,
|
|
|
|
key->size.width,
|
|
|
|
key->size.height,
|
|
|
|
key->size.depth,
|
|
|
|
key->numMipLevels,
|
|
|
|
key->numFaces,
|
2015-08-13 19:00:58 +01:00
|
|
|
key->arraySize,
|
2009-11-24 21:13:18 +00:00
|
|
|
key->cachable);
|
|
|
|
|
|
|
|
if (cachable) {
|
2017-06-16 23:34:43 +01:00
|
|
|
/* Try to re-cycle a previously freed, cached surface */
|
2009-11-24 21:13:18 +00:00
|
|
|
if (key->format == SVGA3D_BUFFER) {
|
2017-06-01 23:12:14 +01:00
|
|
|
SVGA3dSurfaceAllFlags hint_flag;
|
2015-08-13 19:00:58 +01:00
|
|
|
|
2009-11-24 21:13:18 +00:00
|
|
|
/* For buffers, round the buffer size up to the nearest power
|
|
|
|
* of two to increase the probability of cache hits. Keep
|
|
|
|
* texture surface dimensions unchanged.
|
|
|
|
*/
|
|
|
|
uint32_t size = 1;
|
2012-08-02 16:40:40 +01:00
|
|
|
while (size < key->size.width)
|
2009-11-24 21:13:18 +00:00
|
|
|
size <<= 1;
|
|
|
|
key->size.width = size;
|
2015-08-13 19:00:58 +01:00
|
|
|
|
|
|
|
/* Determine whether the buffer is static or dynamic.
|
|
|
|
* This is a bit of a heuristic which can be tuned as needed.
|
|
|
|
*/
|
|
|
|
if (usage == PIPE_USAGE_DEFAULT ||
|
|
|
|
usage == PIPE_USAGE_IMMUTABLE) {
|
|
|
|
hint_flag = SVGA3D_SURFACE_HINT_STATIC;
|
|
|
|
}
|
|
|
|
else if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
|
|
|
|
/* Index buffers don't change too often. Mark them as static.
|
|
|
|
*/
|
|
|
|
hint_flag = SVGA3D_SURFACE_HINT_STATIC;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/* Since we're reusing buffers we're effectively transforming all
|
|
|
|
* of them into dynamic buffers.
|
|
|
|
*
|
|
|
|
* It would be nice to not cache long lived static buffers. But there
|
|
|
|
* is no way to detect the long lived from short lived ones yet. A
|
|
|
|
* good heuristic would be buffer size.
|
|
|
|
*/
|
|
|
|
hint_flag = SVGA3D_SURFACE_HINT_DYNAMIC;
|
|
|
|
}
|
|
|
|
|
|
|
|
key->flags &= ~(SVGA3D_SURFACE_HINT_STATIC |
|
|
|
|
SVGA3D_SURFACE_HINT_DYNAMIC);
|
|
|
|
key->flags |= hint_flag;
|
2009-11-24 21:13:18 +00:00
|
|
|
}
|
2009-11-16 18:56:18 +00:00
|
|
|
|
|
|
|
handle = svga_screen_cache_lookup(svgascreen, key);
|
2009-11-24 21:13:18 +00:00
|
|
|
if (handle) {
|
|
|
|
if (key->format == SVGA3D_BUFFER)
|
2009-11-27 12:18:22 +00:00
|
|
|
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
|
2012-08-02 16:40:40 +01:00
|
|
|
"reuse sid %p sz %d (buffer)\n", handle,
|
2009-11-24 21:13:18 +00:00
|
|
|
key->size.width);
|
|
|
|
else
|
2009-11-27 12:18:22 +00:00
|
|
|
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
|
2015-08-13 19:00:58 +01:00
|
|
|
"reuse sid %p sz %dx%dx%d mips %d faces %d arraySize %d\n", handle,
|
2009-11-24 21:13:18 +00:00
|
|
|
key->size.width,
|
|
|
|
key->size.height,
|
|
|
|
key->size.depth,
|
|
|
|
key->numMipLevels,
|
2015-08-13 19:00:58 +01:00
|
|
|
key->numFaces,
|
|
|
|
key->arraySize);
|
2016-10-27 00:15:23 +01:00
|
|
|
*validated = TRUE;
|
2009-11-24 21:13:18 +00:00
|
|
|
}
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!handle) {
|
2017-06-16 23:34:43 +01:00
|
|
|
/* Unable to recycle surface, allocate a new one */
|
2015-08-13 19:00:58 +01:00
|
|
|
unsigned usage = 0;
|
|
|
|
|
|
|
|
if (!key->cachable)
|
|
|
|
usage |= SVGA_SURFACE_USAGE_SHARED;
|
|
|
|
if (key->scanout)
|
|
|
|
usage |= SVGA_SURFACE_USAGE_SCANOUT;
|
2019-04-05 08:09:19 +01:00
|
|
|
if (key->coherent)
|
|
|
|
usage |= SVGA_SURFACE_USAGE_COHERENT;
|
2015-08-13 19:00:58 +01:00
|
|
|
|
2009-11-16 18:56:18 +00:00
|
|
|
handle = sws->surface_create(sws,
|
|
|
|
key->flags,
|
|
|
|
key->format,
|
2015-08-13 19:00:58 +01:00
|
|
|
usage,
|
2012-08-02 16:40:40 +01:00
|
|
|
key->size,
|
2015-08-13 19:00:58 +01:00
|
|
|
key->numFaces * key->arraySize,
|
2015-08-06 23:44:35 +01:00
|
|
|
key->numMipLevels,
|
2015-08-13 19:00:58 +01:00
|
|
|
key->sampleCount);
|
2009-11-16 18:56:18 +00:00
|
|
|
if (handle)
|
2009-11-27 12:18:22 +00:00
|
|
|
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
|
2012-08-02 16:40:40 +01:00
|
|
|
" CREATE sid %p sz %dx%dx%d\n",
|
|
|
|
handle,
|
2009-11-27 12:18:22 +00:00
|
|
|
key->size.width,
|
|
|
|
key->size.height,
|
|
|
|
key->size.depth);
|
2016-10-27 00:15:23 +01:00
|
|
|
|
|
|
|
*validated = FALSE;
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return handle;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-02 16:40:40 +01:00
|
|
|
/**
|
|
|
|
* Release a surface. We don't actually free the surface- we put
|
|
|
|
* it into the cache of freed surfaces (if it's cachable).
|
|
|
|
*/
|
2009-11-16 18:56:18 +00:00
|
|
|
void
|
|
|
|
svga_screen_surface_destroy(struct svga_screen *svgascreen,
|
|
|
|
const struct svga_host_surface_cache_key *key,
|
2021-12-16 23:07:58 +00:00
|
|
|
boolean to_invalidate,
|
2009-11-16 18:56:18 +00:00
|
|
|
struct svga_winsys_surface **p_handle)
|
|
|
|
{
|
|
|
|
struct svga_winsys_screen *sws = svgascreen->sws;
|
2012-08-02 16:40:40 +01:00
|
|
|
|
2009-11-24 21:13:18 +00:00
|
|
|
/* We only set the cachable flag for surfaces of which we are the
|
|
|
|
* exclusive owner. So just hold onto our existing reference in
|
|
|
|
* that case.
|
|
|
|
*/
|
2012-08-02 16:40:40 +01:00
|
|
|
if (SVGA_SURFACE_CACHE_ENABLED && key->cachable) {
|
2021-12-16 23:07:58 +00:00
|
|
|
svga_screen_cache_add(svgascreen, key, to_invalidate, p_handle);
|
2009-11-16 18:56:18 +00:00
|
|
|
}
|
|
|
|
else {
|
2009-11-27 12:18:22 +00:00
|
|
|
SVGA_DBG(DEBUG_DMA,
|
|
|
|
"unref sid %p (uncachable)\n", *p_handle);
|
2009-11-16 18:56:18 +00:00
|
|
|
sws->surface_reference(sws, p_handle, NULL);
|
|
|
|
}
|
|
|
|
}
|
2012-10-17 16:55:54 +01:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Print/dump the contents of the screen cache. For debugging.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
svga_screen_cache_dump(const struct svga_screen *svgascreen)
|
|
|
|
{
|
|
|
|
const struct svga_host_surface_cache *cache = &svgascreen->cache;
|
|
|
|
unsigned bucket;
|
|
|
|
unsigned count = 0;
|
|
|
|
|
|
|
|
debug_printf("svga3d surface cache:\n");
|
|
|
|
for (bucket = 0; bucket < SVGA_HOST_SURFACE_CACHE_BUCKETS; bucket++) {
|
|
|
|
struct list_head *curr;
|
|
|
|
curr = cache->bucket[bucket].next;
|
|
|
|
while (curr && curr != &cache->bucket[bucket]) {
|
|
|
|
struct svga_host_surface_cache_entry *entry =
|
2022-07-27 16:48:11 +01:00
|
|
|
list_entry(curr, struct svga_host_surface_cache_entry,bucket_head);
|
2016-03-04 22:58:02 +00:00
|
|
|
if (entry->key.format == SVGA3D_BUFFER) {
|
|
|
|
debug_printf(" %p: buffer %u bytes\n",
|
|
|
|
entry->handle,
|
|
|
|
entry->key.size.width);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
debug_printf(" %p: %u x %u x %u format %u\n",
|
|
|
|
entry->handle,
|
2012-10-17 16:55:54 +01:00
|
|
|
entry->key.size.width,
|
|
|
|
entry->key.size.height,
|
|
|
|
entry->key.size.depth,
|
|
|
|
entry->key.format);
|
|
|
|
}
|
|
|
|
curr = curr->next;
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
debug_printf("%u surfaces, %u bytes\n", count, cache->total_size);
|
|
|
|
}
|