Compare commits

...

15 Commits

Author SHA1 Message Date
Matthias Clasen
c5988a22a4 gpu: Add a front cache
This copies the approach taken in the gl renderer to avoid much
of the hash table lookup overhead by means of a front cache.
2024-01-20 11:19:23 -05:00
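
For illustration, a minimal sketch of such a front cache: a small direct-mapped array consulted with a cheap memcmp before falling back to the hash table. Names here are hypothetical, not the code from this series.

#include <glib.h>
#include <string.h>

/* Hypothetical stand-ins for GlyphKey and its cached value */
typedef struct { guint glyph; guint flags; float scale; } Key;
typedef struct { Key key; gpointer value; } FrontEntry;

static FrontEntry front[256];  /* direct-mapped front cache */
static GHashTable *slow_cache; /* authoritative table, assumed created with matching hash/equal funcs */

static gpointer
cache_lookup (const Key *key)
{
  guint idx = key->glyph & 0xFF; /* index by the low bits of the glyph */
  gpointer value;

  /* Fast path: one memcmp instead of hash + equal callbacks */
  if (memcmp (key, &front[idx].key, sizeof (Key)) == 0)
    return front[idx].value;

  value = g_hash_table_lookup (slow_cache, key);
  if (value)
    {
      /* Promote the hit so the next lookup takes the fast path */
      front[idx].key = *key;
      front[idx].value = value;
    }

  return value;
}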
Matthias Clasen
498bf67cec gpu: Print more detailed cache statistics
Print out stale glyph counts and dead-pixel ratios.
2024-01-20 08:50:50 -05:00
Matthias Clasen
65455cdb91 gpu: Rework glyph aging
Instead of counting atlas occupancy by items, count how many
dead pixels we have, and free the atlas if more than half of its
pixels are dead. This matches what the GL renderer does.

As part of this, change when glyphs are freed. We now keep them
in the hash table until their atlas is freed, and we only do dead
pixel accounting when should_collect is called. This keeps the
glyphs available for use from the cache as long as they are in the atlas.

If a stale glyph is used, we 'revive' it by removing its pixels
from the dead count.

This matches more closely what the gl renderer does.
2024-01-20 08:48:33 -05:00
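
A condensed sketch of that accounting, with hypothetical names (the real logic lives in mark_as_stale and the should_collect callbacks in the diff below):

#include <glib.h>

#define ATLAS_PIXELS (1024 * 1024)

typedef struct { guint dead_pixels; } Atlas;
typedef struct { Atlas *atlas; guint pixels; gboolean stale; } Glyph;

/* Aging: an unused glyph only marks its pixels as dead; it stays in
 * the hash table and remains usable from the cache. */
static void
glyph_age (Glyph *glyph)
{
  if (!glyph->stale)
    {
      glyph->stale = TRUE;
      glyph->atlas->dead_pixels += glyph->pixels;
    }
}

/* Revival: using a stale glyph takes its pixels off the dead count. */
static void
glyph_use (Glyph *glyph)
{
  if (glyph->stale)
    {
      glyph->stale = FALSE;
      glyph->atlas->dead_pixels -= glyph->pixels;
    }
}

/* Free the atlas once more than half of its pixels are dead. */
static gboolean
atlas_should_collect (const Atlas *atlas)
{
  return atlas->dead_pixels > ATLAS_PIXELS / 2;
}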
Matthias Clasen
d4963ba2e5 gpu: Make cache freeing more robust
Currently, we only free an atlas when all its glyphs are gone,
but we might revisit that in the future, so be prepared for it.
2024-01-20 08:05:38 -05:00
Matthias Clasen
ec5fed6cc4 gpu: Fix atlas allocation logic
The code wasn't careful enough, and could make the last slice
taller than the atlas, or put the image into a too-narrow slice.

Add more assertions.
2024-01-20 08:05:38 -05:00
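
The corrected logic is visible in the diff below; as a standalone sketch (hypothetical names, simplified rounding), the key change is clamping a new slice's rounded-up height to the space actually left in the atlas:

#include <glib.h>

#define ATLAS_SIZE 1024

/* Simplified stand-in for the rounding helper used by the atlas code */
static gsize
round_up (gsize size)
{
  gsize power = 1;
  while (power < size)
    power *= 2;
  return power;
}

/* Open a new slice at vertical offset y for an image of the given height */
static gboolean
open_slice (gsize y, gsize height, gsize *slice_height)
{
  /* Give up if even the unrounded height does not fit below y */
  if (y + height > ATLAS_SIZE)
    return FALSE;

  /* Round up so later images can share the slice, but clamp to the
   * remaining space so the slice never extends past the atlas */
  *slice_height = MIN (round_up (MAX (height, 4)), ATLAS_SIZE - y);

  /* The slice must never be too short for the image it was opened for */
  g_assert (*slice_height >= height);
  return TRUE;
}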
Matthias Clasen
b7ae5404a3 gsk: Reshuffle code a bit
Move the atlas allocation to the atlas section in the code.
2024-01-20 08:05:38 -05:00
Matthias Clasen
fb4f210430 gpu: Fix ordering problem in clear_cache()
We must free the glyphs before their atlases, since we now maintain
the item count of the atlas when glyphs are freed.
2024-01-20 08:05:30 -05:00
Matthias Clasen
82df109444 gpu: Fix atlas freeing
We need to unset current_atlas if we free that one.
2024-01-20 08:05:30 -05:00
Matthias Clasen
bd84142f43 gpu: Evict stale atlases from the cache
We evict atlases when they are old and unused.
2024-01-20 08:05:30 -05:00
Matthias Clasen
65074bbbac gpu: Count glyphs on atlases
That is the only way to know when it's safe to gc an atlas. We can't
do that if there are still glyphs living on it.
2024-01-20 08:05:30 -05:00
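
A condensed sketch of this counting (hypothetical names; note that a later commit in this series replaces the item count with dead-pixel accounting):

#include <glib.h>

typedef struct { guint n_items; } Atlas;

/* Every glyph placed on an atlas bumps its count ... */
static void
atlas_add_glyph (Atlas *atlas)
{
  atlas->n_items++;
}

/* ... and freeing a glyph drops it again */
static void
atlas_remove_glyph (Atlas *atlas)
{
  atlas->n_items--;
}

/* Only an atlas with no glyphs left on it is safe to gc */
static gboolean
atlas_can_gc (const Atlas *atlas)
{
  return atlas->n_items == 0;
}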
Matthias Clasen
21fb5ec0b2 gpu: Evict stale glyphs from the cache 2024-01-20 08:05:30 -05:00
Matthias Clasen
e57f92b7e9 gpu: Fix texture eviction
If we gc a cached texture for which the GdkTexture is still alive,
the cached texture object will remain accessible via the render
data, so we need to make sure not to leave a dangling pointer
behind here.
2024-01-20 08:05:30 -05:00
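
The diff below resolves this with a small two-owner handshake: the cache and the texture's destroy notify both atomically clear the shared pointer, and whichever side runs second frees the struct. A standalone sketch of the pattern (hypothetical names):

#include <glib.h>

typedef struct
{
  gpointer texture; /* atomic; cleared once either owner lets go */
} CachedTexture;

/* Called when the cache evicts the entry */
static void
cache_release (CachedTexture *self)
{
  /* If the exchange returns NULL, the texture's destroy notify already
   * ran and gave up its claim, so we are the last owner and free. */
  if (g_atomic_pointer_exchange (&self->texture, NULL) == NULL)
    g_free (self);
}

/* Called from the GdkTexture's destroy notify */
static void
texture_release (CachedTexture *self)
{
  if (g_atomic_pointer_exchange (&self->texture, NULL) == NULL)
    g_free (self);
}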
Matthias Clasen
e94e348ad5 gpu: Evict stale textures from the cache
Fixes: #6346
2024-01-20 08:05:30 -05:00
Matthias Clasen
91234e75f8 gpu: Start doing gc
Call gc periodically from a timeout.
2024-01-20 08:05:30 -05:00
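
A minimal sketch of driving gc from a GLib timeout, as the diff below does (hypothetical device type; the source id must be removed again when the device is disposed):

#include <glib.h>

#define CACHE_GC_TIMEOUT 1 /* seconds */

typedef struct { guint gc_source; } Device;

static void
device_gc (Device *device, gint64 timestamp)
{
  /* walk the cache and free entries that should be collected */
}

static gboolean
gc_callback (gpointer data)
{
  device_gc (data, g_get_monotonic_time ());
  return G_SOURCE_CONTINUE; /* keep firing every CACHE_GC_TIMEOUT seconds */
}

static void
device_setup (Device *device)
{
  device->gc_source = g_timeout_add_seconds (CACHE_GC_TIMEOUT, gc_callback, device);
}

static void
device_teardown (Device *device)
{
  /* stop the timeout before the device goes away */
  g_clear_handle_id (&device->gc_source, g_source_remove);
}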
Matthias Clasen
212e9a6957 gpu: Print some cache stats
This reuses the GLYPHCACHE debug flag that is used for the same
purpose in the gl renderer.
2024-01-20 08:05:30 -05:00


@@ -3,17 +3,28 @@
#include "gskgpudeviceprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuimageprivate.h"
#include "gskgpuuploadopprivate.h"
#include "gdk/gdkdisplayprivate.h"
#include "gdk/gdktextureprivate.h"
#include "gsk/gskdebugprivate.h"
#define MAX_SLICES_PER_ATLAS 64
#define ATLAS_SIZE 1024
#define MAX_ATLAS_ITEM_SIZE 256
G_STATIC_ASSERT (MAX_ATLAS_ITEM_SIZE < ATLAS_SIZE);
#define MAX_DEAD_PIXELS (ATLAS_SIZE * ATLAS_SIZE / 2)
#define CACHE_GC_TIMEOUT 1 /* seconds */
#define CACHE_MAX_AGE (G_TIME_SPAN_SECOND * 4) /* 4 seconds, in µs */
typedef struct _GskGpuCached GskGpuCached;
typedef struct _GskGpuCachedClass GskGpuCachedClass;
typedef struct _GskGpuCachedAtlas GskGpuCachedAtlas;
@@ -21,6 +32,15 @@ typedef struct _GskGpuCachedGlyph GskGpuCachedGlyph;
typedef struct _GskGpuCachedTexture GskGpuCachedTexture;
typedef struct _GskGpuDevicePrivate GskGpuDevicePrivate;
typedef struct _GlyphKey GlyphKey;
struct _GlyphKey
{
PangoFont *font;
PangoGlyph glyph;
GskGpuGlyphLookupFlags flags;
float scale;
};
struct _GskGpuDevicePrivate
{
GdkDisplay *display;
@@ -34,6 +54,11 @@ struct _GskGpuDevicePrivate
GHashTable *glyph_cache;
GskGpuCachedAtlas *current_atlas;
struct {
GlyphKey key;
GskGpuCachedGlyph *value;
} front[256];
};
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuDevice, gsk_gpu_device, G_TYPE_OBJECT)
@@ -54,12 +79,34 @@ struct _GskGpuCachedClass
struct _GskGpuCached
{
const GskGpuCachedClass *class;
GskGpuCachedAtlas *atlas;
GskGpuCached *next;
GskGpuCached *prev;
gint64 timestamp;
gboolean stale;
guint pixels; /* For glyphs, pixels. For atlases, dead pixels */
};
static inline void
mark_as_stale (GskGpuCached *cached,
gboolean stale)
{
if (cached->stale != stale)
{
cached->stale = stale;
if (cached->atlas)
{
if (stale)
((GskGpuCached *) cached->atlas)->pixels += cached->pixels;
else
((GskGpuCached *) cached->atlas)->pixels -= cached->pixels;
}
}
}
static void
gsk_gpu_cached_free (GskGpuDevice *device,
GskGpuCached *cached)
@@ -75,6 +122,8 @@ gsk_gpu_cached_free (GskGpuDevice *device,
else
priv->first_cached = cached->next;
mark_as_stale (cached, TRUE);
cached->class->free (device, cached);
}
@@ -114,7 +163,8 @@ gsk_gpu_cached_use (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
/* FIXME */
cached->timestamp = timestamp;
mark_as_stale (cached, FALSE);
}
/* }}} */
@@ -137,10 +187,23 @@ static void
gsk_gpu_cached_atlas_free (GskGpuDevice *device,
GskGpuCached *cached)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
GskGpuCachedAtlas *self = (GskGpuCachedAtlas *) cached;
GskGpuCached *c, *next;
/* Free all remaining glyphs on this atlas */
for (c = priv->first_cached; c != NULL; c = next)
{
next = c->next;
if (c->atlas == self)
gsk_gpu_cached_free (device, c);
}
if (priv->current_atlas == self)
priv->current_atlas = NULL;
g_object_unref (self->image);
g_free (self);
}
@@ -149,8 +212,7 @@ gsk_gpu_cached_atlas_should_collect (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
/* FIXME */
return FALSE;
return cached->pixels > MAX_DEAD_PIXELS;
}
static const GskGpuCachedClass GSK_GPU_CACHED_ATLAS_CLASS =
@@ -171,298 +233,6 @@ gsk_gpu_cached_atlas_new (GskGpuDevice *device)
return self;
}
/* }}} */
/* {{{ CachedTexture */
struct _GskGpuCachedTexture
{
GskGpuCached parent;
/* atomic */ GdkTexture *texture;
GskGpuImage *image;
};
static void
gsk_gpu_cached_texture_free (GskGpuDevice *device,
GskGpuCached *cached)
{
GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;
gboolean texture_still_alive;
texture_still_alive = g_atomic_pointer_exchange (&self->texture, NULL) != NULL;
g_object_unref (self->image);
if (!texture_still_alive)
g_free (self);
}
static gboolean
gsk_gpu_cached_texture_should_collect (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
/* FIXME */
return FALSE;
}
static const GskGpuCachedClass GSK_GPU_CACHED_TEXTURE_CLASS =
{
sizeof (GskGpuCachedTexture),
gsk_gpu_cached_texture_free,
gsk_gpu_cached_texture_should_collect
};
static void
gsk_gpu_cached_texture_destroy_cb (gpointer data)
{
GskGpuCachedTexture *cache = data;
gboolean cache_still_alive;
cache_still_alive = g_atomic_pointer_exchange (&cache->texture, NULL) != NULL;
if (!cache_still_alive)
g_free (cache);
}
static GskGpuCachedTexture *
gsk_gpu_cached_texture_new (GskGpuDevice *device,
GdkTexture *texture,
GskGpuImage *image)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
GskGpuCachedTexture *self;
if (gdk_texture_get_render_data (texture, device))
gdk_texture_clear_render_data (texture);
else if ((self = g_hash_table_lookup (priv->texture_cache, texture)))
{
g_hash_table_remove (priv->texture_cache, texture);
g_object_weak_unref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
}
self = gsk_gpu_cached_new (device, &GSK_GPU_CACHED_TEXTURE_CLASS, NULL);
self->texture = texture;
self->image = g_object_ref (image);
if (!gdk_texture_set_render_data (texture, device, self, gsk_gpu_cached_texture_destroy_cb))
{
g_object_weak_ref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
g_hash_table_insert (priv->texture_cache, texture, self);
}
return self;
}
/* }}} */
/* {{{ CachedGlyph */
struct _GskGpuCachedGlyph
{
GskGpuCached parent;
PangoFont *font;
PangoGlyph glyph;
GskGpuGlyphLookupFlags flags;
float scale;
GskGpuImage *image;
graphene_rect_t bounds;
graphene_point_t origin;
};
static void
gsk_gpu_cached_glyph_free (GskGpuDevice *device,
GskGpuCached *cached)
{
GskGpuCachedGlyph *self = (GskGpuCachedGlyph *) cached;
g_object_unref (self->font);
g_object_unref (self->image);
g_free (self);
}
static gboolean
gsk_gpu_cached_glyph_should_collect (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
/* FIXME */
return FALSE;
}
static guint
gsk_gpu_cached_glyph_hash (gconstpointer data)
{
const GskGpuCachedGlyph *glyph = data;
return GPOINTER_TO_UINT (glyph->font) ^
glyph->glyph ^
(glyph->flags << 24) ^
((guint) glyph->scale * PANGO_SCALE);
}
static gboolean
gsk_gpu_cached_glyph_equal (gconstpointer v1,
gconstpointer v2)
{
const GskGpuCachedGlyph *glyph1 = v1;
const GskGpuCachedGlyph *glyph2 = v2;
return glyph1->font == glyph2->font
&& glyph1->glyph == glyph2->glyph
&& glyph1->flags == glyph2->flags
&& glyph1->scale == glyph2->scale;
}
static const GskGpuCachedClass GSK_GPU_CACHED_GLYPH_CLASS =
{
sizeof (GskGpuCachedGlyph),
gsk_gpu_cached_glyph_free,
gsk_gpu_cached_glyph_should_collect
};
/* }}} */
/* {{{ GskGpuDevice */
void
gsk_gpu_device_gc (GskGpuDevice *self,
gint64 timestamp)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
GskGpuCached *cached, *next;
for (cached = priv->first_cached; cached != NULL; cached = next)
{
next = cached->next;
if (gsk_gpu_cached_should_collect (self, cached, timestamp))
gsk_gpu_cached_free (self, cached);
}
}
static void
gsk_gpu_device_clear_cache (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
for (GskGpuCached *cached = priv->first_cached; cached; cached = cached->next)
{
if (cached->prev == NULL)
g_assert (priv->first_cached == cached);
else
g_assert (cached->prev->next == cached);
if (cached->next == NULL)
g_assert (priv->last_cached == cached);
else
g_assert (cached->next->prev == cached);
}
while (priv->first_cached)
gsk_gpu_cached_free (self, priv->first_cached);
g_assert (priv->last_cached == NULL);
}
static void
gsk_gpu_device_dispose (GObject *object)
{
GskGpuDevice *self = GSK_GPU_DEVICE (object);
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
gsk_gpu_device_clear_cache (self);
g_hash_table_unref (priv->glyph_cache);
g_hash_table_unref (priv->texture_cache);
G_OBJECT_CLASS (gsk_gpu_device_parent_class)->dispose (object);
}
static void
gsk_gpu_device_finalize (GObject *object)
{
GskGpuDevice *self = GSK_GPU_DEVICE (object);
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
g_object_unref (priv->display);
G_OBJECT_CLASS (gsk_gpu_device_parent_class)->finalize (object);
}
static void
gsk_gpu_device_class_init (GskGpuDeviceClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->dispose = gsk_gpu_device_dispose;
object_class->finalize = gsk_gpu_device_finalize;
}
static void
gsk_gpu_device_init (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
priv->glyph_cache = g_hash_table_new (gsk_gpu_cached_glyph_hash,
gsk_gpu_cached_glyph_equal);
priv->texture_cache = g_hash_table_new (g_direct_hash,
g_direct_equal);
}
void
gsk_gpu_device_setup (GskGpuDevice *self,
GdkDisplay *display,
gsize max_image_size)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
priv->display = g_object_ref (display);
priv->max_image_size = max_image_size;
}
GdkDisplay *
gsk_gpu_device_get_display (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
return priv->display;
}
gsize
gsk_gpu_device_get_max_image_size (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
return priv->max_image_size;
}
GskGpuImage *
gsk_gpu_device_create_offscreen_image (GskGpuDevice *self,
gboolean with_mipmap,
GdkMemoryDepth depth,
gsize width,
gsize height)
{
return GSK_GPU_DEVICE_GET_CLASS (self)->create_offscreen_image (self, with_mipmap, depth, width, height);
}
GskGpuImage *
gsk_gpu_device_create_upload_image (GskGpuDevice *self,
gboolean with_mipmap,
GdkMemoryFormat format,
gsize width,
gsize height)
{
return GSK_GPU_DEVICE_GET_CLASS (self)->create_upload_image (self, with_mipmap, format, width, height);
}
GskGpuImage *
gsk_gpu_device_create_download_image (GskGpuDevice *self,
GdkMemoryDepth depth,
gsize width,
gsize height)
{
return GSK_GPU_DEVICE_GET_CLASS (self)->create_download_image (self, depth, width, height);
}
/* This rounds up to the next number that has <= 2 bits set:
* 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, ...
* That is roughly sqrt(2), so it should limit waste
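/* A hypothetical reconstruction of the elided body, for illustration only:
 * return the smallest value >= size with at most two bits set, i.e. either
 * a power of two or 1.5 times a power of two, matching the list above.
 */
static gsize
round_up_atlas_size (gsize size)
{
  gsize power = 1;

  while (power < size)
    power *= 2;

  /* check the candidate halfway between the surrounding powers of two */
  if (power / 2 + power / 4 >= size)
    return power / 2 + power / 4;

  return power;
}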
@@ -520,25 +290,397 @@ gsk_gpu_cached_atlas_allocate (GskGpuCachedAtlas *atlas,
if (!can_add_slice)
return FALSE;
if (y + height > ATLAS_SIZE)
return FALSE;
atlas->n_slices++;
if (atlas->n_slices == MAX_SLICES_PER_ATLAS)
atlas->slices[i].height = ATLAS_SIZE - y;
else
atlas->slices[i].height = round_up_atlas_size (MAX (height, 4));
atlas->slices[i].height = MIN (round_up_atlas_size (MAX (height, 4)), ATLAS_SIZE - y);
atlas->slices[i].width = 0;
best_y = y;
best_slice = i;
}
g_assert (best_slice < MAX_SLICES_PER_ATLAS);
*out_x = atlas->slices[best_slice].width;
*out_y = best_y;
atlas->slices[best_slice].width += width;
g_assert (atlas->slices[best_slice].height >= height);
g_assert (atlas->slices[best_slice].width <= ATLAS_SIZE);
g_assert (best_y + atlas->slices[best_slice].height <= ATLAS_SIZE);
return TRUE;
}
/* }}} */
/* {{{ CachedTexture */
struct _GskGpuCachedTexture
{
GskGpuCached parent;
/* atomic */ GdkTexture *texture;
GskGpuImage *image;
};
static void
gsk_gpu_cached_texture_free (GskGpuDevice *device,
GskGpuCached *cached)
{
GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;
gboolean texture_still_alive;
texture_still_alive = g_atomic_pointer_exchange (&self->texture, NULL) != NULL;
g_clear_object (&self->image);
if (!texture_still_alive)
g_free (self);
}
static gboolean
gsk_gpu_cached_texture_should_collect (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
return timestamp - cached->timestamp > CACHE_MAX_AGE;
}
static const GskGpuCachedClass GSK_GPU_CACHED_TEXTURE_CLASS =
{
sizeof (GskGpuCachedTexture),
gsk_gpu_cached_texture_free,
gsk_gpu_cached_texture_should_collect
};
static void
gsk_gpu_cached_texture_destroy_cb (gpointer data)
{
GskGpuCachedTexture *cache = data;
gboolean cache_still_alive;
cache_still_alive = g_atomic_pointer_exchange (&cache->texture, NULL) != NULL;
if (!cache_still_alive)
g_free (cache);
}
static GskGpuCachedTexture *
gsk_gpu_cached_texture_new (GskGpuDevice *device,
GdkTexture *texture,
GskGpuImage *image)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
GskGpuCachedTexture *self;
if (gdk_texture_get_render_data (texture, device))
gdk_texture_clear_render_data (texture);
else if ((self = g_hash_table_lookup (priv->texture_cache, texture)))
{
g_hash_table_remove (priv->texture_cache, texture);
g_object_weak_unref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
}
self = gsk_gpu_cached_new (device, &GSK_GPU_CACHED_TEXTURE_CLASS, NULL);
self->texture = texture;
self->image = g_object_ref (image);
if (!gdk_texture_set_render_data (texture, device, self, gsk_gpu_cached_texture_destroy_cb))
{
g_object_weak_ref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
g_hash_table_insert (priv->texture_cache, texture, self);
}
return self;
}
/* }}} */
/* {{{ CachedGlyph */
struct _GskGpuCachedGlyph
{
GskGpuCached parent;
GlyphKey key;
GskGpuImage *image;
graphene_rect_t bounds;
graphene_point_t origin;
};
static void
gsk_gpu_cached_glyph_free (GskGpuDevice *device,
GskGpuCached *cached)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
GskGpuCachedGlyph *self = (GskGpuCachedGlyph *) cached;
g_hash_table_remove (priv->glyph_cache, self);
g_object_unref (self->key.font);
g_object_unref (self->image);
g_free (self);
}
static gboolean
gsk_gpu_cached_glyph_should_collect (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
if (timestamp - cached->timestamp > CACHE_MAX_AGE)
mark_as_stale (cached, TRUE);
/* Glyphs are only collected when their atlas is freed */
return FALSE;
}
static guint
gsk_gpu_cached_glyph_hash (gconstpointer data)
{
const GskGpuCachedGlyph *glyph = data;
return GPOINTER_TO_UINT (glyph->key.font) ^
glyph->key.glyph ^
(glyph->key.flags << 24) ^
((guint) glyph->key.scale * PANGO_SCALE);
}
static gboolean
gsk_gpu_cached_glyph_equal (gconstpointer v1,
gconstpointer v2)
{
const GskGpuCachedGlyph *glyph1 = v1;
const GskGpuCachedGlyph *glyph2 = v2;
return glyph1->key.font == glyph2->key.font
&& glyph1->key.glyph == glyph2->key.glyph
&& glyph1->key.flags == glyph2->key.flags
&& glyph1->key.scale == glyph2->key.scale;
}
static const GskGpuCachedClass GSK_GPU_CACHED_GLYPH_CLASS =
{
sizeof (GskGpuCachedGlyph),
gsk_gpu_cached_glyph_free,
gsk_gpu_cached_glyph_should_collect
};
/* }}} */
/* {{{ GskGpuDevice */
static void
print_cache_stats (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
GskGpuCached *cached;
guint glyphs = 0;
guint stale_glyphs = 0;
guint textures = 0;
guint atlases = 0;
GString *ratios = g_string_new ("");
for (cached = priv->first_cached; cached != NULL; cached = cached->next)
{
if (cached->class == &GSK_GPU_CACHED_GLYPH_CLASS)
{
glyphs++;
if (cached->stale)
stale_glyphs++;
}
else if (cached->class == &GSK_GPU_CACHED_TEXTURE_CLASS)
textures++;
else if (cached->class == &GSK_GPU_CACHED_ATLAS_CLASS)
{
double ratio;
atlases++;
ratio = (double) cached->pixels / (double) (ATLAS_SIZE * ATLAS_SIZE);
if (ratios->len == 0)
g_string_append (ratios, " (ratios ");
else
g_string_append (ratios, ", ");
g_string_append_printf (ratios, "%.2f", ratio);
}
}
if (ratios->len > 0)
g_string_append (ratios, ")");
gdk_debug_message ("cached items\n"
" glyphs: %5u (%u stale)\n"
" textures: %5u\n"
" atlases: %5u%s",
glyphs, stale_glyphs, textures, atlases, ratios->str);
g_string_free (ratios, TRUE);
}
void
gsk_gpu_device_gc (GskGpuDevice *self,
gint64 timestamp)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
GskGpuCached *cached, *prev;
/* We walk the cache from the end so we don't end up with prev
* being a leftover glyph on the atlas we are freeing
*/
for (cached = priv->last_cached; cached != NULL; cached = prev)
{
prev = cached->prev;
if (gsk_gpu_cached_should_collect (self, cached, timestamp))
gsk_gpu_cached_free (self, cached);
}
if (GSK_DEBUG_CHECK (GLYPH_CACHE))
print_cache_stats (self);
}
static void
gsk_gpu_device_clear_cache (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
for (GskGpuCached *cached = priv->first_cached; cached; cached = cached->next)
{
if (cached->prev == NULL)
g_assert (priv->first_cached == cached);
else
g_assert (cached->prev->next == cached);
if (cached->next == NULL)
g_assert (priv->last_cached == cached);
else
g_assert (cached->next->prev == cached);
}
/* We clear the cache from the end so glyphs get freed before their atlas */
while (priv->last_cached)
gsk_gpu_cached_free (self, priv->last_cached);
g_assert (priv->last_cached == NULL);
}
static void
gsk_gpu_device_dispose (GObject *object)
{
GskGpuDevice *self = GSK_GPU_DEVICE (object);
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
gsk_gpu_device_clear_cache (self);
g_hash_table_unref (priv->glyph_cache);
g_hash_table_unref (priv->texture_cache);
g_clear_handle_id (&priv->cache_gc_source, g_source_remove);
G_OBJECT_CLASS (gsk_gpu_device_parent_class)->dispose (object);
}
static void
gsk_gpu_device_finalize (GObject *object)
{
GskGpuDevice *self = GSK_GPU_DEVICE (object);
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
g_object_unref (priv->display);
G_OBJECT_CLASS (gsk_gpu_device_parent_class)->finalize (object);
}
static void
gsk_gpu_device_class_init (GskGpuDeviceClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->dispose = gsk_gpu_device_dispose;
object_class->finalize = gsk_gpu_device_finalize;
}
static void
gsk_gpu_device_init (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
priv->glyph_cache = g_hash_table_new (gsk_gpu_cached_glyph_hash,
gsk_gpu_cached_glyph_equal);
priv->texture_cache = g_hash_table_new (g_direct_hash,
g_direct_equal);
}
static gboolean
cache_gc_source_callback (gpointer data)
{
GskGpuDevice *self = data;
gsk_gpu_device_gc (self, g_get_monotonic_time ());
return G_SOURCE_CONTINUE;
}
void
gsk_gpu_device_setup (GskGpuDevice *self,
GdkDisplay *display,
gsize max_image_size)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
priv->display = g_object_ref (display);
priv->max_image_size = max_image_size;
priv->cache_gc_source = g_timeout_add_seconds (CACHE_GC_TIMEOUT, cache_gc_source_callback, self);
}
GdkDisplay *
gsk_gpu_device_get_display (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
return priv->display;
}
gsize
gsk_gpu_device_get_max_image_size (GskGpuDevice *self)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
return priv->max_image_size;
}
GskGpuImage *
gsk_gpu_device_create_offscreen_image (GskGpuDevice *self,
gboolean with_mipmap,
GdkMemoryDepth depth,
gsize width,
gsize height)
{
return GSK_GPU_DEVICE_GET_CLASS (self)->create_offscreen_image (self, with_mipmap, depth, width, height);
}
GskGpuImage *
gsk_gpu_device_create_upload_image (GskGpuDevice *self,
gboolean with_mipmap,
GdkMemoryFormat format,
gsize width,
gsize height)
{
return GSK_GPU_DEVICE_GET_CLASS (self)->create_upload_image (self, with_mipmap, format, width, height);
}
GskGpuImage *
gsk_gpu_device_create_download_image (GskGpuDevice *self,
GdkMemoryDepth depth,
gsize width,
gsize height)
{
return GSK_GPU_DEVICE_GET_CLASS (self)->create_download_image (self, depth, width, height);
}
static void
gsk_gpu_device_ensure_atlas (GskGpuDevice *self,
gboolean recreate,
@@ -576,20 +718,14 @@ gsk_gpu_device_add_atlas_image (GskGpuDevice *self,
return NULL;
gsk_gpu_device_ensure_atlas (self, FALSE, timestamp);
if (gsk_gpu_cached_atlas_allocate (priv->current_atlas, width, height, out_x, out_y))
{
gsk_gpu_cached_use (self, (GskGpuCached *) priv->current_atlas, timestamp);
return priv->current_atlas->image;
}
return priv->current_atlas->image;
gsk_gpu_device_ensure_atlas (self, TRUE, timestamp);
if (gsk_gpu_cached_atlas_allocate (priv->current_atlas, width, height, out_x, out_y))
{
gsk_gpu_cached_use (self, (GskGpuCached *) priv->current_atlas, timestamp);
return priv->current_atlas->image;
}
return priv->current_atlas->image;
return NULL;
}
@@ -606,10 +742,8 @@ gsk_gpu_device_lookup_texture_image (GskGpuDevice *self,
if (cache == NULL)
cache = g_hash_table_lookup (priv->texture_cache, texture);
if (cache)
{
return g_object_ref (cache->image);
}
if (cache && cache->image)
return g_object_ref (cache->image);
return NULL;
}
@@ -639,10 +773,10 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
GskGpuCachedGlyph lookup = {
.font = font,
.glyph = glyph,
.flags = flags,
.scale = scale
.key.font = font,
.key.glyph = glyph,
.key.flags = flags,
.key.scale = scale
};
GskGpuCachedGlyph *cache;
PangoRectangle ink_rect;
@@ -650,14 +784,32 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
graphene_point_t origin;
GskGpuImage *image;
gsize atlas_x, atlas_y, padding;
guint64 timestamp = gsk_gpu_frame_get_timestamp (frame);
guint front_index = glyph & 0xFF;
if (memcmp (&lookup.key, &priv->front[front_index], sizeof (GlyphKey)) == 0)
{
cache = priv->front[front_index].value;
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
*out_bounds = cache->bounds;
*out_origin = cache->origin;
return cache->image;
}
cache = g_hash_table_lookup (priv->glyph_cache, &lookup);
if (cache)
{
gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));
memcpy (&priv->front[front_index].key, &lookup.key, sizeof (GlyphKey));
priv->front[front_index].value = cache;
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
*out_bounds = cache->bounds;
*out_origin = cache->origin;
return cache->image;
}
@@ -669,7 +821,7 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
padding = 1;
image = gsk_gpu_device_add_atlas_image (self,
gsk_gpu_frame_get_timestamp (frame),
timestamp,
rect.size.width + 2 * padding, rect.size.height + 2 * padding,
&atlas_x, &atlas_y);
if (image)
@@ -688,12 +840,13 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
cache = gsk_gpu_cached_new (self, &GSK_GPU_CACHED_GLYPH_CLASS, NULL);
}
cache->font = g_object_ref (font),
cache->glyph = glyph,
cache->flags = flags,
cache->scale = scale,
cache->key.font = g_object_ref (font),
cache->key.glyph = glyph,
cache->key.flags = flags,
cache->key.scale = scale,
cache->bounds = rect,
cache->image = image,
((GskGpuCached *) cache)->pixels = (rect.size.width + 2 * padding) * (rect.size.height + 2 * padding);
cache->origin = GRAPHENE_POINT_INIT (- origin.x + (flags & 3) / 4.f,
- origin.y + ((flags >> 2) & 3) / 4.f);
@@ -712,10 +865,14 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
cache->origin.y + padding));
g_hash_table_insert (priv->glyph_cache, cache, cache);
gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
memcpy (&priv->front[front_index].key, &lookup.key, sizeof (GlyphKey));
priv->front[front_index].value = cache;
*out_bounds = cache->bounds;
*out_origin = cache->origin;
return cache->image;
}