Compare commits

...

15 Commits

Author SHA1 Message Date
Christian Hergert
23c3f3bddf blah 2024-03-11 19:50:10 -07:00
Matthias Clasen
9bc5cde606 gsk: Implement the 'underline trick'
Add an optimization that turns color ops into colorize ops when
the previous op was colorize as well. This is beneficial when we
have a mix of text and color nodes (such as underlined text, or
text with background color).
2024-03-11 15:52:43 -07:00
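
As a rough, self-contained illustration of the idea only (hypothetical names, not the GSK code): when the op that was just recorded is already a colorize op, a following solid color is recorded as another colorize op so both stay on the same shader, instead of switching back to the plain color shader. The real change, in gsk_gpu_node_processor_color_op further down in this diff, additionally fetches a 1x1 white atlas image to use as the mask.

#include <stdio.h>

typedef enum { OP_COLOR, OP_COLORIZE } OpKind;

typedef struct { OpKind kind; float rgba[4]; } Op;

typedef struct {
  Op  ops[64];
  int n_ops;
} Frame;

/* plays the role of gsk_gpu_frame_get_last_op() */
static const Op *
frame_get_last_op (const Frame *frame)
{
  return frame->n_ops ? &frame->ops[frame->n_ops - 1] : NULL;
}

static void
frame_add_color (Frame *frame, const float rgba[4])
{
  const Op *last = frame_get_last_op (frame);
  Op op = { OP_COLOR, { rgba[0], rgba[1], rgba[2], rgba[3] } };

  /* the 'underline trick': stay on the colorize shader if that is
   * what the previous op already uses */
  if (last && last->kind == OP_COLORIZE)
    op.kind = OP_COLORIZE;

  frame->ops[frame->n_ops++] = op;
}

int
main (void)
{
  Frame frame = { .n_ops = 0 };
  const float red[4] = { 1, 0, 0, 1 };

  frame.ops[frame.n_ops++] = (Op) { OP_COLORIZE, { 0, 0, 0, 1 } }; /* glyphs */
  frame_add_color (&frame, red);                                   /* underline */

  printf ("underline recorded as %s op\n",
          frame.ops[1].kind == OP_COLORIZE ? "colorize" : "color");
  return 0;
}
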
Matthias Clasen
67b2ae5932 gsk: Add a 'colorize' optimization flag
We are going to add an optimization that turns color ops into
colorize ops when beneficial. This flag will make it possible
to turn that off with

GSK_GPU_SKIP=colorize
2024-03-11 15:52:38 -07:00
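
For testing, the flag can also be set from code before the renderer is created; a minimal sketch (g_setenv and gtk_init are standard GLib/GTK4 API, the application code around them is hypothetical):

#include <gtk/gtk.h>

int
main (int argc, char **argv)
{
  /* same effect as running with GSK_GPU_SKIP=colorize in the environment */
  g_setenv ("GSK_GPU_SKIP", "colorize", TRUE);

  gtk_init ();
  /* ... create the application window and run the main loop as usual ... */
  return 0;
}
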
Matthias Clasen
6ad68ed755 gsk: Export the colorize op class
This is going to be used in the future to check whether the previous
op was a colorize one.
2024-03-11 15:52:32 -07:00
Matthias Clasen
f65387de99 gsk: Add a way to get a 'solid' image
Add a method to GskGpuDevice that returns a 1x1 image in the atlas,
for use as a mask.
2024-03-11 15:52:27 -07:00
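
Why a single white texel works as a mask, as an illustrative computation (not the shader source): the colorize path multiplies the color by the sampled coverage, so a fully opaque sample reproduces the plain color exactly.

#include <stdio.h>

/* colorize = color * sampled coverage; with coverage == 1.0 the result
 * is identical to the plain color shader, which is what makes the
 * substitution safe */
static float
colorize_alpha (float color_alpha, float coverage)
{
  return color_alpha * coverage;
}

int
main (void)
{
  printf ("%g\n", colorize_alpha (0.8f, 1.0f)); /* -> 0.8, same as a color op */
  return 0;
}
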
Matthias Clasen
dcf3a3c389 gsk: Add a solid upload op
Add a variant of the glyph upload op that just draws a 3x3 white
rect, for use as a mask.
2024-03-11 15:52:22 -07:00
Matthias Clasen
75f529f3d2 gsk: Add gsk_gpu_frame_get_last_op
This function will be used in the future to find the previous
op during node processing, so we can make optimization decisions
based on that.
2024-03-11 15:52:14 -07:00
Christian Hergert
d5bd49fb85 gpu: Add a front cache
This copies the approach taken in the gl renderer to avoid much
of the hash table lookup overhead by means of a front cache.

# Conflicts:
#	gsk/gpu/gskgpudevice.c
2024-03-11 14:11:02 -07:00
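
The shape of that front cache, as a self-contained sketch (hypothetical names, with a stub in place of the hash-table path; not the GSK code): a small direct-mapped array indexed by the low bits of the glyph id is checked first, and only a miss falls through to the full lookup and refills the slot.

#include <stdio.h>
#include <string.h>

typedef struct { void *font; unsigned glyph; int flags; float scale; } Key;
typedef struct { Key key; int value; } Slot;

static Slot front[256];            /* direct-mapped; real code must also
                                      invalidate slots on eviction */

static int
slow_lookup (const Key *key)       /* stand-in for the hash-table path */
{
  return (int) key->glyph + 1000;
}

static int
lookup (const Key *key)
{
  Slot *slot = &front[key->glyph & 0xFF];

  if (memcmp (&slot->key, key, sizeof (Key)) == 0)
    return slot->value;            /* front-cache hit, no hashing or probing */

  memcpy (&slot->key, key, sizeof (Key));
  slot->value = slow_lookup (key); /* miss: take the slow path, refill slot */
  return slot->value;
}

int
main (void)
{
  Key k;

  memset (&k, 0, sizeof k);        /* zero padding so memcmp is reliable */
  k.glyph = 65;
  k.scale = 1.f;

  printf ("%d %d\n", lookup (&k), lookup (&k)); /* miss, then hit */
  return 0;
}
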
Christian Hergert
a6ef0f615e gsk/gpu: allow clip operations to be inlined
The functions these call were made inline'able for the previous GL
renderer to speed up some inner loops. Expose those here through the
GskGpuClip so they continue to get inlined.
2024-03-11 12:32:12 -07:00
Christian Hergert
7ba7256c70 gsk/gpu: avoid Private usage for GskGpuImage
By using GskGpuImagePrivate for fields we require a runtime lookup of the
offset to add to the address for dereference. This appears to prevent LTO
from operating successfully here.

Keep the API the same but also move these into static inline so that we
don't require extremely slow LTO builds to optimize further.
2024-03-11 12:28:45 -07:00
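
A standalone illustration of the difference (hypothetical "Thing" type, not the GSK code): with G_DEFINE_TYPE_WITH_PRIVATE every accessor adds a private-struct offset that only exists at runtime, whereas a field kept in the instance struct plus a static inline getter compiles to a constant offset the optimizer can fold.

#include <stdio.h>

/* fields live directly in the instance struct, so the getter is a
 * static inline with a compile-time-constant offset */
typedef struct {
  unsigned int  flags;
  unsigned long width;
  unsigned long height;
} Thing;

static inline unsigned long
thing_get_width (const Thing *self)
{
  return self->width;
}

int
main (void)
{
  Thing t = { 0, 1024, 1024 };

  printf ("%lu\n", thing_get_width (&t));
  return 0;
}
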
Christian Hergert
75769e44a8 gsk/gpu: try harder for an intrinsic for roundf() 2024-03-11 12:25:30 -07:00
Christian Hergert
540ddd9ae8 gtk/imcontext: fix ABI break via activate_osk_with_event
Fixes #6529
2024-03-11 12:05:47 -07:00
Christian Hergert
3490de0598 Merge remote-tracking branch 'origin/gpu-desc-nonobject' into wip/chergert/glyph_node 2024-03-11 12:01:17 -07:00
Matthias Clasen
40e5a37795 gpu: Make GskGpuDescriptors non-objects
This is a bit involved, since we have to deal with subclassing
here, but in the end it works out ok. And it makes g_object_ref
and g_object_unref disappear from the profiles.
2024-01-21 10:13:44 -05:00
Matthias Clasen
f69cfd3d12 array: Split declaration and implementation
Make it possible to get only one or the other, by defining
INCLUDE_DECL or INCLUDE_IMPL. By default, both are included.
2024-01-21 10:13:44 -05:00
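
Usage follows the pattern visible in the descriptors files below: the private header requests only the declarations, so the array type can appear by value inside a public struct, and the .c file requests only the function bodies. A condensed sketch of that split (the GDK_ARRAY_* parameters are the ones from the diff; this is not a complete translation unit on its own):

/* in the private header */
#define INCLUDE_DECL 1
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
#include "gdk/gdkarrayimpl.c"

/* in the matching .c file */
#define INCLUDE_IMPL 1
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
#include "gdk/gdkarrayimpl.c"
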
35 changed files with 1015 additions and 604 deletions

View File

@@ -52,6 +52,13 @@ G_BEGIN_DECLS
#define gdk_array_paste(GDK_ARRAY_NAME, func_name) gdk_array_paste_more (GDK_ARRAY_NAME, func_name)
#define gdk_array(func_name) gdk_array_paste (GDK_ARRAY_NAME, func_name)
#if !defined(INCLUDE_DECL) && !defined(INCLUDE_IMPL)
#define INCLUDE_DECL 1
#define INCLUDE_IMPL 1
#endif
#ifdef INCLUDE_DECL
typedef struct GdkArray GdkArray;
struct GdkArray
@@ -82,6 +89,9 @@ gdk_array(init) (GdkArray *self)
#endif
}
#endif /* INCLUDE_DECL */
#ifdef INCLUDE_IMPL
G_GNUC_UNUSED static inline gsize
gdk_array(get_capacity) (const GdkArray *self)
{
@@ -304,6 +314,8 @@ gdk_array(get) (const GdkArray *self,
}
#endif
#endif /* INCLUDE_IMPL */
#ifndef GDK_ARRAY_NO_UNDEF
#undef _T_
@@ -321,6 +333,9 @@ gdk_array(get) (const GdkArray *self,
#undef GDK_ARRAY_PREALLOC
#undef GDK_ARRAY_TYPE_NAME
#undef GDK_ARRAY_NO_MEMSET
#undef INCLUDE_DECL
#undef INCLUDE_IMPL
#endif
G_END_DECLS

View File

@@ -11,7 +11,6 @@ struct _GskGLBuffer
GLenum target;
GLuint buffer_id;
GLenum access;
guchar *data;
};
G_DEFINE_TYPE (GskGLBuffer, gsk_gl_buffer, GSK_TYPE_GPU_BUFFER)
@@ -24,7 +23,6 @@ gsk_gl_buffer_finalize (GObject *object)
{
GskGLBuffer *self = GSK_GL_BUFFER (object);
g_free (self->data);
glDeleteBuffers (1, &self->buffer_id);
G_OBJECT_CLASS (gsk_gl_buffer_parent_class)->finalize (object);
@@ -35,7 +33,7 @@ gsk_gl_buffer_map (GskGpuBuffer *buffer)
{
GskGLBuffer *self = GSK_GL_BUFFER (buffer);
return self->data;
return (guchar *)glMapBuffer (self->target, self->access);
}
static void
@@ -44,14 +42,12 @@ gsk_gl_buffer_unmap (GskGpuBuffer *buffer,
{
GskGLBuffer *self = GSK_GL_BUFFER (buffer);
if (used == 0)
return;
gsk_gl_buffer_bind (self);
profiler_buffer_uploads += used;
glBufferSubData (self->target, 0, used, self->data);
gdk_profiler_set_int_counter (profiler_buffer_uploads_id, profiler_buffer_uploads);
glUnmapBuffer (self->target);
}
static void
@@ -90,7 +86,6 @@ gsk_gl_buffer_new (GLenum target,
glGenBuffers (1, &self->buffer_id);
glBindBuffer (target, self->buffer_id);
glBufferData (target, size, NULL, GL_STATIC_DRAW);
self->data = malloc (size);
return GSK_GPU_BUFFER (self);
}

View File

@@ -5,24 +5,14 @@
#include "gskglbufferprivate.h"
#include "gskglimageprivate.h"
struct _GskGLDescriptors
{
GskGpuDescriptors parent_instance;
GskGLDevice *device;
guint n_external;
};
G_DEFINE_TYPE (GskGLDescriptors, gsk_gl_descriptors, GSK_TYPE_GPU_DESCRIPTORS)
static void
gsk_gl_descriptors_finalize (GObject *object)
gsk_gl_descriptors_finalize (GskGpuDescriptors *desc)
{
GskGLDescriptors *self = GSK_GL_DESCRIPTORS (object);
GskGLDescriptors *self = GSK_GL_DESCRIPTORS (desc);
g_object_unref (self->device);
G_OBJECT_CLASS (gsk_gl_descriptors_parent_class)->finalize (object);
gsk_gpu_descriptors_finalize (&self->parent_instance);
}
static gboolean
@@ -72,29 +62,25 @@ gsk_gl_descriptors_add_buffer (GskGpuDescriptors *desc,
return TRUE;
}
static void
gsk_gl_descriptors_class_init (GskGLDescriptorsClass *klass)
static GskGpuDescriptorsClass GSK_GL_DESCRIPTORS_CLASS =
{
GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->finalize = gsk_gl_descriptors_finalize;
descriptors_class->add_image = gsk_gl_descriptors_add_image;
descriptors_class->add_buffer = gsk_gl_descriptors_add_buffer;
}
static void
gsk_gl_descriptors_init (GskGLDescriptors *self)
{
}
.finalize = gsk_gl_descriptors_finalize,
.add_image = gsk_gl_descriptors_add_image,
.add_buffer = gsk_gl_descriptors_add_buffer,
};
GskGpuDescriptors *
gsk_gl_descriptors_new (GskGLDevice *device)
{
GskGLDescriptors *self;
GskGpuDescriptors *desc;
self = g_object_new (GSK_TYPE_GL_DESCRIPTORS, NULL);
self = g_new0 (GskGLDescriptors, 1);
desc = GSK_GPU_DESCRIPTORS (self);
desc->ref_count = 1;
desc->desc_class = (GskGpuDescriptorsClass *) &GSK_GL_DESCRIPTORS_CLASS;
gsk_gpu_descriptors_init (&self->parent_instance);
self->device = g_object_ref (device);
@@ -110,7 +96,7 @@ gsk_gl_descriptors_get_n_external (GskGLDescriptors *self)
void
gsk_gl_descriptors_use (GskGLDescriptors *self)
{
GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
GskGpuDescriptors *desc = &self->parent_instance;
gsize i, ext, n_textures;
n_textures = 16 - 3 * self->n_external;

View File

@@ -6,9 +6,17 @@
G_BEGIN_DECLS
#define GSK_TYPE_GL_DESCRIPTORS (gsk_gl_descriptors_get_type ())
typedef struct _GskGLDescriptors GskGLDescriptors;
G_DECLARE_FINAL_TYPE (GskGLDescriptors, gsk_gl_descriptors, GSK, GL_DESCRIPTORS, GskGpuDescriptors)
#define GSK_GL_DESCRIPTORS(d) ((GskGLDescriptors *) (d))
struct _GskGLDescriptors
{
GskGpuDescriptors parent_instance;
GskGLDevice *device;
guint n_external;
};
GskGpuDescriptors * gsk_gl_descriptors_new (GskGLDevice *device);

View File

@@ -25,17 +25,7 @@ void
gsk_gpu_buffer_setup (GskGpuBuffer *self,
gsize size)
{
GskGpuBufferPrivate *priv = gsk_gpu_buffer_get_instance_private (self);
priv->size = size;
}
gsize
gsk_gpu_buffer_get_size (GskGpuBuffer *self)
{
GskGpuBufferPrivate *priv = gsk_gpu_buffer_get_instance_private (self);
return priv->size;
self->size = size;
}
guchar *

View File

@@ -16,6 +16,7 @@ typedef struct _GskGpuBufferClass GskGpuBufferClass;
struct _GskGpuBuffer
{
GObject parent_instance;
gsize size;
};
struct _GskGpuBufferClass
@@ -32,12 +33,15 @@ GType gsk_gpu_buffer_get_type (void) G
void gsk_gpu_buffer_setup (GskGpuBuffer *self,
gsize size);
gsize gsk_gpu_buffer_get_size (GskGpuBuffer *self);
guchar * gsk_gpu_buffer_map (GskGpuBuffer *self);
void gsk_gpu_buffer_unmap (GskGpuBuffer *self,
gsize used);
static inline gsize G_GNUC_PURE
gsk_gpu_buffer_get_size (const GskGpuBuffer *buffer)
{
return buffer->size;
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuBuffer, g_object_unref)

View File

@@ -260,44 +260,3 @@ gsk_gpu_clip_may_intersect_rect (const GskGpuClip *self,
}
}
gboolean
gsk_gpu_clip_contains_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
graphene_rect_t r = *rect;
r.origin.x += offset->x;
r.origin.y += offset->y;
switch (self->type)
{
default:
g_assert_not_reached();
case GSK_GPU_CLIP_ALL_CLIPPED:
return FALSE;
case GSK_GPU_CLIP_NONE:
case GSK_GPU_CLIP_CONTAINED:
case GSK_GPU_CLIP_RECT:
return gsk_rect_contains_rect (&self->rect.bounds, &r);
case GSK_GPU_CLIP_ROUNDED:
return gsk_rounded_rect_contains_rect (&self->rect, &r);
}
}
GskGpuShaderClip
gsk_gpu_clip_get_shader_clip (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
if (self->type == GSK_GPU_CLIP_NONE ||
self->type == GSK_GPU_CLIP_CONTAINED ||
gsk_gpu_clip_contains_rect (self, offset, rect))
return GSK_GPU_SHADER_CLIP_NONE;
else if (self->type == GSK_GPU_CLIP_RECT)
return GSK_GPU_SHADER_CLIP_RECT;
else
return GSK_GPU_SHADER_CLIP_ROUNDED;
}

View File

@@ -5,6 +5,7 @@
#include <gdk/gdk.h>
#include <graphene.h>
#include <gsk/gskroundedrect.h>
#include <gsk/gskrectprivate.h>
G_BEGIN_DECLS
@@ -59,15 +60,50 @@ gboolean gsk_gpu_clip_transform (GskGpuC
GskTransform *transform,
const graphene_rect_t *viewport) G_GNUC_WARN_UNUSED_RESULT;
gboolean gsk_gpu_clip_contains_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect) G_GNUC_WARN_UNUSED_RESULT;
gboolean gsk_gpu_clip_may_intersect_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect) G_GNUC_WARN_UNUSED_RESULT;
GskGpuShaderClip gsk_gpu_clip_get_shader_clip (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect);
static inline gboolean G_GNUC_PURE G_GNUC_WARN_UNUSED_RESULT
gsk_gpu_clip_contains_rect (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
graphene_rect_t r = *rect;
r.origin.x += offset->x;
r.origin.y += offset->y;
switch (self->type)
{
default:
g_assert_not_reached();
case GSK_GPU_CLIP_ALL_CLIPPED:
return FALSE;
case GSK_GPU_CLIP_NONE:
case GSK_GPU_CLIP_CONTAINED:
case GSK_GPU_CLIP_RECT:
return gsk_rect_contains_rect (&self->rect.bounds, &r);
case GSK_GPU_CLIP_ROUNDED:
return gsk_rounded_rect_contains_rect (&self->rect, &r);
}
}
static inline GskGpuShaderClip G_GNUC_PURE
gsk_gpu_clip_get_shader_clip (const GskGpuClip *self,
const graphene_point_t *offset,
const graphene_rect_t *rect)
{
if (self->type == GSK_GPU_CLIP_NONE ||
self->type == GSK_GPU_CLIP_CONTAINED ||
gsk_gpu_clip_contains_rect (self, offset, rect))
return GSK_GPU_SHADER_CLIP_NONE;
else if (self->type == GSK_GPU_CLIP_RECT)
return GSK_GPU_SHADER_CLIP_RECT;
else
return GSK_GPU_SHADER_CLIP_ROUNDED;
}
G_END_DECLS

View File

@@ -34,7 +34,7 @@ gsk_gpu_colorize_op_print (GskGpuOp *op,
gsk_gpu_print_newline (string);
}
static const GskGpuShaderOpClass GSK_GPU_COLORIZE_OP_CLASS = {
const GskGpuShaderOpClass GSK_GPU_COLORIZE_OP_CLASS = {
{
GSK_GPU_OP_SIZE (GskGpuColorizeOp),
GSK_GPU_STAGE_SHADER,

View File

@@ -6,6 +6,8 @@
G_BEGIN_DECLS
extern const GskGpuShaderOpClass GSK_GPU_COLORIZE_OP_CLASS;
void gsk_gpu_colorize_op (GskGpuFrame *frame,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,

View File

@@ -2,22 +2,6 @@
#include "gskgpudescriptorsprivate.h"
typedef struct _GskGpuImageEntry GskGpuImageEntry;
typedef struct _GskGpuBufferEntry GskGpuBufferEntry;
struct _GskGpuImageEntry
{
GskGpuImage *image;
GskGpuSampler sampler;
guint32 descriptor;
};
struct _GskGpuBufferEntry
{
GskGpuBuffer *buffer;
guint32 descriptor;
};
static void
gsk_gpu_image_entry_clear (gpointer data)
{
@@ -34,6 +18,8 @@ gsk_gpu_buffer_entry_clear (gpointer data)
g_object_unref (entry->buffer);
}
#define INCLUDE_IMPL 1
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
@@ -43,6 +29,8 @@ gsk_gpu_buffer_entry_clear (gpointer data)
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define INCLUDE_IMPL 1
#define GDK_ARRAY_NAME gsk_gpu_buffer_entries
#define GDK_ARRAY_TYPE_NAME GskGpuBufferEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuBufferEntry
@@ -52,80 +40,48 @@ gsk_gpu_buffer_entry_clear (gpointer data)
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
typedef struct _GskGpuDescriptorsPrivate GskGpuDescriptorsPrivate;
struct _GskGpuDescriptorsPrivate
void
gsk_gpu_descriptors_finalize (GskGpuDescriptors *self)
{
GskGpuImageEntries images;
GskGpuBufferEntries buffers;
};
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuDescriptors, gsk_gpu_descriptors, G_TYPE_OBJECT)
static void
gsk_gpu_descriptors_finalize (GObject *object)
{
GskGpuDescriptors *self = GSK_GPU_DESCRIPTORS (object);
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsk_gpu_image_entries_clear (&priv->images);
gsk_gpu_buffer_entries_clear (&priv->buffers);
G_OBJECT_CLASS (gsk_gpu_descriptors_parent_class)->finalize (object);
gsk_gpu_image_entries_clear (&self->images);
gsk_gpu_buffer_entries_clear (&self->buffers);
}
static void
gsk_gpu_descriptors_class_init (GskGpuDescriptorsClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
object_class->finalize = gsk_gpu_descriptors_finalize;
}
static void
void
gsk_gpu_descriptors_init (GskGpuDescriptors *self)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsk_gpu_image_entries_init (&priv->images);
gsk_gpu_buffer_entries_init (&priv->buffers);
gsk_gpu_image_entries_init (&self->images);
gsk_gpu_buffer_entries_init (&self->buffers);
}
gsize
gsk_gpu_descriptors_get_n_images (GskGpuDescriptors *self)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
return gsk_gpu_image_entries_get_size (&priv->images);
return gsk_gpu_image_entries_get_size (&self->images);
}
gsize
gsk_gpu_descriptors_get_n_buffers (GskGpuDescriptors *self)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
return gsk_gpu_buffer_entries_get_size (&priv->buffers);
return gsk_gpu_buffer_entries_get_size (&self->buffers);
}
void
gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
gsize n_images,
gsize n_buffers)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
g_assert (n_images <= gsk_gpu_image_entries_get_size (&self->images));
gsk_gpu_image_entries_set_size (&self->images, n_images);
g_assert (n_images <= gsk_gpu_image_entries_get_size (&priv->images));
gsk_gpu_image_entries_set_size (&priv->images, n_images);
g_assert (n_buffers <= gsk_gpu_buffer_entries_get_size (&priv->buffers));
gsk_gpu_buffer_entries_set_size (&priv->buffers, n_buffers);
g_assert (n_buffers <= gsk_gpu_buffer_entries_get_size (&self->buffers));
gsk_gpu_buffer_entries_set_size (&self->buffers, n_buffers);
}
GskGpuImage *
gsk_gpu_descriptors_get_image (GskGpuDescriptors *self,
gsize id)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, id);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&self->images, id);
return entry->image;
}
@@ -134,8 +90,7 @@ GskGpuSampler
gsk_gpu_descriptors_get_sampler (GskGpuDescriptors *self,
gsize id)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, id);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&self->images, id);
return entry->sampler;
}
@@ -144,12 +99,11 @@ gsize
gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
guint32 descriptor)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsize i;
for (i = 0; i < gsk_gpu_image_entries_get_size (&priv->images); i++)
for (i = 0; i < gsk_gpu_image_entries_get_size (&self->images); i++)
{
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, i);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&self->images, i);
if (entry->descriptor == descriptor)
return i;
@@ -162,8 +116,7 @@ GskGpuBuffer *
gsk_gpu_descriptors_get_buffer (GskGpuDescriptors *self,
gsize id)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
const GskGpuBufferEntry *entry = gsk_gpu_buffer_entries_get (&priv->buffers, id);
const GskGpuBufferEntry *entry = gsk_gpu_buffer_entries_get (&self->buffers, id);
return entry->buffer;
}
@@ -174,13 +127,12 @@ gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
GskGpuSampler sampler,
guint32 *out_descriptor)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsize i;
guint32 descriptor;
for (i = 0; i < gsk_gpu_image_entries_get_size (&priv->images); i++)
for (i = 0; i < gsk_gpu_image_entries_get_size (&self->images); i++)
{
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&priv->images, i);
const GskGpuImageEntry *entry = gsk_gpu_image_entries_get (&self->images, i);
if (entry->image == image && entry->sampler == sampler)
{
@@ -189,10 +141,10 @@ gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
}
}
if (!GSK_GPU_DESCRIPTORS_GET_CLASS (self)->add_image (self, image, sampler, &descriptor))
if (!self->desc_class->add_image (self, image, sampler, &descriptor))
return FALSE;
gsk_gpu_image_entries_append (&priv->images,
gsk_gpu_image_entries_append (&self->images,
&(GskGpuImageEntry) {
.image = g_object_ref (image),
.sampler = sampler,
@@ -209,13 +161,12 @@ gsk_gpu_descriptors_add_buffer (GskGpuDescriptors *self,
GskGpuBuffer *buffer,
guint32 *out_descriptor)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsize i;
guint32 descriptor;
for (i = 0; i < gsk_gpu_buffer_entries_get_size (&priv->buffers); i++)
for (i = 0; i < gsk_gpu_buffer_entries_get_size (&self->buffers); i++)
{
const GskGpuBufferEntry *entry = gsk_gpu_buffer_entries_get (&priv->buffers, i);
const GskGpuBufferEntry *entry = gsk_gpu_buffer_entries_get (&self->buffers, i);
if (entry->buffer == buffer)
{
@@ -224,10 +175,10 @@ gsk_gpu_descriptors_add_buffer (GskGpuDescriptors *self,
}
}
if (!GSK_GPU_DESCRIPTORS_GET_CLASS (self)->add_buffer (self, buffer, &descriptor))
if (!self->desc_class->add_buffer (self, buffer, &descriptor))
return FALSE;
gsk_gpu_buffer_entries_append (&priv->buffers,
gsk_gpu_buffer_entries_append (&self->buffers,
&(GskGpuBufferEntry) {
.buffer = g_object_ref (buffer),
.descriptor = descriptor
@@ -238,3 +189,22 @@ gsk_gpu_descriptors_add_buffer (GskGpuDescriptors *self,
return TRUE;
}
GskGpuDescriptors *
gsk_gpu_descriptors_ref (GskGpuDescriptors *self)
{
self->ref_count++;
return self;
}
void
gsk_gpu_descriptors_unref (GskGpuDescriptors *self)
{
self->ref_count--;
if (self->ref_count == 0)
{
self->desc_class->finalize (self);
g_free (self);
}
}

View File

@@ -2,26 +2,62 @@
#include "gskgputypesprivate.h"
typedef struct _GskGpuImageEntry GskGpuImageEntry;
typedef struct _GskGpuBufferEntry GskGpuBufferEntry;
struct _GskGpuImageEntry
{
GskGpuImage *image;
GskGpuSampler sampler;
guint32 descriptor;
};
struct _GskGpuBufferEntry
{
GskGpuBuffer *buffer;
guint32 descriptor;
};
#define INCLUDE_DECL 1
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 16
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define INCLUDE_DECL 1
#define GDK_ARRAY_NAME gsk_gpu_buffer_entries
#define GDK_ARRAY_TYPE_NAME GskGpuBufferEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuBufferEntry
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 4
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
G_BEGIN_DECLS
#define GSK_TYPE_GPU_DESCRIPTORS (gsk_gpu_descriptors_get_type ())
#define GSK_GPU_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptors))
#define GSK_GPU_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptorsClass))
#define GSK_IS_GPU_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_GPU_DESCRIPTORS))
#define GSK_IS_GPU_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_GPU_DESCRIPTORS))
#define GSK_GPU_DESCRIPTORS_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_GPU_DESCRIPTORS, GskGpuDescriptorsClass))
typedef struct _GskGpuDescriptors GskGpuDescriptors;
typedef struct _GskGpuDescriptorsClass GskGpuDescriptorsClass;
#define GSK_GPU_DESCRIPTORS(d) ((GskGpuDescriptors *) (d))
struct _GskGpuDescriptors
{
GObject parent_instance;
GskGpuDescriptorsClass *desc_class;
int ref_count;
GskGpuImageEntries images;
GskGpuBufferEntries buffers;
};
struct _GskGpuDescriptorsClass
{
GObjectClass parent_class;
void (* finalize) (GskGpuDescriptors *self);
gboolean (* add_image) (GskGpuDescriptors *self,
GskGpuImage *image,
GskGpuSampler sampler,
@@ -31,7 +67,9 @@ struct _GskGpuDescriptorsClass
guint32 *out_id);
};
GType gsk_gpu_descriptors_get_type (void) G_GNUC_CONST;
GskGpuDescriptors * gsk_gpu_descriptors_ref (GskGpuDescriptors *self);
void gsk_gpu_descriptors_unref (GskGpuDescriptors *self);
gsize gsk_gpu_descriptors_get_n_images (GskGpuDescriptors *self);
gsize gsk_gpu_descriptors_get_n_buffers (GskGpuDescriptors *self);
@@ -55,7 +93,7 @@ gboolean gsk_gpu_descriptors_add_buffer (GskGpuD
GskGpuBuffer *buffer,
guint32 *out_descriptor);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuDescriptors, g_object_unref)
void gsk_gpu_descriptors_finalize (GskGpuDescriptors *self);
void gsk_gpu_descriptors_init (GskGpuDescriptors *self);
G_END_DECLS

View File

@@ -13,8 +13,6 @@
#include "gsk/gskdebugprivate.h"
#include "gsk/gskprivate.h"
#define MAX_SLICES_PER_ATLAS 64
#define ATLAS_SIZE 1024
#define MAX_ATLAS_ITEM_SIZE 256
@@ -26,10 +24,6 @@
G_STATIC_ASSERT (MAX_ATLAS_ITEM_SIZE < ATLAS_SIZE);
G_STATIC_ASSERT (MAX_DEAD_PIXELS < ATLAS_SIZE * ATLAS_SIZE);
typedef struct _GskGpuCached GskGpuCached;
typedef struct _GskGpuCachedClass GskGpuCachedClass;
typedef struct _GskGpuCachedAtlas GskGpuCachedAtlas;
typedef struct _GskGpuCachedGlyph GskGpuCachedGlyph;
typedef struct _GskGpuCachedTexture GskGpuCachedTexture;
typedef struct _GskGpuDevicePrivate GskGpuDevicePrivate;
@@ -66,37 +60,6 @@ struct _GskGpuCachedClass
gint64 timestamp);
};
struct _GskGpuCached
{
const GskGpuCachedClass *class;
GskGpuCachedAtlas *atlas;
GskGpuCached *next;
GskGpuCached *prev;
gint64 timestamp;
gboolean stale;
guint pixels; /* For glyphs and textures, pixels. For atlases, dead pixels */
};
static inline void
mark_as_stale (GskGpuCached *cached,
gboolean stale)
{
if (cached->stale != stale)
{
cached->stale = stale;
if (cached->atlas)
{
if (stale)
((GskGpuCached *) cached->atlas)->pixels += cached->pixels;
else
((GskGpuCached *) cached->atlas)->pixels -= cached->pixels;
}
}
}
static void
gsk_gpu_cached_free (GskGpuDevice *device,
GskGpuCached *cached)
@@ -148,15 +111,6 @@ gsk_gpu_cached_new (GskGpuDevice *device,
return cached;
}
static void
gsk_gpu_cached_use (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
cached->timestamp = timestamp;
mark_as_stale (cached, FALSE);
}
static inline gboolean
gsk_gpu_cached_is_old (GskGpuDevice *device,
GskGpuCached *cached,
@@ -173,19 +127,6 @@ gsk_gpu_cached_is_old (GskGpuDevice *device,
/* }}} */
/* {{{ CachedAtlas */
struct _GskGpuCachedAtlas
{
GskGpuCached parent;
GskGpuImage *image;
gsize n_slices;
struct {
gsize width;
gsize height;
} slices[MAX_SLICES_PER_ATLAS];
};
static void
gsk_gpu_cached_atlas_free (GskGpuDevice *device,
GskGpuCached *cached)
@@ -233,6 +174,8 @@ gsk_gpu_cached_atlas_new (GskGpuDevice *device)
self = gsk_gpu_cached_new (device, &GSK_GPU_CACHED_ATLAS_CLASS, NULL);
self->image = GSK_GPU_DEVICE_GET_CLASS (device)->create_atlas_image (device, ATLAS_SIZE, ATLAS_SIZE);
self->has_colorize = FALSE;
return self;
}
@@ -357,20 +300,6 @@ gsk_gpu_cached_texture_new (GskGpuDevice *device,
/* }}} */
/* {{{ CachedGlyph */
struct _GskGpuCachedGlyph
{
GskGpuCached parent;
PangoFont *font;
PangoGlyph glyph;
GskGpuGlyphLookupFlags flags;
float scale;
GskGpuImage *image;
graphene_rect_t bounds;
graphene_point_t origin;
};
static void
gsk_gpu_cached_glyph_free (GskGpuDevice *device,
GskGpuCached *cached)
@@ -380,7 +309,7 @@ gsk_gpu_cached_glyph_free (GskGpuDevice *device,
g_hash_table_remove (priv->glyph_cache, self);
g_object_unref (self->font);
g_object_unref (self->key.font);
g_object_unref (self->image);
g_free (self);
@@ -408,10 +337,10 @@ gsk_gpu_cached_glyph_hash (gconstpointer data)
{
const GskGpuCachedGlyph *glyph = data;
return GPOINTER_TO_UINT (glyph->font) ^
glyph->glyph ^
(glyph->flags << 24) ^
((guint) glyph->scale * PANGO_SCALE);
return GPOINTER_TO_UINT (glyph->key.font) ^
glyph->key.glyph ^
(glyph->key.flags << 24) ^
((guint) glyph->key.scale * PANGO_SCALE);
}
static gboolean
@@ -421,10 +350,10 @@ gsk_gpu_cached_glyph_equal (gconstpointer v1,
const GskGpuCachedGlyph *glyph1 = v1;
const GskGpuCachedGlyph *glyph2 = v2;
return glyph1->font == glyph2->font
&& glyph1->glyph == glyph2->glyph
&& glyph1->flags == glyph2->flags
&& glyph1->scale == glyph2->scale;
return glyph1->key.font == glyph2->key.font
&& glyph1->key.glyph == glyph2->key.glyph
&& glyph1->key.flags == glyph2->key.flags
&& glyph1->key.scale == glyph2->key.scale;
}
static const GskGpuCachedClass GSK_GPU_CACHED_GLYPH_CLASS =
@@ -894,10 +823,10 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
GskGpuCachedGlyph lookup = {
.font = font,
.glyph = glyph,
.flags = flags,
.scale = scale
.key.font = font,
.key.glyph = glyph,
.key.flags = flags,
.key.scale = scale
};
GskGpuCachedGlyph *cache;
PangoRectangle ink_rect;
@@ -907,14 +836,20 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
gsize atlas_x, atlas_y, padding;
float subpixel_x, subpixel_y;
PangoFont *scaled_font;
guint64 timestamp = gsk_gpu_frame_get_timestamp (frame);
guint front_index = glyph & 0xFF;
cache = g_hash_table_lookup (priv->glyph_cache, &lookup);
if (cache)
{
gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));
memcpy (&self->front[front_index].key, &lookup.key, sizeof (GlyphKey));
self->front[front_index].value = cache;
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
*out_bounds = cache->bounds;
*out_origin = cache->origin;
return cache->image;
}
@@ -947,10 +882,10 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
cache = gsk_gpu_cached_new (self, &GSK_GPU_CACHED_GLYPH_CLASS, NULL);
}
cache->font = g_object_ref (font);
cache->glyph = glyph;
cache->flags = flags;
cache->scale = scale;
cache->key.font = g_object_ref (font);
cache->key.glyph = glyph;
cache->key.flags = flags;
cache->key.scale = scale;
cache->bounds = rect;
cache->image = image;
cache->origin = GRAPHENE_POINT_INIT (- origin.x + subpixel_x,
@@ -971,7 +906,10 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
cache->origin.y + padding));
g_hash_table_insert (priv->glyph_cache, cache, cache);
gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
memcpy (&self->front[front_index].key, &lookup.key, sizeof (GlyphKey));
self->front[front_index].value = cache;
*out_bounds = cache->bounds;
*out_origin = cache->origin;
@@ -981,5 +919,43 @@ gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
return cache->image;
}
GskGpuImage *
gsk_gpu_device_get_solid_image (GskGpuDevice *self,
GskGpuFrame *frame,
graphene_rect_t *out_bounds)
{
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
gsk_gpu_device_ensure_atlas (self, FALSE);
if (!priv->current_atlas->has_colorize)
{
gsize x, y;
if (!gsk_gpu_cached_atlas_allocate (priv->current_atlas, 5, 5, &x, &y))
{
gsk_gpu_device_ensure_atlas (self, TRUE);
gsk_gpu_cached_atlas_allocate (priv->current_atlas, 5, 5, &x, &y);
}
gsk_gpu_upload_solid_op (frame,
priv->current_atlas->image,
&(cairo_rectangle_int_t) { x, y, 5, 5 },
&GRAPHENE_POINT_INIT (1, 1));
priv->current_atlas->colorize_x = x + 2;
priv->current_atlas->colorize_y = y + 2;
priv->current_atlas->has_colorize = TRUE;
}
out_bounds->origin.x = priv->current_atlas->colorize_x;
out_bounds->origin.y = priv->current_atlas->colorize_y;
out_bounds->size.width = 1;
out_bounds->size.height = 1;
return priv->current_atlas->image;
}
/* }}} */
/* vim:set foldmethod=marker expandtab: */

View File

@@ -1,6 +1,7 @@
#pragma once
#include "gskgputypesprivate.h"
#include "gskgpuframeprivate.h"
#include <graphene.h>
@@ -15,9 +16,81 @@ G_BEGIN_DECLS
typedef struct _GskGpuDeviceClass GskGpuDeviceClass;
typedef enum
{
GSK_GPU_GLYPH_X_OFFSET_1 = 0x1,
GSK_GPU_GLYPH_X_OFFSET_2 = 0x2,
GSK_GPU_GLYPH_X_OFFSET_3 = 0x3,
GSK_GPU_GLYPH_Y_OFFSET_1 = 0x4,
GSK_GPU_GLYPH_Y_OFFSET_2 = 0x8,
GSK_GPU_GLYPH_Y_OFFSET_3 = 0xC
} GskGpuGlyphLookupFlags;
typedef struct _GskGpuCached GskGpuCached;
typedef struct _GskGpuCachedGlyph GskGpuCachedGlyph;
typedef struct _GskGpuCachedAtlas GskGpuCachedAtlas;
typedef struct _GskGpuCachedClass GskGpuCachedClass;
#define MAX_SLICES_PER_ATLAS 64
struct _GskGpuCached
{
const GskGpuCachedClass *class;
GskGpuCachedAtlas *atlas;
GskGpuCached *next;
GskGpuCached *prev;
gint64 timestamp;
gboolean stale;
guint pixels; /* For glyphs and textures, pixels. For atlases, dead pixels */
};
struct _GskGpuCachedAtlas
{
GskGpuCached parent;
GskGpuImage *image;
gboolean has_colorize;
gsize colorize_x;
gsize colorize_y;
gsize n_slices;
struct {
gsize width;
gsize height;
} slices[MAX_SLICES_PER_ATLAS];
};
typedef struct _GlyphKey GlyphKey;
struct _GlyphKey
{
PangoFont *font;
PangoGlyph glyph;
GskGpuGlyphLookupFlags flags;
float scale;
};
struct _GskGpuCachedGlyph
{
GskGpuCached parent;
GlyphKey key;
GskGpuImage *image;
graphene_rect_t bounds;
graphene_point_t origin;
};
struct _GskGpuDevice
{
GObject parent_instance;
struct {
GlyphKey key;
GskGpuCachedGlyph *value;
} front[256];
};
struct _GskGpuDeviceClass
@@ -79,16 +152,6 @@ void gsk_gpu_device_cache_texture_image (GskGpuD
gint64 timestamp,
GskGpuImage *image);
typedef enum
{
GSK_GPU_GLYPH_X_OFFSET_1 = 0x1,
GSK_GPU_GLYPH_X_OFFSET_2 = 0x2,
GSK_GPU_GLYPH_X_OFFSET_3 = 0x3,
GSK_GPU_GLYPH_Y_OFFSET_1 = 0x4,
GSK_GPU_GLYPH_Y_OFFSET_2 = 0x8,
GSK_GPU_GLYPH_Y_OFFSET_3 = 0xC
} GskGpuGlyphLookupFlags;
GskGpuImage * gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
GskGpuFrame *frame,
PangoFont *font,
@@ -98,6 +161,72 @@ GskGpuImage * gsk_gpu_device_lookup_glyph_image (GskGpuD
graphene_rect_t *out_bounds,
graphene_point_t *out_origin);
GskGpuImage * gsk_gpu_device_get_solid_image (GskGpuDevice *self,
GskGpuFrame *frame,
graphene_rect_t *out_bounds);
static inline void
mark_as_stale (GskGpuCached *cached,
gboolean stale)
{
if (cached->stale != stale)
{
cached->stale = stale;
if (cached->atlas)
{
if (stale)
((GskGpuCached *) cached->atlas)->pixels += cached->pixels;
else
((GskGpuCached *) cached->atlas)->pixels -= cached->pixels;
}
}
}
static inline void
gsk_gpu_cached_use (GskGpuDevice *device,
GskGpuCached *cached,
gint64 timestamp)
{
cached->timestamp = timestamp;
mark_as_stale (cached, FALSE);
}
static inline GskGpuImage *
_gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
GskGpuFrame *frame,
PangoFont *font,
PangoGlyph glyph,
GskGpuGlyphLookupFlags flags,
float scale,
graphene_rect_t *out_bounds,
graphene_point_t *out_origin)
{
GskGpuCachedGlyph lookup = {
.key.font = font,
.key.glyph = glyph,
.key.flags = flags,
.key.scale = scale
};
guint front_index = glyph & 0xFF;
if (memcmp (&lookup.key, &self->front[front_index], sizeof (GlyphKey)) == 0)
{
GskGpuCachedGlyph *cache = self->front[front_index].value;
gsk_gpu_cached_use (self, (GskGpuCached *) cache,
gsk_gpu_frame_get_timestamp (frame));
*out_bounds = cache->bounds;
*out_origin = cache->origin;
return cache->image;
}
return gsk_gpu_device_lookup_glyph_image (self, frame, font, glyph, flags, scale, out_bounds, out_origin);
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuDevice, g_object_unref)

View File

@@ -40,10 +40,9 @@ struct _GskGpuFramePrivate
GskGpuOps ops;
GskGpuOp *first_op;
GskGpuOp *last_op;
GskGpuBuffer *vertex_buffer;
guchar *vertex_buffer_data;
gsize vertex_buffer_used;
GskGpuBuffer *storage_buffer;
guchar *storage_buffer_data;
gsize storage_buffer_used;
@@ -70,6 +69,8 @@ gsk_gpu_frame_default_cleanup (GskGpuFrame *self)
gsk_gpu_op_finish (op);
}
gsk_gpu_ops_set_size (&priv->ops, 0);
priv->last_op = NULL;
}
static void
@@ -110,7 +111,7 @@ gsk_gpu_frame_finalize (GObject *object)
gsk_gpu_ops_clear (&priv->ops);
g_clear_object (&priv->vertex_buffer);
g_clear_object (&self->vertex_buffer);
g_clear_object (&priv->storage_buffer);
g_object_unref (priv->device);
@@ -234,7 +235,7 @@ gsk_gpu_frame_seal_ops (GskGpuFrame *self)
}
}
typedef struct
typedef struct
{
struct {
GskGpuOp *first;
@@ -331,7 +332,7 @@ gsk_gpu_frame_sort_ops (GskGpuFrame *self)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
SortData sort_data = { { NULL, }, };
gsk_gpu_frame_sort_render_pass (self, priv->first_op, &sort_data);
if (sort_data.upload.first)
@@ -343,6 +344,8 @@ gsk_gpu_frame_sort_ops (GskGpuFrame *self)
priv->first_op = sort_data.command.first;
if (sort_data.command.last)
sort_data.command.last->next = NULL;
priv->last_op = NULL;
}
gpointer
@@ -360,7 +363,17 @@ gsk_gpu_frame_alloc_op (GskGpuFrame *self,
NULL,
size);
return gsk_gpu_ops_index (&priv->ops, pos);
priv->last_op = (GskGpuOp *) gsk_gpu_ops_index (&priv->ops, pos);
return priv->last_op;
}
GskGpuOp *
gsk_gpu_frame_get_last_op (GskGpuFrame *self)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
return priv->last_op;
}
GskGpuImage *
@@ -399,57 +412,41 @@ gsk_gpu_frame_create_storage_buffer (GskGpuFrame *self,
return GSK_GPU_FRAME_GET_CLASS (self)->create_storage_buffer (self, size);
}
static inline gsize
round_up (gsize number, gsize divisor)
{
return (number + divisor - 1) / divisor * divisor;
}
gsize
gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size)
_gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
gsize size_needed;
if (priv->vertex_buffer == NULL)
priv->vertex_buffer = gsk_gpu_frame_create_vertex_buffer (self, DEFAULT_VERTEX_BUFFER_SIZE);
if (self->vertex_buffer == NULL)
self->vertex_buffer = gsk_gpu_frame_create_vertex_buffer (self, DEFAULT_VERTEX_BUFFER_SIZE);
size_needed = round_up (priv->vertex_buffer_used, size) + size;
size_needed = round_up (self->vertex_buffer_used, size) + size;
if (gsk_gpu_buffer_get_size (priv->vertex_buffer) < size_needed)
if (gsk_gpu_buffer_get_size (self->vertex_buffer) < size_needed)
{
gsize old_size = gsk_gpu_buffer_get_size (priv->vertex_buffer);
gsize old_size = gsk_gpu_buffer_get_size (self->vertex_buffer);
GskGpuBuffer *new_buffer = gsk_gpu_frame_create_vertex_buffer (self, old_size * 2);
guchar *new_data = gsk_gpu_buffer_map (new_buffer);
if (priv->vertex_buffer_data)
g_print ("regrow\n");
if (self->vertex_buffer_data)
{
memcpy (new_data, priv->vertex_buffer_data, old_size);
gsk_gpu_buffer_unmap (priv->vertex_buffer, old_size);
memcpy (new_data, self->vertex_buffer_data, old_size);
gsk_gpu_buffer_unmap (self->vertex_buffer, old_size);
}
g_object_unref (priv->vertex_buffer);
priv->vertex_buffer = new_buffer;
priv->vertex_buffer_data = new_data;
g_object_unref (self->vertex_buffer);
self->vertex_buffer = new_buffer;
self->vertex_buffer_data = new_data;
}
priv->vertex_buffer_used = size_needed;
self->vertex_buffer_used = size_needed;
return size_needed - size;
}
guchar *
gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
if (priv->vertex_buffer_data == NULL)
priv->vertex_buffer_data = gsk_gpu_buffer_map (priv->vertex_buffer);
return priv->vertex_buffer_data + offset;
}
static void
gsk_gpu_frame_ensure_storage_buffer (GskGpuFrame *self)
{
@@ -589,11 +586,11 @@ gsk_gpu_frame_submit (GskGpuFrame *self)
gsk_gpu_frame_sort_ops (self);
gsk_gpu_frame_verbose_print (self, "after sort");
if (priv->vertex_buffer)
if (self->vertex_buffer)
{
gsk_gpu_buffer_unmap (priv->vertex_buffer, priv->vertex_buffer_used);
priv->vertex_buffer_data = NULL;
priv->vertex_buffer_used = 0;
gsk_gpu_buffer_unmap (self->vertex_buffer, self->vertex_buffer_used);
self->vertex_buffer_data = NULL;
self->vertex_buffer_used = 0;
}
if (priv->storage_buffer_data)
@@ -604,7 +601,7 @@ gsk_gpu_frame_submit (GskGpuFrame *self)
}
GSK_GPU_FRAME_GET_CLASS (self)->submit (self,
priv->vertex_buffer,
self->vertex_buffer,
priv->first_op);
}

View File

@@ -2,6 +2,7 @@
#include "gskgpurenderer.h"
#include "gskgputypesprivate.h"
#include "gskgpubufferprivate.h"
G_BEGIN_DECLS
@@ -17,6 +18,10 @@ typedef struct _GskGpuFrameClass GskGpuFrameClass;
struct _GskGpuFrame
{
GObject parent_instance;
GskGpuBuffer *vertex_buffer;
gsize vertex_buffer_used;
guchar *vertex_buffer_data;
};
struct _GskGpuFrameClass
@@ -59,10 +64,8 @@ GskGpuImage * gsk_gpu_frame_upload_texture (GskGpuF
gboolean with_mipmap,
GdkTexture *texture);
GskGpuDescriptors * gsk_gpu_frame_create_descriptors (GskGpuFrame *self);
gsize gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize _gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size);
guchar * gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset);
GskGpuBuffer * gsk_gpu_frame_write_storage_buffer (GskGpuFrame *self,
const guchar *data,
gsize size,
@@ -83,6 +86,42 @@ void gsk_gpu_frame_download_texture (GskGpuF
GdkMemoryFormat format,
guchar *data,
gsize stride);
GskGpuOp *gsk_gpu_frame_get_last_op (GskGpuFrame *self);
static inline gsize
round_up (gsize number, gsize divisor)
{
return (number + divisor - 1) / divisor * divisor;
}
static inline gsize
gsk_gpu_frame_reserve_vertex_data (GskGpuFrame *self,
gsize size)
{
if (self->vertex_buffer != NULL)
{
gsize size_needed = round_up (self->vertex_buffer_used, size) + size;
if (gsk_gpu_buffer_get_size (self->vertex_buffer) >= size_needed)
{
self->vertex_buffer_used = size_needed;
return size_needed - size;
}
}
return _gsk_gpu_frame_reserve_vertex_data (self, size);
}
static inline guchar *
gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset)
{
if G_UNLIKELY (self->vertex_buffer_data == NULL)
self->vertex_buffer_data = gsk_gpu_buffer_map (self->vertex_buffer);
return self->vertex_buffer_data + offset;
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuFrame, g_object_unref)

View File

@@ -2,30 +2,18 @@
#include "gskgpuimageprivate.h"
typedef struct _GskGpuImagePrivate GskGpuImagePrivate;
struct _GskGpuImagePrivate
{
GskGpuImageFlags flags;
GdkMemoryFormat format;
gsize width;
gsize height;
};
#define ORTHO_NEAR_PLANE -10000
#define ORTHO_FAR_PLANE 10000
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuImage, gsk_gpu_image, G_TYPE_OBJECT)
G_DEFINE_TYPE (GskGpuImage, gsk_gpu_image, G_TYPE_OBJECT)
static void
gsk_gpu_image_get_default_projection_matrix (GskGpuImage *self,
graphene_matrix_t *out_projection)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
graphene_matrix_init_ortho (out_projection,
0, priv->width,
0, priv->height,
0, self->width,
0, self->height,
ORTHO_NEAR_PLANE,
ORTHO_FAR_PLANE);
}
@@ -45,11 +33,10 @@ static void
gsk_gpu_image_dispose (GObject *object)
{
GskGpuImage *self = GSK_GPU_IMAGE (object);
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
if (priv->flags & GSK_GPU_IMAGE_TOGGLE_REF)
if (self->flags & GSK_GPU_IMAGE_TOGGLE_REF)
{
priv->flags &= ~GSK_GPU_IMAGE_TOGGLE_REF;
self->flags &= ~GSK_GPU_IMAGE_TOGGLE_REF;
G_OBJECT (self)->ref_count++;
g_object_remove_toggle_ref (G_OBJECT (self), gsk_gpu_image_texture_toggle_ref_cb, NULL);
}
@@ -79,12 +66,10 @@ gsk_gpu_image_setup (GskGpuImage *self,
gsize width,
gsize height)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
priv->flags = flags;
priv->format = format;
priv->width = width;
priv->height = height;
self->flags = flags;
self->format = format;
self->width = width;
self->height = height;
}
/*
@@ -105,55 +90,19 @@ void
gsk_gpu_image_toggle_ref_texture (GskGpuImage *self,
GdkTexture *texture)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
g_assert ((self->flags & GSK_GPU_IMAGE_TOGGLE_REF) == 0);
g_assert ((priv->flags & GSK_GPU_IMAGE_TOGGLE_REF) == 0);
priv->flags |= GSK_GPU_IMAGE_TOGGLE_REF;
self->flags |= GSK_GPU_IMAGE_TOGGLE_REF;
g_object_ref (texture);
g_object_add_toggle_ref (G_OBJECT (self), gsk_gpu_image_texture_toggle_ref_cb, texture);
g_object_unref (self);
}
GdkMemoryFormat
gsk_gpu_image_get_format (GskGpuImage *self)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
return priv->format;
}
gsize
gsk_gpu_image_get_width (GskGpuImage *self)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
return priv->width;
}
gsize
gsk_gpu_image_get_height (GskGpuImage *self)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
return priv->height;
}
GskGpuImageFlags
gsk_gpu_image_get_flags (GskGpuImage *self)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
return priv->flags;
}
void
gsk_gpu_image_set_flags (GskGpuImage *self,
GskGpuImageFlags flags)
{
GskGpuImagePrivate *priv = gsk_gpu_image_get_instance_private (self);
priv->flags |= flags;
self->flags |= flags;
}
void

View File

@@ -18,6 +18,10 @@ typedef struct _GskGpuImageClass GskGpuImageClass;
struct _GskGpuImage
{
GObject parent_instance;
GskGpuImageFlags flags;
GdkMemoryFormat format;
gsize width;
gsize height;
};
struct _GskGpuImageClass
@@ -38,16 +42,35 @@ void gsk_gpu_image_setup (GskGpuI
void gsk_gpu_image_toggle_ref_texture (GskGpuImage *self,
GdkTexture *texture);
GdkMemoryFormat gsk_gpu_image_get_format (GskGpuImage *self);
gsize gsk_gpu_image_get_width (GskGpuImage *self);
gsize gsk_gpu_image_get_height (GskGpuImage *self);
GskGpuImageFlags gsk_gpu_image_get_flags (GskGpuImage *self);
void gsk_gpu_image_set_flags (GskGpuImage *self,
GskGpuImageFlags flags);
void gsk_gpu_image_get_projection_matrix (GskGpuImage *self,
graphene_matrix_t *out_projection);
static inline GdkMemoryFormat
gsk_gpu_image_get_format (GskGpuImage *self)
{
return self->format;
}
static inline gsize
gsk_gpu_image_get_width (GskGpuImage *self)
{
return self->width;
}
static inline gsize
gsk_gpu_image_get_height (GskGpuImage *self)
{
return self->height;
}
static inline GskGpuImageFlags
gsk_gpu_image_get_flags (GskGpuImage *self)
{
return self->flags;
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuImage, g_object_unref)

View File

@@ -148,11 +148,20 @@ static void gsk_gpu_node_processor_add_node (GskGpuN
static gboolean gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
GskRenderNode *node);
static inline int
simple_roundf (float f)
{
/* roundf() does not appear to get intrinsics from GCC
* but this does and generally gives us the same answer.
*/
return floorf (f + .5);
}
static void
gsk_gpu_node_processor_finish (GskGpuNodeProcessor *self)
{
g_clear_pointer (&self->modelview, gsk_transform_unref);
g_clear_object (&self->desc);
g_clear_pointer (&self->desc, gsk_gpu_descriptors_unref);
}
static void
@@ -170,7 +179,7 @@ gsk_gpu_node_processor_init (GskGpuNodeProcessor *self,
self->frame = frame;
if (desc)
self->desc = g_object_ref (desc);
self->desc = gsk_gpu_descriptors_ref (desc);
else
self->desc = NULL;
@@ -268,7 +277,7 @@ gsk_gpu_node_processor_add_image (GskGpuNodeProcessor *self,
if (gsk_gpu_descriptors_add_image (self->desc, image, sampler, &descriptor))
return descriptor;
g_object_unref (self->desc);
gsk_gpu_descriptors_unref (self->desc);
}
self->desc = gsk_gpu_frame_create_descriptors (self->frame);
@@ -278,7 +287,7 @@ gsk_gpu_node_processor_add_image (GskGpuNodeProcessor *self,
g_assert_not_reached ();
return 0;
}
return descriptor;
}
@@ -422,12 +431,6 @@ gsk_gpu_pattern_writer_init (GskGpuPatternWriter *self,
pattern_buffer_init (&self->buffer);
}
static inline gsize
round_up (gsize number, gsize divisor)
{
return (number + divisor - 1) / divisor * divisor;
}
static void
gsk_gpu_pattern_writer_append (GskGpuPatternWriter *self,
gsize align,
@@ -435,8 +438,8 @@ gsk_gpu_pattern_writer_append (GskGpuPatternWriter *self,
gsize size)
{
pattern_buffer_set_size (&self->buffer, round_up (pattern_buffer_get_size (&self->buffer), align));
pattern_buffer_splice (&self->buffer,
pattern_buffer_splice (&self->buffer,
pattern_buffer_get_size (&self->buffer),
0,
FALSE,
@@ -545,7 +548,7 @@ gsk_gpu_pattern_writer_finish (GskGpuPatternWriter *self)
{
pattern_buffer_clear (&self->buffer);
g_assert (self->stack == 0);
g_clear_object (&self->desc);
g_clear_pointer (&self->desc, gsk_gpu_descriptors_unref);
}
static gboolean
@@ -672,12 +675,12 @@ gsk_gpu_node_processor_get_clip_bounds (GskGpuNodeProcessor *self,
- self->offset.x,
- self->offset.y,
out_bounds);
/* FIXME: We could try the scissor rect here.
* But how often is that smaller than the clip bounds?
*/
}
static gboolean G_GNUC_WARN_UNUSED_RESULT
gsk_gpu_node_processor_clip_node_bounds (GskGpuNodeProcessor *self,
GskRenderNode *node,
@@ -686,7 +689,7 @@ gsk_gpu_node_processor_clip_node_bounds (GskGpuNodeProcessor *self,
graphene_rect_t tmp;
gsk_gpu_node_processor_get_clip_bounds (self, &tmp);
if (!gsk_rect_intersection (&tmp, &node->bounds, out_bounds))
return FALSE;
@@ -1196,7 +1199,7 @@ gsk_gpu_node_processor_try_node_as_pattern (GskGpuNodeProcessor *self,
GskGpuBuffer *buffer;
gsize offset;
guint32 pattern_id;
g_assert (self->pending_globals == 0);
if (!gsk_gpu_node_processor_clip_node_bounds (self, node, &clipped))
@@ -1207,7 +1210,7 @@ gsk_gpu_node_processor_try_node_as_pattern (GskGpuNodeProcessor *self,
&self->scale,
&self->offset,
&clipped);
if (!gsk_gpu_node_processor_create_node_pattern (&writer, node))
{
gsk_gpu_pattern_writer_finish (&writer);
@@ -1252,7 +1255,7 @@ gsk_gpu_node_processor_try_node_as_pattern (GskGpuNodeProcessor *self,
return TRUE;
}
static void
gsk_gpu_node_processor_add_without_opacity (GskGpuNodeProcessor *self,
GskRenderNode *node)
@@ -1277,7 +1280,7 @@ gsk_gpu_node_processor_add_without_opacity (GskGpuNodeProcessor *self,
return;
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_color_matrix_op_opacity (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
self->desc,
@@ -1758,9 +1761,9 @@ gsk_gpu_node_processor_create_transform_pattern (GskGpuPatternWriter *self,
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POSITION_POP);
gsk_gpu_pattern_writer_pop_stack (self);
self->scale = old_scale;
self->bounds = old_bounds;
self->offset = old_offset;
self->scale = old_scale;
self->bounds = old_bounds;
self->offset = old_offset;
return result;
}
@@ -1778,6 +1781,57 @@ gsk_gpu_node_processor_add_opacity_node (GskGpuNodeProcessor *self,
self->opacity = old_opacity;
}
static void
gsk_gpu_node_processor_color_op (GskGpuNodeProcessor *self,
GskGpuShaderClip clip,
const graphene_rect_t *rect,
const graphene_point_t *offset,
const GdkRGBA *color)
{
GskGpuOp *last_op = gsk_gpu_frame_get_last_op (self->frame);
if (last_op &&
last_op->op_class == (const GskGpuOpClass *) &GSK_GPU_COLORIZE_OP_CLASS &&
gsk_gpu_frame_should_optimize (self->frame, GSK_GPU_OPTIMIZE_COLORIZE))
{
GskGpuDevice *device;
GskGpuImage *image;
guint32 descriptor;
graphene_rect_t bounds, tex_rect;
device = gsk_gpu_frame_get_device (self->frame);
image = gsk_gpu_device_get_solid_image (device, self->frame, &bounds);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_rect_scale (&GRAPHENE_RECT_INIT (- bounds.origin.x,
- bounds.origin.y,
gsk_gpu_image_get_width (image),
gsk_gpu_image_get_height (image)),
rect->size.width / bounds.size.width,
rect->size.height / bounds.size.height,
&tex_rect);
gsk_rect_init_offset (&tex_rect, &tex_rect, rect->origin.x, rect->origin.y);
gsk_gpu_colorize_op (self->frame,
clip,
self->desc,
descriptor,
rect,
offset,
&tex_rect,
color);
}
else
{
gsk_gpu_color_op (self->frame,
clip,
rect,
offset,
color);
}
}
static void
gsk_gpu_node_processor_add_color_node (GskGpuNodeProcessor *self,
GskRenderNode *node)
@@ -1812,11 +1866,11 @@ gsk_gpu_node_processor_add_color_node (GskGpuNodeProcessor *self,
if (self->modelview)
{
/* Yuck, rounded clip and modelview. I give up. */
gsk_gpu_color_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
&node->bounds,
&self->offset,
gsk_color_node_get_color (node));
gsk_gpu_node_processor_color_op (self,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
&node->bounds,
&self->offset,
gsk_color_node_get_color (node));
return;
}
@@ -1882,11 +1936,11 @@ gsk_gpu_node_processor_add_color_node (GskGpuNodeProcessor *self,
return;
}
gsk_gpu_color_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
&node->bounds,
&self->offset,
&GDK_RGBA_INIT_ALPHA (color, self->opacity));
gsk_gpu_node_processor_color_op (self,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
&node->bounds,
&self->offset,
&GDK_RGBA_INIT_ALPHA (color, self->opacity));
}
static gboolean
@@ -2263,7 +2317,7 @@ gsk_gpu_node_processor_add_gradient_node (GskGpuNodeProcessor *self,
}
stops = real_stops;
}
func (self, node, stops, n_stops);
return;
@@ -2285,7 +2339,7 @@ gsk_gpu_node_processor_add_gradient_node (GskGpuNodeProcessor *self,
other.blend = GSK_GPU_BLEND_ADD;
other.pending_globals |= GSK_GPU_GLOBAL_BLEND;
gsk_gpu_node_processor_sync_globals (&other, 0);
for (i = 0; i < n_stops; /* happens inside the loop */)
{
if (i == 0)
@@ -2381,7 +2435,7 @@ gsk_gpu_node_processor_create_linear_gradient_pattern (GskGpuPatternWriter *self
gsk_gpu_pattern_writer_append_point (self,
gsk_linear_gradient_node_get_end (node),
&self->offset);
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_linear_gradient_node_get_color_stops (node, NULL),
gsk_linear_gradient_node_get_n_color_stops (node));
@@ -2437,7 +2491,7 @@ gsk_gpu_node_processor_create_radial_gradient_pattern (GskGpuPatternWriter *self
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_vradius (node));
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_start (node));
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_end (node));
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_radial_gradient_node_get_color_stops (node, NULL),
gsk_radial_gradient_node_get_n_color_stops (node));
@@ -2480,7 +2534,7 @@ gsk_gpu_node_processor_create_conic_gradient_pattern (GskGpuPatternWriter *self,
gsk_conic_gradient_node_get_center (node),
&self->offset);
gsk_gpu_pattern_writer_append_float (self, gsk_conic_gradient_node_get_angle (node));
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_conic_gradient_node_get_color_stops (node, NULL),
gsk_conic_gradient_node_get_n_color_stops (node));
@@ -2555,7 +2609,7 @@ gsk_gpu_node_processor_add_shadow_node (GskGpuNodeProcessor *self,
image = gsk_gpu_node_processor_get_node_as_image (self,
0,
GSK_GPU_IMAGE_STRAIGHT_ALPHA,
&clip_bounds,
&clip_bounds,
child,
&tex_rect);
if (image == NULL)
@@ -2994,6 +3048,7 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
GskRenderNode *node)
{
GskGpuDevice *device;
GskGpuImage *last_image = NULL;
const PangoGlyphInfo *glyphs;
PangoFont *font;
graphene_point_t offset;
@@ -3002,6 +3057,7 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
GdkRGBA color;
gboolean glyph_align;
gboolean hinting;
guint32 descriptor = 0;
if (self->opacity < 1.0 &&
gsk_text_node_has_color_glyphs (node))
@@ -3032,7 +3088,6 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
GskGpuImage *image;
graphene_rect_t glyph_bounds, glyph_tex_rect;
graphene_point_t glyph_offset, glyph_origin;
guint32 descriptor;
GskGpuGlyphLookupFlags flags;
glyph_origin = GRAPHENE_POINT_INIT (offset.x + (float) glyphs[i].geometry.x_offset / PANGO_SCALE,
@@ -3043,15 +3098,15 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
/* Force glyph_origin.y to be device pixel aligned.
* The hinter expects that.
*/
glyph_origin.x = roundf (glyph_origin.x * scale * 4);
glyph_origin.x = simple_roundf (glyph_origin.x * scale * 4);
flags = ((int) glyph_origin.x & 3);
glyph_origin.x = 0.25 * inv_scale * glyph_origin.x;
glyph_origin.y = roundf (glyph_origin.y * scale) * inv_scale;
glyph_origin.y = simple_roundf (glyph_origin.y * scale) * inv_scale;
}
else if (glyph_align)
{
glyph_origin.x = roundf (glyph_origin.x * scale * 4);
glyph_origin.y = roundf (glyph_origin.y * scale * 4);
glyph_origin.x = simple_roundf (glyph_origin.x * scale * 4);
glyph_origin.y = simple_roundf (glyph_origin.y * scale * 4);
flags = ((int) glyph_origin.x & 3) |
(((int) glyph_origin.y & 3) << 2);
glyph_origin.x = 0.25 * inv_scale * glyph_origin.x;
@@ -3059,12 +3114,12 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
}
else
{
glyph_origin.x = roundf (glyph_origin.x * scale) * inv_scale;
glyph_origin.y = roundf (glyph_origin.y * scale) * inv_scale;
glyph_origin.x = simple_roundf (glyph_origin.x * scale) * inv_scale;
glyph_origin.y = simple_roundf (glyph_origin.y * scale) * inv_scale;
flags = 0;
}
image = gsk_gpu_device_lookup_glyph_image (device,
image = _gsk_gpu_device_lookup_glyph_image (device,
self->frame,
font,
glyphs[i].glyph,
@@ -3077,7 +3132,13 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
gsk_rect_scale (&GRAPHENE_RECT_INIT (0, 0, glyph_bounds.size.width, glyph_bounds.size.height), inv_scale, inv_scale, &glyph_bounds);
glyph_origin = GRAPHENE_POINT_INIT (glyph_origin.x - glyph_offset.x * inv_scale,
glyph_origin.y - glyph_offset.y * inv_scale);
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
if (image != last_image)
{
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
last_image = image;
}
if (glyphs[i].attr.is_color)
gsk_gpu_texture_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &glyph_offset, &glyph_bounds),
@@ -3153,15 +3214,15 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
/* Force glyph_origin.y to be device pixel aligned.
* The hinter expects that.
*/
glyph_origin.x = roundf (glyph_origin.x * scale * 4);
glyph_origin.x = simple_roundf (glyph_origin.x * scale * 4);
flags = ((int) glyph_origin.x & 3);
glyph_origin.x = 0.25 * inv_scale * glyph_origin.x;
glyph_origin.y = roundf (glyph_origin.y * scale) * inv_scale;
glyph_origin.y = simple_roundf (glyph_origin.y * scale) * inv_scale;
}
else if (glyph_align)
{
glyph_origin.x = roundf (glyph_origin.x * scale * 4);
glyph_origin.y = roundf (glyph_origin.y * scale * 4);
glyph_origin.x = simple_roundf (glyph_origin.x * scale * 4);
glyph_origin.y = simple_roundf (glyph_origin.y * scale * 4);
flags = ((int) glyph_origin.x & 3) |
(((int) glyph_origin.y & 3) << 2);
glyph_origin.x = 0.25 * inv_scale * glyph_origin.x;
@@ -3169,12 +3230,12 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
}
else
{
glyph_origin.x = roundf (glyph_origin.x * scale) * inv_scale;
glyph_origin.y = roundf (glyph_origin.y * scale) * inv_scale;
glyph_origin.x = simple_roundf (glyph_origin.x * scale) * inv_scale;
glyph_origin.y = simple_roundf (glyph_origin.y * scale) * inv_scale;
flags = 0;
}
image = gsk_gpu_device_lookup_glyph_image (device,
image = _gsk_gpu_device_lookup_glyph_image (device,
self->frame,
font,
glyphs[i].glyph,
@@ -3272,7 +3333,7 @@ gsk_gpu_node_processor_add_color_matrix_node (GskGpuNodeProcessor *self,
return;
descriptor = gsk_gpu_node_processor_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT);
gsk_gpu_color_matrix_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
self->desc,

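The glyph hunks above snap glyph origins to a quarter-device-pixel grid (full pixels vertically when hinting is in effect) and pass the low bits along as subpixel-phase flags; simple_roundf is the patch's intrinsic-friendly replacement for roundf. A minimal sketch of the snapping logic, using a hypothetical helper name and plain roundf, just to show the flag encoding in one place:

    /* Sketch only: mirrors the rounding in the hunks above;
     * snap_glyph_origin() is a hypothetical helper, not part of the patch. */
    static inline guint
    snap_glyph_origin (graphene_point_t *origin,
                       float             scale,
                       float             inv_scale)
    {
      /* quarter-device-pixel units */
      float x = roundf (origin->x * scale * 4);
      float y = roundf (origin->y * scale * 4);
      /* the low two bits of each axis describe the subpixel phase */
      guint flags = ((int) x & 3) | (((int) y & 3) << 2);

      /* back to user space */
      origin->x = 0.25 * inv_scale * x;
      origin->y = 0.25 * inv_scale * y;

      return flags;
    }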

@@ -31,7 +31,9 @@ static const GdkDebugKey gsk_gpu_optimization_keys[] = {
{ "gradients", GSK_GPU_OPTIMIZE_GRADIENTS, "Don't supersample gradients" },
{ "mipmap", GSK_GPU_OPTIMIZE_MIPMAP, "Avoid creating mipmaps" },
{ "glyph-align", GSK_GPU_OPTIMIZE_GLYPH_ALIGN, "Never align glyphs to the subpixel grid" },
{ "colorize", GSK_GPU_OPTIMIZE_COLORIZE, "Don't replace color by colorize" },
/* These require hardware support */
{ "gl-baseinstance", GSK_GPU_OPTIMIZE_GL_BASE_INSTANCE, "Assume no ARB/EXT_base_instance support" },
};
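The new "colorize" key means the color→colorize rewrite can be disabled at runtime with GSK_GPU_SKIP=colorize, as the commit message notes. Inside the renderer the flag presumably gates the rewrite roughly as in this sketch; only GSK_GPU_OPTIMIZE_COLORIZE comes from the patch, the surrounding names are illustrative:

    /* Illustration only: the condition and helper names are assumed. */
    if ((optimizations & GSK_GPU_OPTIMIZE_COLORIZE) &&
        previous_op_was_colorize)
      emit_colorize_op ();   /* reuse the colorize shader for the color node */
    else
      emit_color_op ();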


@@ -23,7 +23,7 @@ gsk_gpu_shader_op_finish (GskGpuOp *op)
{
GskGpuShaderOp *self = (GskGpuShaderOp *) op;
- g_clear_object (&self->desc);
+ g_clear_pointer (&self->desc, gsk_gpu_descriptors_unref);
}
#ifdef GDK_RENDERING_VULKAN
@@ -56,7 +56,7 @@ gsk_gpu_shader_op_vk_command_n (GskGpuOp *op,
for (next = op->next; next && i < n; next = next->next)
{
GskGpuShaderOp *next_shader = (GskGpuShaderOp *) next;
if (next->op_class != op->op_class ||
next_shader->desc != self->desc ||
next_shader->variation != self->variation ||
@@ -182,28 +182,3 @@ gsk_gpu_shader_op_gl_command (GskGpuOp *op,
return gsk_gpu_shader_op_gl_command_n (op, frame, state, 1);
}
- GskGpuShaderOp *
- gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
- const GskGpuShaderOpClass *op_class,
- guint32 variation,
- GskGpuShaderClip clip,
- GskGpuDescriptors *desc,
- gpointer out_vertex_data)
- {
- GskGpuShaderOp *self;
- self = (GskGpuShaderOp *) gsk_gpu_op_alloc (frame, &op_class->parent_class);
- self->variation = variation;
- self->clip = clip;
- if (desc)
- self->desc = g_object_ref (desc);
- else
- self->desc = NULL;
- self->vertex_offset = gsk_gpu_frame_reserve_vertex_data (frame, op_class->vertex_size);
- *((gpointer *) out_vertex_data) = gsk_gpu_frame_get_vertex_data (frame, self->vertex_offset);
- return self;
- }


@@ -3,6 +3,8 @@
#include "gskgpuopprivate.h"
#include "gskgputypesprivate.h"
#include "gskgpudescriptorsprivate.h"
#include "gskgpuframeprivate.h"
G_BEGIN_DECLS
@@ -29,13 +31,6 @@ struct _GskGpuShaderOpClass
void (* setup_vao) (gsize offset);
};
- GskGpuShaderOp * gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
- const GskGpuShaderOpClass *op_class,
- guint32 variation,
- GskGpuShaderClip clip,
- GskGpuDescriptors *desc,
- gpointer out_vertex_data);
void gsk_gpu_shader_op_finish (GskGpuOp *op);
#ifdef GDK_RENDERING_VULKAN
@@ -76,5 +71,30 @@ gsk_gpu_point_to_float (const graphene_point_t *point,
values[1] = point->y + offset->y;
}
static inline GskGpuShaderOp *
gsk_gpu_shader_op_alloc (GskGpuFrame *frame,
const GskGpuShaderOpClass *op_class,
guint32 variation,
GskGpuShaderClip clip,
GskGpuDescriptors *desc,
gpointer out_vertex_data)
{
GskGpuShaderOp *self;
self = (GskGpuShaderOp *) gsk_gpu_op_alloc (frame, &op_class->parent_class);
self->variation = variation;
self->clip = clip;
if (desc)
self->desc = gsk_gpu_descriptors_ref (desc);
else
self->desc = NULL;
self->vertex_offset = gsk_gpu_frame_reserve_vertex_data (frame, op_class->vertex_size);
*((gpointer *) out_vertex_data) = gsk_gpu_frame_get_vertex_data (frame, self->vertex_offset);
return self;
}
G_END_DECLS

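gsk_gpu_descriptors_ref()/unref() and the ref_count/desc_class fields that replace GObject in this series are referenced in these hunks but defined elsewhere. For orientation, the base type presumably looks roughly like the following sketch; the member and vfunc names are taken from the call sites in this diff, while the field order and the exact free path are assumptions:

    /* Assumed shape of the non-GObject descriptors base; not from the patch. */
    struct _GskGpuDescriptorsClass
    {
      void     (* finalize)   (GskGpuDescriptors *desc);
      gboolean (* add_image)  (GskGpuDescriptors *desc,
                               GskGpuImage       *image,
                               GskGpuSampler      sampler,
                               guint32           *out_descriptor);
      gboolean (* add_buffer) (GskGpuDescriptors *desc,
                               GskGpuBuffer      *buffer,
                               guint32           *out_descriptor);
    };

    struct _GskGpuDescriptors
    {
      GskGpuDescriptorsClass *desc_class;
      int                     ref_count;
      /* ... image/buffer arrays ... */
    };

    static inline GskGpuDescriptors *
    gsk_gpu_descriptors_ref (GskGpuDescriptors *desc)
    {
      desc->ref_count++;
      return desc;
    }

    static inline void
    gsk_gpu_descriptors_unref (GskGpuDescriptors *desc)
    {
      if (--desc->ref_count == 0)
        {
          desc->desc_class->finalize (desc);
          g_free (desc);
        }
    }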

@@ -119,7 +119,9 @@ typedef enum {
GSK_GPU_OPTIMIZE_GRADIENTS = 1 << 4,
GSK_GPU_OPTIMIZE_MIPMAP = 1 << 5,
GSK_GPU_OPTIMIZE_GLYPH_ALIGN = 1 << 6,
GSK_GPU_OPTIMIZE_COLORIZE = 1 << 7,
/* These require hardware support */
- GSK_GPU_OPTIMIZE_GL_BASE_INSTANCE = 1 << 7,
+ GSK_GPU_OPTIMIZE_GL_BASE_INSTANCE = 1 << 16,
} GskGpuOptimizations;


@@ -636,3 +636,134 @@ gsk_gpu_upload_glyph_op (GskGpuFrame *frame,
self->glyph = glyph;
self->origin = *origin;
}
typedef struct _GskGpuUploadSolidOp GskGpuUploadSolidOp;
struct _GskGpuUploadSolidOp
{
GskGpuOp op;
GskGpuImage *image;
cairo_rectangle_int_t area;
graphene_point_t origin;
GskGpuBuffer *buffer;
};
static void
gsk_gpu_upload_solid_op_finish (GskGpuOp *op)
{
GskGpuUploadSolidOp *self = (GskGpuUploadSolidOp *) op;
g_object_unref (self->image);
g_clear_object (&self->buffer);
}
static void
gsk_gpu_upload_solid_op_print (GskGpuOp *op,
GskGpuFrame *frame,
GString *string,
guint indent)
{
GskGpuUploadSolidOp *self = (GskGpuUploadSolidOp *) op;
gsk_gpu_print_op (string, indent, "upload-solid");
gsk_gpu_print_int_rect (string, &self->area);
gsk_gpu_print_newline (string);
}
static void
gsk_gpu_upload_solid_op_draw (GskGpuOp *op,
guchar *data,
gsize stride)
{
GskGpuUploadSolidOp *self = (GskGpuUploadSolidOp *) op;
cairo_surface_t *surface;
cairo_t *cr;
surface = cairo_image_surface_create_for_data (data,
CAIRO_FORMAT_ARGB32,
self->area.width,
self->area.height,
stride);
cairo_surface_set_device_offset (surface, self->origin.x, self->origin.y);
cr = cairo_create (surface);
cairo_set_operator (cr, CAIRO_OPERATOR_CLEAR);
cairo_paint (cr);
cairo_set_operator (cr, CAIRO_OPERATOR_OVER);
/* Make sure the entire surface is initialized to black */
cairo_set_source_rgba (cr, 0, 0, 0, 0);
cairo_rectangle (cr, 0.0, 0.0, self->area.width, self->area.height);
cairo_fill (cr);
/* Draw solid */
cairo_set_source_rgba (cr, 1, 1, 1, 1);
cairo_rectangle (cr, 0, 0, self->area.width, self->area.height);
cairo_fill (cr);
cairo_destroy (cr);
cairo_surface_finish (surface);
cairo_surface_destroy (surface);
}
#ifdef GDK_RENDERING_VULKAN
static GskGpuOp *
gsk_gpu_upload_solid_op_vk_command (GskGpuOp *op,
GskGpuFrame *frame,
GskVulkanCommandState *state)
{
GskGpuUploadSolidOp *self = (GskGpuUploadSolidOp *) op;
return gsk_gpu_upload_op_vk_command_with_area (op,
frame,
state,
GSK_VULKAN_IMAGE (self->image),
&self->area,
gsk_gpu_upload_solid_op_draw,
&self->buffer);
}
#endif
static GskGpuOp *
gsk_gpu_upload_solid_op_gl_command (GskGpuOp *op,
GskGpuFrame *frame,
GskGLCommandState *state)
{
GskGpuUploadSolidOp *self = (GskGpuUploadSolidOp *) op;
return gsk_gpu_upload_op_gl_command_with_area (op,
frame,
self->image,
&self->area,
gsk_gpu_upload_solid_op_draw);
}
static const GskGpuOpClass GSK_GPU_UPLOAD_SOLID_OP_CLASS = {
GSK_GPU_OP_SIZE (GskGpuUploadSolidOp),
GSK_GPU_STAGE_UPLOAD,
gsk_gpu_upload_solid_op_finish,
gsk_gpu_upload_solid_op_print,
#ifdef GDK_RENDERING_VULKAN
gsk_gpu_upload_solid_op_vk_command,
#endif
gsk_gpu_upload_solid_op_gl_command,
};
void
gsk_gpu_upload_solid_op (GskGpuFrame *frame,
GskGpuImage *image,
const cairo_rectangle_int_t *area,
const graphene_point_t *origin)
{
GskGpuUploadSolidOp *self;
self = (GskGpuUploadSolidOp *) gsk_gpu_op_alloc (frame, &GSK_GPU_UPLOAD_SOLID_OP_CLASS);
self->image = g_object_ref (image);
self->area = *area;
self->origin = *origin;
}

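gsk_gpu_upload_solid_op() just records a CPU-side draw that fills `area` of `image` with opaque white; per the commit messages it backs the device's new 'solid' atlas image, which text rendering then uses as a mask. A hedged sketch of a caller follows; the helper name and the idea that the device hands out an atlas slice are assumptions, only the op call itself comes from the patch:

    /* Illustrative caller, not part of the patch. */
    static void
    ensure_solid_mask (GskGpuFrame *frame,
                       GskGpuImage *atlas_image,
                       const cairo_rectangle_int_t *area)
    {
      graphene_point_t origin = GRAPHENE_POINT_INIT (0, 0);

      /* Queue the upload; the white pixels act as a fully opaque mask
       * for ops that sample this atlas slice. */
      gsk_gpu_upload_solid_op (frame, atlas_image, area, &origin);
    }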

@@ -27,5 +27,9 @@ void gsk_gpu_upload_glyph_op (GskGpuF
const cairo_rectangle_int_t *area,
const graphene_point_t *origin);
void gsk_gpu_upload_solid_op (GskGpuFrame *frame,
GskGpuImage *image,
const cairo_rectangle_int_t *area,
const graphene_point_t *origin);
G_END_DECLS


@@ -6,22 +6,13 @@
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
- G_DEFINE_TYPE (GskVulkanDescriptors, gsk_vulkan_descriptors, GSK_TYPE_GPU_DESCRIPTORS)
- static void
- gsk_vulkan_descriptors_class_init (GskVulkanDescriptorsClass *klass)
- {
- }
- static void
- gsk_vulkan_descriptors_init (GskVulkanDescriptors *self)
- {
- }
GskVulkanPipelineLayout *
gsk_vulkan_descriptors_get_pipeline_layout (GskVulkanDescriptors *self)
{
- return GSK_VULKAN_DESCRIPTORS_GET_CLASS (self)->get_pipeline_layout (self);
GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
GskVulkanDescriptorsClass *class = GSK_VULKAN_DESCRIPTORS_CLASS (desc->desc_class);
return class->get_pipeline_layout (self);
}
void
@@ -48,5 +39,20 @@ gsk_vulkan_descriptors_bind (GskVulkanDescriptors *self,
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer)
{
- return GSK_VULKAN_DESCRIPTORS_GET_CLASS (self)->bind (self, previous, vk_command_buffer);
GskGpuDescriptors *desc = GSK_GPU_DESCRIPTORS (self);
GskVulkanDescriptorsClass *class = GSK_VULKAN_DESCRIPTORS_CLASS (desc->desc_class);
return class->bind (self, previous, vk_command_buffer);
}
void
gsk_vulkan_descriptors_init (GskVulkanDescriptors *self)
{
gsk_gpu_descriptors_init ((GskGpuDescriptors *) self);
}
void
gsk_vulkan_descriptors_finalize (GskVulkanDescriptors *self)
{
gsk_gpu_descriptors_finalize ((GskGpuDescriptors *) self);
}


@@ -6,15 +6,12 @@
G_BEGIN_DECLS
- #define GSK_TYPE_VULKAN_DESCRIPTORS (gsk_vulkan_descriptors_get_type ())
- #define GSK_VULKAN_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_VULKAN_DESCRIPTORS, GskVulkanDescriptors))
- #define GSK_VULKAN_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_VULKAN_DESCRIPTORS, GskVulkanDescriptorsClass))
- #define GSK_IS_VULKAN_DESCRIPTORS(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_VULKAN_DESCRIPTORS))
- #define GSK_IS_VULKAN_DESCRIPTORS_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_VULKAN_DESCRIPTORS))
- #define GSK_VULKAN_DESCRIPTORS_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_VULKAN_DESCRIPTORS, GskVulkanDescriptorsClass))
typedef struct _GskVulkanDescriptors GskVulkanDescriptors;
typedef struct _GskVulkanDescriptorsClass GskVulkanDescriptorsClass;
#define GSK_VULKAN_DESCRIPTORS(d) ((GskVulkanDescriptors *) (d))
#define GSK_VULKAN_DESCRIPTORS_CLASS(d) ((GskVulkanDescriptorsClass *) (d))
struct _GskVulkanDescriptors
{
GskGpuDescriptors parent_instance;
@@ -30,8 +27,6 @@ struct _GskVulkanDescriptorsClass
VkCommandBuffer vk_command_buffer);
};
- GType gsk_vulkan_descriptors_get_type (void) G_GNUC_CONST;
GskVulkanPipelineLayout * gsk_vulkan_descriptors_get_pipeline_layout (GskVulkanDescriptors *self);
void gsk_vulkan_descriptors_transition (GskVulkanDescriptors *self,
@@ -41,7 +36,8 @@ void gsk_vulkan_descriptors_bind
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer);
- G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskVulkanDescriptors, g_object_unref)
void gsk_vulkan_descriptors_init (GskVulkanDescriptors *self);
void gsk_vulkan_descriptors_finalize (GskVulkanDescriptors *self);
G_END_DECLS


@@ -13,10 +13,16 @@
#include "gdk/gdkdisplayprivate.h"
#include "gdk/gdkdmabuftextureprivate.h"
static inline void
gsk_vulkan_real_descriptors_unref (GskVulkanRealDescriptors *desc)
{
gsk_gpu_descriptors_unref (GSK_GPU_DESCRIPTORS (desc));
}
#define GDK_ARRAY_NAME gsk_descriptors
#define GDK_ARRAY_TYPE_NAME GskDescriptors
#define GDK_ARRAY_ELEMENT_TYPE GskVulkanRealDescriptors *
- #define GDK_ARRAY_FREE_FUNC g_object_unref
+ #define GDK_ARRAY_FREE_FUNC gsk_vulkan_real_descriptors_unref
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
@@ -262,7 +268,7 @@ gsk_vulkan_frame_create_descriptors (GskGpuFrame *frame)
desc = gsk_vulkan_real_descriptors_new (self);
gsk_descriptors_append (&self->descriptors, desc);
- return GSK_GPU_DESCRIPTORS (g_object_ref (desc));
+ return gsk_gpu_descriptors_ref (GSK_GPU_DESCRIPTORS (desc));
}
}


@@ -2,9 +2,7 @@
#include "gskvulkanrealdescriptorsprivate.h"
- #include "gskvulkanbufferprivate.h"
- #include "gskvulkanframeprivate.h"
- #include "gskvulkanimageprivate.h"
#define INCLUDE_IMPL 1
#define GDK_ARRAY_NAME gsk_descriptor_image_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorImageInfos
@@ -14,6 +12,8 @@
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define INCLUDE_IMPL 1
#define GDK_ARRAY_NAME gsk_descriptor_buffer_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorBufferInfos
#define GDK_ARRAY_ELEMENT_TYPE VkDescriptorBufferInfo
@@ -22,6 +22,8 @@
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define INCLUDE_IMPL 1
#define GDK_ARRAY_NAME gsk_samplers
#define GDK_ARRAY_TYPE_NAME GskSamplers
#define GDK_ARRAY_ELEMENT_TYPE VkSampler
@@ -29,24 +31,6 @@
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
- struct _GskVulkanRealDescriptors
- {
- GskVulkanDescriptors parent_instance;
- GskVulkanFrame *frame; /* no reference, the frame owns us */
- GskVulkanPipelineLayout *pipeline_layout;
- GskSamplers immutable_samplers;
- GskDescriptorImageInfos descriptor_immutable_images;
- GskDescriptorImageInfos descriptor_images;
- GskDescriptorBufferInfos descriptor_buffers;
- VkDescriptorSet descriptor_sets[GSK_VULKAN_N_DESCRIPTOR_SETS];
- };
- G_DEFINE_TYPE (GskVulkanRealDescriptors, gsk_vulkan_real_descriptors, GSK_TYPE_VULKAN_DESCRIPTORS)
static GskVulkanPipelineLayout *
gsk_vulkan_real_descriptors_get_pipeline_layout (GskVulkanDescriptors *desc)
{
@@ -154,9 +138,9 @@ gsk_vulkan_real_descriptors_add_buffer (GskGpuDescriptors *desc,
}
static void
- gsk_vulkan_real_descriptors_finalize (GObject *object)
+ gsk_vulkan_real_descriptors_finalize (GskGpuDescriptors *desc)
{
- GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (object);
+ GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (desc);
gsk_samplers_clear (&self->immutable_samplers);
gsk_descriptor_image_infos_clear (&self->descriptor_immutable_images);
@@ -166,28 +150,25 @@ gsk_vulkan_real_descriptors_finalize (GObject *object)
gsk_vulkan_device_release_pipeline_layout (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (GSK_GPU_FRAME (self->frame))),
self->pipeline_layout);
- G_OBJECT_CLASS (gsk_vulkan_real_descriptors_parent_class)->finalize (object);
+ gsk_vulkan_descriptors_finalize ((GskVulkanDescriptors *) desc);
}
- static void
- gsk_vulkan_real_descriptors_class_init (GskVulkanRealDescriptorsClass *klass)
static GskVulkanDescriptorsClass GSK_VULKAN_REAL_DESCRIPTORS_CLASS =
{
- GskVulkanDescriptorsClass *vulkan_descriptors_class = GSK_VULKAN_DESCRIPTORS_CLASS (klass);
- GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
- GObjectClass *object_class = G_OBJECT_CLASS (klass);
- object_class->finalize = gsk_vulkan_real_descriptors_finalize;
- descriptors_class->add_image = gsk_vulkan_real_descriptors_add_image;
- descriptors_class->add_buffer = gsk_vulkan_real_descriptors_add_buffer;
- vulkan_descriptors_class->get_pipeline_layout = gsk_vulkan_real_descriptors_get_pipeline_layout;
- vulkan_descriptors_class->bind = gsk_vulkan_real_descriptors_bind;
- }
.parent_class = (GskGpuDescriptorsClass) {
.finalize = gsk_vulkan_real_descriptors_finalize,
.add_image = gsk_vulkan_real_descriptors_add_image,
.add_buffer = gsk_vulkan_real_descriptors_add_buffer,
},
.get_pipeline_layout = gsk_vulkan_real_descriptors_get_pipeline_layout,
.bind = gsk_vulkan_real_descriptors_bind
};
static void
gsk_vulkan_real_descriptors_init (GskVulkanRealDescriptors *self)
{
gsk_vulkan_descriptors_init (GSK_VULKAN_DESCRIPTORS (self));
gsk_samplers_init (&self->immutable_samplers);
gsk_descriptor_image_infos_init (&self->descriptor_immutable_images);
gsk_descriptor_image_infos_init (&self->descriptor_images);
@@ -198,8 +179,15 @@ GskVulkanRealDescriptors *
gsk_vulkan_real_descriptors_new (GskVulkanFrame *frame)
{
GskVulkanRealDescriptors *self;
GskGpuDescriptors *desc;
- self = g_object_new (GSK_TYPE_VULKAN_REAL_DESCRIPTORS, NULL);
self = g_new0 (GskVulkanRealDescriptors, 1);
desc = GSK_GPU_DESCRIPTORS (self);
desc->ref_count = 1;
desc->desc_class = (GskGpuDescriptorsClass *) &GSK_VULKAN_REAL_DESCRIPTORS_CLASS;
gsk_vulkan_real_descriptors_init (self);
self->frame = frame;


@@ -3,11 +3,61 @@
#include "gskvulkandescriptorsprivate.h"
- #include "gskvulkanframeprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
#define INCLUDE_DECL 1
#define GDK_ARRAY_NAME gsk_descriptor_image_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorImageInfos
#define GDK_ARRAY_ELEMENT_TYPE VkDescriptorImageInfo
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 128
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define INCLUDE_DECL 1
#define GDK_ARRAY_NAME gsk_descriptor_buffer_infos
#define GDK_ARRAY_TYPE_NAME GskDescriptorBufferInfos
#define GDK_ARRAY_ELEMENT_TYPE VkDescriptorBufferInfo
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 32
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define INCLUDE_DECL 1
#define GDK_ARRAY_NAME gsk_samplers
#define GDK_ARRAY_TYPE_NAME GskSamplers
#define GDK_ARRAY_ELEMENT_TYPE VkSampler
#define GDK_ARRAY_PREALLOC 32
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
G_BEGIN_DECLS
- #define GSK_TYPE_VULKAN_REAL_DESCRIPTORS (gsk_vulkan_real_descriptors_get_type ())
typedef struct _GskVulkanRealDescriptors GskVulkanRealDescriptors;
- G_DECLARE_FINAL_TYPE (GskVulkanRealDescriptors, gsk_vulkan_real_descriptors, GSK, VULKAN_REAL_DESCRIPTORS, GskVulkanDescriptors)
#define GSK_VULKAN_REAL_DESCRIPTORS(d) ((GskVulkanRealDescriptors *) (d))
struct _GskVulkanRealDescriptors
{
GskVulkanDescriptors parent_instance;
GskVulkanFrame *frame; /* no reference, the frame owns us */
GskVulkanPipelineLayout *pipeline_layout;
GskSamplers immutable_samplers;
GskDescriptorImageInfos descriptor_immutable_images;
GskDescriptorImageInfos descriptor_images;
GskDescriptorBufferInfos descriptor_buffers;
VkDescriptorSet descriptor_sets[GSK_VULKAN_N_DESCRIPTOR_SETS];
};
GskVulkanRealDescriptors * gsk_vulkan_real_descriptors_new (GskVulkanFrame *frame);

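The INCLUDE_DECL/INCLUDE_IMPL defines come from the 'array: Split declaration and implementation' change: the same gdkarrayimpl.c template can now be expanded once for the type and prototypes, so that GskSamplers and friends can live by value inside GskVulkanRealDescriptors in this header, and once more for the function bodies in the .c file. Condensed to a single array, the pattern looks like this sketch (the GDK_ARRAY_PREALLOC and GDK_ARRAY_NO_MEMSET knobs from the real code are omitted for brevity):

    /* header: declarations only */
    #define INCLUDE_DECL 1
    #define GDK_ARRAY_NAME gsk_samplers
    #define GDK_ARRAY_TYPE_NAME GskSamplers
    #define GDK_ARRAY_ELEMENT_TYPE VkSampler
    #include "gdk/gdkarrayimpl.c"

    /* matching .c file: implementations only */
    #define INCLUDE_IMPL 1
    #define GDK_ARRAY_NAME gsk_samplers
    #define GDK_ARRAY_TYPE_NAME GskSamplers
    #define GDK_ARRAY_ELEMENT_TYPE VkSampler
    #include "gdk/gdkarrayimpl.c"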

@@ -2,15 +2,32 @@
#include "gskvulkansubdescriptorsprivate.h"
- struct _GskVulkanSubDescriptors
static void gsk_vulkan_sub_descriptors_finalize (GskGpuDescriptors *desc);
static gboolean gsk_vulkan_sub_descriptors_add_buffer (GskGpuDescriptors *desc,
GskGpuBuffer *buffer,
guint32 *out_descriptor);
static gboolean gsk_vulkan_sub_descriptors_add_image (GskGpuDescriptors *desc,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor);
static GskVulkanPipelineLayout *
gsk_vulkan_sub_descriptors_get_pipeline_layout (GskVulkanDescriptors *desc);
static void gsk_vulkan_sub_descriptors_bind (GskVulkanDescriptors *desc,
GskVulkanDescriptors *previous,
VkCommandBuffer vk_command_buffer);
static GskVulkanDescriptorsClass GSK_VULKAN_SUB_DESCRIPTORS_CLASS =
{
- GskVulkanDescriptors parent_instance;
- GskVulkanDescriptors *parent;
.parent_class = (GskGpuDescriptorsClass) {
.finalize = gsk_vulkan_sub_descriptors_finalize,
.add_image = gsk_vulkan_sub_descriptors_add_image,
.add_buffer = gsk_vulkan_sub_descriptors_add_buffer,
},
.get_pipeline_layout = gsk_vulkan_sub_descriptors_get_pipeline_layout,
.bind = gsk_vulkan_sub_descriptors_bind,
};
- G_DEFINE_TYPE (GskVulkanSubDescriptors, gsk_vulkan_sub_descriptors, GSK_TYPE_VULKAN_DESCRIPTORS)
static GskVulkanPipelineLayout *
gsk_vulkan_sub_descriptors_get_pipeline_layout (GskVulkanDescriptors *desc)
{
@@ -26,7 +43,7 @@ gsk_vulkan_sub_descriptors_bind (GskVulkanDescriptors *desc,
{
GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (desc);
- if (GSK_IS_VULKAN_SUB_DESCRIPTORS (previous))
+ if (GSK_GPU_DESCRIPTORS (previous)->desc_class == (GskGpuDescriptorsClass *) &GSK_VULKAN_SUB_DESCRIPTORS_CLASS)
previous = GSK_VULKAN_SUB_DESCRIPTORS (previous)->parent;
if (self->parent == previous)
@@ -62,45 +79,35 @@ gsk_vulkan_sub_descriptors_add_buffer (GskGpuDescriptors *desc,
}
static void
- gsk_vulkan_sub_descriptors_finalize (GObject *object)
+ gsk_vulkan_sub_descriptors_finalize (GskGpuDescriptors *desc)
{
- GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (object);
+ GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (desc);
- g_object_unref (self->parent);
+ gsk_gpu_descriptors_unref (GSK_GPU_DESCRIPTORS (self->parent));
- G_OBJECT_CLASS (gsk_vulkan_sub_descriptors_parent_class)->finalize (object);
- }
- static void
- gsk_vulkan_sub_descriptors_class_init (GskVulkanSubDescriptorsClass *klass)
- {
- GskVulkanDescriptorsClass *vulkan_descriptors_class = GSK_VULKAN_DESCRIPTORS_CLASS (klass);
- GskGpuDescriptorsClass *descriptors_class = GSK_GPU_DESCRIPTORS_CLASS (klass);
- GObjectClass *object_class = G_OBJECT_CLASS (klass);
- object_class->finalize = gsk_vulkan_sub_descriptors_finalize;
- descriptors_class->add_image = gsk_vulkan_sub_descriptors_add_image;
- descriptors_class->add_buffer = gsk_vulkan_sub_descriptors_add_buffer;
- vulkan_descriptors_class->get_pipeline_layout = gsk_vulkan_sub_descriptors_get_pipeline_layout;
- vulkan_descriptors_class->bind = gsk_vulkan_sub_descriptors_bind;
gsk_vulkan_descriptors_finalize (GSK_VULKAN_DESCRIPTORS (self));
}
static void
gsk_vulkan_sub_descriptors_init (GskVulkanSubDescriptors *self)
{
gsk_vulkan_descriptors_init (GSK_VULKAN_DESCRIPTORS (self));
}
GskVulkanSubDescriptors *
gsk_vulkan_sub_descriptors_new (GskVulkanDescriptors *parent)
{
GskVulkanSubDescriptors *self;
GskGpuDescriptors *desc;
- self = g_object_new (GSK_TYPE_VULKAN_SUB_DESCRIPTORS, NULL);
self = g_new0 (GskVulkanSubDescriptors, 1);
desc = GSK_GPU_DESCRIPTORS (self);
- self->parent = g_object_ref (parent);
desc->ref_count = 1;
desc->desc_class = (GskGpuDescriptorsClass *) &GSK_VULKAN_SUB_DESCRIPTORS_CLASS;
gsk_vulkan_sub_descriptors_init (self);
self->parent = GSK_VULKAN_DESCRIPTORS (gsk_gpu_descriptors_ref (GSK_GPU_DESCRIPTORS (parent)));
return self;
}


@@ -4,9 +4,16 @@
G_BEGIN_DECLS
- #define GSK_TYPE_VULKAN_SUB_DESCRIPTORS (gsk_vulkan_sub_descriptors_get_type ())
typedef struct _GskVulkanSubDescriptors GskVulkanSubDescriptors;
- G_DECLARE_FINAL_TYPE (GskVulkanSubDescriptors, gsk_vulkan_sub_descriptors, GSK, VULKAN_SUB_DESCRIPTORS, GskVulkanDescriptors)
#define GSK_VULKAN_SUB_DESCRIPTORS(d) ((GskVulkanSubDescriptors *) (d))
struct _GskVulkanSubDescriptors
{
GskVulkanDescriptors parent_instance;
GskVulkanDescriptors *parent;
};
GskVulkanSubDescriptors * gsk_vulkan_sub_descriptors_new (GskVulkanDescriptors *parent);


@@ -100,7 +100,6 @@ struct _GtkIMContextClass
GdkEvent *event);
/* Padding for future expansion */
void (*_gtk_reserved1) (void);
void (*_gtk_reserved2) (void);
void (*_gtk_reserved3) (void);
void (*_gtk_reserved4) (void);

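The GtkIMContextClass hunk above is the ABI fix referenced in the commit message (#6529): GTK class structs end in reserved function pointers precisely so that a new vfunc, here the one taking a GdkEvent for on-screen-keyboard activation, can occupy an existing slot without changing the struct size or the offsets of anything after it. Which slot is consumed is not visible in this excerpt; the general pattern, with made-up names, looks like:

    /* General pattern only; not the exact GTK change. */
    typedef struct _Foo Foo;

    struct _FooClass
    {
      GObjectClass parent_class;

      void (* existing_vfunc)  (Foo *self);
      /* takes over what used to be a padding slot, so sizeof (FooClass)
       * and all later offsets stay the same */
      gboolean (* new_vfunc)   (Foo *self, GdkEvent *event);

      /* Padding for future expansion */
      void (*_foo_reserved2) (void);
      void (*_foo_reserved3) (void);
      void (*_foo_reserved4) (void);
    };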

@@ -274,14 +274,14 @@ _gtk_elide_underscores (const char *original)
const char *p, *end;
gsize len;
gboolean last_underscore;
if (!original)
return NULL;
len = strlen (original);
q = result = g_malloc (len + 1);
last_underscore = FALSE;
end = original + len;
for (p = original; p < end; p++)
{
@@ -290,7 +290,7 @@ _gtk_elide_underscores (const char *original)
else
{
last_underscore = FALSE;
if (original + 2 <= p && p + 1 <= end &&
p[-2] == '(' && p[-1] == '_' && p[0] != '_' && p[1] == ')')
{
q--;
@@ -304,8 +304,8 @@ _gtk_elide_underscores (const char *original)
if (last_underscore)
*q++ = '_';
*q = '\0';
return result;
}

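For reference, _gtk_elide_underscores() strips mnemonic markup from a label: a single '_' is dropped (a doubled '__' collapses to one '_'), and a '(_X)' mnemonic of the kind used in CJK translations is removed together with its parentheses, which is what the p[-2] == '(' check above implements. Illustrative calls; the expected results are inferred from the code, not stated by the patch:

    char *a = _gtk_elide_underscores ("_Open");     /* "Open"    */
    char *b = _gtk_elide_underscores ("Open(_O)");  /* "Open"    */
    char *c = _gtk_elide_underscores ("Save__As");  /* "Save_As" */

    g_free (a);
    g_free (b);
    g_free (c);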

@@ -50,6 +50,9 @@ struct _GtkTextLineDisplayCache
#endif
};
static GQueue gc_in_idle;
static guint gc_in_idle_source;
#if DEBUG_LINE_DISPLAY_CACHE
# define STAT_ADD(val,n) ((val) += n)
# define STAT_INC(val) STAT_ADD(val,1)
@@ -72,6 +75,31 @@ dump_stats (gpointer data)
# define STAT_INC(val)
#endif
static gboolean
do_gc_in_idle (gpointer data)
{
GQueue q = gc_in_idle;
gc_in_idle.head = NULL;
gc_in_idle.tail = NULL;
gc_in_idle.length = 0;
while (q.head)
{
GtkTextLineDisplay *display = q.head->data;
g_queue_unlink (&q, &display->mru_link);
gtk_text_line_display_unref (display);
}
if (gc_in_idle.head == NULL)
{
gc_in_idle_source = 0;
return G_SOURCE_REMOVE;
}
return G_SOURCE_CONTINUE;
}
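/* Editorial note, not part of the patch: gtk_text_line_display_cache_invalidate_display()
 * below now parks evicted displays on the static gc_in_idle queue, and this
 * low-priority idle handler releases them, so the potentially expensive unref
 * work happens outside the invalidation path; the handler re-arms itself when
 * more displays arrive while it is draining the queue. */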
GtkTextLineDisplayCache *
gtk_text_line_display_cache_new (void)
{
@@ -249,8 +277,21 @@ gtk_text_line_display_cache_invalidate_display (GtkTextLineDisplayCache *cache,
g_hash_table_remove (cache->line_to_display, display->line);
g_queue_unlink (&cache->mru, &display->mru_link);
gtk_text_line_display_ref (display);
g_queue_push_head_link (&gc_in_idle, &display->mru_link);
if (iter != NULL)
g_sequence_remove (iter);
if G_UNLIKELY (gc_in_idle_source == 0)
{
GSource *source;
gc_in_idle_source = g_idle_add_full (G_PRIORITY_LOW + 1,
do_gc_in_idle, NULL, NULL);
source = g_main_context_find_source_by_id (NULL, gc_in_idle_source);
g_source_set_static_name (source, "[gtk+ line-display-cache-gc]");
}
}
STAT_INC (cache->inval);