common sw_engine: adds missing composite methods.

Now tvg user could combine Mask Alpha & Mask Method like this:

paint->composite(target, tvg::CompositeMethod::AddMask);

binary size diff: 201307 -> 223399 (+22kb)

@APIs:
- CompositeMethod::AddMask
- CompositeMethod::SubtractMask
- CompositeMethod::IntersectMask
- CompositeMethod::DifferenceMask

@Issue: https://github.com/thorvg/thorvg/issues/404
This commit is contained in:
Hermet Park 2023-05-30 18:51:23 +09:00 committed by Hermet Park
parent b45c8efd75
commit fddc409728
10 changed files with 1671 additions and 152 deletions

View file

@ -161,19 +161,34 @@ enum class FillRule
EvenOdd ///< A line from the point to a location outside the shape is drawn and its intersections with the path segments of the shape are counted. If the number of intersections is an odd number, the point is inside the shape.
};
/**
* @brief Enumeration indicating the method used in the composition of two objects - the target and the source.
*
* In the case of Mask composition, you need to perform bit operations on two options - Mask Alpha and Mask Operation.
* Mask Alpha specifies the origin of the alpha channel, while Mask Operation specifies the masking operation.
* @code paint->composite(tvg::CompositeMethod::AlphaMask + tvg::CompositeMethod::AddMask); @endcode
*
* @note If you don't specify the mask alpha, @c AlphaMask will be used.
* @note If you don't specify the mask method, @c AddMask will be used.
* @warning Composition does not support multiple choices for both Mask Alpha and Mask Operation.
* @see Paint::composite()
*/
enum class CompositeMethod
{
None = 0, ///< No composition is applied.
ClipPath, ///< The intersection of the source and the target is determined and only the resulting pixels from the source are rendered.
AlphaMask, ///< The pixels of the source and the target are alpha blended. As a result, only the part of the source, which alpha intersects with the target is visible.
InvAlphaMask, ///< The pixels of the source and the complement to the target's pixels are alpha blended. As a result, only the part of the source which alpha is not covered by the target is visible.
LumaMask, ///< The source pixels are converted to the grayscale (luma value) and alpha blended with the target. As a result, only the part of the source, which intersects with the target is visible. @since 0.9
InvLumaMask ///< The source pixels are converted to the grayscale (luma value) and the complement to the target's pixels are alpha blended. As a result, only the part of the source which grayscale is not covered by the target is visible. @BETA_API
AlphaMask, ///< Mask Alpha: Use the compositing target's pixels as an alpha value.
InvAlphaMask, ///< Mask Alpha: Use the complement to the compositing target's pixels as an alpha.
LumaMask, ///< Mask Alpha: Use the grayscale (0.2125R + 0.7154G + 0.0721*B) of the compositing target's pixels. @since 0.9
InvLumaMask, ///< Mask Alpha: Use the grayscale (0.2125R + 0.7154G + 0.0721*B) of the complement to the compositing target's pixels. @BETA_API
AddMask, ///< Mask Operation: Combines the source and target pixels using Mask Alpha. @BETA_API
SubtractMask, ///< Mask Operation: Subtracts the target color from the source color while considering their respective Mask Alpha. @BETA_API
IntersectMask, ///< Mask Operation: Computes the result by taking the minimum value between the Mask Alpha and the target alpha and multiplies it with the source color. @BETA_API
DifferenceMask ///< Mask Operation: Calculates the absolute difference between the source color and the target color multiplied by the complement of the Mask Alpha. @BETA_API
};
/**
* @brief Enumeration specifying the engine type used for the graphics backend. For multiple backends bitwise operation is allowed.
*/
@ -1636,6 +1651,7 @@ std::unique_ptr<T> cast(Paint* paint)
return std::unique_ptr<T>(static_cast<T*>(paint));
}
/**
* @brief The cast() function is a utility function used to cast a 'Fill' to type 'T'.
*
@ -1648,6 +1664,17 @@ std::unique_ptr<T> cast(Fill* fill)
}
/**
* @brief The operator+() function is the OR operator used to combine a Mask Alpha with a Mask Operation
*
* @BETA_API
*/
//Combines a Mask Alpha with a Mask Operation by OR-ing their underlying integer values.
//The result is handed back as a CompositeMethod so it can be passed to Paint::composite().
constexpr CompositeMethod operator+(CompositeMethod a, CompositeMethod b)
{
    return static_cast<CompositeMethod>(static_cast<int>(a) | static_cast<int>(b));
}
/** @}*/
} //namespace

View file

@ -248,7 +248,8 @@ struct SwBlender
SwAlpha alpha(CompositeMethod method)
{
return alphas[(int)(method) - 2]; //0: None, 1: ClipPath
auto idx = (int)(method) - 2; //0: None, 1: ClipPath
return alphas[idx > 3 ? 0 : idx]; //CompositeMethod has only four Matting methods.
}
};
@ -331,6 +332,26 @@ static inline uint32_t opBlend(uint32_t s, uint32_t d, TVG_UNUSED uint8_t a)
return s + ALPHA_BLEND(d, IALPHA(s));
}
//Subtract mask op: keep the destination attenuated by the inverse of the source alpha.
static inline uint32_t opSubMask(uint32_t s, uint32_t d, TVG_UNUSED uint8_t a)
{
    auto inverse = IALPHA(s);
    return ALPHA_BLEND(d, inverse);
}
//Difference mask op: each pixel is weighted by the complement of the other's alpha,
//so only the non-overlapping parts of source and destination survive.
static inline uint32_t opDifMask(uint32_t s, uint32_t d, TVG_UNUSED uint8_t a)
{
    auto srcPart = ALPHA_BLEND(s, IALPHA(d));
    auto dstPart = ALPHA_BLEND(d, IALPHA(s));
    return srcPart + dstPart;
}
//Intersect mask op: scale the destination by the source alpha, so only the
//region covered by both remains.
static inline uint32_t opIntMask(uint32_t s, uint32_t d, TVG_UNUSED uint8_t a)
{
    auto weight = ALPHA(s);
    return ALPHA_BLEND(d, weight);
}
static inline uint32_t opAddMask(uint32_t s, uint32_t d, TVG_UNUSED uint8_t a)
{
return opBlend(s, d, a);
}
static inline uint32_t opInterpolate(uint32_t s, uint32_t d, uint8_t a)
{
return INTERPOLATE(s, d, a);
@ -385,6 +406,7 @@ bool fillGenColorTable(SwFill* fill, const Fill* fdata, const Matrix* transform,
void fillReset(SwFill* fill);
void fillFree(SwFill* fill);
void fillLinear(const SwFill* fill, uint32_t* dst, uint32_t y, uint32_t x, uint32_t len, SwBlendOp op = nullptr, uint8_t a = 255); //blending ver.
void fillLinear(const SwFill* fill, uint32_t* dst, uint32_t y, uint32_t x, uint32_t len, SwBlendOp op, uint8_t a); //blending ver.
void fillLinear(const SwFill* fill, uint32_t* dst, uint32_t y, uint32_t x, uint32_t len, uint8_t* cmp, SwAlpha alpha, uint8_t csize, uint8_t opacity); //masking ver.
void fillRadial(const SwFill* fill, uint32_t* dst, uint32_t y, uint32_t x, uint32_t len, SwBlendOp op = nullptr, uint8_t a = 255); //blending ver.
void fillRadial(const SwFill* fill, uint32_t* dst, uint32_t y, uint32_t x, uint32_t len, uint8_t* cmp, SwAlpha alpha, uint8_t csize, uint8_t opacity); //masking ver.

View file

@ -37,6 +37,8 @@
/************************************************************************/
constexpr auto DOWN_SCALE_TOLERANCE = 0.5f;
static bool _rasterDirectRGBAImage(SwSurface* surface, const SwImage* image, const SwBBox& region, uint32_t opacity = 255);
static inline uint8_t ALPHA(uint8_t* a)
{
@ -95,6 +97,20 @@ static inline bool _compositing(const SwSurface* surface)
}
//True when the composite method is a matting one (Alpha/InvAlpha/Luma/InvLuma masks),
//i.e. any method ordered before the mask-operation entries starting at AddMask.
static inline bool _matting(const SwSurface* surface)
{
    return (int)surface->compositor->method < (int)CompositeMethod::AddMask;
}
//True when the composite method is a mask operation (AddMask and everything after it),
//which works directly on the compositor's mask buffer.
static inline bool _masking(const SwSurface* surface)
{
    return (int)surface->compositor->method >= (int)CompositeMethod::AddMask;
}
#include "tvgSwRasterTexmap.h"
#include "tvgSwRasterC.h"
#include "tvgSwRasterAvx.h"
@ -166,6 +182,79 @@ void _rasterGrayscale8(uint8_t *dst, uint32_t val, uint32_t offset, int32_t len)
/************************************************************************/
/* Renders a solid rectangle (r,g,b,a) into the compositor's 32-bit mask buffer using
   the selected mask operation (Add/Subtract/Intersect/Difference), then composites the
   updated mask image onto the target surface.
   Returns false when the surface is not 32-bit or the method is not a mask operation. */
static bool _rasterMaskedRect(SwSurface* surface, const SwBBox& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    //32bit channels composition
    if (surface->channelSize != sizeof(uint32_t)) return false;

    auto w = static_cast<uint32_t>(region.max.x - region.min.x);
    auto h = static_cast<uint32_t>(region.max.y - region.min.y);
    auto cbuffer = surface->compositor->image.buf32 + (region.min.y * surface->compositor->image.stride + region.min.x); //compositor buffer
    auto cstride = surface->compositor->image.stride;
    auto color = surface->blender.join(r, g, b, a);   //packed rect color (presumably premultiplied — confirm blender.join semantics)
    auto ialpha = 255 - a;                            //inverse of the rect's alpha
    auto method = surface->compositor->method;

    TVGLOG("SW_ENGINE", "Masked(%d) Rect [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);

    if (method == CompositeMethod::AddMask) {
        //mask' = rect + mask * (1 - rect.a): accumulate the rect over the existing mask
        for (uint32_t y = 0; y < h; ++y) {
            auto cmp = cbuffer;
            for (uint32_t x = 0; x < w; ++x, ++cmp) {
                *cmp = color + ALPHA_BLEND(*cmp, ialpha);
            }
            cbuffer += cstride;
        }
    } else if (method == CompositeMethod::SubtractMask) {
        //mask' = mask * (1 - rect.a): carve the rect's coverage out of the mask
        for (uint32_t y = 0; y < h; ++y) {
            auto cmp = cbuffer;
            for (uint32_t x = 0; x < w; ++x, ++cmp) {
                *cmp = ALPHA_BLEND(*cmp, ialpha);
            }
            cbuffer += cstride;
        }
    } else if (method == CompositeMethod::IntersectMask) {
        //Intersect must also clear mask pixels outside the rect, so it walks the whole
        //compositor bbox: pixels inside the rect are scaled by the rect's alpha, pixels
        //outside are zeroed.
        for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
            auto cmp = surface->compositor->image.buf32 + (y * cstride + surface->compositor->bbox.min.x);
            if (y == region.min.y) {
                //handle every row that overlaps the rect in this one pass
                for (uint32_t y2 = y; y2 < region.max.y; ++y2) {
                    auto tmp = cmp;
                    auto x = surface->compositor->bbox.min.x;
                    while (x < surface->compositor->bbox.max.x) {
                        if (x == region.min.x) {
                            //inside the rect: mask' = mask * rect.a
                            for (uint32_t i = 0; i < w; ++i, ++tmp) {
                                *tmp = ALPHA_BLEND(*tmp, a);
                            }
                            x += w;
                        } else {
                            //outside the rect: nothing intersects -> clear
                            *tmp = 0;
                            ++tmp;
                            ++x;
                        }
                    }
                    cmp += cstride;
                }
                y += (h - 1);   //skip the rows the inner y2 loop already processed
            } else {
                //row entirely outside the rect: clear it
                //NOTE(review): clears only w pixels — if the bbox row is wider than the
                //rect, trailing mask pixels may keep stale values; confirm intent.
                rasterRGBA32(cmp, 0x00000000, 0, w);
                cmp += cstride;
            }
        }
    } else if (method == CompositeMethod::DifferenceMask) {
        //mask' = rect * (1 - mask.a) + mask * (1 - rect.a): keep non-overlapping parts
        for (uint32_t y = 0; y < h; ++y) {
            auto cmp = cbuffer;
            for (uint32_t x = 0; x < w; ++x, ++cmp) {
                *cmp = ALPHA_BLEND(color, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, ialpha);
            }
            cbuffer += cstride;
        }
    } else return false;

    //Masking Composition: draw the finished mask image onto the target surface
    return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
static bool _rasterMattedRect(SwSurface* surface, const SwBBox& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
auto w = static_cast<uint32_t>(region.max.x - region.min.x);
auto h = static_cast<uint32_t>(region.max.y - region.min.y);
@ -173,7 +262,7 @@ static bool _rasterMaskedRect(SwSurface* surface, const SwBBox& region, uint8_t
auto cbuffer = surface->compositor->image.buf8 + ((region.min.y * surface->compositor->image.stride + region.min.x) * csize); //compositor buffer
auto alpha = surface->blender.alpha(surface->compositor->method);
TVGLOG("SW_ENGINE", "Masked Rect [Region: %lu %lu %u %u]", region.min.x, region.min.y, w, h);
TVGLOG("SW_ENGINE", "Matted(%d) Rect [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
//32bits channels
if (surface->channelSize == sizeof(uint32_t)) {
@ -181,7 +270,7 @@ static bool _rasterMaskedRect(SwSurface* surface, const SwBBox& region, uint8_t
auto buffer = surface->buf32 + (region.min.y * surface->stride) + region.min.x;
for (uint32_t y = 0; y < h; ++y) {
auto dst = &buffer[y * surface->stride];
auto cmp = &cbuffer[y * surface->stride * csize];
auto cmp = &cbuffer[y * surface->compositor->image.stride * csize];
for (uint32_t x = 0; x < w; ++x, ++dst, cmp += csize) {
*dst = INTERPOLATE(color, *dst, alpha(cmp));
}
@ -191,7 +280,7 @@ static bool _rasterMaskedRect(SwSurface* surface, const SwBBox& region, uint8_t
auto buffer = surface->buf8 + (region.min.y * surface->stride) + region.min.x;
for (uint32_t y = 0; y < h; ++y) {
auto dst = &buffer[y * surface->stride];
auto cmp = &cbuffer[y * surface->stride * csize];
auto cmp = &cbuffer[y * surface->compositor->image.stride * csize];
for (uint32_t x = 0; x < w; ++x, ++dst, cmp += csize) {
*dst = INTERPOLATE8(a, *dst, alpha(cmp));
}
@ -213,21 +302,25 @@ static bool _rasterSolidRect(SwSurface* surface, const SwBBox& region, uint8_t r
for (uint32_t y = 0; y < h; ++y) {
rasterRGBA32(buffer + y * surface->stride, color, region.min.x, w);
}
return true;
//8bits grayscale
} else if (surface->channelSize == sizeof(uint8_t)) {
}
if (surface->channelSize == sizeof(uint8_t)) {
auto buffer = surface->buf8 + (region.min.y * surface->stride);
for (uint32_t y = 0; y < h; ++y) {
_rasterGrayscale8(buffer + y * surface->stride, 255, region.min.x, w);
}
}
return true;
}
return false;
}
static bool _rasterRect(SwSurface* surface, const SwBBox& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
if (_compositing(surface)) {
return _rasterMaskedRect(surface, region, r, g, b, a);
if (_matting(surface)) return _rasterMattedRect(surface, region, r, g, b, a);
else return _rasterMaskedRect(surface, region, r, g, b, a);
} else {
if (a == 255) {
return _rasterSolidRect(surface, region, r, g, b);
@ -251,7 +344,78 @@ static bool _rasterRect(SwSurface* surface, const SwBBox& region, uint8_t r, uin
static bool _rasterMaskedRle(SwSurface* surface, SwRleData* rle, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
TVGLOG("SW_ENGINE", "Masked Rle");
TVGLOG("SW_ENGINE", "Masked(%d) Rle", (int)surface->compositor->method);
//32bit channels composition
if (surface->channelSize != sizeof(uint32_t)) return false;
auto span = rle->spans;
auto cbuffer = surface->compositor->image.buf32;
auto cstride = surface->compositor->image.stride;
auto color = surface->blender.join(r, g, b, a);
auto method = surface->compositor->method;
uint32_t src;
if (method == CompositeMethod::AddMask) {
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto cmp = &cbuffer[span->y * cstride + span->x];
if (span->coverage == 255) src = color;
else src = ALPHA_BLEND(color, span->coverage);
auto ialpha = IALPHA(src);
for (auto x = 0; x < span->len; ++x, ++cmp) {
*cmp = src + ALPHA_BLEND(*cmp, ialpha);
}
}
} else if (method == CompositeMethod::SubtractMask) {
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto cmp = &cbuffer[span->y * cstride + span->x];
if (span->coverage == 255) src = color;
else src = ALPHA_BLEND(color, span->coverage);
auto ialpha = IALPHA(src);
for (auto x = 0; x < span->len; ++x, ++cmp) {
*cmp = ALPHA_BLEND(*cmp, ialpha);
}
}
} else if (method == CompositeMethod::IntersectMask) {
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
auto cmp = &cbuffer[y * cstride];
uint32_t x = surface->compositor->bbox.min.x;
while (x < surface->compositor->bbox.max.x) {
if (y == span->y && x == span->x && x + span->len <= surface->compositor->bbox.max.x) {
if (span->coverage == 255) src = color;
else src = ALPHA_BLEND(color, span->coverage);
auto alpha = ALPHA(src);
for (uint32_t i = 0; i < span->len; ++i) {
cmp[x + i] = ALPHA_BLEND(cmp[x + i], alpha);
}
x += span->len;
++span;
} else {
cmp[x] = 0;
++x;
}
}
}
} else if (method == CompositeMethod::DifferenceMask) {
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto cmp = &cbuffer[span->y * cstride + span->x];
if (span->coverage == 255) src = color;
else src = ALPHA_BLEND(color, span->coverage);
auto ialpha = IALPHA(src);
for (uint32_t x = 0; x < span->len; ++x, ++cmp) {
*cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, ialpha);
}
}
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
static bool _rasterMattedRle(SwSurface* surface, SwRleData* rle, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
TVGLOG("SW_ENGINE", "Matted(%d) Rle", (int)surface->compositor->method);
auto span = rle->spans;
auto cbuffer = surface->compositor->image.buf8;
@ -271,8 +435,10 @@ static bool _rasterMaskedRle(SwSurface* surface, SwRleData* rle, uint8_t r, uint
*dst = INTERPOLATE(src, *dst, alpha(cmp));
}
}
return true;
}
//8bit grayscale
} else if (surface->channelSize == sizeof(uint8_t)) {
if (surface->channelSize == sizeof(uint8_t)) {
uint8_t src;
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto dst = &surface->buf8[span->y * surface->stride + span->x];
@ -283,9 +449,10 @@ static bool _rasterMaskedRle(SwSurface* surface, SwRleData* rle, uint8_t r, uint
*dst = INTERPOLATE8(src, *dst, alpha(cmp));
}
}
}
return true;
}
return false;
}
static bool _rasterSolidRle(SwSurface* surface, const SwRleData* rle, uint8_t r, uint8_t g, uint8_t b)
@ -329,7 +496,8 @@ static bool _rasterRle(SwSurface* surface, SwRleData* rle, uint8_t r, uint8_t g,
if (!rle) return false;
if (_compositing(surface)) {
return _rasterMaskedRle(surface, rle, r, g, b, a);
if (_matting(surface)) return _rasterMattedRle(surface, rle, r, g, b, a);
else return _rasterMaskedRle(surface, rle, r, g, b, a);
} else {
if (a == 255) {
return _rasterSolidRle(surface, rle, r, g, b);
@ -353,7 +521,15 @@ static bool _rasterRle(SwSurface* surface, SwRleData* rle, uint8_t r, uint8_t g,
static bool _transformedRleRGBAImage(SwSurface* surface, const SwImage* image, const Matrix* transform, uint32_t opacity)
{
return _rasterTexmapPolygon(surface, image, transform, nullptr, opacity);
auto ret = _rasterTexmapPolygon(surface, image, transform, nullptr, opacity);
//Masking Composition
if (_compositing(surface) && _masking(surface)) {
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
return ret;
}
@ -363,7 +539,37 @@ static bool _transformedRleRGBAImage(SwSurface* surface, const SwImage* image, c
static bool _rasterScaledMaskedRleRGBAImage(SwSurface* surface, const SwImage* image, const Matrix* itransform, const SwBBox& region, uint32_t opacity, uint32_t halfScale)
{
TVGLOG("SW_ENGINE", "Scaled Masked Translucent Rle Image");
TVGLOG("SW_ENGINE", "Scaled Masked(%d) Rle Image", (int)surface->compositor->method);
auto span = image->rle->spans;
auto method = surface->compositor->method;
if (method == CompositeMethod::AddMask) {
#define SCALED_RLE_IMAGE_ADD_MASK
#include "tvgSwRasterScaledMaskedRleImage.h"
#undef SCALED_RLE_IMAGE_ADD_MASK
} else if (method == CompositeMethod::SubtractMask) {
#define SCALED_RLE_IMAGE_SUB_MASK
#include "tvgSwRasterScaledMaskedRleImage.h"
#undef SCALED_RLE_IMAGE_SUB_MASK
} else if (method == CompositeMethod::IntersectMask) {
#define SCALED_RLE_IMAGE_INT_MASK
#include "tvgSwRasterScaledMaskedRleImage.h"
#undef SCALED_RLE_IMAGE_INT_MASK
} else if (method == CompositeMethod::DifferenceMask) {
#define SCALED_RLE_IMAGE_DIF_MASK
#include "tvgSwRasterScaledMaskedRleImage.h"
#undef SCALED_RLE_IMAGE_DIF_MASK
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
static bool _rasterScaledMattedRleRGBAImage(SwSurface* surface, const SwImage* image, const Matrix* itransform, const SwBBox& region, uint32_t opacity, uint32_t halfScale)
{
TVGLOG("SW_ENGINE", "Scaled Matted(%d) Rle Image", (int)surface->compositor->method);
auto span = image->rle->spans;
auto csize = surface->compositor->image.channelSize;
@ -490,7 +696,8 @@ static bool _scaledRleRGBAImage(SwSurface* surface, const SwImage* image, const
auto halfScale = _halfScale(image->scale);
if (_compositing(surface)) {
return _rasterScaledMaskedRleRGBAImage(surface, image, &itransform, region, opacity, halfScale);
if (_matting(surface)) _rasterScaledMattedRleRGBAImage(surface, image, &itransform, region, opacity, halfScale);
else _rasterScaledMaskedRleRGBAImage(surface, image, &itransform, region, opacity, halfScale);
} else {
return _rasterScaledRleRGBAImage(surface, image, &itransform, region, opacity, halfScale);
}
@ -504,7 +711,96 @@ static bool _scaledRleRGBAImage(SwSurface* surface, const SwImage* image, const
static bool _rasterDirectMaskedRleRGBAImage(SwSurface* surface, const SwImage* image, uint32_t opacity)
{
TVGLOG("SW_ENGINE", "Direct Masked Rle Image");
TVGLOG("SW_ENGINE", "Direct Masked(%d) Rle Image", (int)surface->compositor->method);
auto span = image->rle->spans;
auto cbuffer = surface->compositor->image.buf32;
auto ctride = surface->compositor->image.stride;
auto method = surface->compositor->method;
if (method == CompositeMethod::AddMask) {
for (uint32_t i = 0; i < image->rle->size; ++i, ++span) {
auto src = image->buf32 + (span->y + image->oy) * image->stride + (span->x + image->ox);
auto cmp = &cbuffer[span->y * ctride + span->x];
auto alpha = MULTIPLY(span->coverage, opacity);
if (alpha == 255) {
for (uint32_t x = 0; x < span->len; ++x, ++src, ++cmp) {
*cmp = *src + ALPHA_BLEND(*cmp, IALPHA(*src));
}
} else {
for (uint32_t x = 0; x < span->len; ++x, ++src, ++cmp) {
*cmp = INTERPOLATE(*src, *cmp, alpha);
}
}
}
} else if (method == CompositeMethod::SubtractMask) {
for (uint32_t i = 0; i < image->rle->size; ++i, ++span) {
auto src = image->buf32 + (span->y + image->oy) * image->stride + (span->x + image->ox);
auto cmp = &cbuffer[span->y * ctride + span->x];
auto alpha = MULTIPLY(span->coverage, opacity);
if (alpha == 255) {
for (uint32_t x = 0; x < span->len; ++x, ++src, ++cmp) {
*cmp = ALPHA_BLEND(*cmp, IALPHA(*src));
}
} else {
for (uint32_t x = 0; x < span->len; ++x, ++src, ++cmp) {
auto t = ALPHA_BLEND(*src, alpha);
*cmp = ALPHA_BLEND(*cmp, IALPHA(t));
}
}
}
} else if (method == CompositeMethod::IntersectMask) {
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
auto cmp = &cbuffer[y * ctride];
auto x = surface->compositor->bbox.min.x;
while (x < surface->compositor->bbox.max.x) {
if (y == span->y && x == span->x && x + span->len <= surface->compositor->bbox.max.x) {
auto alpha = MULTIPLY(span->coverage, opacity);
auto src = image->buf32 + (span->y + image->oy) * image->stride + (span->x + image->ox);
if (alpha == 255) {
for (uint32_t i = 0; i < span->len; ++i, ++src) {
cmp[x + i] = ALPHA_BLEND(cmp[x + i], ALPHA(*src));
}
} else {
for (uint32_t i = 0; i < span->len; ++i, ++src) {
auto t = ALPHA_BLEND(*src, alpha);
cmp[x + i] = ALPHA_BLEND(cmp[x + i], ALPHA(t));
}
}
x += span->len;
++span;
} else {
cmp[x] = 0;
++x;
}
}
}
} else if (method == CompositeMethod::DifferenceMask) {
for (uint32_t i = 0; i < image->rle->size; ++i, ++span) {
auto src = image->buf32 + (span->y + image->oy) * image->stride + (span->x + image->ox);
auto cmp = &cbuffer[span->y * ctride + span->x];
auto alpha = MULTIPLY(span->coverage, opacity);
if (alpha == 255) {
for (uint32_t x = 0; x < span->len; ++x, ++src, ++cmp) {
*cmp = ALPHA_BLEND(*src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(*src));
}
} else {
for (uint32_t x = 0; x < span->len; ++x, ++src, ++cmp) {
auto t = ALPHA_BLEND(*src, alpha);
*cmp = ALPHA_BLEND(t, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(t));
}
}
}
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
static bool _rasterDirectMattedRleRGBAImage(SwSurface* surface, const SwImage* image, uint32_t opacity)
{
TVGLOG("SW_ENGINE", "Direct Matted(%d) Rle Image", (int)surface->compositor->method);
auto span = image->rle->spans;
auto csize = surface->compositor->image.channelSize;
@ -556,7 +852,8 @@ static bool _rasterDirectRleRGBAImage(SwSurface* surface, const SwImage* image,
static bool _directRleRGBAImage(SwSurface* surface, const SwImage* image, uint32_t opacity)
{
if (_compositing(surface)) {
return _rasterDirectMaskedRleRGBAImage(surface, image, opacity);
if (_matting(surface)) return _rasterDirectMattedRleRGBAImage(surface, image, opacity);
else return _rasterDirectMaskedRleRGBAImage(surface, image, opacity);
} else {
return _rasterDirectRleRGBAImage(surface, image, opacity);
}
@ -570,11 +867,20 @@ static bool _directRleRGBAImage(SwSurface* surface, const SwImage* image, uint32
static bool _transformedRGBAImage(SwSurface* surface, const SwImage* image, const Matrix* transform, const SwBBox& region, uint32_t opacity)
{
return _rasterTexmapPolygon(surface, image, transform, &region, opacity);
auto ret = _rasterTexmapPolygon(surface, image, transform, &region, opacity);
//Masking Composition
if (_compositing(surface) && _masking(surface)) {
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
return ret;
}
//Renders the image as a set of textured triangles defined by the mesh, under the given transform.
//NOTE(review): unlike _transformedRGBAImage, no masking composition pass follows the texmap call
//here — confirm whether mesh + mask-operation combinations are intentionally unsupported.
static bool _transformedRGBAImageMesh(SwSurface* surface, const SwImage* image, const RenderMesh* mesh, const Matrix* transform, const SwBBox* region, uint32_t opacity)
{
    //TODO: Not completed for all cases.
    return _rasterTexmapPolygonMesh(surface, image, mesh, transform, region, opacity);
}
@ -585,13 +891,45 @@ static bool _transformedRGBAImageMesh(SwSurface* surface, const SwImage* image,
static bool _rasterScaledMaskedRGBAImage(SwSurface* surface, const SwImage* image, const Matrix* itransform, const SwBBox& region, uint32_t opacity, uint32_t halfScale)
{
TVGLOG("SW_ENGINE", "Scaled Masked Image");
auto h = static_cast<uint32_t>(region.max.y - region.min.y);
auto w = static_cast<uint32_t>(region.max.x - region.min.x);
auto cstride = surface->compositor->image.stride;
auto method = surface->compositor->method;
TVGLOG("SW_ENGINE", "Scaled Masked(%d) Image [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
if (method == CompositeMethod::AddMask) {
#define SCALED_IMAGE_ADD_MASK
#include "tvgSwRasterScaledMaskedImage.h"
#undef SCALED_IMAGE_ADD_MASK
} else if (method == CompositeMethod::SubtractMask) {
#define SCALED_IMAGE_SUB_MASK
#include "tvgSwRasterScaledMaskedImage.h"
#undef SCALED_IMAGE_SUB_MASK
} else if (method == CompositeMethod::IntersectMask) {
#define SCALED_IMAGE_INT_MASK
#include "tvgSwRasterScaledMaskedImage.h"
#undef SCALED_IMAGE_INT_MASK
} else if (method == CompositeMethod::DifferenceMask) {
#define SCALED_IMAGE_DIF_MASK
#include "tvgSwRasterScaledMaskedImage.h"
#undef SCALED_IMAGE_DIF_MASK
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
static bool _rasterScaledMattedRGBAImage(SwSurface* surface, const SwImage* image, const Matrix* itransform, const SwBBox& region, uint32_t opacity, uint32_t halfScale)
{
auto dbuffer = surface->buf32 + (region.min.y * surface->stride + region.min.x);
auto csize = surface->compositor->image.channelSize;
auto cbuffer = surface->compositor->image.buf8 + (region.min.y * surface->compositor->image.stride + region.min.x) * csize;
auto alpha = surface->blender.alpha(surface->compositor->method);
TVGLOG("SW_ENGINE", "Scaled Matted(%d) Image [Region: %lu %lu %lu %lu]", (int)surface->compositor->method, region.min.x, region.min.y, region.max.x - region.min.x, region.max.y - region.min.y);
// Down-Scaled
if (image->scale < DOWN_SCALE_TOLERANCE) {
for (auto y = region.min.y; y < region.max.y; ++y) {
@ -715,7 +1053,8 @@ static bool _scaledRGBAImage(SwSurface* surface, const SwImage* image, const Mat
auto halfScale = _halfScale(image->scale);
if (_compositing(surface)) {
return _rasterScaledMaskedRGBAImage(surface, image, &itransform, region, opacity, halfScale);
if (_matting(surface)) return _rasterScaledMattedRGBAImage(surface, image, &itransform, region, opacity, halfScale);
else return _rasterScaledMaskedRGBAImage(surface, image, &itransform, region, opacity, halfScale);
} else {
return _rasterScaledRGBAImage(surface, image, &itransform, region, opacity, halfScale);
}
@ -729,28 +1068,136 @@ static bool _scaledRGBAImage(SwSurface* surface, const SwImage* image, const Mat
static bool _rasterDirectMaskedRGBAImage(SwSurface* surface, const SwImage* image, const SwBBox& region, uint32_t opacity)
{
TVGLOG("SW_ENGINE", "Direct Masked Image");
auto h = static_cast<uint32_t>(region.max.y - region.min.y);
auto w = static_cast<uint32_t>(region.max.x - region.min.x);
auto cstride = surface->compositor->image.stride;
auto method = surface->compositor->method;
TVGLOG("SW_ENGINE", "Direct Masked(%d) Image [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
if (method == CompositeMethod::AddMask) {
auto cbuffer = surface->compositor->image.buf32 + (region.min.y * cstride + region.min.x); //compositor buffer
auto sbuffer = image->buf32 + (region.min.y + image->oy) * image->stride + (region.min.x + image->ox);
for (uint32_t y = 0; y < h; ++y) {
auto cmp = cbuffer;
auto src = sbuffer;
if (opacity == 255) {
for (uint32_t x = 0; x < w; ++x, ++src, ++cmp) {
*cmp = *src + ALPHA_BLEND(*cmp, IALPHA(*src));
}
} else {
for (uint32_t x = 0; x < w; ++x, ++src, ++cmp) {
*cmp = INTERPOLATE(*src, *cmp, opacity);
}
}
cbuffer += cstride;
sbuffer += image->stride;
}
} else if (method == CompositeMethod::SubtractMask) {
auto cbuffer = surface->compositor->image.buf32 + (region.min.y * cstride + region.min.x); //compositor buffer
auto sbuffer = image->buf32 + (region.min.y + image->oy) * image->stride + (region.min.x + image->ox);
for (uint32_t y = 0; y < h; ++y) {
auto cmp = cbuffer;
auto src = sbuffer;
if (opacity == 255) {
for (uint32_t x = 0; x < w; ++x, ++src, ++cmp) {
*cmp = ALPHA_BLEND(*cmp, IALPHA(*src));
}
} else {
for (uint32_t x = 0; x < w; ++x, ++src, ++cmp) {
auto t = ALPHA_BLEND(*src, opacity);
*cmp = ALPHA_BLEND(*cmp, IALPHA(t));
}
}
cbuffer += cstride;
sbuffer += image->stride;
}
} else if (method == CompositeMethod::IntersectMask) {
auto cbuffer = surface->compositor->image.buf32 + (surface->compositor->bbox.min.y * cstride + surface->compositor->bbox.min.x);
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
if (y == region.min.y) {
auto cbuffer2 = cbuffer;
for (uint32_t y2 = y; y2 < region.max.y; ++y2) {
auto tmp = cbuffer2;
auto x = surface->compositor->bbox.min.x;
while (x < surface->compositor->bbox.max.x) {
if (x == region.min.x) {
auto src = &image->buf32[(y2 + image->oy) * image->stride + (x + image->ox)];
if (opacity == 255) {
for (uint32_t i = 0; i < w; ++i, ++tmp, ++src) {
*tmp = ALPHA_BLEND(*tmp, ALPHA(*src));
}
} else {
for (uint32_t i = 0; i < w; ++i, ++tmp, ++src) {
auto t = ALPHA_BLEND(*src, opacity);
*tmp = ALPHA_BLEND(*tmp, ALPHA(t));
}
}
x += w;
} else {
*tmp = 0;
++tmp;
++x;
}
}
cbuffer2 += cstride;
}
y += (h - 1);
} else {
rasterRGBA32(cbuffer, 0x00000000, 0, surface->compositor->bbox.max.x - surface->compositor->bbox.min.x);
}
cbuffer += cstride;
}
} else if (method == CompositeMethod::DifferenceMask) {
auto cbuffer = surface->compositor->image.buf32 + (region.min.y * cstride + region.min.x); //compositor buffer
auto sbuffer = image->buf32 + (region.min.y + image->oy) * image->stride + (region.min.x + image->ox);
for (uint32_t y = 0; y < h; ++y) {
auto cmp = cbuffer;
auto src = sbuffer;
if (opacity == 255) {
for (uint32_t x = 0; x < w; ++x, ++src, ++cmp) {
*cmp = ALPHA_BLEND(*src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(*src));
}
} else {
for (uint32_t x = 0; x < w; ++x, ++src, ++cmp) {
auto t = ALPHA_BLEND(*src, opacity);
*cmp = ALPHA_BLEND(t, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(t));
}
}
cbuffer += cstride;
sbuffer += image->stride;
}
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox);
}
static bool _rasterDirectMattedRGBAImage(SwSurface* surface, const SwImage* image, const SwBBox& region, uint32_t opacity)
{
auto buffer = surface->buf32 + (region.min.y * surface->stride) + region.min.x;
auto h2 = static_cast<uint32_t>(region.max.y - region.min.y);
auto w2 = static_cast<uint32_t>(region.max.x - region.min.x);
auto h = static_cast<uint32_t>(region.max.y - region.min.y);
auto w = static_cast<uint32_t>(region.max.x - region.min.x);
auto csize = surface->compositor->image.channelSize;
auto alpha = surface->blender.alpha(surface->compositor->method);
TVGLOG("SW_ENGINE", "Direct Matted(%d) Image [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
auto sbuffer = image->buf32 + (region.min.y + image->oy) * image->stride + (region.min.x + image->ox);
auto cbuffer = surface->compositor->image.buf8 + (region.min.y * surface->compositor->image.stride + region.min.x) * csize; //compositor buffer
for (uint32_t y = 0; y < h2; ++y) {
for (uint32_t y = 0; y < h; ++y) {
auto dst = buffer;
auto cmp = cbuffer;
auto src = sbuffer;
if (opacity == 255) {
for (uint32_t x = 0; x < w2; ++x, ++dst, ++src, cmp += csize) {
for (uint32_t x = 0; x < w; ++x, ++dst, ++src, cmp += csize) {
auto tmp = ALPHA_BLEND(*src, alpha(cmp));
*dst = tmp + ALPHA_BLEND(*dst, IALPHA(tmp));
}
} else {
for (uint32_t x = 0; x < w2; ++x, ++dst, ++src, cmp += csize) {
for (uint32_t x = 0; x < w; ++x, ++dst, ++src, cmp += csize) {
auto tmp = ALPHA_BLEND(*src, MULTIPLY(opacity, alpha(cmp)));
*dst = tmp + ALPHA_BLEND(*dst, IALPHA(tmp));
}
@ -792,7 +1239,8 @@ static bool _rasterDirectRGBAImage(SwSurface* surface, const SwImage* image, con
static bool _directRGBAImage(SwSurface* surface, const SwImage* image, const SwBBox& region, uint32_t opacity)
{
if (_compositing(surface)) {
return _rasterDirectMaskedRGBAImage(surface, image, region, opacity);
if (_matting(surface)) return _rasterDirectMattedRGBAImage(surface, image, region, opacity);
else return _rasterDirectMaskedRGBAImage(surface, image, region, opacity);
} else {
return _rasterDirectRGBAImage(surface, image, region, opacity);
}
@ -825,6 +1273,66 @@ static bool _rasterLinearGradientMaskedRect(SwSurface* surface, const SwBBox& re
{
if (fill->linear.len < FLT_EPSILON) return false;
auto h = static_cast<uint32_t>(region.max.y - region.min.y);
auto w = static_cast<uint32_t>(region.max.x - region.min.x);
auto cstride = surface->compositor->image.stride;
auto cbuffer = surface->compositor->image.buf32 + (region.min.y * cstride + region.min.x);
auto method = surface->compositor->method;
TVGLOG("SW_ENGINE", "Masked(%d) Linear Gradient [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
if (method == CompositeMethod::AddMask) {
for (uint32_t y = 0; y < h; ++y) {
fillLinear(fill, cbuffer, region.min.y + y, region.min.x, w, opAddMask, 255);
cbuffer += surface->stride;
}
} else if (method == CompositeMethod::SubtractMask) {
for (uint32_t y = 0; y < h; ++y) {
fillLinear(fill, cbuffer, region.min.y + y, region.min.x, w, opSubMask, 255);
cbuffer += surface->stride;
}
} else if (method == CompositeMethod::IntersectMask) {
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
auto cmp = surface->compositor->image.buf32 + (y * cstride + surface->compositor->bbox.min.x);
if (y == region.min.y) {
for (uint32_t y2 = y; y2 < region.max.y; ++y2) {
auto tmp = cmp;
auto x = surface->compositor->bbox.min.x;
while (x < surface->compositor->bbox.max.x) {
if (x == region.min.x) {
fillLinear(fill, tmp, y2, x, w, opIntMask, 255);
x += w;
tmp += w;
} else {
*tmp = 0;
++tmp;
++x;
}
}
cmp += cstride;
}
y += (h - 1);
} else {
rasterRGBA32(cmp, 0x00000000, 0, surface->compositor->bbox.max.x -surface->compositor->bbox.min.x);
cmp += cstride;
}
}
} else if (method == CompositeMethod::DifferenceMask) {
for (uint32_t y = 0; y < h; ++y) {
fillLinear(fill, cbuffer, region.min.y + y, region.min.x, w, opDifMask, 255);
cbuffer += surface->stride;
}
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox, 255);
}
static bool _rasterLinearGradientMattedRect(SwSurface* surface, const SwBBox& region, const SwFill* fill)
{
if (fill->linear.len < FLT_EPSILON) return false;
auto buffer = surface->buf32 + (region.min.y * surface->stride) + region.min.x;
auto h = static_cast<uint32_t>(region.max.y - region.min.y);
auto w = static_cast<uint32_t>(region.max.x - region.min.x);
@ -832,6 +1340,8 @@ static bool _rasterLinearGradientMaskedRect(SwSurface* surface, const SwBBox& re
auto cbuffer = surface->compositor->image.buf8 + (region.min.y * surface->compositor->image.stride + region.min.x) * csize;
auto alpha = surface->blender.alpha(surface->compositor->method);
TVGLOG("SW_ENGINE", "Matted(%d) Linear Gradient [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
for (uint32_t y = 0; y < h; ++y) {
fillLinear(fill, buffer, region.min.y + y, region.min.x, w, cbuffer, alpha, csize, 255);
buffer += surface->stride;
@ -875,7 +1385,8 @@ static bool _rasterSolidLinearGradientRect(SwSurface* surface, const SwBBox& reg
static bool _rasterLinearGradientRect(SwSurface* surface, const SwBBox& region, const SwFill* fill)
{
if (_compositing(surface)) {
return _rasterLinearGradientMaskedRect(surface, region, fill);
if (_matting(surface)) return _rasterLinearGradientMattedRect(surface, region, fill);
else return _rasterLinearGradientMaskedRect(surface, region, fill);
} else {
if (fill->translucent) return _rasterTranslucentLinearGradientRect(surface, region, fill);
else _rasterSolidLinearGradientRect(surface, region, fill);
@ -892,6 +1403,56 @@ static bool _rasterLinearGradientMaskedRle(SwSurface* surface, const SwRleData*
{
if (fill->linear.len < FLT_EPSILON) return false;
TVGLOG("SW_ENGINE", "Masked(%d) Rle Linear Gradient", (int)surface->compositor->method);
auto span = rle->spans;
auto cstride = surface->compositor->image.stride;
auto cbuffer = surface->compositor->image.buf32;
auto method = surface->compositor->method;
if (method == CompositeMethod::AddMask) {
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto cmp = &cbuffer[span->y * cstride + span->x];
fillLinear(fill, cmp, span->y, span->x, span->len, opAddMask, span->coverage);
}
} else if (method == CompositeMethod::SubtractMask) {
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto cmp = &cbuffer[span->y * cstride + span->x];
fillLinear(fill, cmp, span->y, span->x, span->len, opSubMask, span->coverage);
}
} else if (method == CompositeMethod::IntersectMask) {
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
auto cmp = &cbuffer[y * cstride];
uint32_t x = surface->compositor->bbox.min.x;
while (x < surface->compositor->bbox.max.x) {
if (y == span->y && x == span->x && x + span->len <= surface->compositor->bbox.max.x) {
fillLinear(fill, cmp, span->y, span->x, span->len, opIntMask, span->coverage);
x += span->len;
++span;
} else {
cmp[x] = 0;
++x;
}
}
}
} else if (method == CompositeMethod::DifferenceMask) {
for (uint32_t i = 0; i < rle->size; ++i, ++span) {
auto cmp = &cbuffer[span->y * cstride + span->x];
fillLinear(fill, cmp, span->y, span->x, span->len, opDifMask, span->coverage);
}
} else return false;
//Masking Composition
return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox, 255);
}
static bool _rasterLinearGradientMattedRle(SwSurface* surface, const SwRleData* rle, const SwFill* fill)
{
if (fill->linear.len < FLT_EPSILON) return false;
TVGLOG("SW_ENGINE", "Matted(%d) Rle Linear Gradient", (int)surface->compositor->method);
auto span = rle->spans;
auto csize = surface->compositor->image.channelSize;
auto cbuffer = surface->compositor->image.buf8;
@ -941,7 +1502,8 @@ static bool _rasterLinearGradientRle(SwSurface* surface, const SwRleData* rle, c
if (!rle) return false;
if (_compositing(surface)) {
return _rasterLinearGradientMaskedRle(surface, rle, fill);
if (_matting(surface)) return _rasterLinearGradientMattedRle(surface, rle, fill);
else return _rasterLinearGradientMaskedRle(surface, rle, fill);
} else {
if (fill->translucent) return _rasterTranslucentLinearGradientRle(surface, rle, fill);
else return _rasterSolidLinearGradientRle(surface, rle, fill);
@ -955,6 +1517,66 @@ static bool _rasterLinearGradientRle(SwSurface* surface, const SwRleData* rle, c
/************************************************************************/
static bool _rasterRadialGradientMaskedRect(SwSurface* surface, const SwBBox& region, const SwFill* fill)
{
    //Radial variant: validate the radial coefficient. (Was checking
    //fill->linear.len — a copy-paste slip from the linear variant; the matted
    //radial counterpart checks fill->radial.a.)
    if (fill->radial.a < FLT_EPSILON) return false;

    auto h = static_cast<uint32_t>(region.max.y - region.min.y);
    auto w = static_cast<uint32_t>(region.max.x - region.min.x);
    auto cstride = surface->compositor->image.stride;
    auto cbuffer = surface->compositor->image.buf32 + (region.min.y * cstride + region.min.x);
    auto method = surface->compositor->method;

    TVGLOG("SW_ENGINE", "Masked(%d) Radial Gradient [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);

    //Render the radial gradient into the compositor mask with the chosen mask operator.
    if (method == CompositeMethod::AddMask) {
        for (uint32_t y = 0; y < h; ++y) {
            fillRadial(fill, cbuffer, region.min.y + y, region.min.x, w, opAddMask, 255);
            cbuffer += cstride;    //step by the compositor image stride: cbuffer points into that image
        }
    } else if (method == CompositeMethod::SubtractMask) {
        for (uint32_t y = 0; y < h; ++y) {
            fillRadial(fill, cbuffer, region.min.y + y, region.min.x, w, opSubMask, 255);
            cbuffer += cstride;
        }
    } else if (method == CompositeMethod::IntersectMask) {
        //Intersection must also zero every compositor pixel outside the fill region.
        for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
            auto cmp = surface->compositor->image.buf32 + (y * cstride + surface->compositor->bbox.min.x);
            if (y == region.min.y) {
                for (uint32_t y2 = y; y2 < region.max.y; ++y2) {
                    auto tmp = cmp;
                    auto x = surface->compositor->bbox.min.x;
                    while (x < surface->compositor->bbox.max.x) {
                        if (x == region.min.x) {
                            fillRadial(fill, tmp, y2, x, w, opIntMask, 255);
                            x += w;
                            tmp += w;
                        } else {
                            *tmp = 0;    //outside the region horizontally: clear the mask
                            ++tmp;
                            ++x;
                        }
                    }
                    cmp += cstride;
                }
                y += (h - 1);    //the inner loop consumed the whole region's rows
            } else {
                //Rows entirely outside the region: clear the FULL compositor bbox width.
                //(Was clearing only w pixels, leaving stale mask data beyond the region;
                //the linear variant clears bbox.max.x - bbox.min.x.)
                rasterRGBA32(cmp, 0x00000000, 0, surface->compositor->bbox.max.x - surface->compositor->bbox.min.x);
                cmp += cstride;
            }
        }
    } else if (method == CompositeMethod::DifferenceMask) {
        for (uint32_t y = 0; y < h; ++y) {
            fillRadial(fill, cbuffer, region.min.y + y, region.min.x, w, opDifMask, 255);
            cbuffer += cstride;
        }
    } else return false;

    //Masking Composition: blit the updated mask image onto the surface.
    return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox, 255);
}
static bool _rasterRadialGradientMattedRect(SwSurface* surface, const SwBBox& region, const SwFill* fill)
{
if (fill->radial.a < FLT_EPSILON) return false;
@ -965,6 +1587,8 @@ static bool _rasterRadialGradientMaskedRect(SwSurface* surface, const SwBBox& re
auto cbuffer = surface->compositor->image.buf8 + (region.min.y * surface->compositor->image.stride + region.min.x) * csize;
auto alpha = surface->blender.alpha(surface->compositor->method);
TVGLOG("SW_ENGINE", "Matted(%d) Radial Gradient [Region: %lu %lu %u %u]", (int)surface->compositor->method, region.min.x, region.min.y, w, h);
for (uint32_t y = 0; y < h; ++y) {
fillRadial(fill, buffer, region.min.y + y, region.min.x, w, cbuffer, alpha, csize, 255);
buffer += surface->stride;
@ -1009,7 +1633,8 @@ static bool _rasterSolidRadialGradientRect(SwSurface* surface, const SwBBox& reg
static bool _rasterRadialGradientRect(SwSurface* surface, const SwBBox& region, const SwFill* fill)
{
if (_compositing(surface)) {
return _rasterRadialGradientMaskedRect(surface, region, fill);
if (_matting(surface)) return _rasterRadialGradientMattedRect(surface, region, fill);
else return _rasterRadialGradientMaskedRect(surface, region, fill);
} else {
if (fill->translucent) return _rasterTranslucentRadialGradientRect(surface, region, fill);
else return _rasterSolidRadialGradientRect(surface, region, fill);
@ -1023,9 +1648,59 @@ static bool _rasterRadialGradientRect(SwSurface* surface, const SwBBox& region,
/************************************************************************/
static bool _rasterRadialGradientMaskedRle(SwSurface* surface, const SwRleData* rle, const SwFill* fill)
{
    //Radial variant: validate the radial coefficient, matching
    //_rasterRadialGradientMattedRle (was checking fill->linear.len).
    if (fill->radial.a < FLT_EPSILON) return false;

    TVGLOG("SW_ENGINE", "Masked(%d) Rle Radial Gradient", (int)surface->compositor->method);

    auto span = rle->spans;
    auto cstride = surface->compositor->image.stride;
    auto cbuffer = surface->compositor->image.buf32;
    auto method = surface->compositor->method;

    //Write the RADIAL gradient into the compositor mask. The original called
    //fillLinear() in all four branches — a copy-paste from the linear RLE
    //variant; the rect radial variant correctly uses fillRadial().
    if (method == CompositeMethod::AddMask) {
        for (uint32_t i = 0; i < rle->size; ++i, ++span) {
            auto cmp = &cbuffer[span->y * cstride + span->x];
            fillRadial(fill, cmp, span->y, span->x, span->len, opAddMask, span->coverage);
        }
    } else if (method == CompositeMethod::SubtractMask) {
        for (uint32_t i = 0; i < rle->size; ++i, ++span) {
            auto cmp = &cbuffer[span->y * cstride + span->x];
            fillRadial(fill, cmp, span->y, span->x, span->len, opSubMask, span->coverage);
        }
    } else if (method == CompositeMethod::IntersectMask) {
        //Intersection clears every compositor pixel that no span covers.
        for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
            auto cmp = &cbuffer[y * cstride];
            uint32_t x = surface->compositor->bbox.min.x;
            while (x < surface->compositor->bbox.max.x) {
                if (y == span->y && x == span->x && x + span->len <= surface->compositor->bbox.max.x) {
                    fillRadial(fill, cmp, span->y, span->x, span->len, opIntMask, span->coverage);
                    x += span->len;
                    ++span;
                } else {
                    cmp[x] = 0;
                    ++x;
                }
            }
        }
    } else if (method == CompositeMethod::DifferenceMask) {
        for (uint32_t i = 0; i < rle->size; ++i, ++span) {
            auto cmp = &cbuffer[span->y * cstride + span->x];
            fillRadial(fill, cmp, span->y, span->x, span->len, opDifMask, span->coverage);
        }
    } else return false;

    //Masking Composition: blit the updated mask image onto the surface.
    return _rasterDirectRGBAImage(surface, &surface->compositor->image, surface->compositor->bbox, 255);
}
static bool _rasterRadialGradientMattedRle(SwSurface* surface, const SwRleData* rle, const SwFill* fill)
{
if (fill->radial.a < FLT_EPSILON) return false;
TVGLOG("SW_ENGINE", "Matted(%d) Rle Radial Gradient", (int)surface->compositor->method);
auto span = rle->spans;
auto csize = surface->compositor->image.channelSize;
auto cbuffer = surface->compositor->image.buf8;
@ -1075,7 +1750,8 @@ static bool _rasterRadialGradientRle(SwSurface* surface, const SwRleData* rle, c
if (!rle) return false;
if (_compositing(surface)) {
return _rasterRadialGradientMaskedRle(surface, rle, fill);
if (_matting(surface)) return _rasterRadialGradientMattedRle(surface, rle, fill);
else return _rasterRadialGradientMaskedRle(surface, rle, fill);
} else {
if (fill->translucent) _rasterTranslucentRadialGradientRle(surface, rle, fill);
else return _rasterSolidRadialGradientRle(surface, rle, fill);
@ -1125,20 +1801,24 @@ bool rasterClear(SwSurface* surface, uint32_t x, uint32_t y, uint32_t w, uint32_
{
if (!surface || !surface->buf32 || surface->stride == 0 || surface->w == 0 || surface->h == 0) return false;
//full clear
//32 bits
if (surface->channelSize == sizeof(uint32_t)) {
//full clear
if (w == surface->stride) {
rasterRGBA32(surface->buf32 + (surface->stride * y), 0x00000000, 0, w * h);
//partial clear
} else {
auto buffer = surface->buf32 + (surface->stride * y + x);
for (uint32_t i = 0; i < h; i++) {
rasterRGBA32(buffer + (surface->stride * i), 0x00000000, 0, w);
}
}
//partial clear
//8 bits
} else if (surface->channelSize == sizeof(uint8_t)) {
//full clear
if (w == surface->stride) {
_rasterGrayscale8(surface->buf8 + (surface->stride * y), 0x00, 0, w * h);
//partial clear
} else {
auto buffer = surface->buf8 + (surface->stride * y + x);
for (uint32_t i = 0; i < h; i++) {

View file

@ -0,0 +1,380 @@
/*
* Copyright (c) 2023 the ThorVG project. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Texture-mapped polygon segment rasterizer body, spliced into the enclosing
 * rasterizer function via #include. All identifiers used here (dudx/dvdx,
 * dxdya/dxdyb, dudya/dvdya, xa/xb, ua/va, image, surface, region, yStart,
 * yEnd, opacity, dirFlag, aaSpans) belong to the enclosing function's scope.
 *
 * TEXMAP_INT_MASK selects the intersect-mask variant: the bilinearly-sampled
 * texel alpha is multiplied into the compositor mask (cmp[x] = cmp[x] * A(px))
 * and every compositor pixel outside the polygon is zeroed. Otherwise one of
 * TEXMAP_ADD_MASK / TEXMAP_SUB_MASK / TEXMAP_DIF_MASK selects the per-pixel
 * mask operator applied to *cmp. */
#ifdef TEXMAP_INT_MASK
{
    float _dudx = dudx, _dvdx = dvdx;
    float _dxdya = dxdya, _dxdyb = dxdyb, _dudya = dudya, _dvdya = dvdya;
    float _xa = xa, _xb = xb, _ua = ua, _va = va;
    auto sbuf = image->buf32;
    int32_t sw = static_cast<int32_t>(image->stride);
    int32_t sh = image->h;
    int32_t x1, x2, ar, ab, iru, irv, px, ay;
    int32_t vv = 0, uu = 0;
    int32_t minx = INT32_MAX, maxx = INT32_MIN;
    float dx, u, v, iptr;
    auto cbuffer = surface->compositor->image.buf32;
    SwSpan* span = nullptr;         //used only when rle based.

    if (!_arrange(image, region, yStart, yEnd)) return;

    //Clear out of the Polygon vertical ranges: intersect semantics require
    //zeroing the mask above/below the polygon segment.
    auto size = surface->compositor->bbox.max.x - surface->compositor->bbox.min.x;
    if (dirFlag == 1) {             //left top case.
        for(int y = surface->compositor->bbox.min.y; y < yStart; ++y) {
            rasterRGBA32(surface->compositor->image.buf32 + y * surface->compositor->image.stride, 0, surface->compositor->bbox.min.x, size);
        }
    }
    if (dirFlag == 4) {             //right bottom case.
        for(int y = yEnd; y < surface->compositor->bbox.max.y; ++y) {
            rasterRGBA32(surface->compositor->image.buf32 + y * surface->compositor->image.stride, 0, surface->compositor->bbox.min.x, size);
        }
    }

    //Loop through all lines in the segment
    uint32_t spanIdx = 0;

    if (region) {
        minx = region->min.x;
        maxx = region->max.x;
    } else {
        //RLE-based clipping: skip spans that lie above this segment.
        span = image->rle->spans;
        while (span->y < yStart) {
            ++span;
            ++spanIdx;
        }
    }

    for (int32_t y = yStart; y < yEnd; ++y) {
        auto cmp = &cbuffer[y * surface->compositor->image.stride];
        x1 = (int32_t)_xa;
        x2 = (int32_t)_xb;

        if (!region) {
            minx = INT32_MAX;
            maxx = INT32_MIN;
            //one single row, could be consisted of multiple spans.
            while (span->y == y && spanIdx < image->rle->size) {
                if (minx > span->x) minx = span->x;
                if (maxx < span->x + span->len) maxx = span->x + span->len;
                ++span;
                ++spanIdx;
            }
        }
        if (x1 < minx) x1 = minx;
        if (x2 > maxx) x2 = maxx;

        //Anti-Aliasing frames
        //FIXME: this aa must be applied before masking op
        ay = y - aaSpans->yStart;
        if (aaSpans->lines[ay].x[0] > x1) aaSpans->lines[ay].x[0] = x1;
        if (aaSpans->lines[ay].x[1] < x2) aaSpans->lines[ay].x[1] = x2;

        //Range allowed
        if ((x2 - x1) >= 1 && (x1 < maxx) && (x2 > minx)) {
            //Walk the whole compositor bbox so out-of-polygon pixels get cleared too.
            for (int32_t x = surface->compositor->bbox.min.x; x < surface->compositor->bbox.max.x; ++x) {
                //Range allowed
                if (x >= x1 && x < x2) {
                    //Perform subtexel pre-stepping on UV
                    //NOTE(review): u/v are recomputed from x1 on every x iteration,
                    //so the u/v += steps below appear ineffective — confirm intended.
                    dx = 1 - (_xa - x1);
                    u = _ua + dx * _dudx;
                    v = _va + dx * _dvdx;
                    if ((uint32_t)v >= image->h) {
                        cmp[x] = 0;    //sample out of texture: nothing intersects here
                    } else {
                        if (opacity == 255) {
                            //Bilinear sample of the texel at (u, v).
                            uu = (int) u;
                            vv = (int) v;
                            ar = (int)(255 * (1 - modff(u, &iptr)));
                            ab = (int)(255 * (1 - modff(v, &iptr)));
                            iru = uu + 1;
                            irv = vv + 1;
                            if (vv >= sh) continue;
                            px = *(sbuf + (vv * sw) + uu);
                            /* horizontal interpolate */
                            if (iru < sw) {
                                /* right pixel */
                                int px2 = *(sbuf + (vv * sw) + iru);
                                px = INTERPOLATE(px, px2, ar);
                            }
                            /* vertical interpolate */
                            if (irv < sh) {
                                /* bottom pixel */
                                int px2 = *(sbuf + (irv * sw) + uu);
                                /* horizontal interpolate */
                                if (iru < sw) {
                                    /* bottom right pixel */
                                    int px3 = *(sbuf + (irv * sw) + iru);
                                    px2 = INTERPOLATE(px2, px3, ar);
                                }
                                px = INTERPOLATE(px, px2, ab);
                            }
                            //Intersect: scale the existing mask by the texel alpha.
                            cmp[x] = ALPHA_BLEND(cmp[x], ALPHA(px));
                            //Step UV horizontally
                            u += _dudx;
                            v += _dvdx;
                        } else {
                            //Same bilinear sampling, with the global opacity folded in.
                            uu = (int) u;
                            vv = (int) v;
                            ar = (int)(255 * (1 - modff(u, &iptr)));
                            ab = (int)(255 * (1 - modff(v, &iptr)));
                            iru = uu + 1;
                            irv = vv + 1;
                            if (vv >= sh) continue;
                            px = *(sbuf + (vv * sw) + uu);
                            /* horizontal interpolate */
                            if (iru < sw) {
                                /* right pixel */
                                int px2 = *(sbuf + (vv * sw) + iru);
                                px = INTERPOLATE(px, px2, ar);
                            }
                            /* vertical interpolate */
                            if (irv < sh) {
                                /* bottom pixel */
                                int px2 = *(sbuf + (irv * sw) + uu);
                                /* horizontal interpolate */
                                if (iru < sw) {
                                    /* bottom right pixel */
                                    int px3 = *(sbuf + (irv * sw) + iru);
                                    px2 = INTERPOLATE(px2, px3, ar);
                                }
                                px = INTERPOLATE(px, px2, ab);
                            }
                            cmp[x] = ALPHA_BLEND(cmp[x], MULTIPLY(ALPHA(px), opacity));
                            //Step UV horizontally
                            u += _dudx;
                            v += _dvdx;
                        }
                    }
                } else {
                    //Clear out of polygon horizontal range
                    if (x < x1 && (dirFlag == 1 || dirFlag == 2)) cmp[x] = 0;
                    else if (x >= x2 && (dirFlag == 3 || dirFlag == 4)) cmp[x] = 0;
                }
            }
        }
        //Step along both edges
        _xa += _dxdya;
        _xb += _dxdyb;
        _ua += _dudya;
        _va += _dvdya;
    }
    //Restore the edge-walking state for the next segment.
    xa = _xa;
    xb = _xb;
    ua = _ua;
    va = _va;
}
#else
/* Add/Subtract/Difference mask variants: only the pixels inside the polygon
 * row [x1, x2) are touched, with the operator chosen by the preprocessor. */
{
    float _dudx = dudx, _dvdx = dvdx;
    float _dxdya = dxdya, _dxdyb = dxdyb, _dudya = dudya, _dvdya = dvdya;
    float _xa = xa, _xb = xb, _ua = ua, _va = va;
    auto sbuf = image->buf32;
    int32_t sw = static_cast<int32_t>(image->stride);
    int32_t sh = image->h;
    int32_t x1, x2, x, y, ar, ab, iru, irv, px, ay;
    int32_t vv = 0, uu = 0;
    int32_t minx = INT32_MAX, maxx = INT32_MIN;
    float dx, u, v, iptr;
    SwSpan* span = nullptr;         //used only when rle based.

    if (!_arrange(image, region, yStart, yEnd)) return;

    //Loop through all lines in the segment
    uint32_t spanIdx = 0;

    if (region) {
        minx = region->min.x;
        maxx = region->max.x;
    } else {
        //RLE-based clipping: skip spans that lie above this segment.
        span = image->rle->spans;
        while (span->y < yStart) {
            ++span;
            ++spanIdx;
        }
    }

    y = yStart;

    while (y < yEnd) {
        x1 = (int32_t)_xa;
        x2 = (int32_t)_xb;

        if (!region) {
            minx = INT32_MAX;
            maxx = INT32_MIN;
            //one single row, could be consisted of multiple spans.
            while (span->y == y && spanIdx < image->rle->size) {
                if (minx > span->x) minx = span->x;
                if (maxx < span->x + span->len) maxx = span->x + span->len;
                ++span;
                ++spanIdx;
            }
        }
        if (x1 < minx) x1 = minx;
        if (x2 > maxx) x2 = maxx;

        //Anti-Aliasing frames
        ay = y - aaSpans->yStart;
        if (aaSpans->lines[ay].x[0] > x1) aaSpans->lines[ay].x[0] = x1;
        if (aaSpans->lines[ay].x[1] < x2) aaSpans->lines[ay].x[1] = x2;

        //Range allowed
        if ((x2 - x1) >= 1 && (x1 < maxx) && (x2 > minx)) {
            //Perform subtexel pre-stepping on UV
            dx = 1 - (_xa - x1);
            u = _ua + dx * _dudx;
            v = _va + dx * _dvdx;

            x = x1;

            auto cmp = &surface->compositor->image.buf32[y * surface->compositor->image.stride + x1];

            if (opacity == 255) {
                //Draw horizontal line
                while (x++ < x2) {
                    //Bilinear sample of the texel at (u, v).
                    uu = (int) u;
                    vv = (int) v;

                    ar = (int)(255 * (1 - modff(u, &iptr)));
                    ab = (int)(255 * (1 - modff(v, &iptr)));
                    iru = uu + 1;
                    irv = vv + 1;

                    if (vv >= sh) continue;    //NOTE(review): skips ++cmp and UV stepping — confirm intended

                    px = *(sbuf + (vv * sw) + uu);

                    /* horizontal interpolate */
                    if (iru < sw) {
                        /* right pixel */
                        int px2 = *(sbuf + (vv * sw) + iru);
                        px = INTERPOLATE(px, px2, ar);
                    }
                    /* vertical interpolate */
                    if (irv < sh) {
                        /* bottom pixel */
                        int px2 = *(sbuf + (irv * sw) + uu);

                        /* horizontal interpolate */
                        if (iru < sw) {
                            /* bottom right pixel */
                            int px3 = *(sbuf + (irv * sw) + iru);
                            px2 = INTERPOLATE(px2, px3, ar);
                        }
                        px = INTERPOLATE(px, px2, ab);
                    }
                    //Apply the compile-time-selected mask operator to the mask pixel.
#ifdef TEXMAP_ADD_MASK
                    *cmp = px + ALPHA_BLEND(*cmp, IALPHA(px));
#elif defined(TEXMAP_SUB_MASK)
                    *cmp = ALPHA_BLEND(*cmp, IALPHA(px));
#elif defined(TEXMAP_DIF_MASK)
                    *cmp = ALPHA_BLEND(px, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(px));
#endif
                    ++cmp;

                    //Step UV horizontally
                    u += _dudx;
                    v += _dvdx;
                    //range over?
                    if ((uint32_t)v >= image->h) break;
                }
            } else {
                //Draw horizontal line
                while (x++ < x2) {
                    //Bilinear sample, then fold the global opacity into the operator.
                    uu = (int) u;
                    vv = (int) v;

                    ar = (int)(255 * (1 - modff(u, &iptr)));
                    ab = (int)(255 * (1 - modff(v, &iptr)));
                    iru = uu + 1;
                    irv = vv + 1;

                    if (vv >= sh) continue;

                    px = *(sbuf + (vv * sw) + uu);

                    /* horizontal interpolate */
                    if (iru < sw) {
                        /* right pixel */
                        int px2 = *(sbuf + (vv * sw) + iru);
                        px = INTERPOLATE(px, px2, ar);
                    }
                    /* vertical interpolate */
                    if (irv < sh) {
                        /* bottom pixel */
                        int px2 = *(sbuf + (irv * sw) + uu);

                        /* horizontal interpolate */
                        if (iru < sw) {
                            /* bottom right pixel */
                            int px3 = *(sbuf + (irv * sw) + iru);
                            px2 = INTERPOLATE(px2, px3, ar);
                        }
                        px = INTERPOLATE(px, px2, ab);
                    }
#ifdef TEXMAP_ADD_MASK
                    *cmp = INTERPOLATE(px, *cmp, opacity);
#elif defined(TEXMAP_SUB_MASK)
                    *cmp = ALPHA_BLEND(*cmp, IALPHA(ALPHA_BLEND(px, opacity)));
#elif defined(TEXMAP_DIF_MASK)
                    auto src = ALPHA_BLEND(px, opacity);
                    *cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
                    ++cmp;

                    //Step UV horizontally
                    u += _dudx;
                    v += _dvdx;
                    //range over?
                    if ((uint32_t)v >= image->h) break;
                }
            }
        }
        //Step along both edges
        _xa += _dxdya;
        _xb += _dxdyb;
        _ua += _dudya;
        _va += _dvdya;

        //No spans left below: the rest of the segment cannot draw anything.
        if (!region && spanIdx >= image->rle->size) break;

        ++y;
    }
    //Restore the edge-walking state for the next segment.
    xa = _xa;
    xb = _xb;
    ua = _ua;
    va = _va;
}
#endif

View file

@ -0,0 +1,171 @@
/*
* Copyright (c) 2023 the ThorVG project. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Scaled-image masking body, spliced into the enclosing rasterizer via
 * #include. All identifiers used here (surface, image, itransform, region,
 * opacity, halfScale, w, h, cstride) belong to the enclosing function.
 *
 * SCALED_IMAGE_INT_MASK selects the intersect variant: image alpha is
 * multiplied into the compositor mask and every mask pixel outside the image
 * region is zeroed. Otherwise SCALED_IMAGE_ADD_MASK / SUB_MASK / DIF_MASK
 * selects the per-pixel operator; only the region itself is touched.
 * Source coordinates are obtained through the inverse transform (itransform);
 * _interpDownScaler/_interpUpScaler perform the filtered sampling. */
#ifdef SCALED_IMAGE_INT_MASK
{
    auto cbuffer = surface->compositor->image.buf32 + (surface->compositor->bbox.min.y * cstride + surface->compositor->bbox.min.x);

    for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
        if (y == region.min.y) {
            //Rows overlapping the image region: fill inside, clear outside.
            auto cbuffer2 = cbuffer;
            for (uint32_t y2 = y; y2 < region.max.y; ++y2) {
                auto sy = (uint32_t)(y2 * itransform->e22 + itransform->e23);
                if (sy >= image->h) continue;    //NOTE(review): also skips cbuffer2 += cstride — confirm row alignment
                auto tmp = cbuffer2;
                auto x = surface->compositor->bbox.min.x;
                while (x < surface->compositor->bbox.max.x) {
                    if (x == region.min.x) {
                        if (opacity == 255) {
                            //Down-Scaled
                            if (image->scale < DOWN_SCALE_TOLERANCE) {
                                for (uint32_t i = 0; i < w; ++i, ++tmp) {
                                    auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
                                    if (sx >= image->w) continue;
                                    auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
                                    *tmp = ALPHA_BLEND(*tmp, ALPHA(src));    //intersect: scale mask by image alpha
                                }
                            //Up-Scaled
                            } else {
                                for (uint32_t i = 0; i < w; ++i, ++tmp) {
                                    auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
                                    if (sx >= image->w) continue;
                                    auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
                                    *tmp = ALPHA_BLEND(*tmp, ALPHA(src));
                                }
                            }
                        } else {
                            //Down-Scaled
                            if (image->scale < DOWN_SCALE_TOLERANCE) {
                                for (uint32_t i = 0; i < w; ++i, ++tmp) {
                                    auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
                                    if (sx >= image->w) continue;
                                    auto src = ALPHA_BLEND(_interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale), opacity);
                                    *tmp = ALPHA_BLEND(*tmp, ALPHA(src));
                                }
                            //Up-Scaled
                            } else {
                                for (uint32_t i = 0; i < w; ++i, ++tmp) {
                                    auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
                                    if (sx >= image->w) continue;
                                    auto src = ALPHA_BLEND(_interpUpScaler(image->buf32, image->w, image->h, sx, sy), opacity);
                                    *tmp = ALPHA_BLEND(*tmp, ALPHA(src));
                                }
                            }
                        }
                        x += w;
                    } else {
                        *tmp = 0;    //outside the region horizontally: clear the mask
                        ++tmp;
                        ++x;
                    }
                }
                cbuffer2 += cstride;
            }
            y += (h - 1);    //the inner loop consumed the whole region's rows
        } else {
            //Rows entirely outside the region: clear the mask across the bbox.
            auto tmp = cbuffer;
            for (uint32_t x = surface->compositor->bbox.min.x; x < surface->compositor->bbox.max.x; ++x, ++tmp) {
                *tmp = 0;
            }
        }
        cbuffer += cstride;
    }
}
#else
{
    auto cbuffer = surface->compositor->image.buf32 + (region.min.y * cstride + region.min.x);

    // Down-Scaled
    if (image->scale < DOWN_SCALE_TOLERANCE) {
        for (auto y = region.min.y; y < region.max.y; ++y) {
            auto sy = (uint32_t)(y * itransform->e22 + itransform->e23);
            if (sy >= image->h) continue;    //NOTE(review): also skips cbuffer += cstride — confirm row alignment
            auto cmp = cbuffer;
            if (opacity == 255) {
                for (auto x = region.min.x; x < region.max.x; ++x, ++cmp) {
                    auto sx = (uint32_t)(x * itransform->e11 + itransform->e13);
                    if (sx >= image->w) continue;
                    auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
                    //Apply the compile-time-selected mask operator.
#ifdef SCALED_IMAGE_ADD_MASK
                    *cmp = src + ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_IMAGE_SUB_MASK)
                    *cmp = ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_IMAGE_DIF_MASK)
                    *cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
                }
            } else {
                for (auto x = region.min.x; x < region.max.x; ++x, ++cmp) {
                    auto sx = (uint32_t)(x * itransform->e11 + itransform->e13);
                    if (sx >= image->w) continue;
                    auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
#ifdef SCALED_IMAGE_ADD_MASK
                    *cmp = INTERPOLATE(src, *cmp, opacity);
#elif defined(SCALED_IMAGE_SUB_MASK)
                    *cmp = ALPHA_BLEND(*cmp, IALPHA(ALPHA_BLEND(src, opacity)));
#elif defined(SCALED_IMAGE_DIF_MASK)
                    src = ALPHA_BLEND(src, opacity);
                    *cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
                }
            }
            cbuffer += cstride;
        }
    // Up-Scaled
    } else {
        for (auto y = region.min.y; y < region.max.y; ++y) {
            auto sy = y * itransform->e22 + itransform->e23;
            if ((uint32_t)sy >= image->h) continue;
            auto cmp = cbuffer;
            if (opacity == 255) {
                for (auto x = region.min.x; x < region.max.x; ++x, ++cmp) {
                    auto sx = x * itransform->e11 + itransform->e13;
                    if ((uint32_t)sx >= image->w) continue;
                    auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
#ifdef SCALED_IMAGE_ADD_MASK
                    *cmp = src + ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_IMAGE_SUB_MASK)
                    *cmp = ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_IMAGE_DIF_MASK)
                    *cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
                }
            } else {
                for (auto x = region.min.x; x < region.max.x; ++x, ++cmp) {
                    auto sx = x * itransform->e11 + itransform->e13;
                    if ((uint32_t)sx >= image->w) continue;
                    auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
#ifdef SCALED_IMAGE_ADD_MASK
                    *cmp = INTERPOLATE(src, *cmp, opacity);
#elif defined(SCALED_IMAGE_SUB_MASK)
                    *cmp = ALPHA_BLEND(*cmp, IALPHA(ALPHA_BLEND(src, opacity)));
#elif defined(SCALED_IMAGE_DIF_MASK)
                    src = ALPHA_BLEND(src, opacity);
                    *cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
                }
            }
            cbuffer += cstride;
        }
    }
}
#endif

View file

@ -0,0 +1,170 @@
/*
* Copyright (c) 2023 the ThorVG project. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifdef SCALED_RLE_IMAGE_INT_MASK
/* Intersect-mask path for a scaled RLE image.
 * This fragment is included into a raster routine via the preprocessor; it
 * relies on the including scope to provide: surface, image, span (RLE span
 * cursor), opacity, itransform (inverse transform) and halfScale.
 * Unlike the other mask operations, intersection must walk the WHOLE
 * compositor bbox: a compositor pixel not covered by any image span
 * intersects with "nothing" and therefore has to be cleared to 0. */
{
auto cbuffer = surface->compositor->image.buf32;
auto cstride = surface->compositor->image.stride;
//Center (Down-Scaled)
if (image->scale < DOWN_SCALE_TOLERANCE) {
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
auto cmp = &cbuffer[y * cstride];
for (uint32_t x = surface->compositor->bbox.min.x; x < surface->compositor->bbox.max.x; ++x) {
//A span begins at this pixel and fits inside the bbox: blend it in.
if (y == span->y && x == span->x && x + span->len <= surface->compositor->bbox.max.x) {
auto sy = (uint32_t)(span->y * itransform->e22 + itransform->e23);
//NOTE(review): when sy is out of the image range the span is skipped without
//advancing the span cursor or clearing cmp[x] - confirm this is intended.
if (sy >= image->h) continue;
auto alpha = MULTIPLY(span->coverage, opacity);
if (alpha == 255) {
//Fully opaque span: modulate the compositor pixel by the source alpha only.
for (uint32_t i = 0; i < span->len; ++i) {
auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
if (sx >= image->w) continue;
auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
cmp[x + i] = ALPHA_BLEND(cmp[x + i], ALPHA(src));
}
} else {
//Partially covered span: pre-multiply the source by the span alpha first.
for (uint32_t i = 0; i < span->len; ++i) {
auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
if (sx >= image->w) continue;
auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
src = ALPHA_BLEND(src, alpha);
cmp[x + i] = ALPHA_BLEND(cmp[x + i], ALPHA(src));
}
}
x += span->len - 1;
++span;
} else {
//No span covers this pixel: the intersection is empty, clear it.
cmp[x] = 0;
}
}
}
//Center (Up-Scaled)
} else {
for (uint32_t y = surface->compositor->bbox.min.y; y < surface->compositor->bbox.max.y; ++y) {
auto cmp = &cbuffer[y * cstride];
for (uint32_t x = surface->compositor->bbox.min.x; x < surface->compositor->bbox.max.x; ++x) {
//Same structure as the down-scaled branch, but sampling with the up-scaler.
if (y == span->y && x == span->x && x + span->len <= surface->compositor->bbox.max.x) {
auto sy = (uint32_t)(span->y * itransform->e22 + itransform->e23);
if (sy >= image->h) continue;
auto alpha = MULTIPLY(span->coverage, opacity);
if (alpha == 255) {
for (uint32_t i = 0; i < span->len; ++i) {
auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
if (sx >= image->w) continue;
auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
cmp[x + i] = ALPHA_BLEND(cmp[x + i], ALPHA(src));
}
} else {
for (uint32_t i = 0; i < span->len; ++i) {
auto sx = (uint32_t)((x + i) * itransform->e11 + itransform->e13);
if (sx >= image->w) continue;
auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
src = ALPHA_BLEND(src, alpha);
cmp[x + i] = ALPHA_BLEND(cmp[x + i], ALPHA(src));
}
}
x += span->len - 1;
++span;
} else {
cmp[x] = 0;
}
}
}
}
}
#else
/* Add/Subtract/Difference mask paths, selected by SCALED_RLE_IMAGE_ADD_MASK,
 * SCALED_RLE_IMAGE_SUB_MASK or SCALED_RLE_IMAGE_DIF_MASK. These operations
 * only touch pixels covered by the image spans, so iteration is per-span:
 *   Add:        cmp = src + cmp * (1 - srcA)            (source-over)
 *   Subtract:   cmp = cmp * (1 - srcA)
 *   Difference: cmp = src * (1 - cmpA) + cmp * (1 - srcA) */
{
//Center (Down-Scaled)
if (image->scale < DOWN_SCALE_TOLERANCE) {
for (uint32_t i = 0; i < image->rle->size; ++i, ++span) {
auto sy = (uint32_t)(span->y * itransform->e22 + itransform->e23);
if (sy >= image->h) continue;
auto cmp = &surface->compositor->image.buf32[span->y * surface->compositor->image.stride + span->x];
auto a = MULTIPLY(span->coverage, opacity);
if (a == 255) {
//Fully opaque span: apply the mask formula with the raw sampled source.
for (uint32_t x = static_cast<uint32_t>(span->x); x < static_cast<uint32_t>(span->x) + span->len; ++x, ++cmp) {
auto sx = (uint32_t)(x * itransform->e11 + itransform->e13);
if (sx >= image->w) continue;
auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
#ifdef SCALED_RLE_IMAGE_ADD_MASK
*cmp = src + ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_RLE_IMAGE_SUB_MASK)
*cmp = ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_RLE_IMAGE_DIF_MASK)
*cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
}
} else {
//Partially covered span: attenuate the source by the span alpha first.
for (uint32_t x = static_cast<uint32_t>(span->x); x < static_cast<uint32_t>(span->x) + span->len; ++x, ++cmp) {
auto sx = (uint32_t)(x * itransform->e11 + itransform->e13);
if (sx >= image->w) continue;
auto src = _interpDownScaler(image->buf32, image->stride, image->w, image->h, sx, sy, halfScale);
#ifdef SCALED_RLE_IMAGE_ADD_MASK
*cmp = INTERPOLATE(src, *cmp, a);
#elif defined(SCALED_RLE_IMAGE_SUB_MASK)
*cmp = ALPHA_BLEND(*cmp, IALPHA(ALPHA_BLEND(src, a)));
#elif defined(SCALED_RLE_IMAGE_DIF_MASK)
src = ALPHA_BLEND(src, a);
*cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
}
}
}
//Center (Up-Scaled)
} else {
for (uint32_t i = 0; i < image->rle->size; ++i, ++span) {
//Note: sy/sx stay floating-point here and are range-checked after casting.
auto sy = span->y * itransform->e22 + itransform->e23;
if ((uint32_t)sy >= image->h) continue;
auto cmp = &surface->compositor->image.buf32[span->y * surface->compositor->image.stride + span->x];
auto a = MULTIPLY(span->coverage, opacity);
if (a == 255) {
for (uint32_t x = static_cast<uint32_t>(span->x); x < static_cast<uint32_t>(span->x) + span->len; ++x, ++cmp) {
auto sx = x * itransform->e11 + itransform->e13;
if ((uint32_t)sx >= image->w) continue;
auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
#ifdef SCALED_RLE_IMAGE_ADD_MASK
*cmp = src + ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_RLE_IMAGE_SUB_MASK)
*cmp = ALPHA_BLEND(*cmp, IALPHA(src));
#elif defined(SCALED_RLE_IMAGE_DIF_MASK)
*cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
}
} else {
for (uint32_t x = static_cast<uint32_t>(span->x); x < static_cast<uint32_t>(span->x) + span->len; ++x, ++cmp) {
auto sx = x * itransform->e11 + itransform->e13;
if ((uint32_t)sx >= image->w) continue;
auto src = _interpUpScaler(image->buf32, image->w, image->h, sx, sy);
#ifdef SCALED_RLE_IMAGE_ADD_MASK
*cmp = INTERPOLATE(src, *cmp, a);
#elif defined(SCALED_RLE_IMAGE_SUB_MASK)
*cmp = ALPHA_BLEND(*cmp, IALPHA(ALPHA_BLEND(src, a)));
#elif defined(SCALED_RLE_IMAGE_DIF_MASK)
src = ALPHA_BLEND(src, a);
*cmp = ALPHA_BLEND(src, IALPHA(*cmp)) + ALPHA_BLEND(*cmp, IALPHA(src));
#endif
}
}
}
}
}
#endif

View file

@ -69,11 +69,35 @@ static bool _arrange(const SwImage* image, const SwBBox* region, int& yStart, in
}
static void _rasterMaskedPolygonImageSegment(SwSurface* surface, const SwImage* image, const SwBBox* region, int yStart, int yEnd, uint32_t opacity, AASpans* aaSpans)
static void _rasterMaskedPolygonImageSegment(SwSurface* surface, const SwImage* image, const SwBBox* region, int yStart, int yEnd, uint32_t opacity, AASpans* aaSpans, uint8_t dirFlag = 0)
{
#define TEXMAP_MASKING
auto method = surface->compositor->method;
if (method == CompositeMethod::AddMask) {
#define TEXMAP_ADD_MASK
#include "tvgSwRasterMaskedTexmapInternal.h"
#undef TEXMAP_ADD_MASK
} else if (method == CompositeMethod::SubtractMask) {
#define TEXMAP_SUB_MASK
#include "tvgSwRasterMaskedTexmapInternal.h"
#undef TEXMAP_SUB_MASK
} else if (method == CompositeMethod::IntersectMask) {
#define TEXMAP_INT_MASK
#include "tvgSwRasterMaskedTexmapInternal.h"
#undef TEXMAP_INT_MASK
} else if (method == CompositeMethod::DifferenceMask) {
#define TEXMAP_DIF_MASK
#include "tvgSwRasterMaskedTexmapInternal.h"
#undef TEXMAP_DIF_MASK
}
}
static void _rasterMattedPolygonImageSegment(SwSurface* surface, const SwImage* image, const SwBBox* region, int yStart, int yEnd, uint32_t opacity, AASpans* aaSpans)
{
#define TEXMAP_MATTING
#include "tvgSwRasterTexmapInternal.h"
#undef TEXMAP_MASKING
#undef TEXMAP_MATTING
}
@ -147,7 +171,7 @@ static void _rasterPolygonImage(SwSurface* surface, const SwImage* image, const
if (mathEqual(y[1], y[2])) side = x[2] > x[1];
auto regionTop = region ? region->min.y : image->rle->spans->y; //Normal Image or Rle Image?
auto masking = _compositing(surface); //Masking required
auto compositing = _compositing(surface); //Composition required
//Longer edge is on the left side
if (!side) {
@ -173,8 +197,10 @@ static void _rasterPolygonImage(SwSurface* surface, const SwImage* image, const
dxdyb = dxdy[0];
xb = x[0] + dy * dxdyb + (off_y * dxdyb);
if (masking) _rasterMaskedPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
else _rasterPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
if (compositing) {
if (_matting(surface)) _rasterMattedPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
else _rasterMaskedPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans, 1);
} else _rasterPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
upper = true;
}
@ -189,8 +215,10 @@ static void _rasterPolygonImage(SwSurface* surface, const SwImage* image, const
// Set right edge X-slope and perform subpixel pre-stepping
dxdyb = dxdy[2];
xb = x[1] + (1 - (y[1] - yi[1])) * dxdyb + (off_y * dxdyb);
if (masking) _rasterMaskedPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
else _rasterPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
if (compositing) {
if (_matting(surface)) _rasterMattedPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
else _rasterMaskedPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans, 2);
} else _rasterPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
}
//Longer edge is on the right side
} else {
@ -213,8 +241,10 @@ static void _rasterPolygonImage(SwSurface* surface, const SwImage* image, const
ua = u[0] + dy * dudya + (off_y * dudya);
va = v[0] + dy * dvdya + (off_y * dvdya);
if (masking) _rasterMaskedPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
else _rasterPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
if (compositing) {
if (_matting(surface)) _rasterMattedPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
else _rasterMaskedPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans, 3);
} else _rasterPolygonImageSegment(surface, image, region, yi[0], yi[1], opacity, aaSpans);
upper = true;
}
@ -232,8 +262,10 @@ static void _rasterPolygonImage(SwSurface* surface, const SwImage* image, const
ua = u[1] + dy * dudya + (off_y * dudya);
va = v[1] + dy * dvdya + (off_y * dvdya);
if (masking) _rasterMaskedPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
else _rasterPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
if (compositing) {
if (_matting(surface)) _rasterMattedPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
else _rasterMaskedPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans, 4);
} else _rasterPolygonImageSegment(surface, image, region, yi[1], yi[2], opacity, aaSpans);
}
}
}

View file

@ -36,8 +36,7 @@
uint32_t* buf;
SwSpan* span = nullptr; //used only when rle based.
#ifdef TEXMAP_MASKING
uint8_t* cmp;
#ifdef TEXMAP_MATTING
auto csize = surface->compositor->image.channelSize;
auto alpha = surface->blender.alpha(surface->compositor->method);
#endif
@ -83,8 +82,8 @@
if (aaSpans->lines[ay].x[0] > x1) aaSpans->lines[ay].x[0] = x1;
if (aaSpans->lines[ay].x[1] < x2) aaSpans->lines[ay].x[1] = x2;
//Range exception
if ((x2 - x1) < 1 || (x1 >= maxx) || (x2 <= minx)) goto next;
//Range allowed
if ((x2 - x1) >= 1 && (x1 < maxx) && (x2 > minx)) {
//Perform subtexel pre-stepping on UV
dx = 1 - (_xa - x1);
@ -95,8 +94,8 @@
x = x1;
#ifdef TEXMAP_MASKING
cmp = &surface->compositor->image.buf8[(y * surface->compositor->image.stride + x1) * csize];
#ifdef TEXMAP_MATTING
auto cmp = &surface->compositor->image.buf8[(y * surface->compositor->image.stride + x1) * csize];
#endif
if (opacity == 255) {
//Draw horizontal line
@ -132,7 +131,7 @@
}
px = INTERPOLATE(px, px2, ab);
}
#ifdef TEXMAP_MASKING
#ifdef TEXMAP_MATTING
auto src = ALPHA_BLEND(px, alpha(cmp));
cmp += csize;
#else
@ -181,7 +180,7 @@
}
px = INTERPOLATE(px, px2, ab);
}
#ifdef TEXMAP_MASKING
#ifdef TEXMAP_MATTING
auto src = ALPHA_BLEND(px, MULTIPLY(opacity, alpha(cmp)));
cmp += csize;
#else
@ -197,7 +196,7 @@
if ((uint32_t)v >= image->h) break;
}
}
next:
}
//Step along both edges
_xa += _dxdya;
_xb += _dxdyb;

View file

@ -166,6 +166,7 @@ bool Paint::Impl::render(RenderMethod& renderer)
Create a composition image. */
if (compData && compData->method != CompositeMethod::ClipPath && !(compData->target->pImpl->ctxFlag & ContextFlag::FastTrack)) {
auto region = smethod->bounds(renderer);
if (MASK_OPERATION(compData->method)) region.add(compData->target->pImpl->smethod->bounds(renderer));
if (region.w == 0 || region.h == 0) return true;
cmp = renderer.target(region, COMPOSITE_TO_COLORSPACE(renderer, compData->method));
if (renderer.beginComposite(cmp, CompositeMethod::None, 255)) {

View file

@ -98,6 +98,20 @@ struct RenderRegion
if (w < 0) w = 0;
if (h < 0) h = 0;
}
void add(const RenderRegion& rhs)
{
if (rhs.x < x) {
w += (x - rhs.x);
x = rhs.x;
}
if (rhs.y < y) {
h += (y - rhs.y);
y = rhs.y;
}
if (rhs.x + rhs.w > x + w) w = (rhs.x + rhs.w) - x;
if (rhs.y + rhs.h > y + h) h = (rhs.y + rhs.h) - y;
}
};
struct RenderTransform
@ -238,6 +252,25 @@ public:
virtual bool endComposite(Compositor* cmp) = 0;
};
/* Returns true when @p method denotes a mask operation (how the source is
 * combined with the target mask: Add/Subtract/Intersect/Difference) and
 * false when it denotes a mask-alpha selector (where the alpha channel
 * originates: Alpha/InvAlpha/Luma/InvLuma). Any other value is logged and
 * treated as a non-operation. */
static inline bool MASK_OPERATION(CompositeMethod method)
{
    switch(method) {
        //Mask-alpha selectors: no extra region math required.
        case CompositeMethod::AlphaMask:
        case CompositeMethod::InvAlphaMask:
        case CompositeMethod::LumaMask:
        case CompositeMethod::InvLumaMask:
            return false;
        //Mask operations: the target's bounds also contribute to the result.
        case CompositeMethod::AddMask:
        case CompositeMethod::SubtractMask:
        case CompositeMethod::IntersectMask:
        case CompositeMethod::DifferenceMask:
            return true;
        default:
            //Fixed log text: it previously said "Composite Size", copy-pasted
            //from the channel-size helper, which was misleading here.
            TVGERR("COMMON", "Unsupported Composite Method! = %d", (int)method);
            return false;
    }
}
static inline uint8_t CHANNEL_SIZE(ColorSpace cs)
{
switch(cs) {
@ -263,6 +296,10 @@ static inline ColorSpace COMPOSITE_TO_COLORSPACE(RenderMethod& renderer, Composi
return ColorSpace::Grayscale8;
case CompositeMethod::LumaMask:
case CompositeMethod::InvLumaMask:
case CompositeMethod::AddMask:
case CompositeMethod::SubtractMask:
case CompositeMethod::IntersectMask:
case CompositeMethod::DifferenceMask:
return renderer.colorSpace();
default:
TVGERR("COMMON", "Unsupported Composite Size! = %d", (int)method);