|
| static void | scaleToGray2Low (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *sumtab, l_uint8 *valtab) |
| |
| static l_uint32 * | makeSumTabSG2 (void) |
| |
| static l_uint8 * | makeValTabSG2 (void) |
| |
| static void | scaleToGray3Low (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *sumtab, l_uint8 *valtab) |
| |
| static l_uint32 * | makeSumTabSG3 (void) |
| |
| static l_uint8 * | makeValTabSG3 (void) |
| |
| static void | scaleToGray4Low (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *sumtab, l_uint8 *valtab) |
| |
| static l_uint32 * | makeSumTabSG4 (void) |
| |
| static l_uint8 * | makeValTabSG4 (void) |
| |
| static void | scaleToGray6Low (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab8, l_uint8 *valtab) |
| |
| static l_uint8 * | makeValTabSG6 (void) |
| |
| static void | scaleToGray8Low (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab8, l_uint8 *valtab) |
| |
| static l_uint8 * | makeValTabSG8 (void) |
| |
| static void | scaleToGray16Low (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab8) |
| |
| static l_int32 | scaleMipmapLow (l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas1, l_int32 wpls1, l_uint32 *datas2, l_int32 wpls2, l_float32 red) |
| |
| PIX * | pixScaleToGray (PIX *pixs, l_float32 scalefactor) |
| |
| PIX * | pixScaleToGrayFast (PIX *pixs, l_float32 scalefactor) |
| |
| PIX * | pixScaleToGray2 (PIX *pixs) |
| |
| PIX * | pixScaleToGray3 (PIX *pixs) |
| |
| PIX * | pixScaleToGray4 (PIX *pixs) |
| |
| PIX * | pixScaleToGray6 (PIX *pixs) |
| |
| PIX * | pixScaleToGray8 (PIX *pixs) |
| |
| PIX * | pixScaleToGray16 (PIX *pixs) |
| |
| PIX * | pixScaleToGrayMipmap (PIX *pixs, l_float32 scalefactor) |
| |
| PIX * | pixScaleMipmap (PIX *pixs1, PIX *pixs2, l_float32 scale) |
| |
| PIX * | pixExpandReplicate (PIX *pixs, l_int32 factor) |
| |
| PIX * | pixScaleGrayMinMax (PIX *pixs, l_int32 xfact, l_int32 yfact, l_int32 type) |
| |
| PIX * | pixScaleGrayMinMax2 (PIX *pixs, l_int32 type) |
| |
| PIX * | pixScaleGrayRankCascade (PIX *pixs, l_int32 level1, l_int32 level2, l_int32 level3, l_int32 level4) |
| |
| PIX * | pixScaleGrayRank2 (PIX *pixs, l_int32 rank) |
| |
| l_ok | pixScaleAndTransferAlpha (PIX *pixd, PIX *pixs, l_float32 scalex, l_float32 scaley) |
| |
| PIX * | pixScaleWithAlpha (PIX *pixs, l_float32 scalex, l_float32 scaley, PIX *pixg, l_float32 fract) |
| |
Scale-to-gray (1 bpp -> 8 bpp; arbitrary downscaling)
PIX *pixScaleToGray()
PIX *pixScaleToGrayFast() Scale-to-gray (1 bpp -> 8 bpp; integer downscaling)
PIX *pixScaleToGray2()
PIX *pixScaleToGray3()
PIX *pixScaleToGray4()
PIX *pixScaleToGray6()
PIX *pixScaleToGray8()
PIX *pixScaleToGray16() Scale-to-gray by mipmap (1 bpp -> 8 bpp, arbitrary reduction)
PIX *pixScaleToGrayMipmap() Grayscale scaling using mipmap
PIX *pixScaleMipmap() Replicated (integer) expansion (all depths)
PIX *pixExpandReplicate() Grayscale downscaling using min and max
PIX *pixScaleGrayMinMax()
PIX *pixScaleGrayMinMax2() Grayscale downscaling using rank value
PIX *pixScaleGrayRankCascade()
PIX *pixScaleGrayRank2() Helper function for transferring alpha with scaling
l_ok pixScaleAndTransferAlpha() RGB scaling including alpha (blend) component
PIX *pixScaleWithAlpha() Low-level static functions:
Scale-to-gray 2x
static void scaleToGray2Low()
static l_uint32 *makeSumTabSG2()
static l_uint8 *makeValTabSG2()
Scale-to-gray 3x
static void scaleToGray3Low()
static l_uint32 *makeSumTabSG3()
static l_uint8 *makeValTabSG3()
Scale-to-gray 4x
static void scaleToGray4Low()
static l_uint32 *makeSumTabSG4()
static l_uint8 *makeValTabSG4()
Scale-to-gray 6x
static void scaleToGray6Low()
static l_uint8 *makeValTabSG6()
Scale-to-gray 8x
static void scaleToGray8Low()
static l_uint8 *makeValTabSG8()
Scale-to-gray 16x
static void scaleToGray16Low()
Grayscale mipmap
static l_int32 scaleMipmapLow()
Definition in file scale2.c.
| PIX* pixScaleToGray |
( |
PIX * |
pixs, |
|
|
l_float32 |
scalefactor |
|
) |
| |
pixScaleToGray()
- Parameters
-
| [in] | pixs | 1 bpp |
| [in] | scalefactor | reduction: must be > 0.0 and < 1.0 |
- Returns
- pixd 8 bpp, scaled down by scalefactor in each direction, or NULL on error.
Notes:
For faster scaling in the range of scalefactors from 0.0625 to 0.5,
with very little difference in quality, use pixScaleToGrayFast().
Binary images have sharp edges, so they intrinsically have very
high frequency content. To avoid aliasing, they must be low-pass
filtered, which tends to blur the edges. How can we keep relatively
crisp edges without aliasing? The trick is to do binary upscaling
followed by a power-of-2 scaleToGray. For large reductions, where
you don't end up with much detail, some corners can be cut.
The intent here is to get high quality reduced grayscale
images with relatively little computation. We do binary
pre-scaling followed by scaleToGrayN() for best results,
esp. to avoid excess blur when the scale factor is near
an inverse power of 2. Where a low-pass filter is required,
we use simple convolution kernels: either the hat filter for
linear interpolation or a flat filter for larger downscaling.
Other choices, such as a perfect bandpass filter with infinite extent
(the sinc) or various approximations to it (e.g., lanczos), are
unnecessarily expensive.
The choices made are as follows:
(1) Do binary upscaling before scaleToGrayN() for scalefactors > 1/8
(2) Do binary downscaling before scaleToGray8() for scalefactors
between 1/16 and 1/8.
(3) Use scaleToGray16() before grayscale downscaling for
scalefactors less than 1/16
Another reasonable choice would be to start binary downscaling
for scalefactors below 1/4, rather than below 1/8 as we do here. The general scaling rules, not all of which are used here, go as follows:
(1) For grayscale upscaling, use pixScaleGrayLI(). However,
note that edges will be visibly blurred for scalefactors
near (but above) 1.0. Replication will avoid edge blur,
and should be considered for factors very near 1.0.
(2) For grayscale downscaling with a scale factor larger than
about 0.7, use pixScaleGrayLI(). For scalefactors near
(but below) 1.0, you tread between Scylla and Charybdis.
pixScaleGrayLI() again gives edge blurring, but
pixScaleBySampling() gives visible aliasing.
(3) For grayscale downscaling with a scale factor smaller than
about 0.7, use pixScaleSmooth()
(4) For binary input images, do as much scale to gray as possible
using the special integer functions (2, 3, 4, 8 and 16).
(5) It is better to upscale in binary, followed by scaleToGrayN()
than to do scaleToGrayN() followed by an upscale using either
LI or oversampling.
(6) It may be better to downscale in binary, followed by
scaleToGrayN() than to first use scaleToGrayN() followed by
downscaling. For downscaling between 8x and 16x, this is
a reasonable option.
(7) For reductions greater than 16x, it's reasonable to use
scaleToGray16() followed by further grayscale downscaling.
Definition at line 204 of file scale2.c.
Referenced by pixaDisplayTiledAndScaled().
| PIX* pixScaleToGrayMipmap |
( |
PIX * |
pixs, |
|
|
l_float32 |
scalefactor |
|
) |
| |
pixScaleToGrayMipmap()
- Parameters
-
| [in] | pixs | 1 bpp |
| [in] | scalefactor | reduction: must be > 0.0 and < 1.0 |
- Returns
- pixd 8 bpp, scaled down by scalefactor in each direction, or NULL on error.
Notes:
This function is here mainly for pedagogical reasons.
Mip-mapping is widely used in graphics for texture mapping, because
the texture changes smoothly with scale. This is accomplished by
constructing a multiresolution pyramid and, for each pixel,
doing a linear interpolation between corresponding pixels in
the two planes of the pyramid that bracket the desired resolution.
The computation is very efficient, and is implemented in hardware
in high-end graphics cards.
We can use mip-mapping for scale-to-gray by using two scale-to-gray
reduced images (we don't need the entire pyramid) selected from
the set {2x, 4x, ... 16x}, and interpolating. However, we get
severe aliasing, probably because we are subsampling from the
higher resolution image. The method is very fast, but the result
is very poor. In fact, the results don't look any better than
either subsampling off the higher-res grayscale image or oversampling
on the lower-res image. Consequently, this method should NOT be used
for generating reduced images, scale-to-gray or otherwise.
Definition at line 722 of file scale2.c.
| PIX* pixScaleWithAlpha |
( |
PIX * |
pixs, |
|
|
l_float32 |
scalex, |
|
|
l_float32 |
scaley, |
|
|
PIX * |
pixg, |
|
|
l_float32 |
fract |
|
) |
| |
pixScaleWithAlpha()
- Parameters
-
| [in] | pixs | 32 bpp rgb or cmapped |
| [in] | scalex,scaley | must be > 0.0 |
| [in] | pixg | [optional] 8 bpp, can be null |
| [in] | fract | between 0.0 and 1.0, with 0.0 fully transparent and 1.0 fully opaque |
- Returns
- pixd 32 bpp rgba, or NULL on error
Notes:
(1) The alpha channel is transformed separately from pixs,
and aligns with it, being fully transparent outside the
boundary of the transformed pixs. For pixels that are fully
transparent, a blending function like pixBlendWithGrayMask()
will give zero weight to corresponding pixels in pixs.
(2) Scaling is done with area mapping or linear interpolation,
depending on the scale factors. Default sharpening is done.
(3) If pixg is NULL, it is generated as an alpha layer that is
partially opaque, using fract. Otherwise, it is cropped
to pixs if required, and fract is ignored. The alpha
channel in pixs is never used.
(4) Colormaps are removed to 32 bpp.
(5) The default setting for the border values in the alpha channel
is 0 (transparent) for the outermost ring of pixels and
(0.5 * fract * 255) for the second ring. When blended over
a second image, this
(a) shrinks the visible image to make a clean overlap edge
with an image below, and
(b) softens the edges by weakening the aliasing there.
Use l_setAlphaMaskBorder() to change these values.
(6) A subtle use of gamma correction is to remove gamma correction
before scaling and restore it afterwards. This is done
by sandwiching this function between a gamma/inverse-gamma
photometric transform:
pixt = pixGammaTRCWithAlpha(NULL, pixs, 1.0 / gamma, 0, 255);
pixd = pixScaleWithAlpha(pixt, scalex, scaley, NULL, fract);
pixGammaTRCWithAlpha(pixd, pixd, gamma, 0, 255);
pixDestroy(&pixt);
This has the side-effect of producing artifacts in the very
dark regions.
Definition at line 1436 of file scale2.c.
References pixGetDimensions().
| static void scaleToGray3Low |
( |
l_uint32 * |
datad, |
|
|
l_int32 |
wd, |
|
|
l_int32 |
hd, |
|
|
l_int32 |
wpld, |
|
|
l_uint32 * |
datas, |
|
|
l_int32 |
wpls, |
|
|
l_uint32 * |
sumtab, |
|
|
l_uint8 * |
valtab |
|
) |
| |
|
static |
scaleToGray3Low()
- Parameters
-
| [in] | datad | dest data |
| [in] | wd,hd | dest width, height |
| [in] | wpld | dest words/line |
| [in] | datas | src data |
| [in] | wpls | src words/line |
| [in] | sumtab | made from makeSumTabSG3() |
| [in] | valtab | made from makeValTabSG3() |
- Returns
- void (the function is declared static void; it does not report errors)
Notes:
Each set of 8 3x3 bit-blocks in the source image, which
consist of 72 pixels arranged 24 pixels wide by 3 scanlines,
is converted to a row of 8 8-bit pixels in the dest image.
These 72 pixels of the input image are runs of 24 pixels
in three adjacent scanlines. Each run of 24 pixels is
stored in the 24 LSbits of a 32-bit word. We use 2 LUTs.
The first, sumtab, takes 6 of these bits and stores the
sum, taken 3 bits at a time, in two bytes. (See
makeSumTabSG3). This is done for each of the 3 scanlines,
and the results are added. We now have the sum of ON pixels
in the first two 3x3 blocks in two bytes. The valtab LUT
then converts these values (which go from 0 to 9) to
grayscale values between 255 and 0. (See makeValTabSG3).
This process is repeated for each of the other 3 sets of
6x3 input pixels, giving 8 output pixels in total.
Note: because the input image is processed in groups of
24 x 3 pixels, the process clips the input height to
(h - h % 3) and the input width to (w - w % 24).
Definition at line 1679 of file scale2.c.
References GET_DATA_BYTE, and SET_DATA_BYTE.
| static void scaleToGray6Low |
( |
l_uint32 * |
datad, |
|
|
l_int32 |
wd, |
|
|
l_int32 |
hd, |
|
|
l_int32 |
wpld, |
|
|
l_uint32 * |
datas, |
|
|
l_int32 |
wpls, |
|
|
l_int32 * |
tab8, |
|
|
l_uint8 * |
valtab |
|
) |
| |
|
static |
scaleToGray6Low()
- Parameters
-
| [in] | datad | dest data |
| [in] | wd,hd | dest width, height |
| [in] | wpld | dest words/line |
| [in] | datas | src data |
| [in] | wpls | src words/line |
| [in] | tab8 | made from makePixelSumTab8() |
| [in] | valtab | made from makeValTabSG6() |
- Returns
- void (the function is declared static void; it does not report errors)
Notes:
Each set of 4 6x6 bit-blocks in the source image, which
consist of 144 pixels arranged 24 pixels wide by 6 scanlines,
is converted to a row of 4 8-bit pixels in the dest image.
These 144 pixels of the input image are runs of 24 pixels
in six adjacent scanlines. Each run of 24 pixels is
stored in the 24 LSbits of a 32-bit word. We use 2 LUTs.
The first, tab8, takes 6 of these bits and stores the
sum in one byte. This is done for each of the 6 scanlines,
and the results are added.
We now have the sum of ON pixels in the first 6x6 block. The
valtab LUT then converts these values (which go from 0 to 36) to
grayscale values between 255 and 0. (See makeValTabSG6).
This process is repeated for each of the other 3 sets of
6x6 input pixels, giving 4 output pixels in total.
Note: because the input image is processed in groups of
24 x 6 pixels, the process clips the input height to
(h - h % 6) and the input width to (w - w % 24).
Definition at line 1956 of file scale2.c.
References GET_DATA_BYTE, and SET_DATA_BYTE.