/usr/include/movit/ycbcr_422interleaved_input.h is in libmovit-dev 1.3.1-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#ifndef _MOVIT_YCBCR_422INTERLEAVED_INPUT_H
#define _MOVIT_YCBCR_422INTERLEAVED_INPUT_H 1

// YCbCr422InterleavedInput is for handling 4:2:2 interleaved 8-bit Y'CbCr,
// which you can get from e.g. certain capture cards. (Most other Y'CbCr
// encodings are planar, which is handled by YCbCrInput.) Currently we only
// handle the UYVY variant, although YUY2 should be easy to support if needed.
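//
// For reference, a UYVY row is laid out byte-wise as
//
//   Cb0 Y0 Cr0 Y1  Cb2 Y2 Cr2 Y3  Cb4 Y4 Cr4 Y5  ...
//
// i.e., each four-byte group carries two luma samples and one chroma pair
// shared between them.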
//
// Horizontal chroma placement is freely choosable as with YCbCrInput,
// but BT.601 (which at least DeckLink claims to conform to, under the
// name CCIR 601) seems to specify chroma positioning to the far left
// (that is, 0.0); BT.601 Annex 1 (page 7) says “C_R and C_B samples co-sited
// with odd (1st, 3rd, 5th, etc.) Y samples in each line”, and I assume they do
// not start counting from 0 when they use the “1st” moniker.
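//
// A hedged sketch of a YCbCrFormat matching that positioning (the field names
// come from ycbcr.h; the concrete values are assumptions for a typical 8-bit
// limited-range BT.601 signal, not something this header mandates):
//
//   movit::YCbCrFormat format;
//   format.luma_coefficients = movit::YCBCR_REC_601;
//   format.full_range = false;
//   format.chroma_subsampling_x = 2;   // 4:2:2
//   format.chroma_subsampling_y = 1;
//   format.cb_x_position = 0.0f;       // chroma co-sited with the leftmost luma sample
//   format.cb_y_position = 0.5f;
//   format.cr_x_position = 0.0f;
//   format.cr_y_position = 0.5f;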
//
// Interpolation is bilinear as in YCbCrInput (done by the GPU's normal
// scaling, except for the Y channel which of course needs some fiddling),
// and is done in non-linear light (since that's what everything specifies,
// although Rec. 2020 lets you choose between the two). A higher-quality
// choice would be to use a single pass of ResampleEffect to scale the
// chroma, but for now we are consistent between the two.
//
// There is a disparity between the interleaving and the way OpenGL typically
// expects to sample. In lieu of accessible hardware support (a lot of hardware
// supports native interleaved 4:2:2 sampling, but OpenGL drivers seem to
// rarely support it), we simply upload the same data twice; once as a
// full-width RG texture (from which we sample luma) and once as a half-width
// RGBA texture (from which we sample chroma). We throw away half of the color
// channels each time, so bandwidth is wasted, but it makes for a very
// uncomplicated shader.
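//
// As a concrete example (hedged; it just restates the scheme above with
// numbers): a 1280x720 UYVY frame, 2560 bytes per row, is uploaded both as a
// 1280x720 RG texture for luma and as a 640x720 RGBA texture for chroma.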
//
// Note that if you can shuffle your data around very cheaply on the CPU
// (say, while you're decoding it out of some other buffer anyway),
// regular YCbCrInput with YCBCR_INPUT_SPLIT_Y_AND_CBCR will probably be
// more efficient, as it doesn't need this bandwidth waste.
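//
// A minimal usage sketch for this input (hedged; <chain>, <uyvy_data>,
// <width>, <height> and the ImageFormat values are hypothetical names and
// assumptions, not part of this header; <ycbcr_format> is as sketched above):
//
//   movit::ImageFormat image_format;
//   image_format.color_space = movit::COLORSPACE_REC_601_625;
//   image_format.gamma_curve = movit::GAMMA_REC_601;
//
//   movit::YCbCr422InterleavedInput *input = new movit::YCbCr422InterleavedInput(
//       image_format, ycbcr_format, width, height);
//   chain.add_input(input);
//   input->set_pixel_data(uyvy_data);  // one frame of interleaved UYVY bytes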

#include <epoxy/gl.h>
#include <assert.h>
#include <string>

#include "effect.h"
#include "effect_chain.h"
#include "image_format.h"
#include "input.h"
#include "ycbcr.h"

namespace movit {

class ResourcePool;

class YCbCr422InterleavedInput : public Input {
public:
	// <ycbcr_format> must be consistent with 4:2:2 sampling; specifically:
	//
	//   * chroma_subsampling_x must be 2.
	//   * chroma_subsampling_y must be 1.
	//
	// <width> must obviously be an even number. It is the true width of the image
	// in pixels, i.e., the number of horizontal luma samples.
	YCbCr422InterleavedInput(const ImageFormat &image_format,
	                         const YCbCrFormat &ycbcr_format,
	                         unsigned width, unsigned height);
	~YCbCr422InterleavedInput();

	virtual std::string effect_type_id() const { return "YCbCr422InterleavedInput"; }

	virtual bool can_output_linear_gamma() const { return false; }
	virtual AlphaHandling alpha_handling() const { return OUTPUT_BLANK_ALPHA; }

	std::string output_fragment_shader();

	// Uploads the texture if it has changed since last time.
	void set_gl_state(GLuint glsl_program_num, const std::string& prefix, unsigned *sampler_num);

	unsigned get_width() const { return width; }
	unsigned get_height() const { return height; }
	Colorspace get_color_space() const { return image_format.color_space; }
	GammaCurve get_gamma_curve() const { return image_format.gamma_curve; }
	virtual bool can_supply_mipmaps() const { return false; }

	// Tells the input where to fetch the actual pixel data. Note that if you change
	// this data, you must either call set_pixel_data() again (using the same pointer
	// is fine), or invalidate_pixel_data(). Otherwise, the texture won't be re-uploaded
	// on subsequent frames.
	//
	// The data can either be a regular pointer (if pbo==0), or a byte offset
	// into a PBO. The latter will allow you to start uploading the texture data
	// asynchronously to the GPU, if you have any CPU-intensive work between the
	// call to set_pixel_data() and the actual rendering. Also, since we upload
	// the data twice, using a PBO can save texture upload bandwidth. In either case,
	// the pointer (and PBO, if set) has to be valid at the time of the render call.
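	//
	// A hedged usage sketch (frame_ptr, offset and uyvy_pbo are hypothetical
	// names, not part of this API):
	//
	//   input->set_pixel_data(frame_ptr);                               // plain pointer, no PBO
	//   input->set_pixel_data((const unsigned char *)offset, uyvy_pbo); // byte offset into a PBO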
	void set_pixel_data(const unsigned char *pixel_data, GLuint pbo = 0)
	{
		this->pixel_data = pixel_data;
		this->pbo = pbo;
		invalidate_pixel_data();
	}

	void invalidate_pixel_data();
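
	// The pitch is assumed here to be given in luma samples per row (mirroring
	// YCbCrInput's pitch convention); e.g., for a UYVY buffer whose rows are
	// stride_bytes apart (a hypothetical name), one would call
	//
	//   input->set_pitch(stride_bytes / 2);  // UYVY has two bytes per luma sample
	//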
	void set_pitch(unsigned pitch) {
		assert(pitch % ycbcr_format.chroma_subsampling_x == 0);
		pitches[CHANNEL_LUMA] = pitch;
		pitches[CHANNEL_CHROMA] = pitch / ycbcr_format.chroma_subsampling_x;
		invalidate_pixel_data();
	}

	virtual void inform_added(EffectChain *chain)
	{
		resource_pool = chain->get_resource_pool();
	}

	bool set_int(const std::string& key, int value);

private:
	ImageFormat image_format;
	YCbCrFormat ycbcr_format;
	GLuint pbo;

	// Luma texture is 0, chroma texture is 1.
	enum Channel {
		CHANNEL_LUMA,
		CHANNEL_CHROMA
	};
	GLuint texture_num[2];
	GLuint widths[2];
	unsigned pitches[2];

	unsigned width, height;
	const unsigned char *pixel_data;
	ResourcePool *resource_pool;

	GLint uniform_tex_y, uniform_tex_cbcr;
};

} // namespace movit

#endif // !defined(_MOVIT_YCBCR_422INTERLEAVED_INPUT_H)