This commit is contained in:
Ivan
2022-04-05 11:42:28 +03:00
commit 6dc0eb0fcf
5565 changed files with 1200500 additions and 0 deletions

View File

@@ -0,0 +1,362 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/debayer.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#ifdef HAVE_DC1394
# include <dc1394/conversions.h>
const bool have_dc1394 = true;
#else
const bool have_dc1394 = false;
#endif
namespace pangolin
{
// Describe the stream produced by debayering 'stream_in' with 'method'.
// The downsampling methods halve both dimensions; BAYER_METHOD_NONE keeps the
// input pixel format, otherwise the output is grey or RGB at the input's bit
// depth. 'start_offset' becomes the stream's byte offset within the frame.
pangolin::StreamInfo BayerOutputFormat( const StreamInfo& stream_in, bayer_method_t method, size_t start_offset)
{
    const bool half_size = (method == BAYER_METHOD_DOWNSAMPLE) || (method == BAYER_METHOD_DOWNSAMPLE_MONO);
    const size_t w = half_size ? stream_in.Width()  / 2 : stream_in.Width();
    const size_t h = half_size ? stream_in.Height() / 2 : stream_in.Height();

    pangolin::PixelFormat fmt;
    if(method == BAYER_METHOD_NONE) {
        fmt = stream_in.PixFormat();
    }else{
        const bool wide = (stream_in.PixFormat().bpp == 16);
        const bool mono = (method == BAYER_METHOD_DOWNSAMPLE_MONO);
        const char* fmt_name = wide ? (mono ? "GRAY16LE" : "RGB48")
                                    : (mono ? "GRAY8"    : "RGB24");
        fmt = pangolin::PixelFormatFromString(fmt_name);
    }
    fmt.channel_bit_depth = stream_in.PixFormat().channel_bit_depth;

    return pangolin::StreamInfo( fmt, w, h, w*fmt.bpp / 8, (unsigned char*)0 + start_offset );
}
// Wrap 'src_', debayering each of its streams with the corresponding entry of
// 'bayer_method' (padded with BAYER_METHOD_NONE) using bayer pattern 'tile'.
// Takes ownership of the source interface. Throws VideoException on null src.
DebayerVideo::DebayerVideo(std::unique_ptr<VideoInterface> &src_, const std::vector<bayer_method_t>& bayer_method, color_filter_t tile)
    : src(std::move(src_)), size_bytes(0), methods(bayer_method), tile(tile)
{
    if(!src.get()) {
        throw VideoException("DebayerVideo: VideoInterface in must not be null");
    }
    videoin.push_back(src.get());

    // Ensure there is one method per input stream.
    while(methods.size() < src->Streams().size()) {
        methods.push_back(BAYER_METHOD_NONE);
    }

    for(size_t s=0; s < src->Streams().size(); ++s) {
        // dc1394 debayer methods (enum values ordered below BAYER_METHOD_NONE)
        // require libdc1394 and a non-pitched image; otherwise fall back to
        // plain downsampling.
        // BUG FIX: the pitch test previously inspected stream 0 for every s.
        if( (methods[s] < BAYER_METHOD_NONE) && (!have_dc1394 || src->Streams()[s].IsPitched()) ) {
            pango_print_warn("debayer: Switching to simple downsampling method because No DC1394 or image is pitched.\n");
            methods[s] = BAYER_METHOD_DOWNSAMPLE;
        }

        // Output streams are packed back-to-back; size_bytes doubles as the
        // running byte offset of each stream within the output frame.
        const StreamInfo& stin = src->Streams()[s];
        streams.push_back(BayerOutputFormat(stin, methods[s], size_bytes));
        size_bytes += streams.back().SizeBytes();
    }

    // Scratch buffer holding the raw (un-debayered) frame from the source.
    buffer = std::unique_ptr<unsigned char[]>(new unsigned char[src->SizeBytes()]);
}
// Destructor: members (unique_ptr src/buffer, vectors) clean up via RAII.
DebayerVideo::~DebayerVideo()
{
}
//! Implement VideoInput::Start() - forwards to the wrapped source.
void DebayerVideo::Start()
{
    videoin[0]->Start();
}
//! Implement VideoInput::Stop() - forwards to the wrapped source.
void DebayerVideo::Stop()
{
    videoin[0]->Stop();
}
//! Implement VideoInput::SizeBytes() - total bytes of one debayered frame
//! (sum of all output stream sizes, computed in the constructor).
size_t DebayerVideo::SizeBytes() const
{
    return size_bytes;
}
//! Implement VideoInput::Streams() - describes the debayered output streams.
const std::vector<StreamInfo>& DebayerVideo::Streams() const
{
    return streams;
}
unsigned int DebayerVideo::AvailableFrames() const
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin[0]);
if(!vpi)
{
pango_print_warn("Debayer: child interface is not buffer aware.");
return 0;
}
else
{
return vpi->AvailableFrames();
}
}
bool DebayerVideo::DropNFrames(uint32_t n)
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin[0]);
if(!vpi)
{
pango_print_warn("Debayer: child interface is not buffer aware.");
return false;
}
else
{
return vpi->DropNFrames(n);
}
}
// Average each 2x2 input cell into a single grey output pixel. Arithmetic is
// done in the wider type Tup to avoid overflow, then clamped to Tout's range.
template<typename Tup, typename Tout, typename Tin>
void DownsampleToMono(Image<Tout>& out, const Image<Tin>& in)
{
    for(int y=0; y< (int)out.h; ++y) {
        const Tin* top = in.RowPtr(2*y);
        const Tin* bot = in.RowPtr(2*y+1);
        Tout* dst = out.RowPtr(y);
        for(size_t x=0; x<out.w; ++x, top += 2, bot += 2) {
            const Tup avg = ((Tup)top[0] + (Tup)top[1] + (Tup)bot[0] + (Tup)bot[1]) / 4;
            const Tup lo = static_cast<Tup>(0);
            const Tup hi = static_cast<Tup>(std::numeric_limits<Tout>::max());
            dst[x] = (Tout)std::min(std::max(lo, avg), hi);
        }
    }
}
// Half-resolution debayer: each 2x2 bayer cell becomes one RGB output pixel.
// The red and blue samples are taken directly; the two green samples are
// averaged. The cell positions of R/G/G/B depend on the bayer tile layout.
template<typename Tout, typename Tin>
void DownsampleDebayer(Image<Tout>& out, const Image<Tin>& in, color_filter_t tile)
{
    // Indices into the 2x2 cell, row-major: 0=(0,0) 1=(0,1) 2=(1,0) 3=(1,1).
    int ri, g1i, g2i, bi;
    switch(tile) {
    case DC1394_COLOR_FILTER_RGGB: ri = 0; g1i = 1; g2i = 2; bi = 3; break;
    case DC1394_COLOR_FILTER_GBRG: ri = 2; g1i = 0; g2i = 3; bi = 1; break;
    case DC1394_COLOR_FILTER_GRBG: ri = 1; g1i = 0; g2i = 3; bi = 2; break;
    case DC1394_COLOR_FILTER_BGGR: ri = 3; g1i = 1; g2i = 2; bi = 0; break;
    default: return; // unhandled tile: leave output untouched (as before)
    }

    for(int y=0; y< (int)out.h; ++y) {
        Tout* pixout = out.RowPtr(y);
        const Tin* irow0 = in.RowPtr(2*y);
        const Tin* irow1 = in.RowPtr(2*y+1);
        for(size_t x=0; x<out.w; ++x) {
            const Tin cell[4] = { irow0[2*x], irow0[2*x+1], irow1[2*x], irow1[2*x+1] };
            *(pixout++) = cell[ri];
            *(pixout++) = (cell[g1i] + cell[g2i]) >> 1;
            *(pixout++) = cell[bi];
        }
    }
}
// Row-by-row copy between images of identical dimensions; the destination may
// have a larger pitch (row stride) than the tightly-packed source rows.
template<typename T>
void PitchedImageCopy( Image<T>& img_out, const Image<T>& img_in ) {
    const size_t row_bytes = sizeof(T) * img_in.w;
    const bool compatible =
        (img_out.w == img_in.w) && (img_out.h == img_in.h) && (row_bytes <= img_out.pitch);
    if(!compatible) {
        throw std::runtime_error("PitchedImageCopy: Incompatible image sizes");
    }
    for(size_t y=0; y < img_out.h; ++y) {
        std::memcpy(img_out.RowPtr((int)y), img_in.RowPtr((int)y), row_bytes);
    }
}
// Debayer one image with the requested method.
//  - NONE:            straight copy (input reinterpreted as the output type).
//  - DOWNSAMPLE_MONO: 2x2 average to grey; int accumulator for 8-bit,
//                     double for wider types.
//  - DOWNSAMPLE:      2x2 half-resolution RGB.
//  - anything else:   full-resolution dc1394 debayering - only available when
//                     compiled with HAVE_DC1394; otherwise silently a no-op
//                     (the constructor already downgrades methods in that case).
template<typename Tout, typename Tin>
void ProcessImage(Image<Tout>& img_out, const Image<Tin>& img_in, bayer_method_t method, color_filter_t tile)
{
    if(method == BAYER_METHOD_NONE) {
        PitchedImageCopy(img_out, img_in.template UnsafeReinterpret<Tout>() );
    }else if(method == BAYER_METHOD_DOWNSAMPLE_MONO) {
        if( sizeof(Tout) == 1) {
            DownsampleToMono<int,Tout, Tin>(img_out, img_in);
        }else{
            DownsampleToMono<double,Tout, Tin>(img_out, img_in);
        }
    }else if(method == BAYER_METHOD_DOWNSAMPLE) {
        DownsampleDebayer(img_out, img_in, tile);
    }else{
#ifdef HAVE_DC1394
        // Pixel size selects the 8-bit or 16-bit dc1394 conversion routine.
        if(sizeof(Tout) == 1) {
            dc1394_bayer_decoding_8bit(
                (uint8_t*)img_in.ptr, (uint8_t*)img_out.ptr, img_in.w, img_in.h,
                (dc1394color_filter_t)tile, (dc1394bayer_method_t)method
            );
        }else if(sizeof(Tout) == 2) {
            dc1394_bayer_decoding_16bit(
                (uint16_t*)img_in.ptr, (uint16_t*)img_out.ptr, img_in.w, img_in.h,
                (dc1394color_filter_t)tile, (dc1394bayer_method_t)method,
                16
            );
        }
#endif
    }
}
// Debayer every input stream from raw frame 'in' into output frame 'out',
// dispatching per-stream on its configured method and input bit depth.
void DebayerVideo::ProcessStreams(unsigned char* out, const unsigned char *in)
{
    for(size_t s=0; s<streams.size(); ++s) {
        const StreamInfo& stin = videoin[0]->Streams()[s];
        Image<unsigned char> img_in = stin.StreamImage(in);
        Image<unsigned char> img_out = Streams()[s].StreamImage(out);
        if(methods[s] == BAYER_METHOD_NONE) {
            // Pass-through: row-wise copy, clipped to the narrower image.
            const size_t num_bytes = std::min(img_in.w, img_out.w) * stin.PixFormat().bpp / 8;
            for(size_t y=0; y < img_out.h; ++y) {
                std::memcpy(img_out.RowPtr((int)y), img_in.RowPtr((int)y), num_bytes);
            }
        }else if(stin.PixFormat().bpp == 8) {
            ProcessImage(img_out, img_in, methods[s], tile);
        }else if(stin.PixFormat().bpp == 16){
            // Reinterpret byte images as 16-bit so ProcessImage picks the
            // wide-pixel code paths.
            Image<uint16_t> img_in16 = img_in.UnsafeReinterpret<uint16_t>();
            Image<uint16_t> img_out16 = img_out.UnsafeReinterpret<uint16_t>();
            ProcessImage(img_out16, img_in16, methods[s], tile);
        }else {
            throw std::runtime_error("debayer: unhandled format combination: " + stin.PixFormat().format );
        }
    }
}
//! Implement VideoInput::GrabNext()
//! Grabs a raw frame from the source into the scratch buffer and, on success,
//! debayers it into 'image'.
bool DebayerVideo::GrabNext( unsigned char* image, bool wait )
{
    const bool grabbed = videoin[0]->GrabNext(buffer.get(), wait);
    if(grabbed) {
        ProcessStreams(image, buffer.get());
    }
    return grabbed;
}
//! Implement VideoInput::GrabNewest()
//! Same as GrabNext but asks the source for its most recent frame.
bool DebayerVideo::GrabNewest( unsigned char* image, bool wait )
{
    const bool grabbed = videoin[0]->GrabNewest(buffer.get(), wait);
    if(grabbed) {
        ProcessStreams(image, buffer.get());
    }
    return grabbed;
}
// Expose the wrapped source interface(s) - single element: the debayered src.
std::vector<VideoInterface*>& DebayerVideo::InputStreams()
{
    return videoin;
}
// Parse a bayer tile name ("rggb"/"RGGB", etc.) into its enum value.
// Unknown names log an error and default to RGGB.
color_filter_t DebayerVideo::ColorFilterFromString(std::string str)
{
    if(str == "rggb" || str == "RGGB") return DC1394_COLOR_FILTER_RGGB;
    if(str == "gbrg" || str == "GBRG") return DC1394_COLOR_FILTER_GBRG;
    if(str == "grbg" || str == "GRBG") return DC1394_COLOR_FILTER_GRBG;
    if(str == "bggr" || str == "BGGR") return DC1394_COLOR_FILTER_BGGR;

    pango_print_error("Debayer error, %s is not a valid tile type using RGGB\n", str.c_str());
    return DC1394_COLOR_FILTER_RGGB;
}
// Parse a debayer method name into its enum value. Unknown names log an
// error and default to downsampling.
bayer_method_t DebayerVideo::BayerMethodFromString(std::string str)
{
    if(str == "nearest")    return BAYER_METHOD_NEAREST;
    if(str == "simple")     return BAYER_METHOD_SIMPLE;
    if(str == "bilinear")   return BAYER_METHOD_BILINEAR;
    if(str == "hqlinear")   return BAYER_METHOD_HQLINEAR;
    if(str == "downsample") return BAYER_METHOD_DOWNSAMPLE;
    if(str == "edgesense")  return BAYER_METHOD_EDGESENSE;
    if(str == "vng")        return BAYER_METHOD_VNG;
    if(str == "ahd")        return BAYER_METHOD_AHD;
    if(str == "mono")       return BAYER_METHOD_DOWNSAMPLE_MONO;
    if(str == "none")       return BAYER_METHOD_NONE;

    pango_print_error("Debayer error, %s is not a valid debayer method using downsample\n", str.c_str());
    return BAYER_METHOD_DOWNSAMPLE;
}
// Register the "debayer" video URI scheme. URI options:
//   tile:    bayer layout (default "rggb")
//   method:  default debayer method for all streams (default "none")
//   methodN: per-stream override, 1-based (e.g. method1=downsample)
PANGOLIN_REGISTER_FACTORY(DebayerVideo)
{
    struct DebayerVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            const std::string tile_string = uri.Get<std::string>("tile","rggb");
            const std::string method = uri.Get<std::string>("method","none");
            const color_filter_t tile = DebayerVideo::ColorFilterFromString(tile_string);
            std::vector<bayer_method_t> methods;
            for(size_t s=0; s < subvid->Streams().size(); ++s) {
                const std::string key = std::string("method") + ToString(s+1);
                std::string method_s = uri.Get<std::string>(key, method);
                methods.push_back(DebayerVideo::BayerMethodFromString(method_s));
            }
            return std::unique_ptr<VideoInterface>( new DebayerVideo(subvid, methods, tile) );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<DebayerVideoFactory>(), 10, "debayer");
}
}

View File

@@ -0,0 +1,107 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/deinterlace.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#include <dc1394/conversions.h>
namespace pangolin
{
// Wrap a single-stream source whose frames carry two interleaved stereo
// fields, exposing them as two packed GRAY8 streams of equal size.
// Takes ownership of 'videoin_'. Throws VideoException if the source does not
// have exactly one stream.
DeinterlaceVideo::DeinterlaceVideo(std::unique_ptr<VideoInterface> &videoin_)
    : videoin(std::move(videoin_)), buffer(0)
{
    if(videoin->Streams().size() != 1)
        throw VideoException("FirewireDeinterlace input must have exactly one stream");

    const StreamInfo& stmin = videoin->Streams()[0];

    // Two GRAY8 output streams laid out back-to-back in the output frame;
    // the second stream's offset is one full image after the first.
    StreamInfo stm1(PixelFormatFromString("GRAY8"), stmin.Width(), stmin.Height(), stmin.Width(), 0);
    StreamInfo stm2(PixelFormatFromString("GRAY8"), stmin.Width(), stmin.Height(), stmin.Width(), (unsigned char*)0 + stmin.Width()*stmin.Height());
    streams.push_back(stm1);
    streams.push_back(stm2);

    // Scratch buffer for the raw interlaced frame (freed in the destructor).
    buffer = new unsigned char[videoin->SizeBytes()];
    // FIX: removed leftover debug print of the stream dimensions to stdout.
}
// Release the scratch frame buffer allocated in the constructor.
DeinterlaceVideo::~DeinterlaceVideo()
{
    delete[] buffer;
}
// Output frame size equals the input frame size: the two de-interlaced GRAY8
// fields together occupy the same number of bytes as the interlaced input.
size_t DeinterlaceVideo::SizeBytes() const
{
    return videoin->SizeBytes();
}
// The two de-interlaced output streams built in the constructor.
const std::vector<StreamInfo>& DeinterlaceVideo::Streams() const
{
    return streams;
}
// Forward to the wrapped source.
void DeinterlaceVideo::Start()
{
    videoin->Start();
}
// Forward to the wrapped source.
void DeinterlaceVideo::Stop()
{
    videoin->Stop();
}
bool DeinterlaceVideo::GrabNext( unsigned char* image, bool wait )
{
if(videoin->GrabNext(buffer, wait)) {
return ( dc1394_deinterlace_stereo(buffer,image, videoin->Streams()[0].Width(), 2*videoin->Streams()[0].Height() ) == DC1394_SUCCESS );
}
return false;
}
bool DeinterlaceVideo::GrabNewest( unsigned char* image, bool wait )
{
if(videoin->GrabNewest(buffer, wait)) {
return ( dc1394_deinterlace_stereo(buffer,image, videoin->Streams()[0].Width(), 2*videoin->Streams()[0].Height() ) == DC1394_SUCCESS );
}
return false;
}
// Register the "deinterlace" video URI scheme, which wraps any other video
// URI given as the sub-url.
PANGOLIN_REGISTER_FACTORY(DeinterlaceVideo)
{
    struct DeinterlaceVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            return std::unique_ptr<VideoInterface>( new DeinterlaceVideo(subvid) );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<DeinterlaceVideoFactory>(), 10, "deinterlace");
}
}

View File

@@ -0,0 +1,656 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/depthsense.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#include <iomanip>
namespace pangolin
{
// Sentinel stored in fill_image to unblock waiting sample callbacks during
// shutdown; never dereferenced.
const size_t ROGUE_ADDR = 0x01;
// Maximum depth/color capture-time difference treated as "in sync".
const double MAX_DELTA_TIME = 20000.0; //u_s
// Process-wide singleton accessor (Meyers singleton).
DepthSenseContext& DepthSenseContext::I()
{
    static DepthSenseContext s;
    return s;
}
// Access the underlying DepthSense SDK context.
DepthSense::Context& DepthSenseContext::Context()
{
    return g_context;
}
// Track an opened device; the first device starts the SDK event-loop thread.
void DepthSenseContext::NewDeviceRunning()
{
    running_devices++;
    if(running_devices == 1) {
        StartNodes();
    }
}
// Track a closed device; when the last one closes, stop the event loop and
// reset the SDK context so it is fully destroyed.
void DepthSenseContext::DeviceClosing()
{
    running_devices--;
    if(running_devices == 0) {
        StopNodes();
        // Force destruction of current context
        g_context = DepthSense::Context();
    }
}
// Create a DepthSenseVideo for the device_num'th connected device, lazily
// initialising the SDK context on first use. Caller owns the returned object.
// Throws VideoException when no such device is connected.
DepthSenseVideo* DepthSenseContext::GetDepthSenseVideo(size_t device_num, DepthSenseSensorType s1, DepthSenseSensorType s2, ImageDim dim1, ImageDim dim2, unsigned int fps1, unsigned int fps2, const Uri& uri)
{
    if(running_devices == 0) {
        // Initialise SDK
        g_context = DepthSense::Context::create("localhost");
    }

    // Get the list of currently connected devices
    std::vector<DepthSense::Device> da = g_context.getDevices();

    if( da.size() > device_num )
    {
        return new DepthSenseVideo(da[device_num], s1, s2, dim1, dim2, fps1, fps2, uri);
    }

    throw VideoException("DepthSense device not connected.");
}
// Start with no event loop running and no open devices.
DepthSenseContext::DepthSenseContext()
    : is_running(false), running_devices(0)
{
}
// Ensure the event-loop thread is stopped and joined at process exit.
DepthSenseContext::~DepthSenseContext()
{
    StopNodes();
}
// Launch the SDK event loop on a background thread if not already running.
// NOTE(review): is_running is only set inside EventLoop itself, so a rapid
// Start/Start sequence could race and spawn twice - confirm intended usage.
void DepthSenseContext::StartNodes()
{
    if(!is_running) {
        // Launch EventLoop thread
        event_thread = std::thread(&DepthSenseContext::EventLoop, this );
    }
}
// Ask the SDK event loop to quit and join the background thread.
void DepthSenseContext::StopNodes()
{
    if(is_running && event_thread.joinable()) {
        g_context.quit();
        event_thread.join();
    }
}
// Body of the background thread: run the SDK event loop until quit() is
// called, bracketing it with node start/stop.
void DepthSenseContext::EventLoop()
{
    is_running = true;
    g_context.startNodes();
    g_context.run();
    g_context.stopNodes();
    is_running = false;
}
// Configure up to two sensors (depth and/or color) on 'device' and register
// this instance with the shared context, which starts streaming when it is
// the first running device.
DepthSenseVideo::DepthSenseVideo(DepthSense::Device device, DepthSenseSensorType s1, DepthSenseSensorType s2, ImageDim dim1, ImageDim dim2, unsigned int fps1, unsigned int fps2, const Uri& uri)
    : device(device), fill_image(0), depthmap_stream(-1), rgb_stream(-1), gotDepth(0), gotColor(0),
      enableDepth(false), enableColor(false), depthTs(0.0), colorTs(0.0), size_bytes(0)
{
    // Pre-size the per-frame "streams" JSON array (one entry per sensor).
    streams_properties = &frame_properties["streams"];
    *streams_properties = picojson::value(picojson::array_type, false);
    streams_properties->get<picojson::array>().resize(2);

    sensorConfig[0] = {s1, dim1, fps1};
    sensorConfig[1] = {s2, dim2, fps2};

    ConfigureNodes(uri);

    DepthSenseContext::I().NewDeviceRunning();
}
// Unregister nodes, then poke any sample callback blocked waiting for a fill
// request with the ROGUE_ADDR sentinel so it can exit before teardown.
DepthSenseVideo::~DepthSenseVideo()
{
    if (g_cnode.isSet()) DepthSenseContext::I().Context().unregisterNode(g_cnode);
    if (g_dnode.isSet()) DepthSenseContext::I().Context().unregisterNode(g_dnode);

    fill_image = (unsigned char*)ROGUE_ADDR;
    cond_image_requested.notify_all();

    DepthSenseContext::I().DeviceClosing();
}
// Serialise camera intrinsics to the JSON layout used in device_properties.
picojson::value Json(DepthSense::IntrinsicParameters& p)
{
    picojson::value js;
    js["model"] = "polynomial";
    js["width"] = p.width;
    js["height"] = p.height;
    js["RDF"] = "[1,0,0; 0,1,0; 0,0,1]";
    js["fx"] = p.fx;
    js["fy"] = p.fy;
    js["u0"] = p.cx;
    js["v0"] = p.cy;
    js["k1"] = p.k1;
    js["k2"] = p.k2;
    js["k3"] = p.k3;
    js["p1"] = p.p1;
    js["p2"] = p.p2;
    return js;
}
// Serialise the 3x4 extrinsic matrix [R|t] as a single matrix-literal string.
picojson::value Json(DepthSense::ExtrinsicParameters& p)
{
    picojson::value js;
    js["rows"] = "3";
    js["cols"] = "4";
    std::ostringstream oss;
    // 17 significant digits: enough to round-trip an IEEE double exactly.
    oss << std::setprecision(17);
    oss << "[" << p.r11 << "," << p.r12 << "," << p.r13 << "," << p.t1 << ";";
    oss << p.r21 << "," << p.r22 << "," << p.r23 << "," << p.t2 << ";";
    oss << p.r31 << "," << p.r32 << "," << p.r33 << "," << p.t3 << "]";
    js["data"] = oss.str();
    return js;
}
// Bind each configured sensor slot to the first matching (and still unbound)
// SDK node on the device, configure it, and record the device's intrinsic and
// extrinsic calibration into device_properties.
void DepthSenseVideo::ConfigureNodes(const Uri& uri)
{
    std::vector<DepthSense::Node> nodes = device.getNodes();

    for (int i = 0; i<2; ++i)
    {
        switch (sensorConfig[i].type)
        {
        case DepthSenseDepth:
        {
            for (int n = 0; n < (int)nodes.size(); n++)
            {
                DepthSense::Node node = nodes[n];
                // Bind only the first depth node found (g_dnode unset so far).
                if ((node.is<DepthSense::DepthNode>()) && (!g_dnode.isSet()))
                {
                    depthmap_stream = i;
                    g_dnode = node.as<DepthSense::DepthNode>();
                    ConfigureDepthNode(sensorConfig[i], uri);
                    DepthSenseContext::I().Context().registerNode(node);
                }
            }
            break;
        }
        case DepthSenseRgb:
        {
            for (int n = 0; n < (int)nodes.size(); n++)
            {
                DepthSense::Node node = nodes[n];
                // Bind only the first color node found (g_cnode unset so far).
                if ((node.is<DepthSense::ColorNode>()) && (!g_cnode.isSet()))
                {
                    rgb_stream = i;
                    g_cnode = node.as<DepthSense::ColorNode>();
                    ConfigureColorNode(sensorConfig[i], uri);
                    DepthSenseContext::I().Context().registerNode(node);
                }
            }
            break;
        }
        default:
            continue;
        }
    }

    DepthSense::StereoCameraParameters scp = device.getStereoCameraParameters();

    //Set json device properties for intrinsics and extrinsics
    picojson::value& jsintrinsics = device_properties["intrinsics"];
    if (jsintrinsics.is<picojson::null>()) {
        jsintrinsics = picojson::value(picojson::array_type, false);
        // streams was populated by the Configure*Node calls above.
        jsintrinsics.get<picojson::array>().resize(streams.size());
        if (depthmap_stream >= 0) jsintrinsics[depthmap_stream] = Json(scp.depthIntrinsics);
        if (rgb_stream >= 0) jsintrinsics[rgb_stream] = Json(scp.colorIntrinsics);
    }

    picojson::value& jsextrinsics = device_properties["extrinsics"];
    if(jsextrinsics.is<picojson::null>()){
        jsextrinsics = Json(scp.extrinsics);
    }
}
// Map an image dimension to the matching DepthSense frame-format constant,
// or FRAME_FORMAT_UNKNOWN for unsupported sizes.
inline DepthSense::FrameFormat ImageDim2FrameFormat(const ImageDim& dim)
{
    struct DimFormat { int x; int y; DepthSense::FrameFormat fmt; };
    static const DimFormat supported[] = {
        {  160, 120, DepthSense::FRAME_FORMAT_QQVGA  },
        {  176, 144, DepthSense::FRAME_FORMAT_QCIF   },
        {  240, 160, DepthSense::FRAME_FORMAT_HQVGA  },
        {  320, 240, DepthSense::FRAME_FORMAT_QVGA   },
        {  352, 288, DepthSense::FRAME_FORMAT_CIF    },
        {  480, 320, DepthSense::FRAME_FORMAT_HVGA   },
        {  640, 480, DepthSense::FRAME_FORMAT_VGA    },
        { 1280, 720, DepthSense::FRAME_FORMAT_WXGA_H },
        {  320, 120, DepthSense::FRAME_FORMAT_DS311  },
        { 1024, 768, DepthSense::FRAME_FORMAT_XGA    },
        {  800, 600, DepthSense::FRAME_FORMAT_SVGA   },
        {  636, 480, DepthSense::FRAME_FORMAT_OVVGA  },
        {  640, 240, DepthSense::FRAME_FORMAT_WHVGA  },
        {  640, 360, DepthSense::FRAME_FORMAT_NHD    },
    };

    for(const DimFormat& df : supported) {
        if(dim.x == df.x && dim.y == df.y) {
            return df.fmt;
        }
    }
    return DepthSense::FRAME_FORMAT_UNKNOWN;
}
// For every SDK property of 'node': optionally override its value from the
// URI (when present and the property is writable), then record the current
// value in device_properties under the node's type name. The four branches
// dispatch on the property's value type (int32 / float / bool / string).
void DepthSenseVideo::UpdateParameters(const DepthSense::Node& node, const Uri& uri)
{
    DepthSense::Type type = node.getType();
    picojson::value& jsnode = device_properties[type.name()];

    std::vector<DepthSense::PropertyBase> properties = type.getProperties();
    for(std::vector<DepthSense::PropertyBase>::const_iterator it = properties.begin(); it != properties.end(); ++it) {
        const DepthSense::PropertyBase& prop = *it;

        if (prop.is<DepthSense::Property<int32_t> >()) {
            DepthSense::Property<int32_t> tprop = prop.as<DepthSense::Property<int32_t> >();
            if (uri.Contains(prop.name())) {
                if (!prop.isReadOnly()) {
                    tprop.setValue(node, uri.Get<int32_t>(prop.name(), tprop.getValue(node)));
                } else {
                    pango_print_warn("DepthSense property '%s' is read-only\n", prop.name().c_str() );
                }
            }
            jsnode[prop.name()] = tprop.getValue(node);
        } else if (prop.is<DepthSense::Property<float> >()) {
            DepthSense::Property<float> tprop = prop.as<DepthSense::Property<float> >();
            if (uri.Contains(prop.name())) {
                if (!prop.isReadOnly()) {
                    tprop.setValue(node, uri.Get<float>(prop.name(), tprop.getValue(node)));
                } else {
                    pango_print_warn("DepthSense property '%s' is read-only\n", prop.name().c_str() );
                }
            }
            jsnode[prop.name()] = tprop.getValue(node);
        } else if (prop.is<DepthSense::Property<bool> >()) {
            DepthSense::Property<bool> tprop = prop.as<DepthSense::Property<bool> >();
            if (uri.Contains(prop.name())) {
                if (!prop.isReadOnly()) {
                    tprop.setValue(node, uri.Get<bool>(prop.name(), tprop.getValue(node)));
                } else {
                    pango_print_warn("DepthSense property '%s' is read-only\n", prop.name().c_str() );
                }
            }
            jsnode[prop.name()] = tprop.getValue(node);
        } else if (prop.is<DepthSense::Property<std::string> >()){
            DepthSense::Property<std::string> tprop = prop.as<DepthSense::Property<std::string> >();
            if (uri.Contains(prop.name())) {
                if (!prop.isReadOnly()) {
                    tprop.setValue(node, uri.Get<std::string>(prop.name(), tprop.getValue(node)).c_str() );
                } else {
                    pango_print_warn("DepthSense property '%s' is read-only\n", prop.name().c_str() );
                }
            }
            jsnode[prop.name()] = tprop.getValue(node);
        }
    }
}
// Configure the depth node (format, fps, close mode), hook the sample
// callback, and append its GRAY16LE stream to 'streams'.
void DepthSenseVideo::ConfigureDepthNode(const SensorConfig& sensorConfig, const Uri& uri)
{
    g_dnode.newSampleReceivedEvent().connect(this, &DepthSenseVideo::onNewDepthSample);

    DepthSense::DepthNode::Configuration config = g_dnode.getConfiguration();

    config.frameFormat = ImageDim2FrameFormat(sensorConfig.dim);
    config.framerate = sensorConfig.fps;
    config.mode = DepthSense::DepthNode::CAMERA_MODE_CLOSE_MODE;
    config.saturation = true;

    try {
        DepthSenseContext::I().Context().requestControl(g_dnode, 0);
        g_dnode.setConfiguration(config);
        g_dnode.setEnableDepthMap(true);
    } catch (DepthSense::Exception& e) {
        throw pangolin::VideoException("DepthSense exception whilst configuring node", e.what());
    }

    //Set pangolin stream for this channel
    const int w = sensorConfig.dim.x;
    const int h = sensorConfig.dim.y;

    const PixelFormat pfmt = PixelFormatFromString("GRAY16LE");
    const StreamInfo stream_info(pfmt, w, h, (w*pfmt.bpp) / 8, (unsigned char*)0);
    streams.push_back(stream_info);
    size_bytes += stream_info.SizeBytes();

    enableDepth = true;

    // NOTE(review): called outside the try block here, but inside it in
    // ConfigureColorNode - confirm which is intended.
    UpdateParameters(g_dnode, uri);
}
// Configure the color node (format, MJPEG compression, 50Hz power line, fps),
// hook the sample callback, and append its BGR24 stream after any existing
// streams (offset = current size_bytes).
void DepthSenseVideo::ConfigureColorNode(const SensorConfig& sensorConfig, const Uri& uri)
{
    // connect new color sample handler
    g_cnode.newSampleReceivedEvent().connect(this, &DepthSenseVideo::onNewColorSample);

    DepthSense::ColorNode::Configuration config = g_cnode.getConfiguration();
    config.frameFormat = ImageDim2FrameFormat(sensorConfig.dim);
    config.compression = DepthSense::COMPRESSION_TYPE_MJPEG;
    config.powerLineFrequency = DepthSense::POWER_LINE_FREQUENCY_50HZ;
    config.framerate = sensorConfig.fps;

    try {
        DepthSenseContext::I().Context().requestControl(g_cnode,0);
        g_cnode.setConfiguration(config);
        g_cnode.setEnableColorMap(true);
        UpdateParameters(g_cnode, uri);
    } catch (DepthSense::Exception& e) {
        throw pangolin::VideoException("DepthSense exception whilst configuring node", e.what());
    }

    //Set pangolin stream for this channel
    const int w = sensorConfig.dim.x;
    const int h = sensorConfig.dim.y;

    const PixelFormat pfmt = PixelFormatFromString("BGR24");
    const StreamInfo stream_info(pfmt, w, h, (w*pfmt.bpp) / 8, (unsigned char*)0 + size_bytes);
    streams.push_back(stream_info);
    size_bytes += stream_info.SizeBytes();

    enableColor = true;
}
// SDK callback (event-loop thread) for a new color frame. Blocks until
// GrabNext requests a fill, copies the color map to its stream offset within
// fill_image, then signals GrabNext. If the paired depth frame is too far
// away in time, the stale depth is discarded and we keep waiting for a fresh
// pair (no notify in that path).
void DepthSenseVideo::onNewColorSample(DepthSense::ColorNode node, DepthSense::ColorNode::NewSampleReceivedData data)
{
    {
        std::unique_lock<std::mutex> lock(update_mutex);

        // Wait for fill request
        while (!fill_image) {
            cond_image_requested.wait(lock);
        }

        // Update per-frame parameters
        //printf("Color delta: %.1f\n", fabs(colorTs - data.timeOfCapture));
        colorTs = data.timeOfCapture;
        picojson::value& jsstream = frame_properties["streams"][rgb_stream];
        jsstream["time_us"] = data.timeOfCapture;

        // ROGUE_ADDR means we are shutting down: skip the copy but still fall
        // through to the sync bookkeeping below.
        if (fill_image != (unsigned char*)ROGUE_ADDR) {
            // Fill with data
            unsigned char* imagePtr = fill_image;
            bool copied = false;
            // Walk the sensor slots to find this stream's byte offset.
            for (int i = 0; !copied && i < 2; ++i)
            {
                switch (sensorConfig[i].type)
                {
                case DepthSenseDepth:
                {
                    imagePtr += streams[i].SizeBytes();
                    break;
                }
                case DepthSenseRgb:
                {
                    // Leave as BGR
                    std::memcpy(imagePtr, data.colorMap, streams[i].SizeBytes());
                    copied = true;
                    break;
                }
                default:
                    continue;
                }
            }
            gotColor++;
        }

        //printf("Got color at: %.1f\n", colorTs);

        if(gotDepth)
        {
            double delta = fabs(GetDeltaTime());
            if(delta > MAX_DELTA_TIME)
            {
                //printf("**** Waiting for another depth, delta: %.1f ****\n", delta);
                gotDepth = 0;
                return;
            }
        }
    }

    cond_image_filled.notify_one();
}
// SDK callback (event-loop thread) for a new depth frame. Mirror image of
// onNewColorSample: waits for a fill request, copies the depth map to its
// stream offset, and discards a too-old color frame instead of notifying.
void DepthSenseVideo::onNewDepthSample(DepthSense::DepthNode node, DepthSense::DepthNode::NewSampleReceivedData data)
{
    {
        std::unique_lock<std::mutex> lock(update_mutex);

        // Wait for fill request
        while(!fill_image) {
            cond_image_requested.wait(lock);
        }

        // Update per-frame parameters
        //printf("Depth delta: %.1f\n", fabs(depthTs - data.timeOfCapture));
        depthTs = data.timeOfCapture;
        picojson::value& jsstream = frame_properties["streams"][depthmap_stream];
        jsstream["time_us"] = depthTs;

        // ROGUE_ADDR means shutdown in progress: skip the copy.
        if(fill_image != (unsigned char*)ROGUE_ADDR) {
            // Fill with data
            unsigned char* imagePtr = fill_image;
            bool copied = false;
            // Walk the sensor slots to find this stream's byte offset.
            for (int i = 0; i < 2; ++i)
            {
                switch (sensorConfig[i].type)
                {
                case DepthSenseDepth:
                {
                    memcpy(imagePtr, data.depthMap, streams[i].SizeBytes());
                    copied = true;
                    break;
                }
                case DepthSenseRgb:
                {
                    imagePtr += streams[i].SizeBytes();
                    break;
                }
                default:
                    continue;
                }
                if(copied)
                {
                    break;
                }
            }
            gotDepth++;
        }

        //printf("Got depth at: %.1f\n", depthTs);

        if(gotColor)
        {
            double delta = fabs(GetDeltaTime());
            if(delta > MAX_DELTA_TIME)
            {
                //printf("**** Waiting for another color, delta: %.1f ****\n", delta);
                gotColor = 0;
                return;
            }
        }
    }

    cond_image_filled.notify_one();
}
// No-op: streaming is driven by the shared context's event loop.
void DepthSenseVideo::Start()
{
}
// No-op: streaming stops when the shared context's last device closes.
void DepthSenseVideo::Stop()
{
}
// Total bytes of one frame (sum of the enabled streams' sizes).
size_t DepthSenseVideo::SizeBytes() const
{
    return size_bytes;
}
// Streams built during node configuration (depth and/or color).
const std::vector<StreamInfo>& DepthSenseVideo::Streams() const
{
    return streams;
}
// Publish 'image' as the fill target for the SDK sample callbacks, then block
// until every enabled sensor has copied a (time-synchronised) frame into it.
// 'wait' is ignored: this call always blocks. Not reentrant.
// NOTE(review): fill_image is read and written here outside update_mutex
// while the callbacks access it under the lock - confirm this is benign.
bool DepthSenseVideo::GrabNext( unsigned char* image, bool /*wait*/ )
{
    if(fill_image) {
        throw std::runtime_error("GrabNext Cannot be called concurrently");
    }

    //printf("#### Grab Next ####\n");

    // Request that image is filled with data
    fill_image = image;
    cond_image_requested.notify_one();

    // Wait until it has been filled successfully.
    {
        std::unique_lock<std::mutex> lock(update_mutex);
        while ((enableDepth && !gotDepth) || (enableColor && !gotColor))
        {
            cond_image_filled.wait(lock);
        }

        if (gotDepth)
        {
            gotDepth = 0;
        }

        if (gotColor)
        {
            gotColor = 0;
        }

        fill_image = 0;
    }

    //printf("Delta time: %.1f\n", fabs(GetDeltaTime()));

    return true;
}
// No buffering in this driver, so "newest" is the same as "next".
bool DepthSenseVideo::GrabNewest( unsigned char* image, bool wait )
{
    return GrabNext(image,wait);
}
// Signed capture-time difference between last depth and color frames
// (same units as the SDK's timeOfCapture; compared against MAX_DELTA_TIME).
double DepthSenseVideo::GetDeltaTime() const
{
    return depthTs - colorTs;
}
// Parse a sensor-type name from the URI: "rgb", "depth", or "" (unassigned).
// Throws VideoException for anything else.
DepthSenseSensorType depthsense_sensor(const std::string& str)
{
    if (str == "rgb") {
        return DepthSenseRgb;
    }
    if (str == "depth") {
        return DepthSenseDepth;
    }
    if (str.empty()) {
        return DepthSenseUnassigned;
    }
    throw pangolin::VideoException("Unknown DepthSense sensor", str);
}
// Register the "depthsense" video URI scheme. URI options: img1/img2 (sensor
// type, default depth/none), size1/size2 (default 320x240 for depth,
// 640x480 for rgb), fps1/fps2 (default 30). Always opens device 0.
PANGOLIN_REGISTER_FACTORY(DepthSenseVideo)
{
    struct DepthSenseVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            DepthSenseSensorType img1 = depthsense_sensor(uri.Get<std::string>("img1", "depth"));
            DepthSenseSensorType img2 = depthsense_sensor(uri.Get<std::string>("img2", ""));

            const ImageDim dim1 = uri.Get<ImageDim>("size1", img1 == DepthSenseDepth ? ImageDim(320, 240) : ImageDim(640, 480) );
            const ImageDim dim2 = uri.Get<ImageDim>("size2", img2 == DepthSenseDepth ? ImageDim(320, 240) : ImageDim(640, 480) );

            const unsigned int fps1 = uri.Get<unsigned int>("fps1", 30);
            const unsigned int fps2 = uri.Get<unsigned int>("fps2", 30);

            return std::unique_ptr<VideoInterface>(
                DepthSenseContext::I().GetDepthSenseVideo(0, img1, img2, dim1, dim2, fps1, fps2, uri)
            );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<DepthSenseVideoFactory>(), 10, "depthsense");
}
}

View File

@@ -0,0 +1,953 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <array>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/video/drivers/ffmpeg.h>
// Some versions of FFMPEG define this horrid macro in global scope.
#undef PixelFormat
// It is impossible to keep up with ffmpeg deprecations, so ignore these warnings.
#if defined(_GCC_) || defined(_CLANG_)
# pragma GCC diagnostic ignored "-Wdeprecated"
#endif
extern "C"
{
#include <libavformat/avio.h>
#include <libavutil/mathematics.h>
#include <libavdevice/avdevice.h>
}
#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
namespace pangolin
{
#ifdef HAVE_FFMPEG_AVPIXELFORMAT
# define TEST_PIX_FMT_RETURN(fmt) case AV_PIX_FMT_##fmt: return #fmt;
#else
# define AV_PIX_FMT_NONE PIX_FMT_NONE
# define AV_PIX_FMT_GRAY8 PIX_FMT_GRAY8
# define TEST_PIX_FMT_RETURN(fmt) case PIX_FMT_##fmt: return #fmt;
#endif
// Translate a pixel-format name into FFmpeg's AVPixelFormat.
// Accepts the common aliases "gray8"/"grey8"/"grey" for 8-bit greyscale;
// everything else is resolved (case-insensitively) by av_get_pix_fmt.
AVPixelFormat FfmpegFmtFromString(const std::string fmt)
{
    const std::string lfmt = ToLowerCopy(fmt);
    const bool is_grey8 = (lfmt == "gray8") || (lfmt == "grey8") || (lfmt == "grey");
    return is_grey8 ? AV_PIX_FMT_GRAY8 : av_get_pix_fmt(lfmt.c_str());
}
// Translate an AVPixelFormat enum value back into its name string
// (e.g. AV_PIX_FMT_RGB24 -> "RGB24"). Returns the empty string for
// formats not covered by the table below. Each TEST_PIX_FMT_RETURN(X)
// expands to: case AV_PIX_FMT_X: return "X";
std::string FfmpegFmtToString(const AVPixelFormat fmt)
{
    switch( fmt )
    {
    TEST_PIX_FMT_RETURN(YUV420P);
    TEST_PIX_FMT_RETURN(YUYV422);
    TEST_PIX_FMT_RETURN(RGB24);
    TEST_PIX_FMT_RETURN(BGR24);
    TEST_PIX_FMT_RETURN(YUV422P);
    TEST_PIX_FMT_RETURN(YUV444P);
    TEST_PIX_FMT_RETURN(YUV410P);
    TEST_PIX_FMT_RETURN(YUV411P);
    TEST_PIX_FMT_RETURN(GRAY8);
    TEST_PIX_FMT_RETURN(MONOWHITE);
    TEST_PIX_FMT_RETURN(MONOBLACK);
    TEST_PIX_FMT_RETURN(PAL8);
    TEST_PIX_FMT_RETURN(YUVJ420P);
    TEST_PIX_FMT_RETURN(YUVJ422P);
    TEST_PIX_FMT_RETURN(YUVJ444P);
    // XVMC / VDPAU / VAAPI entries are hardware formats that only exist in
    // some FFmpeg versions, hence the feature-test guards.
#ifdef FF_API_XVMC
    TEST_PIX_FMT_RETURN(XVMC_MPEG2_MC);
    TEST_PIX_FMT_RETURN(XVMC_MPEG2_IDCT);
#endif
    TEST_PIX_FMT_RETURN(UYVY422);
    TEST_PIX_FMT_RETURN(UYYVYY411);
    TEST_PIX_FMT_RETURN(BGR8);
    TEST_PIX_FMT_RETURN(BGR4);
    TEST_PIX_FMT_RETURN(BGR4_BYTE);
    TEST_PIX_FMT_RETURN(RGB8);
    TEST_PIX_FMT_RETURN(RGB4);
    TEST_PIX_FMT_RETURN(RGB4_BYTE);
    TEST_PIX_FMT_RETURN(NV12);
    TEST_PIX_FMT_RETURN(NV21);
    TEST_PIX_FMT_RETURN(ARGB);
    TEST_PIX_FMT_RETURN(RGBA);
    TEST_PIX_FMT_RETURN(ABGR);
    TEST_PIX_FMT_RETURN(BGRA);
    TEST_PIX_FMT_RETURN(GRAY16BE);
    TEST_PIX_FMT_RETURN(GRAY16LE);
    TEST_PIX_FMT_RETURN(YUV440P);
    TEST_PIX_FMT_RETURN(YUVJ440P);
    TEST_PIX_FMT_RETURN(YUVA420P);
#ifdef FF_API_VDPAU
    TEST_PIX_FMT_RETURN(VDPAU_H264);
    TEST_PIX_FMT_RETURN(VDPAU_MPEG1);
    TEST_PIX_FMT_RETURN(VDPAU_MPEG2);
    TEST_PIX_FMT_RETURN(VDPAU_WMV3);
    TEST_PIX_FMT_RETURN(VDPAU_VC1);
#endif
    TEST_PIX_FMT_RETURN(RGB48BE );
    TEST_PIX_FMT_RETURN(RGB48LE );
    TEST_PIX_FMT_RETURN(RGB565BE);
    TEST_PIX_FMT_RETURN(RGB565LE);
    TEST_PIX_FMT_RETURN(RGB555BE);
    TEST_PIX_FMT_RETURN(RGB555LE);
    TEST_PIX_FMT_RETURN(BGR565BE);
    TEST_PIX_FMT_RETURN(BGR565LE);
    TEST_PIX_FMT_RETURN(BGR555BE);
    TEST_PIX_FMT_RETURN(BGR555LE);
    TEST_PIX_FMT_RETURN(VAAPI_MOCO);
    TEST_PIX_FMT_RETURN(VAAPI_IDCT);
    TEST_PIX_FMT_RETURN(VAAPI_VLD);
    TEST_PIX_FMT_RETURN(YUV420P16LE);
    TEST_PIX_FMT_RETURN(YUV420P16BE);
    TEST_PIX_FMT_RETURN(YUV422P16LE);
    TEST_PIX_FMT_RETURN(YUV422P16BE);
    TEST_PIX_FMT_RETURN(YUV444P16LE);
    TEST_PIX_FMT_RETURN(YUV444P16BE);
#ifdef FF_API_VDPAU
    TEST_PIX_FMT_RETURN(VDPAU_MPEG4);
#endif
    TEST_PIX_FMT_RETURN(DXVA2_VLD);
    TEST_PIX_FMT_RETURN(RGB444BE);
    TEST_PIX_FMT_RETURN(RGB444LE);
    TEST_PIX_FMT_RETURN(BGR444BE);
    TEST_PIX_FMT_RETURN(BGR444LE);
    TEST_PIX_FMT_RETURN(Y400A );
    TEST_PIX_FMT_RETURN(NB );
    default: return "";
    }
}
#undef TEST_PIX_FMT_RETURN
// Construct an FfmpegVideo source and immediately open the given file/URL.
// All parameters are forwarded to InitUrl(), which does the heavy lifting.
// Uses nullptr (consistent with the rest of the file) rather than 0 for the
// initial format-context pointer.
FfmpegVideo::FfmpegVideo(const std::string filename, const std::string strfmtout, const std::string codec_hint, bool dump_info, int user_video_stream, ImageDim size)
    :pFormatCtx(nullptr)
{
    InitUrl(filename, strfmtout, codec_hint, dump_info, user_video_stream, size);
}
// Open 'url' with FFmpeg, select a video stream, open its decoder, and set
// up the swscale conversion state so GrabNext() can emit frames in the
// requested output pixel format.
//
//   url               stream/file/device to open (printf-style patterns for image sequences)
//   strfmtout         desired output pixel format name (e.g. "RGB24")
//   codec_hint        optional input-format hint (e.g. "video4linux", "mjpeg")
//   dump_info         if true, dump FFmpeg's stream info to stderr
//   user_video_stream index into the discovered video streams (<0 => first)
//   size              requested capture size (devices only); (0,0) => default
//
// Throws VideoException on any failure.
// BUGFIX: the AVDictionary passed to avformat_open_input was never freed;
// FFmpeg returns unconsumed options in it and the caller must release them.
void FfmpegVideo::InitUrl(const std::string url, const std::string strfmtout, const std::string codec_hint, bool dump_info, int user_video_stream, ImageDim size)
{
    if( url.find('*') != url.npos )
        throw VideoException("Wildcards not supported. Please use ffmpegs printf style formatting for image sequences. e.g. img-000000%04d.ppm");

    // Register all formats and codecs
    av_register_all();

    // Register all devices
    avdevice_register_all();

    AVInputFormat* fmt = NULL;

    if( !codec_hint.empty() ) {
        fmt = av_find_input_format(codec_hint.c_str());
    }

#if (LIBAVFORMAT_VERSION_MAJOR >= 53)
    AVDictionary* options = nullptr;
    if(size.x != 0 && size.y != 0) {
        // Request a specific capture size (only meaningful for devices).
        std::string s = std::to_string(size.x) + "x" + std::to_string(size.y);
        av_dict_set(&options, "video_size", s.c_str(), 0);
    }
    const int open_result = avformat_open_input(&pFormatCtx, url.c_str(), fmt, &options);
    // avformat_open_input replaces 'options' with any entries it did not
    // consume; free the remainder so the dictionary does not leak.
    av_dict_free(&options);
    if( open_result )
#else
    // Deprecated - can't use with mjpeg
    if( av_open_input_file(&pFormatCtx, url.c_str(), fmt, 0, NULL) )
#endif
        throw VideoException("Couldn't open stream");

    // MJPEG streams have no headers to analyze; skip the probe delay.
    if( !ToLowerCopy(codec_hint).compare("mjpeg") )
#ifdef HAVE_FFMPEG_MAX_ANALYZE_DURATION2
        pFormatCtx->max_analyze_duration2 = AV_TIME_BASE * 0.0;
#else
        pFormatCtx->max_analyze_duration = AV_TIME_BASE * 0.0;
#endif

    // Retrieve stream information
#if (LIBAVFORMAT_VERSION_MAJOR >= 53)
    if(avformat_find_stream_info(pFormatCtx, 0)<0)
#else
    // Deprecated
    if(av_find_stream_info(pFormatCtx)<0)
#endif
        throw VideoException("Couldn't find stream information");

    if(dump_info) {
        // Dump information about file onto standard error
#if (LIBAVFORMAT_VERSION_MAJOR >= 53)
        av_dump_format(pFormatCtx, 0, url.c_str(), false);
#else
        // Deprecated
        dump_format(pFormatCtx, 0, url.c_str(), false);
#endif
    }

    // Collect the indices of all video and audio streams in the container.
    videoStream=-1;
    audioStream=-1;
    std::vector<int> videoStreams;
    std::vector<int> audioStreams;

    for(unsigned i=0; i<pFormatCtx->nb_streams; i++)
    {
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStreams.push_back(i);
        }else if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
        {
            audioStreams.push_back(i);
        }
    }

    if(videoStreams.size()==0)
        throw VideoException("Couldn't find a video stream");

    // Honour the user's stream choice if valid, else default to the first.
    if(0 <= user_video_stream && user_video_stream < (int)videoStreams.size() ) {
        videoStream = videoStreams[user_video_stream];
    }else{
        videoStream = videoStreams[0];
    }

    // Get a pointer to the codec context for the video stream
    pVidCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pVidCodec=avcodec_find_decoder(pVidCodecCtx->codec_id);
    if(pVidCodec==0)
        throw VideoException("Codec not found");

    // Open video codec
#if LIBAVCODEC_VERSION_MAJOR > 52
    if(avcodec_open2(pVidCodecCtx, pVidCodec,0)<0)
#else
    if(avcodec_open(pVidCodecCtx, pVidCodec)<0)
#endif
        throw VideoException("Could not open codec");

    // Hack to correct wrong frame rates that seem to be generated by some codecs
    if(pVidCodecCtx->time_base.num>1000 && pVidCodecCtx->time_base.den==1)
        pVidCodecCtx->time_base.den=1000;

    // Allocate video frames (decoded frame + format-converted output frame)
#if LIBAVUTIL_VERSION_MAJOR >= 54
    pFrame = av_frame_alloc();
    pFrameOut = av_frame_alloc();
#else
    // deprecated
    pFrame = avcodec_alloc_frame();
    pFrameOut = avcodec_alloc_frame();
#endif
    if(!pFrame || !pFrameOut)
        throw VideoException("Couldn't allocate frames");

    fmtout = FfmpegFmtFromString(strfmtout);
    if(fmtout == AV_PIX_FMT_NONE )
        throw VideoException("Output format not recognised",strfmtout);

    // Image dimensions
    const int w = pVidCodecCtx->width;
    const int h = pVidCodecCtx->height;

    // Determine required buffer size and allocate buffer
    // ('buffer' is owned by this object; released in the destructor.)
    numBytesOut=avpicture_get_size(fmtout, w, h);
    buffer= new uint8_t[numBytesOut];

    // Assign appropriate parts of buffer to image planes in pFrameOut
    avpicture_fill((AVPicture *)pFrameOut, buffer, fmtout, w, h);

    // Allocate SWS for converting pixel formats
    img_convert_ctx = sws_getContext(w, h,
                                     pVidCodecCtx->pix_fmt,
                                     w, h, fmtout, FFMPEG_POINT,
                                     NULL, NULL, NULL);
    if(img_convert_ctx == NULL) {
        throw VideoException("Cannot initialize the conversion context");
    }

    // Populate stream info for users to query
    const PixelFormat strm_fmt = PixelFormatFromString(FfmpegFmtToString(fmtout));
    const StreamInfo stream(strm_fmt, w, h, (w*strm_fmt.bpp)/8, 0);
    streams.push_back(stream);
}
// Release everything acquired in InitUrl(): the output pixel buffer, both
// AVFrames, the codec, the format context, and the swscale context.
FfmpegVideo::~FfmpegVideo()
{
    // Free the output image buffer and its wrapping frame
    delete[] buffer;
    av_free(pFrameOut);

    // Free the decoded (YUV) frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pVidCodecCtx);

    // Close the video file
#if (LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR >= 53 && LIBAVFORMAT_VERSION_MINOR >= 21) )
    avformat_close_input(&pFormatCtx);
#else
    // Deprecated
    av_close_input_file(pFormatCtx);
#endif

    // Free pixel conversion context
    sws_freeContext(img_convert_ctx);
}
// Describe the single output stream set up by InitUrl().
const std::vector<StreamInfo>& FfmpegVideo::Streams() const
{
    return streams;
}
// Size in bytes of one converted output frame (as computed in InitUrl()).
size_t FfmpegVideo::SizeBytes() const
{
    return numBytesOut;
}
// No-op: the FFmpeg source is ready as soon as InitUrl() completes.
void FfmpegVideo::Start()
{
}
// No-op: teardown happens in the destructor.
void FfmpegVideo::Stop()
{
}
bool FfmpegVideo::GrabNext(unsigned char* image, bool /*wait*/)
{
int gotFrame = 0;
while(!gotFrame && av_read_frame(pFormatCtx, &packet)>=0)
{
// Is this a packet from the video stream?
if(packet.stream_index==videoStream)
{
// Decode video frame
avcodec_decode_video2(pVidCodecCtx, pFrame, &gotFrame, &packet);
}
// Did we get a video frame?
if(gotFrame) {
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pVidCodecCtx->height, pFrameOut->data, pFrameOut->linesize);
memcpy(image,pFrameOut->data[0],numBytesOut);
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
return gotFrame;
}
// No internal frame queue to skip over, so newest == next.
bool FfmpegVideo::GrabNewest(unsigned char *image, bool wait)
{
    return GrabNext(image,wait);
}
// Convert one stream's pixels from 'src' (at src_buffer_offset, format
// fmtsrc) into 'dst' (at dst_buffer_offset, format fmtdst) using the
// pre-built swscale context.
void FfmpegConverter::ConvertContext::convert(const unsigned char* src, unsigned char* dst)
{
    // avpicture_fill expects uint8_t* w/o const as the second parameter in
    // earlier ffmpeg versions, hence the const_cast.
    unsigned char* const src_plane = const_cast<unsigned char*>(src + src_buffer_offset);
    unsigned char* const dst_plane = dst + dst_buffer_offset;

    avpicture_fill((AVPicture*)avsrc, src_plane, fmtsrc, w, h);
    avpicture_fill((AVPicture*)avdst, dst_plane, fmtdst, w, h);

    sws_scale(img_convert_ctx,
              avsrc->data, avsrc->linesize, 0, h,
              avdst->data, avdst->linesize);
}
// Wrap 'videoin_' (ownership is taken) and convert every one of its streams
// to pixel format 'sfmtdst' using swscale with interpolation 'method'.
// Builds one ConvertContext per input stream and a matching output
// StreamInfo list with recomputed offsets/pitches.
FfmpegConverter::FfmpegConverter(std::unique_ptr<VideoInterface> &videoin_, const std::string sfmtdst, FfmpegMethod method )
    :videoin(std::move(videoin_))
{
    if( !videoin )
        throw VideoException("Source video interface not specified");

    // Staging buffer large enough for one complete multi-stream frame set.
    input_buffer = std::unique_ptr<unsigned char[]>(new unsigned char[videoin->SizeBytes()]);

    converters.resize(videoin->Streams().size());

    dst_buffer_size = 0;

    for(size_t i=0; i < videoin->Streams().size(); ++i) {
        const StreamInfo instrm = videoin->Streams()[i];

        converters[i].w=instrm.Width();
        converters[i].h=instrm.Height();

        converters[i].fmtdst = FfmpegFmtFromString(sfmtdst);
        converters[i].fmtsrc = FfmpegFmtFromString(instrm.PixFormat());
        converters[i].img_convert_ctx = sws_getContext(
            instrm.Width(), instrm.Height(), converters[i].fmtsrc,
            instrm.Width(), instrm.Height(), converters[i].fmtdst,
            method, NULL, NULL, NULL
        );
        if(!converters[i].img_convert_ctx)
            throw VideoException("Could not create SwScale context for pixel conversion");

        converters[i].dst_buffer_offset=dst_buffer_size;
        // StreamInfo encodes the byte offset as a fake pointer; subtracting
        // the null pointer recovers it as an integer offset.
        converters[i].src_buffer_offset=instrm.Offset() - (unsigned char*)0;
        //converters[i].src_buffer_offset=src_buffer_size;

#if LIBAVUTIL_VERSION_MAJOR >= 54
        converters[i].avsrc = av_frame_alloc();
        converters[i].avdst = av_frame_alloc();
#else
        // deprecated
        converters[i].avsrc = avcodec_alloc_frame();
        converters[i].avdst = avcodec_alloc_frame();
#endif

        // Output stream mirrors the input dimensions with the new format.
        const PixelFormat pxfmtdst = PixelFormatFromString(sfmtdst);
        const StreamInfo sdst( pxfmtdst, instrm.Width(), instrm.Height(), (instrm.Width()*pxfmtdst.bpp)/8, (unsigned char*)0 + converters[i].dst_buffer_offset );
        streams.push_back(sdst);

        //src_buffer_size += instrm.SizeBytes();
        dst_buffer_size += avpicture_get_size(converters[i].fmtdst, instrm.Width(), instrm.Height());
    }
}
// Release the per-stream AVFrames allocated in the constructor.
// NOTE(review): the per-stream img_convert_ctx does not appear to be freed
// here — looks like a leak of the SwsContexts; verify against sws_getContext
// ownership rules.
FfmpegConverter::~FfmpegConverter()
{
    for(ConvertContext&c:converters)
    {
        av_free(c.avsrc);
        av_free(c.avdst);
    }
}
// No-op: conversion state is fully prepared in the constructor.
void FfmpegConverter::Start()
{
    // No-Op
}
// No-op: teardown happens in the destructor.
void FfmpegConverter::Stop()
{
    // No-Op
}
// Total size in bytes of one converted multi-stream frame set.
size_t FfmpegConverter::SizeBytes() const
{
    return dst_buffer_size;
}
// Describe the converted output streams (one per input stream).
const std::vector<StreamInfo>& FfmpegConverter::Streams() const
{
    return streams;
}
bool FfmpegConverter::GrabNext( unsigned char* image, bool wait )
{
if( videoin->GrabNext(input_buffer.get(),wait) )
{
for(ConvertContext&c:converters) {
c.convert(input_buffer.get(),image);
}
return true;
}
return false;
}
bool FfmpegConverter::GrabNewest( unsigned char* image, bool wait )
{
if( videoin->GrabNewest(input_buffer.get(),wait) )
{
for(ConvertContext&c:converters) {
c.convert(input_buffer.get(),image);
}
return true;
}
return false;
}
// Based on this example
// http://cekirdek.pardus.org.tr/~ismail/ffmpeg-docs/output-example_8c-source.html
// Create an encoder stream inside muxer context 'oc' using codec
// 'codec_id', configure it (bit rate, size, time-base = 1/frame_rate,
// pixel format), open the codec, and return the stream. Throws
// VideoException if the encoder cannot be found or opened.
static AVStream* CreateStream(AVFormatContext *oc, CodecID codec_id, uint64_t frame_rate, int bit_rate, AVPixelFormat EncoderFormat, int width, int height)
{
    AVCodec* codec = avcodec_find_encoder(codec_id);
    if (!(codec)) throw
        VideoException("Could not find encoder");

#if (LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR >= 53 && LIBAVFORMAT_VERSION_MINOR >= 21) )
    AVStream* stream = avformat_new_stream(oc, codec);
#else
    AVStream* stream = av_new_stream(oc, codec_id);
#endif

    if (!stream) throw VideoException("Could not allocate stream");

    stream->id = oc->nb_streams-1;
    switch (codec->type) {
//    case AVMEDIA_TYPE_AUDIO:
//        stream->id = 1;
//        stream->codec->sample_fmt = AV_SAMPLE_FMT_S16;
//        stream->codec->bit_rate = 64000;
//        stream->codec->sample_rate = 44100;
//        stream->codec->channels = 2;
//        break;
    case AVMEDIA_TYPE_VIDEO:
        stream->codec->codec_id = codec_id;
        stream->codec->bit_rate = bit_rate;
        stream->codec->width = width;
        stream->codec->height = height;
        // time_base = 1/fps: one tick per frame.
        stream->codec->time_base.num = 1;
        stream->codec->time_base.den = frame_rate;
        stream->codec->gop_size = 12;
        stream->codec->pix_fmt = EncoderFormat;
        break;
    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    /* open the codec */
    int ret = avcodec_open2(stream->codec, codec, NULL);
    if (ret < 0) throw VideoException("Could not open video codec");

    return stream;
}
// One encoded output stream (codec context, scaler, frame buffers) inside
// an FfmpegVideoOutput container.
class FfmpegVideoOutputStream
{
public:
    FfmpegVideoOutputStream(FfmpegVideoOutput& recorder, CodecID codec_id, uint64_t frame_rate, int bit_rate, const StreamInfo& input_info, bool flip );
    ~FfmpegVideoOutputStream();

    const StreamInfo& GetStreamInfo() const;

    // Convert (and optionally vertically flip) one image and encode it.
    // A negative 'time' auto-generates a monotonically increasing pts.
    void WriteImage(const uint8_t* img, int w, int h, double time);
    // Drain any frames still buffered inside the encoder (e.g. H.264).
    void Flush();

protected:
    void WriteAvPacket(AVPacket* pkt);
    void WriteFrame(AVFrame* frame);
    double BaseFrameTime();

    FfmpegVideoOutput& recorder;  // Owning container/muxer.

    StreamInfo input_info;        // Format of images passed to WriteImage.
    AVPixelFormat input_format;   // input_info's format as AVPixelFormat.
    AVPixelFormat output_format;  // Encoder-side pixel format (YUV420P).

    AVPicture src_picture;        // Wraps the caller's image buffer.
    AVPicture dst_picture;        // Owned pixel-conversion target.

    int64_t last_pts;             // pts of the last packet written.

    // These pointers are owned by class
    AVStream* stream;
    SwsContext *sws_ctx;
    AVFrame* frame;

    bool flip;                    // Flip images vertically before encoding.
};
// Rescale a packet's timestamps from codec time-base to stream time-base
// and hand it to the muxer. Updates last_pts on success; empty packets are
// ignored.
void FfmpegVideoOutputStream::WriteAvPacket(AVPacket* pkt)
{
    if (pkt->size) {
        pkt->stream_index = stream->index;
        int64_t pts = pkt->pts;
        /* convert unit from CODEC's timestamp to stream's one */
#define C2S(field) \
        do { \
            if (pkt->field != (int64_t) AV_NOPTS_VALUE) \
                pkt->field = av_rescale_q(pkt->field, \
                                          stream->codec->time_base, \
                                          stream->time_base); \
        } while (0)
        C2S(pts);
        C2S(dts);
        C2S(duration);
#undef C2S
        int ret = av_interleaved_write_frame(recorder.oc, pkt);
        if (ret < 0) throw VideoException("Error writing video frame");
        // Remember the pre-rescale pts so WriteImage() can synthesise
        // increasing timestamps when the caller supplies none.
        if(pkt->pts != (int64_t)AV_NOPTS_VALUE) last_pts = pts;
    }
}
// Encode one frame and write the resulting packet (if any) to the
// container via WriteAvPacket(). NOTE: the parameter intentionally shadows
// the 'frame' member; only the parameter is used here.
void FfmpegVideoOutputStream::WriteFrame(AVFrame* frame)
{
    AVPacket pkt;
    pkt.data = NULL;
    pkt.size = 0;
    av_init_packet(&pkt);

    int ret;
    int got_packet = 1;

#if FF_API_LAVF_FMT_RAWPICTURE
    // Setup AVPacket
    if (recorder.oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = frame->data[0];
        pkt.size = sizeof(AVPicture);
        pkt.pts = frame->pts;
        ret = 0;
    } else {
#else
    {
#endif
        /* encode the image */
#if (LIBAVFORMAT_VERSION_MAJOR >= 54)
        ret = avcodec_encode_video2(stream->codec, &pkt, frame, &got_packet);
#else
        // TODO: Why is ffmpeg so fussy about this buffer size?
        // Making this too big results in garbled output.
        // Too small and it will fail entirely.
        pkt.size = 50* FF_MIN_BUFFER_SIZE; //std::max(FF_MIN_BUFFER_SIZE, frame->width * frame->height * 4 );
        // TODO: Make sure this is being freed by av_free_packet
        pkt.data = (uint8_t*) malloc(pkt.size);
        pkt.pts = frame->pts;
        ret = avcodec_encode_video(stream->codec, pkt.data, pkt.size, frame);
        got_packet = ret > 0;
#endif

        if (ret < 0) throw VideoException("Error encoding video frame");
    }

    if (ret >= 0 && got_packet) {
        WriteAvPacket(&pkt);
    }

    av_free_packet(&pkt);
}
// Queue one image for encoding.
//   img  - pixel data in 'input_format'; w/h are its dimensions.
//   time - capture time in seconds; negative => synthesise pts as
//          last_pts + 1.
// Converts (and flips, if configured) into the encoder's format/size via
// swscale when they differ from the input; otherwise encodes directly.
void FfmpegVideoOutputStream::WriteImage(const uint8_t* img, int w, int h, double time=-1.0)
{
    const int64_t pts = (time >= 0) ? time / BaseFrameTime() : ++last_pts;

    // Lazily writes the container header on the first frame.
    recorder.StartStream();

    AVCodecContext *c = stream->codec;

    if(flip) {
        // Earlier versions of ffmpeg do not annotate img as const, although it is
        avpicture_fill(&src_picture,const_cast<uint8_t*>(img),input_format,w,h);
        // Point each plane at its last row and negate the stride so swscale
        // reads the image bottom-up (vertical flip).
        for(int i=0; i<4; ++i) {
            src_picture.data[i] += (h-1) * src_picture.linesize[i];
            src_picture.linesize[i] *= -1;
        }
    }else{
        // Earlier versions of ffmpeg do not annotate img as const, although it is
        avpicture_fill(&src_picture,const_cast<uint8_t*>(img),input_format,w,h);
    }

    if (c->pix_fmt != input_format || c->width != w || c->height != h) {
        if(!sws_ctx) {
            sws_ctx = sws_getCachedContext( sws_ctx,
                                            w, h, input_format,
                                            c->width, c->height, c->pix_fmt,
                                            SWS_BICUBIC, NULL, NULL, NULL
                                            );
            if (!sws_ctx) throw VideoException("Could not initialize the conversion context");
        }
        sws_scale(sws_ctx,
                  src_picture.data, src_picture.linesize, 0, h,
                  dst_picture.data, dst_picture.linesize
                  );
        *((AVPicture *)frame) = dst_picture;
    } else {
        // Same format/size: encode the caller's buffer without conversion.
        *((AVPicture *)frame) = src_picture;
    }

    frame->pts = pts;
    frame->width = w;
    frame->height = h;
    WriteFrame(frame);
}
// Drain frames still buffered inside the encoder by repeatedly encoding
// NULL until got_packet is false. Required for codecs with AV_CODEC_CAP_DELAY.
void FfmpegVideoOutputStream::Flush()
{
#if (LIBAVFORMAT_VERSION_MAJOR >= 54)
    if (stream->codec->codec->capabilities & AV_CODEC_CAP_DELAY) {
        /* some CODECs like H.264 needs flushing buffered frames by encoding NULL frames. */
        /* cf. https://www.ffmpeg.org/doxygen/trunk/group__lavc__encoding.html#ga2c08a4729f72f9bdac41b5533c4f2642 */
        AVPacket pkt;
        pkt.data = NULL;
        pkt.size = 0;
        av_init_packet(&pkt);

        int got_packet = 1;
        while (got_packet) {
            int ret = avcodec_encode_video2(stream->codec, &pkt, NULL, &got_packet);
            if (ret < 0) throw VideoException("Error encoding video frame");
            WriteAvPacket(&pkt);
        }
        av_free_packet(&pkt);
    }
#endif
}
// Description of the images this stream expects in WriteImage().
const StreamInfo& FfmpegVideoOutputStream::GetStreamInfo() const
{
    return input_info;
}
// Duration of one frame in seconds (the codec time-base as a double).
double FfmpegVideoOutputStream::BaseFrameTime()
{
    return (double)stream->codec->time_base.num / (double)stream->codec->time_base.den;
}
// Create the encoder stream inside the recorder's muxer and allocate the
// conversion target picture plus the reusable AVFrame. Output is always
// encoded as YUV420P regardless of the input format.
FfmpegVideoOutputStream::FfmpegVideoOutputStream(
    FfmpegVideoOutput& recorder, CodecID codec_id, uint64_t frame_rate,
    int bit_rate, const StreamInfo& input_info, bool flip_image
    )
    : recorder(recorder), input_info(input_info),
      input_format(FfmpegFmtFromString(input_info.PixFormat())),
      output_format( FfmpegFmtFromString("YUV420P") ),
      last_pts(-1), sws_ctx(NULL), frame(NULL), flip(flip_image)
{
    stream = CreateStream(recorder.oc, codec_id, frame_rate, bit_rate, output_format, input_info.Width(), input_info.Height() );

    // Allocate the encoded raw picture.
    int ret = avpicture_alloc(&dst_picture, stream->codec->pix_fmt, stream->codec->width, stream->codec->height);
    if (ret < 0) throw VideoException("Could not allocate picture");

    // Allocate frame
#if LIBAVUTIL_VERSION_MAJOR >= 54
    frame = av_frame_alloc();
#else
    // Deprecated
    frame = avcodec_alloc_frame();
#endif
}
// Flush any buffered frames, then release the scaler, frame, conversion
// picture and codec.
FfmpegVideoOutputStream::~FfmpegVideoOutputStream()
{
    Flush();

    if(sws_ctx) {
        sws_freeContext(sws_ctx);
    }

    av_free(frame);
    av_free(dst_picture.data[0]);
    avcodec_close(stream->codec);
}
// Open an FFmpeg-backed recorder writing to 'filename'. The container
// format is deduced from the filename in Initialise(); streams are added
// later via SetStreams().
FfmpegVideoOutput::FfmpegVideoOutput(const std::string& filename, int base_frame_rate, int bit_rate, bool flip_image)
    : filename(filename), started(false), oc(NULL),
      frame_count(0), base_frame_rate(base_frame_rate), bit_rate(bit_rate), is_pipe(pangolin::IsPipe(filename)), flip(flip_image)
{
    Initialise(filename);
}
// Finalise the recording (flush streams, write trailer, free context).
FfmpegVideoOutput::~FfmpegVideoOutput()
{
    Close();
}
// True if the output target was detected as a pipe at construction time.
bool FfmpegVideoOutput::IsPipe() const
{
    return is_pipe;
}
// Create the AVFormatContext for 'filename', deducing the container format
// from the file extension (falling back to MPEG), and open the output I/O
// context unless the format is stream/device based (AVFMT_NOFILE).
// Throws VideoException on failure.
void FfmpegVideoOutput::Initialise(std::string filename)
{
    av_register_all();

#ifdef HAVE_FFMPEG_AVFORMAT_ALLOC_OUTPUT_CONTEXT2
    int ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename.c_str());
#else
    oc = avformat_alloc_context();
    // Guard: avformat_alloc_context can (rarely) return NULL.
    if(oc) oc->oformat = av_guess_format(NULL, filename.c_str(), NULL);
    int ret = (oc && oc->oformat) ? 0 : -1;
#endif

    if (ret < 0 || !oc) {
        pango_print_error("Could not deduce output format from file extension: using MPEG.\n");
#ifdef HAVE_FFMPEG_AVFORMAT_ALLOC_OUTPUT_CONTEXT2
        ret = avformat_alloc_output_context2(&oc, NULL, "mpeg", filename.c_str());
#else
        if(oc) {
            oc->oformat = av_guess_format("mpeg", filename.c_str(), NULL);
            // BUGFIX: 'ret' was previously left negative here, so this
            // fallback always threw even when the mpeg muxer was found.
            ret = oc->oformat ? 0 : -1;
        }
#endif
        if (ret < 0 || !oc) throw VideoException("Couldn't create AVFormatContext");
    }

    /* open the output file, if needed */
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename.c_str(), AVIO_FLAG_WRITE);
        // BUGFIX: message previously contained an unformatted "%s" literal;
        // the filename is passed as the exception's detail string instead.
        if (ret < 0) throw VideoException("Could not open output file", filename);
    }
}
// Lazily write the container header before the first frame. Idempotent:
// subsequent calls are no-ops once 'started' is set.
void FfmpegVideoOutput::StartStream()
{
    if(!started) {
#if (LIBAVFORMAT_VERSION_MAJOR >= 53)
        av_dump_format(oc, 0, filename.c_str(), 1);
#else
        // Deprecated
        dump_format(oc, 0, filename.c_str(), 1);
#endif

        /* Write the stream header, if any. */
        int ret = avformat_write_header(oc, NULL);
        if (ret < 0) throw VideoException("Error occurred when opening output file");

        started = true;
    }
}
void FfmpegVideoOutput::Close()
{
for(std::vector<FfmpegVideoOutputStream*>::iterator i = streams.begin(); i!=streams.end(); ++i)
{
(*i)->Flush();
delete *i;
}
av_write_trailer(oc);
if (!(oc->oformat->flags & AVFMT_NOFILE)) avio_close(oc->pb);
avformat_free_context(oc);
}
// The input stream descriptions registered via SetStreams().
const std::vector<StreamInfo>& FfmpegVideoOutput::Streams() const
{
    return strs;
}
// Declare the set of image streams packed into each WriteStreams() call.
// Creates one encoder stream per input StreamInfo using the container's
// default video codec. Attached per-video properties are not supported.
void FfmpegVideoOutput::SetStreams(const std::vector<StreamInfo>& str, const std::string& /*uri*/, const picojson::value& properties)
{
    strs.insert(strs.end(), str.begin(), str.end());

    for(std::vector<StreamInfo>::const_iterator i = str.begin(); i!= str.end(); ++i)
    {
        // Raw new: ownership is held by 'streams' and released in Close().
        streams.push_back( new FfmpegVideoOutputStream(
            *this, oc->oformat->video_codec, base_frame_rate, bit_rate, *i, flip
        ) );
    }

    if(!properties.is<picojson::null>()) {
        pango_print_warn("Ignoring attached video properties.");
    }
}
// Encode one frame for every registered stream from the packed buffer
// 'data'. Returns the index of the frame just written.
int FfmpegVideoOutput::WriteStreams(const unsigned char* data, const picojson::value& /*frame_properties*/)
{
    for(FfmpegVideoOutputStream* s : streams)
    {
        // Slice this stream's image out of the packed multi-stream buffer.
        const Image<unsigned char> img = s->GetStreamInfo().StreamImage(data);
        s->WriteImage(img.ptr, img.w, img.h);
    }
    return frame_count++;
}
// Register FFmpeg-backed video sources under the ffmpeg/file(s)/v4lmjpeg/
// mjpeg/convert URI schemes.
PANGOLIN_REGISTER_FACTORY(FfmpegVideo)
{
    struct FfmpegVideoFactory : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            // Extensions FFmpeg is known to handle.
            // BUGFIX: removed duplicate entries (".m4v", ".mpg", ".mpeg"),
            // shrinking the array from 43 to 40 elements.
            const std::array<std::string,40> ffmpeg_ext = {{
                ".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".flv", ".f4v",
                ".f4p", ".f4a", ".f4b", ".gif", ".gifv", ".m4v", ".mkv", ".mng",
                ".mov", ".qt", ".mp4", ".m4p", ".mpg", ".mp2", ".mpeg", ".mpe",
                ".mpv", ".m2v", ".mxf", ".nsv", ".ogv", ".ogg", ".rm", ".rmvb",
                ".roq", ".svi", ".vob", ".webm", ".wmv", ".yuv", ".h264", ".h265"
            }};

            if(!uri.scheme.compare("ffmpeg") || !uri.scheme.compare("file") || !uri.scheme.compare("files") )
            {
                if(!uri.scheme.compare("file") || !uri.scheme.compare("files")) {
                    const std::string ext = FileLowercaseExtention(uri.url);
                    if(std::find(ffmpeg_ext.begin(), ffmpeg_ext.end(), ext) == ffmpeg_ext.end()) {
                        // Don't try to load unknown files without the ffmpeg:// scheme.
                        return std::unique_ptr<VideoInterface>();
                    }
                }
                std::string outfmt = uri.Get<std::string>("fmt","RGB24");
                ToUpper(outfmt);
                const int video_stream = uri.Get<int>("stream",-1);
                return std::unique_ptr<VideoInterface>( new FfmpegVideo(uri.url.c_str(), outfmt, "", false, video_stream) );
            }else if( !uri.scheme.compare("v4lmjpeg")) {
                const int video_stream = uri.Get<int>("stream",-1);
                const ImageDim size = uri.Get<ImageDim>("size",ImageDim(0,0));
                return std::unique_ptr<VideoInterface>( new FfmpegVideo(uri.url.c_str(),"RGB24", "video4linux", false, video_stream, size) );
            } else if( !uri.scheme.compare("mjpeg")) {
                return std::unique_ptr<VideoInterface>( new FfmpegVideo(uri.url.c_str(),"RGB24", "MJPEG" ) );
            }else if( !uri.scheme.compare("convert") ) {
                // Wrap another video source and convert its pixel format.
                std::string outfmt = uri.Get<std::string>("fmt","RGB24");
                ToUpper(outfmt);
                std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
                return std::unique_ptr<VideoInterface>( new FfmpegConverter(subvid,outfmt,FFMPEG_POINT) );
            }else{
                return std::unique_ptr<VideoInterface>();
            }
        }
    };

    auto factory = std::make_shared<FfmpegVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "ffmpeg");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "v4lmjpeg");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "mjpeg");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 20, "convert");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 15, "file");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 15, "files");
}
// Register the FFmpeg recorder under the "ffmpeg" output URI scheme, e.g.
// ffmpeg://[fps=60,bps=...,flip=0,unique_filename]//output.avi
PANGOLIN_REGISTER_FACTORY(FfmpegVideoOutput)
{
    struct FfmpegVideoFactory final : public FactoryInterface<VideoOutputInterface> {
        std::unique_ptr<VideoOutputInterface> Open(const Uri& uri) override {
            const int desired_frame_rate = uri.Get("fps", 60);
            const int desired_bit_rate = uri.Get("bps", 20000*1024);
            const bool flip = uri.Get("flip", false);
            std::string filename = uri.url;

            // Optionally avoid clobbering an existing file.
            if(uri.Contains("unique_filename")) {
                filename = MakeUniqueFilename(filename);
            }

            return std::unique_ptr<VideoOutputInterface>(
                new FfmpegVideoOutput(filename, desired_frame_rate, desired_bit_rate, flip)
            );
        }
    };

    auto factory = std::make_shared<FfmpegVideoFactory>();
    FactoryRegistry<VideoOutputInterface>::I().RegisterFactory(factory, 10, "ffmpeg");
}
}

View File

@@ -0,0 +1,969 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/firewire.h>
#include <pangolin/video/drivers/deinterlace.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <inttypes.h>
using namespace std;
namespace pangolin
{
// Initialise a non-format7 IIDC camera: stop any running transmission,
// configure ISO speed / video mode / framerate, set up DMA capture with
// 'dma_frames' buffers, derive width/height from the mode, and start
// streaming. Throws VideoException on any dc1394 failure.
void FirewireVideo::init_camera(
    uint64_t guid, int dma_frames,
    dc1394speed_t iso_speed,
    dc1394video_mode_t video_mode,
    dc1394framerate_t framerate
    ) {

    // Format7 modes carry an ROI and must go through init_format7_camera.
    if(video_mode>=DC1394_VIDEO_MODE_FORMAT7_0)
        throw VideoException("format7 modes need to be initialized through the constructor that allows for specifying the roi");

    camera = dc1394_camera_new (d, guid);
    if (!camera)
        throw VideoException("Failed to initialize camera");

    // Attempt to stop camera if it is already running
    dc1394switch_t is_iso_on = DC1394_OFF;
    dc1394_video_get_transmission(camera, &is_iso_on);
    if (is_iso_on==DC1394_ON) {
        dc1394_video_set_transmission(camera, DC1394_OFF);
    }

    cout << "Using camera with GUID " << camera->guid << endl;

    //-----------------------------------------------------------------------
    //  setup capture
    //-----------------------------------------------------------------------

    // Speeds of 800 Mbps and above require 1394B operation mode.
    if( iso_speed >= DC1394_ISO_SPEED_800)
    {
        err=dc1394_video_set_operation_mode(camera, DC1394_OPERATION_MODE_1394B);
        if( err != DC1394_SUCCESS )
            throw VideoException("Could not set DC1394_OPERATION_MODE_1394B");
    }

    err=dc1394_video_set_iso_speed(camera, iso_speed);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set iso speed");

    err=dc1394_video_set_mode(camera, video_mode);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set video mode");

    err=dc1394_video_set_framerate(camera, framerate);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set framerate");

    err=dc1394_capture_setup(camera,dma_frames, DC1394_CAPTURE_FLAGS_DEFAULT);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not setup camera - check settings");

    //-----------------------------------------------------------------------
    //  initialise width and height from mode
    //-----------------------------------------------------------------------
    dc1394_get_image_size_from_video_mode(camera, video_mode, &width, &height);

    init_stream_info();
    Start();
}
// Note:
// the following was tested on an IIDC camera over USB, therefore it might not
// work as well on a camera over a proper FireWire transport.
// Configure and start the camera in a Format7 (scalable / ROI) video mode.
//
// guid          64-bit unique id of the camera on the bus
// dma_frames    number of frames in the capture DMA ring buffer
// iso_speed     isochronous bus speed; >= 800 switches the bus to 1394B mode
// video_mode    must be one of the DC1394_VIDEO_MODE_FORMAT7_* modes
// framerate     desired fps, or MAX_FR / EXT_TRIG for free-running / triggered
// width,height  requested ROI size (snapped to the mode's legal step sizes)
// left,top      requested ROI offset (snapped likewise)
// reset_at_boot issue a camera reset after opening
//
// Throws VideoException on any libdc1394 failure.
void FirewireVideo::init_format7_camera(
    uint64_t guid, int dma_frames,
    dc1394speed_t iso_speed,
    dc1394video_mode_t video_mode,
    float framerate,
    uint32_t width, uint32_t height,
    uint32_t left, uint32_t top, bool reset_at_boot
) {
    if(video_mode< DC1394_VIDEO_MODE_FORMAT7_0)
        throw VideoException("roi can be specified only for format7 modes");

    camera = dc1394_camera_new (d, guid);
    if (!camera)
        throw VideoException("Failed to initialize camera");

    // Attempt to stop camera if it is already running
    dc1394switch_t is_iso_on = DC1394_OFF;
    dc1394_video_get_transmission(camera, &is_iso_on);
    if (is_iso_on==DC1394_ON) {
        dc1394_video_set_transmission(camera, DC1394_OFF);
    }

    cout << "Using camera with GUID " << camera->guid << endl;

    if(reset_at_boot){
        dc1394_camera_reset(camera);
    }

    //-----------------------------------------------------------------------
    //  setup mode and roi
    //-----------------------------------------------------------------------

    // 1394B operation is required before requesting 800Mbps or faster.
    if(iso_speed >= DC1394_ISO_SPEED_800)
    {
        err=dc1394_video_set_operation_mode(camera, DC1394_OPERATION_MODE_1394B);
        if( err != DC1394_SUCCESS )
            throw VideoException("Could not set DC1394_OPERATION_MODE_1394B");
    }

    err=dc1394_video_set_iso_speed(camera, iso_speed);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set iso speed");

    // check that the required mode is actually supported
    dc1394format7mode_t format7_info;
    err = dc1394_format7_get_mode_info(camera, video_mode, &format7_info);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not get format7 mode info");

    // safely set the video mode
    err=dc1394_video_set_mode(camera, video_mode);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set format7 video mode");

    // set position to 0,0 so that setting any size within min and max is a valid command
    err = dc1394_format7_set_image_position(camera, video_mode,0,0);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set format7 image position");

    // Snap the requested size to a legal value within the mode's limits.
    // NOTE(review): size is rounded with unit_pos_* and position (below) with
    // unit_size_* — these look swapped relative to the libdc1394 docs, where
    // sizes step by unit_size and positions by unit_pos. Behavior preserved;
    // confirm against the camera before changing.
    width = nearest_value(width, format7_info.unit_pos_x, 0, format7_info.max_size_x - left);
    height = nearest_value(height, format7_info.unit_pos_y, 0, format7_info.max_size_y - top);

    // set size
    err = dc1394_format7_set_image_size(camera,video_mode,width,height);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set format7 size");

    // get the info again since many parameters depend on image size
    err = dc1394_format7_get_mode_info(camera, video_mode, &format7_info);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not get format7 mode info");

    // work out position of roi
    left = nearest_value(left, format7_info.unit_size_x, format7_info.unit_size_x, format7_info.max_size_x - width);
    top = nearest_value(top, format7_info.unit_size_y, format7_info.unit_size_y, format7_info.max_size_y - height);

    // set roi position
    // (error message fixed: this call sets the position, not the size)
    err = dc1394_format7_set_image_position(camera,video_mode,left,top);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set format7 image position");

    this->width = width;
    this->height = height;
    this->top = top;
    this->left = left;

    cout<<"roi: "<<left<<" "<<top<<" "<<width<<" "<<height<<" ";

    //-----------------------------------------------------------------------
    //  setup frame rate
    //-----------------------------------------------------------------------

    // Max packet size gives the highest possible frame rate for the ROI.
    err=dc1394_format7_set_packet_size(camera,video_mode, format7_info.max_packet_size);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not set format7 packet size");

    if((framerate != MAX_FR) && (framerate != EXT_TRIG)){
        //set the framerate by using the absolute feature as suggested by the
        //folks at PointGrey
        err = dc1394_feature_set_absolute_control(camera,DC1394_FEATURE_FRAME_RATE,DC1394_ON);
        if( err != DC1394_SUCCESS )
            throw VideoException("Could not turn on absolute frame rate control");

        err = dc1394_feature_set_mode(camera,DC1394_FEATURE_FRAME_RATE,DC1394_FEATURE_MODE_MANUAL);
        if( err != DC1394_SUCCESS )
            throw VideoException("Could not make frame rate manual ");

        err=dc1394_feature_set_absolute_value(camera,DC1394_FEATURE_FRAME_RATE,framerate);
        if( err != DC1394_SUCCESS )
            throw VideoException("Could not set format7 framerate ");
    }

    // ask the camera what is the resulting framerate (this assume that such a rate is actually
    // allowed by the shutter time)
    float value;
    err=dc1394_feature_get_absolute_value(camera,DC1394_FEATURE_FRAME_RATE,&value);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not get framerate");

    cout<<" framerate(shutter permitting):"<<value<<endl;

    //-----------------------------------------------------------------------
    //  setup capture
    //-----------------------------------------------------------------------

    err=dc1394_capture_setup(camera,dma_frames, DC1394_CAPTURE_FLAGS_DEFAULT);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not setup camera - check settings");

    init_stream_info();
    Start();
}
// Map a dc1394 colour coding onto the Pangolin pixel-format name used by
// PixelFormatFromString. Throws VideoException for unrecognised codings.
std::string Dc1394ColorCodingToString(dc1394color_coding_t coding)
{
    const char* name = 0;
    switch(coding)
    {
    case DC1394_COLOR_CODING_RGB8:    name = "RGB24";    break;
    case DC1394_COLOR_CODING_MONO8:   name = "GRAY8";    break;
    case DC1394_COLOR_CODING_MONO16:  name = "GRAY16LE"; break;
    case DC1394_COLOR_CODING_RGB16:   name = "RGB48LE";  break;
    case DC1394_COLOR_CODING_MONO16S: name = "GRAY16BE"; break;
    case DC1394_COLOR_CODING_RGB16S:  name = "RGB48BE";  break;
    case DC1394_COLOR_CODING_YUV411:  name = "YUV411P";  break;
    case DC1394_COLOR_CODING_YUV422:  name = "YUV422P";  break;
    case DC1394_COLOR_CODING_YUV444:  name = "YUV444P";  break;
    // Raw bayer data is reported as plain grey; debayering happens elsewhere.
    case DC1394_COLOR_CODING_RAW8:    name = "GRAY8";    break;
    case DC1394_COLOR_CODING_RAW16:   name = "GRAY16LE"; break;
    default: break;
    }
    if(!name)
        throw VideoException("Unknown colour coding");
    return name;
}
// Inverse of Dc1394ColorCodingToString for the unambiguous format names.
// Throws VideoException for unrecognised names.
dc1394color_coding_t Dc1394ColorCodingFromString(std::string coding)
{
    if(coding == "RGB24")    return DC1394_COLOR_CODING_RGB8;
    if(coding == "GRAY8")    return DC1394_COLOR_CODING_MONO8;
    if(coding == "GRAY16LE") return DC1394_COLOR_CODING_MONO16;
    if(coding == "RGB48LE")  return DC1394_COLOR_CODING_RGB16;
    if(coding == "GRAY16BE") return DC1394_COLOR_CODING_MONO16S;
    if(coding == "RGB48BE")  return DC1394_COLOR_CODING_RGB16S;
    if(coding == "YUV411P")  return DC1394_COLOR_CODING_YUV411;
    if(coding == "YUV422P")  return DC1394_COLOR_CODING_YUV422;
    if(coding == "YUV444P")  return DC1394_COLOR_CODING_YUV444;
    // "RAW8"/"RAW16" are deliberately unmapped: they alias GRAY8/GRAY16LE in
    // the forward mapping, so the inverse would be ambiguous.
    // else if(!coding.compare("RAW8")) return DC1394_COLOR_CODING_RAW8;
    // else if(!coding.compare("RAW16")) return DC1394_COLOR_CODING_RAW16;
    throw VideoException("Unknown colour coding");
}
// Look up the image dimensions and Pangolin pixel-format name for a fixed
// (non-Format7) dc1394 video mode.
//   w, h   [out] image width / height in pixels
//   format [out] pixel-format string as consumed by PixelFormatFromString
// Throws VideoException for modes not tabulated here (e.g. Format7 modes);
// get_firewire_mode relies on that to skip unsupported modes.
// NOTE(review): 16-bit modes report "GRAY16" while Dc1394ColorCodingToString
// reports "GRAY16LE" — kept as-is since URIs match against these strings.
void Dc1394ModeDetails(dc1394video_mode_t mode, unsigned& w, unsigned& h, string& format )
{
    switch( mode )
    {
    // RGB Modes
    case DC1394_VIDEO_MODE_1024x768_RGB8:
        w=1024; h=768; format = "RGB24";
        break;
    case DC1394_VIDEO_MODE_640x480_RGB8:
        w=640; h=480; format = "RGB24";
        break;
    case DC1394_VIDEO_MODE_800x600_RGB8:
        w=800; h=600; format = "RGB24";
        break;
    case DC1394_VIDEO_MODE_1280x960_RGB8:
        w=1280; h=960; format = "RGB24";
        break;
    case DC1394_VIDEO_MODE_1600x1200_RGB8:
        w=1600; h=1200; format = "RGB24";
        break;
    // Greyscale modes
    case DC1394_VIDEO_MODE_640x480_MONO8:
        w=640; h=480; format = "GRAY8";
        break;
    case DC1394_VIDEO_MODE_800x600_MONO8:
        w=800; h=600; format = "GRAY8";
        break;
    case DC1394_VIDEO_MODE_1024x768_MONO8:
        w=1024; h=768; format = "GRAY8";
        break;
    case DC1394_VIDEO_MODE_1280x960_MONO8:
        w=1280; h=960; format = "GRAY8";
        break;
    case DC1394_VIDEO_MODE_1600x1200_MONO8:
        w=1600; h=1200; format = "GRAY8";
        break;
    case DC1394_VIDEO_MODE_640x480_MONO16:
        w=640; h=480; format = "GRAY16";
        break;
    case DC1394_VIDEO_MODE_800x600_MONO16:
        w=800; h=600; format = "GRAY16";
        break;
    case DC1394_VIDEO_MODE_1024x768_MONO16:
        w=1024; h=768; format = "GRAY16";
        break;
    case DC1394_VIDEO_MODE_1280x960_MONO16:
        w=1280; h=960; format = "GRAY16";
        break;
    case DC1394_VIDEO_MODE_1600x1200_MONO16:
        w=1600; h=1200; format = "GRAY16";
        break;
    // Chrome modes
    case DC1394_VIDEO_MODE_640x480_YUV411:
        w=640; h=480; format = "YUV411P";
        break;
    case DC1394_VIDEO_MODE_160x120_YUV444:
        w=160; h=120; format = "YUV444P";
        break;
    case DC1394_VIDEO_MODE_320x240_YUV422:
        w=320; h=240; format = "YUV422P";
        break;
    case DC1394_VIDEO_MODE_640x480_YUV422:
        w=640; h=480; format = "YUV422P";
        break;
    case DC1394_VIDEO_MODE_800x600_YUV422:
        w=800; h=600; format = "YUV422P";
        break;
    case DC1394_VIDEO_MODE_1024x768_YUV422:
        w=1024; h=768; format = "YUV422P";
        break;
    case DC1394_VIDEO_MODE_1600x1200_YUV422:
        w=1600; h=1200; format = "YUV422P";
        break;
    case DC1394_VIDEO_MODE_1280x960_YUV422:
        w=1280; h=960; format = "YUV422P";
        break;
    default:
        // Fixed: previously threw "Unknown colour coding", which was
        // misleading — the unrecognised quantity here is the video mode.
        throw VideoException("Unknown video mode");
    }
}
void FirewireVideo::init_stream_info()
{
streams.clear();
dc1394video_mode_t video_mode;
dc1394color_coding_t color_coding;
dc1394_video_get_mode(camera,&video_mode);
dc1394_get_color_coding_from_video_mode(camera,video_mode,&color_coding);
const std::string strformat = Dc1394ColorCodingToString(color_coding);
const PixelFormat fmt = PixelFormatFromString(strformat);
StreamInfo stream(fmt, width, height, (width*fmt.bpp)/8, 0 );
streams.push_back( stream );
frame_size_bytes = stream.Pitch() * stream.Height();
}
//! Implement VideoInterface::Streams() — the single stream set up by init_stream_info().
const std::vector<StreamInfo>& FirewireVideo::Streams() const
{
return streams;
}
//! Implement VideoInterface::SizeBytes() — bytes per frame, cached by init_stream_info().
size_t FirewireVideo::SizeBytes() const
{
return frame_size_bytes;
}
// Begin isochronous transmission. No-op if already streaming.
// Throws VideoException if transmission cannot be enabled.
void FirewireVideo::Start()
{
    if( running ) return;

    err = dc1394_video_set_transmission(camera, DC1394_ON);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not start camera iso transmission");
    running = true;
}
// Halt isochronous transmission. No-op if not currently streaming.
// Throws VideoException if transmission cannot be disabled.
void FirewireVideo::Stop()
{
    if( !running ) return;

    err = dc1394_video_set_transmission(camera, DC1394_OFF);
    if( err != DC1394_SUCCESS )
        throw VideoException("Could not stop the camera");
    running = false;
}
// Open the camera identified by GUID in a fixed-size (non-Format7) video mode.
// The ROI offset members default to 0 since fixed modes cover the full frame.
FirewireVideo::FirewireVideo(
Guid guid,
dc1394video_mode_t video_mode,
dc1394framerate_t framerate,
dc1394speed_t iso_speed,
int dma_buffers
) :running(false),top(0),left(0)
{
d = dc1394_new ();
if (!d)
throw VideoException("Failed to get 1394 bus");
init_camera(guid.guid,dma_buffers,iso_speed,video_mode,framerate);
}
// Open the camera identified by GUID in a Format7 (ROI) mode.
// width/height/left/top describe the requested ROI; the member fields
// (including top/left) are set by init_format7_camera after snapping the
// request to the mode's legal step sizes.
FirewireVideo::FirewireVideo(
Guid guid,
dc1394video_mode_t video_mode,
float framerate,
uint32_t width, uint32_t height,
uint32_t left, uint32_t top,
dc1394speed_t iso_speed,
int dma_buffers, bool reset_at_boot
) :running(false)
{
d = dc1394_new ();
if (!d)
throw VideoException("Failed to get 1394 bus");
init_format7_camera(guid.guid,dma_buffers,iso_speed,video_mode,framerate,width,height,left,top, reset_at_boot);
}
// Open the deviceid'th camera on the bus in a fixed-size video mode.
// Fixed: the camera enumeration list (and the bus context) were leaked when
// throwing "No cameras found" / "Invalid camera index" — the destructor never
// runs for an object whose constructor throws.
FirewireVideo::FirewireVideo(
    unsigned deviceid,
    dc1394video_mode_t video_mode,
    dc1394framerate_t framerate,
    dc1394speed_t iso_speed,
    int dma_buffers
) :running(false),top(0),left(0)
{
    d = dc1394_new ();
    if (!d)
        throw VideoException("Failed to get 1394 bus");

    err=dc1394_camera_enumerate (d, &list);
    if( err != DC1394_SUCCESS ) {
        dc1394_free(d);
        throw VideoException("Failed to enumerate cameras");
    }

    if (list->num == 0) {
        dc1394_camera_free_list(list);
        dc1394_free(d);
        throw VideoException("No cameras found");
    }

    if( deviceid >= list->num ) {
        dc1394_camera_free_list(list);
        dc1394_free(d);
        throw VideoException("Invalid camera index");
    }

    const uint64_t guid = list->ids[deviceid].guid;
    dc1394_camera_free_list (list);

    init_camera(guid,dma_buffers,iso_speed,video_mode,framerate);
}
// Open the deviceid'th camera on the bus in a Format7 (ROI) mode.
// Fixed: the camera enumeration list (and the bus context) were leaked when
// throwing "No cameras found" / "Invalid camera index" — the destructor never
// runs for an object whose constructor throws.
FirewireVideo::FirewireVideo(
    unsigned deviceid,
    dc1394video_mode_t video_mode,
    float framerate,
    uint32_t width, uint32_t height,
    uint32_t left, uint32_t top,
    dc1394speed_t iso_speed,
    int dma_buffers, bool reset_at_boot
) :running(false)
{
    d = dc1394_new ();
    if (!d)
        throw VideoException("Failed to get 1394 bus");

    err=dc1394_camera_enumerate (d, &list);
    if( err != DC1394_SUCCESS ) {
        dc1394_free(d);
        throw VideoException("Failed to enumerate cameras");
    }

    if (list->num == 0) {
        dc1394_camera_free_list(list);
        dc1394_free(d);
        throw VideoException("No cameras found");
    }

    if( deviceid >= list->num ) {
        dc1394_camera_free_list(list);
        dc1394_free(d);
        throw VideoException("Invalid camera index");
    }

    const uint64_t guid = list->ids[deviceid].guid;
    dc1394_camera_free_list (list);

    init_format7_camera(guid,dma_buffers,iso_speed,video_mode,framerate,width,height,left,top, reset_at_boot);
}
// Dequeue one frame (blocking if 'wait', polling otherwise), copy it into
// 'image', and immediately return the buffer to the DMA ring.
// Returns true iff an image was written. Throws VideoException on capture error.
bool FirewireVideo::GrabNext( unsigned char* image, bool wait )
{
    const dc1394capture_policy_t policy =
        wait ? DC1394_CAPTURE_POLICY_WAIT : DC1394_CAPTURE_POLICY_POLL;

    dc1394video_frame_t* frame;
    err = dc1394_capture_dequeue(camera, policy, &frame);
    if( err != DC1394_SUCCESS)
        throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

    if( !frame )
        return false;

    memcpy(image, frame->image, frame->image_bytes);
    dc1394_capture_enqueue(camera, frame);
    return true;
}
// Copy the most recent available frame into 'image', discarding any older
// frames queued in the DMA ring. If the queue is empty and 'wait' is true,
// fall back to a blocking grab of the next frame.
// Returns true iff an image was written. Throws VideoException on capture error.
bool FirewireVideo::GrabNewest( unsigned char* image, bool wait )
{
dc1394video_frame_t *f;
err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &f);
if( err != DC1394_SUCCESS)
throw VideoException("Could not capture frame", dc1394_error_get_string(err) );
if( f ) {
// Drain the queue: keep polling until empty, returning each superseded
// frame to the ring buffer so the driver can reuse it.
while( true )
{
dc1394video_frame_t *nf;
err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &nf);
if( err != DC1394_SUCCESS)
throw VideoException("Could not capture frame", dc1394_error_get_string(err) );
if( nf )
{
err=dc1394_capture_enqueue(camera,f);
f = nf;
}else{
break;
}
}
// 'f' is now the newest frame; copy it out and return it to the ring.
memcpy(image,f->image,f->image_bytes);
err=dc1394_capture_enqueue(camera,f);
return true;
}else if(wait){
// Nothing queued: block for the next frame instead.
return GrabNext(image,true);
}
return false;
}
// Dequeue the next frame and hand it to the caller without copying.
// The caller owns the frame until returning it via PutFrame().
// Returns FirewireFrame(0) if polling and no frame is available.
// Fixed: 'frame' was left uninitialised and the dequeue return code ignored,
// so a failed dequeue wrapped an indeterminate pointer in FirewireFrame.
// Now initialised to null and checked, consistent with GrabNext().
FirewireFrame FirewireVideo::GetNext(bool wait)
{
    const dc1394capture_policy_t policy =
        wait ? DC1394_CAPTURE_POLICY_WAIT : DC1394_CAPTURE_POLICY_POLL;

    dc1394video_frame_t *frame = 0;
    err = dc1394_capture_dequeue(camera, policy, &frame);
    if( err != DC1394_SUCCESS)
        throw VideoException("Could not capture frame", dc1394_error_get_string(err) );

    return FirewireFrame(frame);
}
// Hand the caller the most recent available frame without copying, discarding
// any older queued frames. If the queue is empty and 'wait' is true, block
// for the next frame. The caller must return the frame via PutFrame().
// Returns FirewireFrame(0) if polling and nothing is available.
FirewireFrame FirewireVideo::GetNewest(bool wait)
{
dc1394video_frame_t *f;
err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &f);
if( err != DC1394_SUCCESS)
throw VideoException("Could not capture frame", dc1394_error_get_string(err) );
if( f ) {
// Drain the queue, returning each superseded frame to the ring buffer.
while( true )
{
dc1394video_frame_t *nf;
err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_POLL, &nf);
if( err != DC1394_SUCCESS)
throw VideoException("Could not capture frame", dc1394_error_get_string(err) );
if( nf )
{
err=dc1394_capture_enqueue(camera,f);
f = nf;
}else{
break;
}
}
return FirewireFrame(f);
}else if(wait){
return GetNext(true);
}
return FirewireFrame(0);
}
// Return a frame obtained from GetNext/GetNewest to the DMA ring buffer and
// null the caller's handle. Safe to call on an already-returned frame.
void FirewireVideo::PutFrame(FirewireFrame& f)
{
    if( !f.frame ) return;

    dc1394_capture_enqueue(camera, f.frame);
    f.frame = 0;
}
// Read the current gain via the absolute-value (float) feature interface.
// Throws VideoException on failure.
float FirewireVideo::GetGain() const
{
float gain;
err = dc1394_feature_get_absolute_value(camera,DC1394_FEATURE_GAIN,&gain);
if( err != DC1394_SUCCESS )
throw VideoException("Failed to read gain");
return gain;
}
// Let the camera control gain automatically. Throws VideoException on failure.
void FirewireVideo::SetAutoGain()
{
dc1394error_t err = dc1394_feature_set_mode(camera, DC1394_FEATURE_GAIN, DC1394_FEATURE_MODE_AUTO);
if (err < 0) {
throw VideoException("Could not set auto gain mode");
}
}
// Set a manual gain value via the absolute-value (float) feature interface.
// Switches the feature to manual mode and enables absolute control first.
// Throws VideoException on any failure.
void FirewireVideo::SetGain(float val)
{
dc1394error_t err = dc1394_feature_set_mode(camera, DC1394_FEATURE_GAIN, DC1394_FEATURE_MODE_MANUAL);
if (err < 0) {
throw VideoException("Could not set manual gain mode");
}
err = dc1394_feature_set_absolute_control(camera, DC1394_FEATURE_GAIN, DC1394_ON);
if (err < 0) {
throw VideoException("Could not set absolute control for gain");
}
err = dc1394_feature_set_absolute_value(camera, DC1394_FEATURE_GAIN, val);
if (err < 0) {
throw VideoException("Could not set gain value");
}
}
// Read the current brightness via the absolute-value (float) feature interface.
// Throws VideoException on failure.
float FirewireVideo::GetBrightness() const
{
float brightness;
err = dc1394_feature_get_absolute_value(camera,DC1394_FEATURE_BRIGHTNESS,&brightness);
if( err != DC1394_SUCCESS )
throw VideoException("Failed to read brightness");
return brightness;
}
// Let the camera control brightness automatically. Throws VideoException on failure.
void FirewireVideo::SetAutoBrightness()
{
dc1394error_t err = dc1394_feature_set_mode(camera, DC1394_FEATURE_BRIGHTNESS, DC1394_FEATURE_MODE_AUTO);
if (err < 0) {
throw VideoException("Could not set auto brightness mode");
}
}
// Set a manual brightness value via the absolute-value (float) feature
// interface. Switches to manual mode and enables absolute control first.
// Throws VideoException on any failure.
void FirewireVideo::SetBrightness(float val)
{
dc1394error_t err = dc1394_feature_set_mode(camera, DC1394_FEATURE_BRIGHTNESS, DC1394_FEATURE_MODE_MANUAL);
if (err < 0) {
throw VideoException("Could not set manual brightness mode");
}
err = dc1394_feature_set_absolute_control(camera, DC1394_FEATURE_BRIGHTNESS, DC1394_ON);
if (err < 0) {
throw VideoException("Could not set absolute control for brightness");
}
err = dc1394_feature_set_absolute_value(camera, DC1394_FEATURE_BRIGHTNESS, val);
if (err < 0) {
throw VideoException("Could not set brightness value");
}
}
// Read the current shutter time via the absolute-value (float) feature
// interface. Throws VideoException on failure.
float FirewireVideo::GetShutterTime() const
{
float shutter;
err = dc1394_feature_get_absolute_value(camera,DC1394_FEATURE_SHUTTER,&shutter);
if( err != DC1394_SUCCESS )
throw VideoException("Failed to read shutter");
return shutter;
}
// Let the camera control shutter time automatically. Throws VideoException on failure.
void FirewireVideo::SetAutoShutterTime()
{
dc1394error_t err = dc1394_feature_set_mode(camera, DC1394_FEATURE_SHUTTER, DC1394_FEATURE_MODE_AUTO);
if (err < 0) {
throw VideoException("Could not set auto shutter mode");
}
}
// Set a manual shutter time via the absolute-value (float) feature interface.
// Switches to manual mode and enables absolute control first.
// Throws VideoException on any failure.
void FirewireVideo::SetShutterTime(float val)
{
dc1394error_t err = dc1394_feature_set_mode(camera, DC1394_FEATURE_SHUTTER, DC1394_FEATURE_MODE_MANUAL);
if (err < 0) {
throw VideoException("Could not set manual shutter mode");
}
err = dc1394_feature_set_absolute_control(camera, DC1394_FEATURE_SHUTTER, DC1394_ON);
if (err < 0) {
throw VideoException("Could not set absolute control for shutter");
}
err = dc1394_feature_set_absolute_value(camera, DC1394_FEATURE_SHUTTER, val);
if (err < 0) {
throw VideoException("Could not set shutter value");
}
}
// Set the shutter as a raw quantised register value (camera-specific units),
// bypassing the absolute (float) interface used by SetShutterTime().
// Throws VideoException on failure.
void FirewireVideo::SetShutterTimeQuant(int shutter)
{
// TODO: Set mode as well
err = dc1394_feature_set_value(camera,DC1394_FEATURE_SHUTTER,shutter);
if( err != DC1394_SUCCESS )
throw VideoException("Failed to set shutter");
}
// Read the current gamma via the absolute-value (float) feature interface.
// Throws VideoException on failure.
float FirewireVideo::GetGamma() const
{
float gamma;
err = dc1394_feature_get_absolute_value(camera,DC1394_FEATURE_GAMMA,&gamma);
if( err != DC1394_SUCCESS )
throw VideoException("Failed to read gamma");
return gamma;
}
// Disable external triggering so the camera free-runs on its internal clock.
// Throws VideoException on failure.
void FirewireVideo::SetInternalTrigger()
{
dc1394error_t err = dc1394_external_trigger_set_power(camera, DC1394_OFF);
if (err < 0) {
throw VideoException("Could not set internal trigger mode");
}
}
// Configure and enable external (hardware) triggering.
//   mode     dc1394 trigger mode (e.g. edge / level variants)
//   polarity active edge/level polarity
//   source   which trigger input pin to use
// Power is enabled last, after polarity/mode/source are configured.
// Throws VideoException on any failure.
void FirewireVideo::SetExternalTrigger(dc1394trigger_mode_t mode, dc1394trigger_polarity_t polarity, dc1394trigger_source_t source)
{
dc1394error_t err = dc1394_external_trigger_set_polarity(camera, polarity);
if (err < 0) {
throw VideoException("Could not set external trigger polarity");
}
err = dc1394_external_trigger_set_mode(camera, mode);
if (err < 0) {
throw VideoException("Could not set external trigger mode");
}
err = dc1394_external_trigger_set_source(camera, source);
if (err < 0) {
throw VideoException("Could not set external trigger source");
}
err = dc1394_external_trigger_set_power(camera, DC1394_ON);
if (err < 0) {
throw VideoException("Could not set external trigger power");
}
}
// Stop streaming, release the camera, and tear down the dc1394 bus context.
FirewireVideo::~FirewireVideo()
{
Stop();
// Close camera
dc1394_video_set_transmission(camera, DC1394_OFF);
dc1394_capture_stop(camera);
dc1394_camera_free(camera);
dc1394_free (d);
}
// Write a raw camera register at the given offset.
// Throws VideoException on failure.
void FirewireVideo::SetRegister(uint64_t offset, uint32_t value){
dc1394error_t err = dc1394_set_register (camera, offset, value);
if (err < 0) {
throw VideoException("Could not set camera register");
}
}
// Read a raw camera register at the given offset.
// Throws VideoException on failure.
uint32_t FirewireVideo::GetRegister(uint64_t offset)
{
uint32_t value = 0;
dc1394error_t err = dc1394_get_register (camera, offset, &value);
if (err < 0) {
throw VideoException("Could not get camera register");
}
return value;
}
// Write a camera control (CSR-relative) register at the given offset.
// Throws VideoException on failure.
void FirewireVideo::SetControlRegister(uint64_t offset, uint32_t value)
{
dc1394error_t err = dc1394_set_control_register (camera, offset, value);
if (err < 0) {
throw VideoException("Could not set camera control register");
}
}
// Read a camera control (CSR-relative) register at the given offset.
// Throws VideoException on failure.
uint32_t FirewireVideo::GetControlRegister(uint64_t offset)
{
uint32_t value = 0;
dc1394error_t err = dc1394_get_control_register(camera, offset, &value);
if (err < 0) {
throw VideoException("Could not get camera control register");
}
return value;
}
// Round 'value' to the closest multiple of 'step', then clamp the two
// candidate multiples into [min, max] and return whichever is nearer.
// Used to snap requested ROI sizes/positions to the camera's legal steps.
int FirewireVideo::nearest_value(int value, int step, int min, int max)
{
    const int remainder = value % step;
    int below = value - remainder;
    int above = below + step;

    if (below < min) below = min;
    if (above > max) above = max;

    return (abs(below - value) < abs(above - value)) ? below : above;
}
// Bus cycle period in seconds for a given iso speed; the period halves with
// each doubling of the speed. Throws VideoException for unknown speeds.
double FirewireVideo::bus_period_from_iso_speed(dc1394speed_t iso_speed)
{
    switch(iso_speed){
    case DC1394_ISO_SPEED_3200: return 15.625e-6;
    case DC1394_ISO_SPEED_1600: return 31.25e-6;
    case DC1394_ISO_SPEED_800:  return 62.5e-6;
    case DC1394_ISO_SPEED_400:  return 125e-6;
    case DC1394_ISO_SPEED_200:  return 250e-6;
    case DC1394_ISO_SPEED_100:  return 500e-6;
    default:
        throw VideoException("iso speed not valid");
    }
}
// Parse a "FORMAT7_<n>" string into the corresponding dc1394 Format7 video
// mode. Throws VideoException if the string does not match that pattern.
dc1394video_mode_t get_firewire_format7_mode(const std::string fmt)
{
    const std::string prefix = "FORMAT7_";

    if( !StartsWith(fmt, prefix) )
        throw VideoException("Unknown video mode");

    int mode_num = 0;
    std::istringstream iss( fmt.substr(prefix.size()) );
    iss >> mode_num;
    if( iss.fail() )
        throw VideoException("Unknown video mode");

    return (dc1394video_mode_t)(DC1394_VIDEO_MODE_FORMAT7_0 + mode_num);
}
// Scan every fixed dc1394 video mode for one matching the requested size and
// pixel-format name. Throws VideoException if no mode matches.
dc1394video_mode_t get_firewire_mode(unsigned width, unsigned height, const std::string fmt)
{
    for( int m = DC1394_VIDEO_MODE_MIN; m < DC1394_VIDEO_MODE_MAX; ++m )
    {
        const dc1394video_mode_t video_mode = (dc1394video_mode_t)m;
        try {
            unsigned mode_w, mode_h;
            std::string mode_fmt;
            Dc1394ModeDetails(video_mode, mode_w, mode_h, mode_fmt);
            if( mode_w == width && mode_h == height && fmt == mode_fmt )
                return video_mode;
        } catch (const VideoException&) {
            // Modes without an entry in Dc1394ModeDetails are simply skipped.
        }
    }
    throw VideoException("Unknown video mode");
}
// Map a requested fps onto the discrete set of frame rates supported by
// fixed dc1394 modes. All table values are exactly representable as floats,
// so exact comparison matches the original == semantics.
// Throws VideoException for any other value.
dc1394framerate_t get_firewire_framerate(float framerate)
{
    struct Entry { float fps; dc1394framerate_t rate; };
    static const Entry table[] = {
        { 1.875f, DC1394_FRAMERATE_1_875 },
        { 3.75f,  DC1394_FRAMERATE_3_75  },
        { 7.5f,   DC1394_FRAMERATE_7_5   },
        { 15.f,   DC1394_FRAMERATE_15    },
        { 30.f,   DC1394_FRAMERATE_30    },
        { 60.f,   DC1394_FRAMERATE_60    },
        { 120.f,  DC1394_FRAMERATE_120   },
        { 240.f,  DC1394_FRAMERATE_240   },
    };

    for(const Entry& e : table) {
        if( framerate == e.fps ) return e.rate;
    }
    throw VideoException("Invalid framerate");
}
// Factory for "firewire://" and "dc1394://" URIs.
// Recognised options: fmt, size, pos, dma, iso, fps, deinterlace.
PANGOLIN_REGISTER_FACTORY(FirewireVideo)
{
    struct FirewireVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::string desired_format = uri.Get<std::string>("fmt","RGB24");
            ToUpper(desired_format);
            const ImageDim desired_dim = uri.Get<ImageDim>("size", ImageDim(640,480));
            const ImageDim desired_xy  = uri.Get<ImageDim>("pos", ImageDim(0,0));
            const int desired_dma      = uri.Get<int>("dma", 10);
            const int desired_iso      = uri.Get<int>("iso", 400);
            const float desired_fps    = uri.Get<float>("fps", 30);
            const bool deinterlace     = uri.Get<bool>("deinterlace", 0);

            Guid guid = 0;
            unsigned deviceid = 0;
            dc1394framerate_t framerate = get_firewire_framerate(desired_fps);

            // Map iso={100,200,400,800,...} onto DC1394_ISO_SPEED_100 + log2(iso/100).
            // Fixed: the original truncating cast of log(x)/log(2) (with an
            // integer division) could land just below a whole number and
            // select the wrong speed; round to nearest instead.
            const dc1394speed_t iso_speed =
                (dc1394speed_t)(int)(log(desired_iso / 100.0) / log(2.0) + 0.5);

            const int dma_buffers = desired_dma;

            VideoInterface* video_raw = nullptr;

            if( StartsWith(desired_format, "FORMAT7") )
            {
                // Format7 (ROI) modes run free (MAX_FR); size/pos come from the URI.
                dc1394video_mode_t video_mode = get_firewire_format7_mode(desired_format);
                if( guid.guid == 0 ) {
                    video_raw = new FirewireVideo(deviceid,video_mode,FirewireVideo::MAX_FR, desired_dim.x, desired_dim.y, desired_xy.x, desired_xy.y, iso_speed, dma_buffers,true);
                }else{
                    video_raw = new FirewireVideo(guid,video_mode,FirewireVideo::MAX_FR, desired_dim.x, desired_dim.y, desired_xy.x, desired_xy.y, iso_speed, dma_buffers,true);
                }
            }else{
                // Fixed-size modes: size+format must match a tabulated mode exactly.
                dc1394video_mode_t video_mode = get_firewire_mode(desired_dim.x, desired_dim.y,desired_format);
                if( guid.guid == 0 ) {
                    video_raw = new FirewireVideo(deviceid,video_mode,framerate,iso_speed,dma_buffers);
                }else{
                    video_raw = new FirewireVideo(guid,video_mode,framerate,iso_speed,dma_buffers);
                }
            }

            if(deinterlace) {
                // DeinterlaceVideo takes ownership of the wrapped source.
                std::unique_ptr<VideoInterface> video(video_raw);
                video_raw = new DeinterlaceVideo(video);
            }

            return std::unique_ptr<VideoInterface>(video_raw);
        }
    };

    auto factory = std::make_shared<FirewireVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "firewire");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "dc1394");
}
}

View File

@@ -0,0 +1,298 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/video/drivers/images.h>
#include <pangolin/video/iostream_operators.h>
#include <cstring>
#include <fstream>
namespace pangolin
{
// Load every channel image of frame i from disk into the 'loaded' cache.
// Returns false if i is out of range, true once all channels are loaded.
bool ImagesVideo::LoadFrame(size_t i)
{
    if( i >= num_files )
        return false;

    Frame& frame = loaded[i];
    for(size_t c = 0; c < num_channels; ++c) {
        const std::string& filename = Filename(i, c);
        const ImageFileType file_type = FileType(filename);

        const bool treat_as_raw = (file_type == ImageFileTypeUnknown) && unknowns_are_raw;
        if(treat_as_raw) {
            // Unrecognised extensions are decoded as raw frames using the
            // user-supplied format and geometry.
            frame.push_back( LoadImage( filename, raw_fmt, raw_width, raw_height, raw_fmt.bpp * raw_width / 8) );
        }else{
            frame.push_back( LoadImage( filename, file_type ) );
        }
    }
    return true;
}
// Populate filenames / num_files / num_channels from a json archive
// (as produced by ImagesVideoOutput). Relative stream paths are resolved
// against the directory containing the json file.
// Throws VideoException if the json is malformed or the archive is empty.
void ImagesVideo::PopulateFilenamesFromJson(const std::string& filename)
{
std::ifstream ifs( PathExpand(filename));
picojson::value json;
const std::string err = picojson::parse(json, ifs);
if(err.empty()) {
const std::string folder = PathParent(filename) + "/";
device_properties = json["device_properties"];
json_frames = json["frames"];
num_files = json_frames.size();
if(num_files == 0) {
throw VideoException("Empty Json Image archive.");
}
// Channel count is taken from the first frame's stream list.
num_channels = json_frames[0]["stream_files"].size();
if(num_channels == 0) {
throw VideoException("Empty Json Image archive.");
}
filenames.resize(num_channels);
for(size_t c=0; c < num_channels; ++c) {
filenames[c].resize(num_files);
for(size_t i = 0; i < num_files; ++i) {
const std::string path = json_frames[i]["stream_files"][c].get<std::string>();
// Absolute paths are kept; relative ones are anchored at the archive folder.
filenames[c][i] = (path.size() && path[0] == '/') ? path : (folder + path);
}
}
loaded.resize(num_files);
}else{
throw VideoException(err);
}
}
// Work out the per-channel file lists from a wildcard URI of the form
// "[chan0_glob,chan1_glob,...]" or a single glob / json-archive path.
// Sets num_channels and num_files and sizes the 'loaded' cache.
// Throws VideoException if any channel matches no files.
void ImagesVideo::PopulateFilenames(const std::string& wildcard_path)
{
// Each bracketed, comma-separated component is one video channel.
const std::vector<std::string> wildcards = Expand(wildcard_path, '[', ']', ',');
num_channels = wildcards.size();
// A single path may instead point at (or contain) a json archive.
if(wildcards.size() == 1 ) {
const std::string expanded_path = PathExpand(wildcards[0]);
const std::string possible_archive_path = expanded_path + "/archive.json";
if (FileLowercaseExtention(expanded_path) == ".json" ) {
PopulateFilenamesFromJson(wildcards[0]);
return;
}else if(FileExists(possible_archive_path)){
PopulateFilenamesFromJson(possible_archive_path);
return;
}
}
filenames.resize(num_channels);
for(size_t i = 0; i < wildcards.size(); ++i) {
const std::string channel_wildcard = PathExpand(wildcards[i]);
FilesMatchingWildcard(channel_wildcard, filenames[i]);
// num_files starts at size_t(-1) (sentinel for "not yet set");
// afterwards it is clamped to the shortest channel.
if(num_files == size_t(-1)) {
num_files = filenames[i].size();
}else{
if( num_files != filenames[i].size() ) {
std::cerr << "Warning: Video Channels have unequal number of files" << std::endl;
}
num_files = std::min(num_files, filenames[i].size());
}
if(num_files == 0) {
throw VideoException("No files found for wildcard '" + channel_wildcard + "'");
}
}
// Resize empty frames vector to hold future images.
loaded.resize(num_files);
}
// Derive per-channel StreamInfo (geometry, pitch, buffer offset) from the
// already-loaded first frame, and total up the per-frame byte size.
// Assumes loaded[0] has been populated by LoadFrame(0).
void ImagesVideo::ConfigureStreamSizes()
{
size_bytes = 0;
for(size_t c=0; c < num_channels; ++c) {
const TypedImage& img = loaded[0][c];
// StreamInfo stores the byte offset into the frame buffer as a pointer.
const StreamInfo stream_info(img.fmt, img.w, img.h, img.pitch, (unsigned char*)(size_bytes));
streams.push_back(stream_info);
size_bytes += img.h*img.pitch;
}
}
// Open an image-sequence video. Files with unknown extensions are rejected
// (unknowns_are_raw = false); num_files(-1) is the "not yet set" sentinel
// consumed by PopulateFilenames.
ImagesVideo::ImagesVideo(const std::string& wildcard_path)
: num_files(-1), num_channels(0), next_frame_id(0),
unknowns_are_raw(false)
{
// Work out which files to sequence
PopulateFilenames(wildcard_path);
// Load first image in order to determine stream sizes etc
LoadFrame(next_frame_id);
ConfigureStreamSizes();
// TODO: Queue frames in another thread.
}
// Open an image-sequence video where files with unknown extensions are
// decoded as raw frames of the given pixel format and geometry.
ImagesVideo::ImagesVideo(const std::string& wildcard_path,
const PixelFormat& raw_fmt,
size_t raw_width, size_t raw_height
) : num_files(-1), num_channels(0), next_frame_id(0),
unknowns_are_raw(true), raw_fmt(raw_fmt),
raw_width(raw_width), raw_height(raw_height)
{
// Work out which files to sequence
PopulateFilenames(wildcard_path);
// Load first image in order to determine stream sizes etc
LoadFrame(next_frame_id);
ConfigureStreamSizes();
// TODO: Queue frames in another thread.
}
// No explicit cleanup required: all members release themselves.
ImagesVideo::~ImagesVideo()
{
}
//! Implement VideoInput::Start() — no-op: a file-backed source has nothing to start.
void ImagesVideo::Start()
{
}
//! Implement VideoInput::Stop() — no-op: a file-backed source has nothing to stop.
void ImagesVideo::Stop()
{
}
//! Implement VideoInput::SizeBytes() — total bytes per multi-channel frame,
//! computed by ConfigureStreamSizes().
size_t ImagesVideo::SizeBytes() const
{
return size_bytes;
}
//! Implement VideoInput::Streams() — one StreamInfo per image channel.
const std::vector<StreamInfo>& ImagesVideo::Streams() const
{
return streams;
}
//! Implement VideoInput::GrabNext() — copy the next frame's channels into
//! 'image' back-to-back at their stream offsets. Frames are loaded from disk
//! on demand and their pixel data released immediately after the copy.
//! Returns false at end of sequence or if a channel has unexpected geometry.
bool ImagesVideo::GrabNext( unsigned char* image, bool /*wait*/ )
{
if(next_frame_id < loaded.size()) {
Frame& frame = loaded[next_frame_id];
// Lazily (re)load from disk if this frame is not fully cached.
if(frame.size() != num_channels) {
LoadFrame(next_frame_id);
}
for(size_t c=0; c < num_channels; ++c){
TypedImage& img = frame[c];
// Bail out if the file on disk does not match the stream geometry
// established from the first frame.
if(!img.ptr || img.w != streams[c].Width() || img.h != streams[c].Height() ) {
return false;
}
const StreamInfo& si = streams[c];
std::memcpy(image + (size_t)si.Offset(), img.ptr, si.SizeBytes());
// Free the decoded pixels now that they are copied out.
img.Deallocate();
}
frame.clear();
next_frame_id++;
return true;
}
return false;
}
//! Implement VideoInput::GrabNewest() — identical to GrabNext for a
//! file-backed source, since there is no live queue to skip ahead in.
bool ImagesVideo::GrabNewest( unsigned char* image, bool wait )
{
return GrabNext(image,wait);
}
// Id of the most recently grabbed frame. Before the first grab this wraps to
// SIZE_MAX, which FrameProperties() filters out via its range check.
// Fixed: the value was previously truncated through int before widening back
// to size_t, corrupting frame ids beyond INT_MAX; the plain unsigned
// subtraction preserves the SIZE_MAX-before-first-grab behavior.
size_t ImagesVideo::GetCurrentFrameId() const
{
    return next_frame_id - 1;
}
// Total number of frames in the sequence (shortest channel wins).
size_t ImagesVideo::GetTotalFrames() const
{
return num_files;
}
// Position the stream so the next grab returns 'frameid'. The request is
// clamped to num_files; seeking to num_files makes the next grab report
// end-of-sequence. Returns the frame id actually seeked to.
// (The previous std::max against size_t(0) was redundant on an unsigned type.)
size_t ImagesVideo::Seek(size_t frameid)
{
    next_frame_id = std::min(frameid, num_files);
    return next_frame_id;
}
// Device metadata loaded from the json archive (empty for wildcard sources).
const picojson::value& ImagesVideo::DeviceProperties() const
{
return device_properties;
}
// Per-frame metadata for the most recently grabbed frame, if the source was
// a json archive that recorded any; otherwise a reference to null_props.
const picojson::value& ImagesVideo::FrameProperties() const
{
const size_t frame = GetCurrentFrameId();
// The range check also rejects the pre-first-grab sentinel (SIZE_MAX).
if( json_frames.evaluate_as_boolean() && frame < json_frames.size()) {
const picojson::value& frame_props = json_frames[frame];
if(frame_props.contains("frame_properties")) {
return frame_props["frame_properties"];
}
}
return null_props;
}
// Factory for "file(s)://" and "image(s)://" URIs. If "fmt" is given, files
// with unknown extensions are decoded as raw frames of that format and the
// "size" geometry; otherwise only recognised image formats are loaded.
PANGOLIN_REGISTER_FACTORY(ImagesVideo)
{
struct ImagesVideoVideoFactory final : public FactoryInterface<VideoInterface> {
std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
const bool raw = uri.Contains("fmt");
const std::string path = PathExpand(uri.url);
if(raw) {
const std::string sfmt = uri.Get<std::string>("fmt", "GRAY8");
const PixelFormat fmt = PixelFormatFromString(sfmt);
const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(640,480));
return std::unique_ptr<VideoInterface>( new ImagesVideo(path, fmt, dim.x, dim.y) );
}else{
return std::unique_ptr<VideoInterface>( new ImagesVideo(path) );
}
}
};
auto factory = std::make_shared<ImagesVideoVideoFactory>();
// "file"/"files" register at lower priority so more specific drivers win.
FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 20, "file");
FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 20, "files");
FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "image");
FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "images");
}
}

View File

@@ -0,0 +1,126 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <iomanip>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/image/image_io.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/video/drivers/images_out.h>
namespace pangolin {
// Create an image-sequence writer.
//   image_folder         destination directory (must already exist)
//   json_file_out        optional archive index path; empty disables the index
//   image_file_extension per-stream image format, e.g. "png"
// Throws std::runtime_error if the json file cannot be opened for writing.
ImagesVideoOutput::ImagesVideoOutput(const std::string& image_folder, const std::string& json_file_out, const std::string& image_file_extension)
: json_frames(picojson::array_type,true),
image_index(0), image_folder( PathExpand(image_folder) + "/" ), image_file_extension(image_file_extension)
{
if(!json_file_out.empty()) {
file.open(json_file_out);
if(!file.is_open()) {
throw std::runtime_error("Unable to open json file for writing, " + json_file_out + ". Make sure output folder already exists.");
}
}
}
// On shutdown, serialise the accumulated archive index (device properties,
// per-frame file lists, and the uris) to the json file, if one was opened.
ImagesVideoOutput::~ImagesVideoOutput()
{
if(file.is_open())
{
// The uri a reader can use to play this archive back.
const std::string video_uri = "images://" + image_folder + "archive.json";
picojson::value json_file;
json_file["device_properties"] = device_properties;
json_file["frames"] = json_frames;
json_file["input_uri"] = input_uri;
json_file["video_uri"] = video_uri;
// Serialize json to file.
file << json_file.serialize(true);
}
}
// Returns the stream layout previously supplied via SetStreams().
const std::vector<StreamInfo>& ImagesVideoOutput::Streams() const
{
    return streams;
}
// Record the stream layout, originating uri and device metadata; these are
// embedded in the json index when the writer is destroyed.
void ImagesVideoOutput::SetStreams(const std::vector<StreamInfo>& streams, const std::string& uri, const picojson::value& device_properties)
{
    this->streams = streams;
    this->input_uri = uri;
    this->device_properties = device_properties;
}
// Write one frame: saves each stream's image to its own numbered file and
// appends a json record containing the filenames and the caller-supplied
// frame properties. Always returns 0.
int ImagesVideoOutput::WriteStreams(const unsigned char* data, const picojson::value& frame_properties)
{
    picojson::value json_filenames(picojson::array_type, true);

    // Write each stream image to file.
    for(size_t s=0; s < streams.size(); ++s) {
        const pangolin::StreamInfo& si = streams[s];
        // The '%' placeholders consume, in order: setfill, setw, the
        // zero-padded frame index, the stream index and the file extension.
        const std::string filename = pangolin::FormatString("image_%%%_%.%",std::setfill('0'),std::setw(10),image_index, s, image_file_extension);
        json_filenames.push_back(filename);
        const Image<unsigned char> img = si.StreamImage(data);
        pangolin::SaveImage(img, si.PixFormat(), image_folder + filename);
    }

    // Add frame_properties to json file.
    picojson::value json_frame;
    json_frame["frame_properties"] = frame_properties;
    json_frame["stream_files"] = json_filenames;
    json_frames.push_back(json_frame);

    ++image_index;
    return 0;
}
// A directory of images is not a pipe; consumers need not poll for reconnect.
bool ImagesVideoOutput::IsPipe() const
{
    return false;
}
// Register the "images" VideoOutput scheme, e.g. images://path/folder?fmt=png.
PANGOLIN_REGISTER_FACTORY(ImagesVideoOutput)
{
    struct ImagesVideoFactory final : public FactoryInterface<VideoOutputInterface> {
        std::unique_ptr<VideoOutputInterface> Open(const Uri& uri) override {
            const std::string images_folder = PathExpand(uri.url);
            const std::string json_filename = images_folder + "/archive.json";
            const std::string image_extension = uri.Get<std::string>("fmt", "png");
            // Refuse to clobber an existing dataset in the target directory.
            if(FileExists(json_filename)) {
                throw std::runtime_error("Dataset already exists in directory.");
            }
            return std::unique_ptr<VideoOutputInterface>(
                new ImagesVideoOutput(images_folder, json_filename, image_extension)
            );
        }
    };
    auto factory = std::make_shared<ImagesVideoFactory>();
    FactoryRegistry<VideoOutputInterface>::I().RegisterFactory(factory, 10, "images");
}
}

View File

@@ -0,0 +1,429 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/drivers/join.h>
#include <pangolin/video/iostream_operators.h>
//#define DEBUGJOIN
#ifdef DEBUGJOIN
#include <pangolin/utils/timer.h>
#define TSTART() pangolin::basetime start,last,now; start = pangolin::TimeNow(); last = start;
#define TGRABANDPRINT(...) now = pangolin::TimeNow(); fprintf(stderr,"JOIN: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " %fms.\n",1000*pangolin::TimeDiff_s(last, now)); last = now;
#define DBGPRINT(...) fprintf(stderr,"JOIN: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr,"\n");
#else
#define TSTART()
#define TGRABANDPRINT(...)
#define DBGPRINT(...)
#endif
namespace pangolin
{
// Take ownership of the child video sources and build a combined stream list.
// Each child's streams are appended with their image offsets rebased onto the
// joined buffer, so one contiguous grab buffer serves all children.
JoinVideo::JoinVideo(std::vector<std::unique_ptr<VideoInterface> > &src_)
    : storage(std::move(src_)), size_bytes(0), sync_tolerance_us(0)
{
    // Keep raw pointers for fast iteration; 'storage' retains ownership.
    for(auto& p : storage) {
        src.push_back(p.get());
    }

    // Add individual streams
    for(size_t s=0; s< src.size(); ++s)
    {
        VideoInterface& vid = *src[s];
        for(size_t i=0; i < vid.Streams().size(); ++i)
        {
            const StreamInfo si = vid.Streams()[i];
            const PixelFormat fmt = si.PixFormat();
            // The running byte offset is smuggled in as the image base pointer,
            // rebasing this stream relative to the start of the joined buffer.
            const Image<unsigned char> img_offset = si.StreamImage((unsigned char*)size_bytes);
            streams.push_back(StreamInfo(fmt, img_offset));
        }
        size_bytes += src[s]->SizeBytes();
    }
}
// Stop every child source before the owned storage is released.
JoinVideo::~JoinVideo()
{
    for(VideoInterface* vid : src) {
        vid->Stop();
    }
}
// Total size of one joined frame: the sum of all child frame sizes.
size_t JoinVideo::SizeBytes() const
{
    return size_bytes;
}
// Combined stream descriptions, rebased onto the joined frame buffer.
const std::vector<StreamInfo>& JoinVideo::Streams() const
{
    return streams;
}
void JoinVideo::Start()
{
for(size_t s=0; s< src.size(); ++s) {
src[s]->Start();
}
}
void JoinVideo::Stop()
{
for(size_t s=0; s< src.size(); ++s) {
src[s]->Stop();
}
}
// Enable timestamp-based synchronisation of the child streams.
// Returns false (and leaves syncing disabled) if any child does not advertise
// timing data via PANGO_HAS_TIMING_DATA, either at device level or per-stream.
// transfer_bandwidth_gbps > 0 enables compensation of reception timestamps
// for estimated transfer duration in GetAdjustedCaptureTime().
bool JoinVideo::Sync(int64_t tolerance_us, double transfer_bandwidth_gbps)
{
    // Gbit/s -> bytes per microsecond.
    transfer_bandwidth_bytes_per_us = int64_t((transfer_bandwidth_gbps * 1E3) / 8.0);
    // std::cout << "transfer_bandwidth_gbps: " << transfer_bandwidth_gbps << std::endl;
    for(size_t s=0; s< src.size(); ++s)
    {
        picojson::value props = GetVideoDeviceProperties(src[s]);
        if(!props.get_value(PANGO_HAS_TIMING_DATA, false)) {
            // Device itself lacks timing data; accept it if every one of its
            // advertised streams provides timing data instead.
            if (props.contains("streams")) {
                picojson::value streams = props["streams"];
                for (size_t i=0; i<streams.size(); ++i) {
                    if(!streams[i].get_value(PANGO_HAS_TIMING_DATA, false)) {
                        sync_tolerance_us = 0;
                        return false;
                    }
                }
            } else {
                sync_tolerance_us = 0;
                return false;
            }
        }
    }
    sync_tolerance_us = tolerance_us;
    // std::cout << "transfer_bandwidth_bytes_per_us: " << transfer_bandwidth_bytes_per_us << std::endl;
    return true;
}
// Assuming that src_index supports VideoPropertiesInterface and has a valid
// PANGO_HOST_RECEPTION_TIME_US or PANGO_ESTIMATED_CENTER_CAPTURE_TIME_US,
// returns a capture time adjusted for transfer time and, when possible, for
// exposure. Falls back to the first stream's per-stream properties when the
// device-level frame properties carry no timestamps.
int64_t JoinVideo::GetAdjustedCaptureTime(size_t src_index)
{
    picojson::value props = GetVideoFrameProperties(src[src_index]);
    if(props.contains(PANGO_ESTIMATED_CENTER_CAPTURE_TIME_US)) {
        // Great: the driver already estimated the center of capture.
        return props[PANGO_ESTIMATED_CENTER_CAPTURE_TIME_US].get<int64_t>();
    } else {
        if(props.contains(PANGO_HOST_RECEPTION_TIME_US)) {
            // Approximate capture time from reception time, compensated for
            // estimated transfer duration when a bandwidth was set via Sync().
            int64_t transfer_time_us = 0;
            if( transfer_bandwidth_bytes_per_us > 0 ) {
                transfer_time_us = src[src_index]->SizeBytes() / transfer_bandwidth_bytes_per_us;
            }
            return props[PANGO_HOST_RECEPTION_TIME_US].get<int64_t>() - transfer_time_us;
        } else {
            // Some drivers only publish timing per-stream; use stream 0.
            if (props.contains("streams")) {
                picojson::value streams = props["streams"];
                if(streams.size()>0){
                    if(streams[0].contains(PANGO_ESTIMATED_CENTER_CAPTURE_TIME_US)) {
                        // Great: the driver already estimated the center of capture.
                        return streams[0][PANGO_ESTIMATED_CENTER_CAPTURE_TIME_US].get<int64_t>();
                    }
                    else if( streams[0].contains(PANGO_HOST_RECEPTION_TIME_US)) {
                        int64_t transfer_time_us = 0;
                        if( transfer_bandwidth_bytes_per_us > 0 ) {
                            transfer_time_us = src[src_index]->SizeBytes() / transfer_bandwidth_bytes_per_us;
                        }
                        return streams[0][PANGO_HOST_RECEPTION_TIME_US].get<int64_t>() - transfer_time_us;
                    }
                }
            }
        }
        // BUGFIX: message previously read "does contain", inverting its meaning.
        PANGO_ENSURE(false, "JoinVideo: Stream % does not contain any timestamp info.\n", src_index);
        return 0;
    }
}
// Grab the next frame from every child into the joined buffer. When a sync
// tolerance has been configured (Sync()), verify that all per-stream capture
// times agree within sync_tolerance_us, re-grabbing lagging streams a bounded
// number of times to catch up. Returns false if any stream produced no frame
// or the streams could not be brought into sync.
bool JoinVideo::GrabNext(unsigned char* image, bool wait)
{
    size_t offset = 0;
    std::vector<size_t> offsets(src.size(), 0);
    // 0 doubles as the "no frame received" sentinel below.
    std::vector<int64_t> capture_us(src.size(), 0);

    TSTART()
    DBGPRINT("Entering GrabNext:")
    for(size_t s=0; s<src.size(); ++s) {
        if( src[s]->GrabNext(image+offset,wait) ) {
            if(sync_tolerance_us > 0) {
                capture_us[s] = GetAdjustedCaptureTime(s);
            }else{
                // Syncing disabled: just mark the stream as having delivered.
                capture_us[s] = std::numeric_limits<int64_t>::max();
            }
        }
        offsets[s] = offset;
        offset += src[s]->SizeBytes();
        TGRABANDPRINT("Stream %ld grab took ",s);
    }

    // Check if any streams didn't return an image. This means a stream is waiting on data or has finished.
    if( std::any_of(capture_us.begin(), capture_us.end(), [](int64_t v){return v == 0;}) ){
        return false;
    }

    // Check Sync if a tolerence has been specified.
    if(sync_tolerance_us > 0)
    {
        auto range = std::minmax_element(capture_us.begin(), capture_us.end());
        if( (*range.second - *range.first) > sync_tolerance_us)
        {
            pango_print_warn("JoinVideo: Source timestamps span %lu us, not within %lu us. Ignoring frames, trying to sync...\n", (unsigned long)((*range.second - *range.first)), (unsigned long)sync_tolerance_us);

            // Attempt to resync... (bounded to 10 rounds). NOTE(review):
            // 'range' holds iterators into capture_us, so updated timestamps
            // are observed as streams catch up within the loop.
            for(size_t n=0; n<10; ++n){
                for(size_t s=0; s<src.size(); ++s) {
                    // Catch up frames that are behind
                    if(capture_us[s] < (*range.second - sync_tolerance_us))
                    {
                        if(src[s]->GrabNext(image+offsets[s],true)) {
                            capture_us[s] = GetAdjustedCaptureTime(s);
                        }
                    }
                }
            }
        }

        // Check sync again
        range = std::minmax_element(capture_us.begin(), capture_us.end());
        if( (*range.second - *range.first) > sync_tolerance_us) {
            TGRABANDPRINT("NOT IN SYNC oldest:%ld newest:%ld delta:%ld", *range.first, *range.second, (*range.second - *range.first));
            return false;
        } else {
            TGRABANDPRINT("   IN SYNC oldest:%ld newest:%ld delta:%ld", *range.first, *range.second, (*range.second - *range.first));
            return true;
        }
    }
    else
    {
        pango_print_warn("JoinVideo: sync_tolerance_us = 0, frames are not synced!\n");
        return true;
    }
}
// True iff every source also implements BufferAwareVideoInterface.
bool AllInterfacesAreBufferAware(std::vector<VideoInterface*>& src){
    for(VideoInterface* vid : src) {
        if(dynamic_cast<BufferAwareVideoInterface*>(vid) == nullptr) {
            return false;
        }
    }
    return true;
}
bool JoinVideo::GrabNewest( unsigned char* image, bool wait )
{
// TODO: Tidy to correspond to GrabNext()
TSTART()
DBGPRINT("Entering GrabNewest:");
if(AllInterfacesAreBufferAware(src)) {
DBGPRINT("All interfaces are BufferAwareVideoInterface.")
unsigned int minN = std::numeric_limits<unsigned int>::max();
//Find smallest number of frames it is safe to drop.
for(size_t s=0; s<src.size(); ++s) {
auto bai = dynamic_cast<BufferAwareVideoInterface*>(src[s]);
unsigned int n = bai->AvailableFrames();
minN = std::min(n, minN);
DBGPRINT("Interface %ld has %u frames available.",s ,n)
}
TGRABANDPRINT("Quering avalable frames took ")
DBGPRINT("Safe number of buffers to drop: %d.",((minN > 1) ? (minN-1) : 0));
//Safely drop minN-1 frames on each interface.
if(minN > 1) {
for(size_t s=0; s<src.size(); ++s) {
auto bai = dynamic_cast<BufferAwareVideoInterface*>(src[s]);
if(!bai->DropNFrames(minN - 1)) {
pango_print_error("Stream %lu did not drop %u frames altough available.\n", (unsigned long)s, (minN-1));
return false;
}
}
TGRABANDPRINT("Dropping %u frames on each interface took ",(minN -1));
}
return GrabNext(image, wait);
} else {
DBGPRINT("NOT all interfaces are BufferAwareVideoInterface.")
// Simply calling GrabNewest on the child streams might cause loss of sync,
// instead we perform as many GrabNext as possible on the first stream and
// then pull the same number of frames from every other stream.
size_t offset = 0;
std::vector<size_t> offsets;
std::vector<int64_t> reception_times;
int64_t newest = std::numeric_limits<int64_t>::min();
int64_t oldest = std::numeric_limits<int64_t>::max();
bool grabbed_any = false;
int first_stream_backlog = 0;
int64_t rt = 0;
bool got_frame = false;
do {
got_frame = src[0]->GrabNext(image+offset,false);
if(got_frame) {
if(sync_tolerance_us > 0) {
rt = GetAdjustedCaptureTime(0);
}
first_stream_backlog++;
grabbed_any = true;
}
} while(got_frame);
offsets.push_back(offset);
offset += src[0]->SizeBytes();
if(sync_tolerance_us > 0) {
reception_times.push_back(rt);
if(newest < rt) newest = rt;
if(oldest > rt) oldest = rt;
}
TGRABANDPRINT("Stream 0 grab took ");
for(size_t s=1; s<src.size(); ++s) {
for (int i=0; i<first_stream_backlog; i++){
grabbed_any |= src[s]->GrabNext(image+offset,true);
if(sync_tolerance_us > 0) {
rt = GetAdjustedCaptureTime(s);
}
}
offsets.push_back(offset);
offset += src[s]->SizeBytes();
if(sync_tolerance_us > 0) {
reception_times.push_back(rt);
if(newest < rt) newest = rt;
if(oldest > rt) oldest = rt;
}
}
TGRABANDPRINT("Stream >=1 grab took ");
if(sync_tolerance_us > 0) {
if(std::abs(newest - oldest) > sync_tolerance_us){
pango_print_warn("Join timestamps not within %lu us trying to sync\n", (unsigned long)sync_tolerance_us);
for(size_t n=0; n<10; ++n){
for(size_t s=0; s<src.size(); ++s) {
if(reception_times[s] < (newest - sync_tolerance_us)) {
VideoInterface& vid = *src[s];
if(vid.GrabNewest(image+offsets[s],false)) {
rt = GetAdjustedCaptureTime(s);
if(newest < rt) newest = rt;
if(oldest > rt) oldest = rt;
reception_times[s] = rt;
}
}
}
}
}
if(std::abs(newest - oldest) > sync_tolerance_us ) {
TGRABANDPRINT("NOT IN SYNC newest:%ld oldest:%ld delta:%ld syncing took ", newest, oldest, (newest - oldest));
return false;
} else {
TGRABANDPRINT(" IN SYNC newest:%ld oldest:%ld delta:%ld syncing took ", newest, oldest, (newest - oldest));
return true;
}
} else {
return true;
}
}
}
// Expose the (non-owning) child interfaces for filter-chain introspection.
std::vector<VideoInterface*>& JoinVideo::InputStreams()
{
    return src;
}
// Split 'src' into the contents of its top-level bracketed groups, e.g.
// "{a}{b{c}}" -> {"a", "b{c}"}. Text outside brackets is ignored. Unmatched
// opening brackets yield no entry for that group.
// FIX: a stray closing bracket previously drove 'nesting' negative, which
// silently broke matching of all subsequent groups; it is now ignored.
std::vector<std::string> SplitBrackets(const std::string src, char open = '{', char close = '}')
{
    std::vector<std::string> splits;

    int nesting = 0;
    int begin = -1;

    for(size_t i=0; i < src.length(); ++i) {
        if(src[i] == open) {
            if(nesting==0) {
                begin = (int)i;
            }
            nesting++;
        }else if(src[i] == close && nesting > 0) {
            nesting--;
            if(nesting == 0) {
                // matching close bracket: emit the enclosed text.
                int str_start = begin+1;
                splits.push_back( src.substr(str_start, i-str_start) );
            }
        }
    }

    return splits;
}
// Register the "join" scheme: join://{uri1}{uri2}... opens several videos as
// one multi-stream source, optionally time-synchronised via sync_tolerance_us.
PANGOLIN_REGISTER_FACTORY(JoinVideo)
{
    struct JoinVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::vector<std::string> uris = SplitBrackets(uri.url);

            // Standard by which we should measure if frames are in sync.
            const unsigned long sync_tol_us = uri.Get<unsigned long>("sync_tolerance_us", 0);

            // Bandwidth used to compute exposure end time from reception time for sync logic
            const double transfer_bandwidth_gbps = uri.Get<double>("transfer_bandwidth_gbps", 0.0);

            if(uris.size() == 0) {
                throw VideoException("No VideoSources found in join URL.", "Specify videos to join with curly braces, e.g. join://{test://}{test://}");
            }

            std::vector<std::unique_ptr<VideoInterface>> src;
            for(size_t i=0; i<uris.size(); ++i) {
                src.push_back( pangolin::OpenVideo(uris[i]) );
            }

            JoinVideo* video_raw = new JoinVideo(src);

            // Sync() may legitimately fail when children lack timing data.
            if(sync_tol_us>0) {
                if(!video_raw->Sync(sync_tol_us, transfer_bandwidth_gbps)) {
                    pango_print_error("WARNING: not all streams in join support sync_tolerance_us option. Not using tolerance.\n");
                }
            }

            return std::unique_ptr<VideoInterface>(video_raw);
        }
    };

    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<JoinVideoFactory>(), 10, "join");
}
}
#undef TSTART
#undef TGRABANDPRINT
#undef DBGPRINT

View File

@@ -0,0 +1,86 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/video.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/utils/file_extension.h>
#include <pangolin/utils/transform.h>
#include <fstream>
#include <functional>
namespace pangolin {
// Register the "json" scheme (and *.json files): the json document names a
// real "video_uri" to open, with optional "video_uri_defaults" whose keys can
// be overridden by query parameters on the outer uri.
PANGOLIN_REGISTER_FACTORY(JsonVideo)
{
    struct JsonVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            // Claim json:// uris, and file:// uris with a .json extension.
            if(uri.scheme == "json" || (uri.scheme == "file" && FileLowercaseExtention(uri.url) == ".json")) {
                const std::string json_filename = PathExpand(uri.url);
                std::ifstream f( json_filename );

                // Parse json file to determine sub-video
                if(f.is_open())
                {
                    picojson::value file_json(picojson::object_type,true);
                    const std::string err = picojson::parse(file_json,f);
                    if(err.empty())
                    {
                        // Json loaded. Parse output.
                        std::string input_uri = file_json.get_value<std::string>("video_uri", "");
                        if(!input_uri.empty())
                        {
                            // Transform input_uri based on sub args: each %key%
                            // placeholder resolves first from the outer uri's
                            // parameters, then from the json defaults.
                            const picojson::value input_uri_params = file_json.get_value<picojson::object>("video_uri_defaults", picojson::object());
                            input_uri = Transform(input_uri, [&](const std::string& k) {
                                return uri.Get<std::string>(k, input_uri_params.contains(k) ? input_uri_params[k].to_str() : "#");
                            });
                            return pangolin::OpenVideo(input_uri);
                        }else{
                            throw VideoException("JsonVideo failed.", "Bad input URI.");
                        }
                    }else{
                        throw VideoException("JsonVideo failed.", err);
                    }
                }else{
                    throw VideoException("JsonVideo failed. Unable to load file.", json_filename);
                }
            }else{
                // Not applicable for this factory.
                return std::unique_ptr<VideoInterface>();
            }
        }
    };

    auto factory = std::make_shared<JsonVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "json");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 5,  "file");
}
}

View File

@@ -0,0 +1,165 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/merge.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#include <pangolin/plot/range.h>
#include <assert.h> // assert()
#include <assert.h>
namespace pangolin
{
// Compose all streams of a child video side by side into a single output
// stream. 'stream_pos' gives the top-left pixel position of each child stream
// on the output canvas; w/h of 0 sizes the canvas to the bounding box of the
// placed streams. All child streams must share one pixel format.
MergeVideo::MergeVideo(std::unique_ptr<VideoInterface>& src_, const std::vector<Point>& stream_pos, size_t w = 0, size_t h = 0 )
    : src( std::move(src_) ), buffer(new uint8_t[src->SizeBytes()]), stream_pos(stream_pos)
{
    videoin.push_back(src.get());

    // Must be a stream_pos for each stream
    // Each stream must have the same format.
    assert(stream_pos.size() == src->Streams().size());
    assert(src->Streams().size() > 0);
    const PixelFormat fmt = src->Streams()[0].PixFormat();
    for(size_t i=1; i < src->Streams().size(); ++i) {
        assert(src->Streams()[i].PixFormat().format == fmt.format);
    }

    // Compute buffer regions for data copying.
    XYRange<size_t> r = XYRange<size_t>::Empty();
    for(size_t i=0; i < src->Streams().size(); ++i) {
        const StreamInfo& si = src->Streams()[i];
        const size_t x = stream_pos[i].x;
        const size_t y = stream_pos[i].y;
        XYRange<size_t> sr(x, x + si.Width(), y, y + si.Height());
        r.Insert(sr);
    }

    // Use implied min / max based on points
    if(!w && !h) {
        w = r.x.max;
        h = r.y.max;
    }

    size_bytes = w*h*fmt.bpp/8;
    streams.emplace_back(fmt,w,h,w*fmt.bpp/8,(unsigned char*)0);
}
// Members (src, buffer) release their resources via their own destructors.
MergeVideo::~MergeVideo()
{
}
//! Implement VideoInput::Start() - forwards to the child source.
void MergeVideo::Start()
{
    src->Start();
}
//! Implement VideoInput::Stop() - forwards to the child source.
void MergeVideo::Stop()
{
    src->Stop();
}
//! Implement VideoInput::SizeBytes() - size of the merged output canvas.
size_t MergeVideo::SizeBytes() const
{
    return size_bytes;
}
//! Implement VideoInput::Streams() - a single merged output stream.
const std::vector<StreamInfo>& MergeVideo::Streams() const
{
    return streams;
}
// Scatter each child stream's image out of the packed grab buffer into its
// configured (x,y) position on the merged output canvas, row by row.
void MergeVideo::CopyBuffer(unsigned char* dst_bytes, unsigned char* src_bytes)
{
    Image<unsigned char> dst_image = Streams()[0].StreamImage(dst_bytes);
    const size_t dst_pix_bytes = Streams()[0].PixFormat().bpp / 8;

    for(size_t i=0; i < stream_pos.size(); ++i) {
        const StreamInfo& src_stream = src->Streams()[i];
        const Image<unsigned char> src_image = src_stream.StreamImage(src_bytes);
        const Point& p = stream_pos[i];
        for(size_t y=0; y < src_stream.Height(); ++y) {
            // Copy row from src to dst
            std::memcpy(
                dst_image.RowPtr(y + p.y) + p.x * dst_pix_bytes,
                src_image.RowPtr(y), src_stream.RowBytes()
            );
        }
    }
}
//! Implement VideoInput::GrabNext()
// Grab into the intermediate buffer first, then scatter the sub-images into
// their positions on the merged canvas.
bool MergeVideo::GrabNext( unsigned char* image, bool wait )
{
    if(!src->GrabNext(buffer.get(), wait)) {
        return false;
    }
    CopyBuffer(image, buffer.get());
    return true;
}
//! Implement VideoInput::GrabNewest()
// Same as GrabNext(), but skips to the child's most recent frame.
bool MergeVideo::GrabNewest( unsigned char* image, bool wait )
{
    if(!src->GrabNewest(buffer.get(), wait)) {
        return false;
    }
    CopyBuffer(image, buffer.get());
    return true;
}
// Expose the (non-owning) child interface for filter-chain introspection.
std::vector<VideoInterface*>& MergeVideo::InputStreams()
{
    return videoin;
}
// Register the "merge" scheme: merge://suburi with optional size=WxH and
// pos1=..posN= stream placements. Streams without an explicit position are
// laid out left-to-right after the previous stream.
PANGOLIN_REGISTER_FACTORY(MergeVideo)
{
    struct MergeVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(0,0));
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);

            std::vector<Point> points;
            Point p(0,0);
            for(size_t s=0; s < subvid->Streams().size(); ++s) {
                const StreamInfo& si = subvid->Streams()[s];
                // posK parameters are 1-indexed; default is right of previous.
                p = uri.Get<Point>("pos"+std::to_string(s+1), p);
                points.push_back(p);
                p.x += si.Width();
            }

            return std::unique_ptr<VideoInterface>(new MergeVideo(subvid, points, dim.x, dim.y));
        }
    };

    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<MergeVideoFactory>(), 10, "merge");
}
}

View File

@@ -0,0 +1,589 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/mirror.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
// Wrap a child video, applying a per-stream mirror/rotate transform. For pure
// flips the stream geometry is unchanged; transpose/rotations swap width and
// height in the advertised stream info.
// NOTE(review): assumes flips has one entry per child stream - TODO confirm
// callers guarantee flips.size() >= Streams().size().
MirrorVideo::MirrorVideo(std::unique_ptr<VideoInterface>& src, const std::vector<MirrorOptions>& flips)
    : videoin(std::move(src)), flips(flips), size_bytes(0),buffer(0)
{
    if(!videoin) {
        throw VideoException("MirrorVideo: VideoInterface in must not be null");
    }

    inputs.push_back(videoin.get());

    for(size_t i=0;i<videoin->Streams().size();i++)
    switch (flips[i]) {
    case MirrorOptionsFlipX:
    case MirrorOptionsFlipY:
    case MirrorOptionsFlipXY:
    case MirrorOptionsNone:
        // Geometry unchanged; reuse the child's stream description.
        streams.push_back(videoin->Streams()[i]);
        break;
    case MirrorOptionsTranspose:
    case MirrorOptionsRotateCW:
    case MirrorOptionsRotateCCW:
        // Width and height swap for 90-degree transforms.
        unsigned char*ptr=videoin->Streams()[i].Offset();
        size_t w=videoin->Streams()[i].Height();
        size_t h=videoin->Streams()[i].Width();
        size_t Bpp=videoin->Streams()[i].PixFormat().bpp / 8;
        streams.emplace_back(videoin->Streams()[i].PixFormat(),pangolin::Image<unsigned char>(ptr,w,h,w*Bpp));
        break;
    };

    // Intermediate buffer for the unmirrored child frame.
    size_bytes = videoin->SizeBytes();
    buffer = new unsigned char[size_bytes];
}
// Release the intermediate frame buffer allocated in the constructor.
MirrorVideo::~MirrorVideo()
{
    delete[] buffer;
}
//! Implement VideoInput::Start() - forwards to the child source.
void MirrorVideo::Start()
{
    videoin->Start();
}
//! Implement VideoInput::Stop() - forwards to the child source.
void MirrorVideo::Stop()
{
    videoin->Stop();
}
//! Implement VideoInput::SizeBytes() - same size as the child frame.
size_t MirrorVideo::SizeBytes() const
{
    return size_bytes;
}
//! Implement VideoInput::Streams() - stream infos with transformed geometry.
const std::vector<StreamInfo>& MirrorVideo::Streams() const
{
    return streams;
}
// Row-wise copy of img_in into img_out (possibly differing row pitches).
// Both images must have identical width and height.
void PitchedImageCopy(
    Image<unsigned char>& img_out,
    const Image<unsigned char>& img_in,
    size_t bytes_per_pixel
) {
    if( img_out.w != img_in.w || img_out.h != img_in.h ) {
        throw std::runtime_error("PitchedImageCopy: Incompatible image sizes");
    }

    for(size_t y=0; y < img_out.h; ++y) {
        std::memcpy(img_out.RowPtr((int)y), img_in.RowPtr((int)y), bytes_per_pixel * img_in.w);
    }
}
// Vertically mirror img_in into img_out by copying rows in reverse order.
// Both images must have identical width and height.
void FlipY(
    Image<unsigned char>& img_out,
    const Image<unsigned char>& img_in,
    size_t bytes_per_pixel
) {
    if( img_out.w != img_in.w || img_out.h != img_in.h ) {
        throw std::runtime_error("FlipY: Incompatible image sizes");
    }

    for(size_t y_out=0; y_out < img_out.h; ++y_out) {
        // Destination row y_out receives the mirrored source row.
        const size_t y_in = (img_in.h-1) - y_out;
        std::memcpy(img_out.RowPtr((int)y_out), img_in.RowPtr((int)y_in), bytes_per_pixel * img_in.w);
    }
}
// Exchange the values of a and b (a 2-cycle).
template <typename T>
void ChainSwap2(T& a, T& b)
{
    const T first(a);
    a = b;
    b = first;
}
// Rotate four values one step: a<-b, b<-c, c<-d, d<-(old a).
template <typename T>
void ChainSwap4(T& a, T& b, T& c, T& d)
{
    const T first(a);
    a = b;
    b = c;
    c = d;
    d = first;
}
// Horizontally mirror img_in into img_out, processing TSZ x TSZ tiles of
// BPP-byte pixels for cache efficiency: load a tile, reverse it in x, and
// store it at the mirrored x position.
template <size_t BPP, size_t TSZ>
void TiledFlipX(Image<unsigned char>& img_out, const Image<unsigned char>& img_in)
{
    const size_t w = img_in.w;
    const size_t h = img_in.h;

    // Fixed-size pixel so a tile can be handled as a 2D array of pixels.
    typedef struct
    {
        unsigned char d[BPP];
    } T;

    T d[TSZ][TSZ];

    for(size_t xin = 0; xin < w; xin += TSZ)
        for(size_t yin = 0; yin < h; yin += TSZ)
        {
            // Partial tiles occur at the right/bottom borders.
            const size_t xspan = std::min(TSZ, w - xin);
            const size_t yspan = std::min(TSZ, h - yin);
            // NOTE(review): xout may wrap (size_t) for partial edge tiles;
            // the write below compensates via (xout + TSZ - xspan).
            const size_t xout = w - xin - TSZ;
            const size_t yout = yin;

            for(size_t y = 0; y < yspan; y++)
                memcpy(d[y], img_in.RowPtr(yin + y) + xin * BPP, xspan * BPP);
            // Reverse each tile row in place.
            for(size_t y = 0; y < TSZ; y++)
                for(size_t x = 0; x < TSZ / 2; x++)
                    ChainSwap2(d[y][x], d[y][TSZ - 1 - x]);
            for(size_t y = 0; y < yspan; y++)
                memcpy(img_out.RowPtr(yout + y) + (xout + TSZ - xspan) * BPP, d[y] + TSZ - xspan, xspan * BPP);
        }
}
// Rotate img_in by 180 degrees into img_out, tile by tile: each TSZ x TSZ
// tile is reversed in both axes and stored at the point-mirrored position.
template <size_t BPP, size_t TSZ>
void TiledRotate180(Image<unsigned char>& img_out, const Image<unsigned char>& img_in)
{
    // The in-tile pairwise swap below pairs (y, TSZ-1-y); an odd TSZ would
    // leave the middle row unhandled.
    static_assert(!(TSZ & 1), "Tilesize must be even.");
    const size_t w = img_in.w;
    const size_t h = img_in.h;

    typedef struct
    {
        unsigned char d[BPP];
    } T;

    T d[TSZ][TSZ];

    for(size_t xin = 0; xin < w; xin += TSZ)
        for(size_t yin = 0; yin < h; yin += TSZ)
        {
            // Partial tiles occur at the right/bottom borders.
            const size_t xspan = std::min(TSZ, w - xin);
            const size_t yspan = std::min(TSZ, h - yin);
            // NOTE(review): may wrap (size_t) for partial tiles; the write
            // below compensates via the (out + TSZ - span) offsets.
            const size_t xout = w - xin - TSZ;
            const size_t yout = h - yin - TSZ;

            for(size_t y = 0; y < yspan; y++)
                memcpy(d[y], img_in.RowPtr(yin + y) + xin * BPP, xspan * BPP);
            // Reverse the tile in x and y simultaneously.
            for(size_t y = 0; y < TSZ / 2; y++)
                for(size_t x = 0; x < TSZ; x++)
                    ChainSwap2(d[y][x], d[TSZ - 1 - y][TSZ - 1 - x]);
            for(size_t y = TSZ - yspan; y < TSZ; y++)
                memcpy(img_out.RowPtr(yout + y) + (xout + TSZ - xspan) * BPP, d[y] + TSZ - xspan, xspan * BPP);
        }
}
// Transpose img_in into img_out (swap x/y), tile by tile: each tile is
// transposed in place and stored at the mirrored (yin, xin) tile position.
template <size_t BPP, size_t TSZ>
void TiledTranspose(Image<unsigned char>& img_out, const Image<unsigned char>& img_in)
{
    const size_t w = img_in.w;
    const size_t h = img_in.h;

    typedef struct
    {
        unsigned char d[BPP];
    } T;

    T d[TSZ][TSZ];

    for(size_t xin = 0; xin < w; xin += TSZ)
        for(size_t yin = 0; yin < h; yin += TSZ)
        {
            // Partial tiles occur at the right/bottom borders.
            const size_t xspan = std::min(TSZ, w - xin);
            const size_t yspan = std::min(TSZ, h - yin);
            const size_t dmin = std::min(xspan, yspan);
            const size_t dmax = std::max(xspan, yspan);
            // Tile coordinates swap under transposition.
            const size_t xout = yin;
            const size_t yout = xin;

            for(size_t y = 0; y < yspan; y++)
                memcpy(d[y], img_in.RowPtr(yin + y) + xin * BPP, xspan * BPP);
            // In-place transpose of the (possibly partial) tile.
            for(size_t x = 0; x < dmin; x++)
                for(size_t y = x + 1; y < dmax; y++)
                    ChainSwap2(d[x][y], d[y][x]);
            for(size_t y = 0; y < xspan; y++)
                memcpy(img_out.RowPtr(yout + y) + xout * BPP, d[y], yspan * BPP);
        }
}
// Rotate img_in 90 degrees clockwise into img_out, tile by tile: each tile is
// rotated in place with 4-cycles of pixels, then stored at its rotated
// position (output is h x w).
template <size_t BPP, size_t TSZ>
void TiledRotateCW(Image<unsigned char>& img_out, const Image<unsigned char>& img_in)
{
    // The 4-cycle below pairs quadrants; an odd TSZ would leave the centre
    // cross unhandled.
    static_assert(!(TSZ & 1), "Tilesize must be even.");
    const size_t w = img_in.w;
    const size_t h = img_in.h;

    typedef struct
    {
        unsigned char d[BPP];
    } T;

    T d[TSZ][TSZ];

    for(size_t xin = 0; xin < w; xin += TSZ)
        for(size_t yin = 0; yin < h; yin += TSZ)
        {
            // Partial tiles occur at the right/bottom borders.
            const size_t xspan = std::min(TSZ, w - xin);
            const size_t yspan = std::min(TSZ, h - yin);
            // NOTE(review): xout may wrap (size_t) for partial tiles; the
            // write below compensates via (xout + TSZ - yspan).
            const size_t xout = h - yin - TSZ;
            const size_t yout = xin;

            for(size_t y = 0; y < yspan; y++)
                memcpy(d[y], img_in.RowPtr(yin + y) + xin * BPP, xspan * BPP);
            // Rotate the tile clockwise via 4-element cycles.
            for(size_t y = 0; y < TSZ / 2; y++)
                for(size_t x = 0; x < TSZ / 2; x++)
                    ChainSwap4(d[TSZ - 1 - x][y], d[TSZ - 1 - y][TSZ - 1 - x], d[x][TSZ - 1 - y], d[y][x]);
            for(size_t y = 0; y < xspan; y++)
                memcpy(img_out.RowPtr(yout + y) + (xout + TSZ - yspan) * BPP, d[y] + TSZ - yspan, yspan * BPP);
        }
}
// Rotate img_in 90 degrees counter-clockwise into img_out, tile by tile:
// each tile is rotated in place with 4-cycles of pixels, then stored at its
// rotated position (output is h x w).
template <size_t BPP, size_t TSZ>
void TiledRotateCCW(Image<unsigned char>& img_out, const Image<unsigned char>& img_in)
{
    // The 4-cycle below pairs quadrants; an odd TSZ would leave the centre
    // cross unhandled.
    static_assert(!(TSZ & 1), "Tilesize must be even.");
    const size_t w = img_in.w;
    const size_t h = img_in.h;

    typedef struct
    {
        unsigned char d[BPP];
    } T;

    T d[TSZ][TSZ];

    for(size_t xin = 0; xin < w; xin += TSZ)
        for(size_t yin = 0; yin < h; yin += TSZ)
        {
            // Partial tiles occur at the right/bottom borders.
            const size_t xspan = std::min(TSZ, w - xin);
            const size_t yspan = std::min(TSZ, h - yin);
            const size_t xout = yin;
            // NOTE(review): yout may wrap (size_t) for partial tiles; the
            // write below compensates by starting at y = TSZ - xspan.
            const size_t yout = w - xin - TSZ;

            for(size_t y = 0; y < yspan; y++)
                memcpy(d[y], img_in.RowPtr(yin + y) + xin * BPP, xspan * BPP);
            // Rotate the tile counter-clockwise via 4-element cycles.
            for(size_t y = 0; y < TSZ / 2; y++)
                for(size_t x = 0; x < TSZ / 2; x++)
                    ChainSwap4(d[y][x], d[x][TSZ - 1 - y], d[TSZ - 1 - y][TSZ - 1 - x], d[TSZ - 1 - x][y]);
            for(size_t y = TSZ - xspan; y < TSZ; y++)
                memcpy(img_out.RowPtr(yout + y) + xout * BPP, d[y], yspan * BPP);
        }
}
// Mirror img_in left-right into img_out. Common pixel sizes dispatch to the
// fast tiled implementation; any other bytes-per-pixel uses a simple
// per-pixel copy loop.
void FlipX(Image<unsigned char>& img_out, const Image<unsigned char>& img_in, size_t bytes_per_pixel)
{
    switch(bytes_per_pixel) {
    case 1: TiledFlipX<1, 160>(img_out, img_in); break;
    case 2: TiledFlipX<2, 120>(img_out, img_in); break;
    case 3: TiledFlipX<3, 80>(img_out, img_in);  break;
    case 4: TiledFlipX<4, 80>(img_out, img_in);  break;
    case 6: TiledFlipX<6, 64>(img_out, img_in);  break;
    default:
        // Generic fallback: copy pixel by pixel, mirrored in x.
        for(size_t y = 0; y < img_out.h; ++y) {
            for(size_t x = 0; x < img_out.w; ++x) {
                memcpy(img_out.RowPtr((int)y) + (img_out.w - 1 - x) * bytes_per_pixel,
                       img_in.RowPtr((int)y) + x * bytes_per_pixel,
                       bytes_per_pixel);
            }
        }
        break;
    }
}
// Rotate img_in by 180 degrees into img_out. Common pixel sizes dispatch to
// the fast tiled implementation; any other bytes-per-pixel uses a simple
// per-pixel copy loop.
void FlipXY(Image<unsigned char>& img_out, const Image<unsigned char>& img_in, size_t bytes_per_pixel)
{
    switch(bytes_per_pixel) {
    case 1: TiledRotate180<1, 160>(img_out, img_in); break;
    case 2: TiledRotate180<2, 120>(img_out, img_in); break;
    case 3: TiledRotate180<3, 80>(img_out, img_in);  break;
    case 4: TiledRotate180<4, 80>(img_out, img_in);  break;
    case 6: TiledRotate180<6, 64>(img_out, img_in);  break;
    default:
        // Generic fallback: copy pixel by pixel, mirrored in both axes.
        for(size_t y_out = 0; y_out < img_out.h; ++y_out) {
            for(size_t x = 0; x < img_out.w; ++x) {
                const size_t y_in = (img_in.h - 1) - y_out;
                memcpy(img_out.RowPtr((int)y_out) + (img_out.w - 1 - x) * bytes_per_pixel,
                       img_in.RowPtr((int)y_in) + x * bytes_per_pixel,
                       bytes_per_pixel);
            }
        }
        break;
    }
}
// Rotate img_in 90 degrees clockwise into img_out.
// Common pixel sizes are dispatched to fast tiled kernels; any other
// bytes_per_pixel falls back to a generic per-pixel copy.
void RotateCW(Image<unsigned char>& img_out, const Image<unsigned char>& img_in, size_t bytes_per_pixel)
{
    switch(bytes_per_pixel) {
    case 1: TiledRotateCW<1, 160>(img_out, img_in); return;
    case 2: TiledRotateCW<2, 120>(img_out, img_in); return;
    case 3: TiledRotateCW<3, 80>(img_out, img_in);  return;
    case 4: TiledRotateCW<4, 80>(img_out, img_in);  return;
    case 6: TiledRotateCW<6, 64>(img_out, img_in);  return;
    default: break;
    }
    // Generic fallback: for each destination pixel, gather from the
    // source location that maps onto it under a CW rotation.
    for(size_t dst_y = 0; dst_y < img_out.h; ++dst_y) {
        for(size_t dst_x = 0; dst_x < img_out.w; ++dst_x) {
            const size_t src_x = dst_y;
            const size_t src_y = img_out.w - 1 - dst_x;
            memcpy(img_out.RowPtr((int)dst_y) + dst_x * bytes_per_pixel,
                   img_in.RowPtr((int)src_y) + src_x * bytes_per_pixel,
                   bytes_per_pixel);
        }
    }
}
// Transpose img_in (swap x and y axes) into img_out.
// Common pixel sizes are dispatched to fast tiled kernels; any other
// bytes_per_pixel falls back to a generic per-pixel copy.
void Transpose(Image<unsigned char>& img_out, const Image<unsigned char>& img_in, size_t bytes_per_pixel)
{
    switch(bytes_per_pixel) {
    case 1: TiledTranspose<1, 160>(img_out, img_in); return;
    case 2: TiledTranspose<2, 120>(img_out, img_in); return;
    case 3: TiledTranspose<3, 80>(img_out, img_in);  return;
    case 4: TiledTranspose<4, 80>(img_out, img_in);  return;
    case 6: TiledTranspose<6, 64>(img_out, img_in);  return;
    default: break;
    }
    // Generic fallback: destination (x,y) reads source (y,x).
    for(size_t dst_y = 0; dst_y < img_out.h; ++dst_y) {
        for(size_t dst_x = 0; dst_x < img_out.w; ++dst_x) {
            const size_t src_x = dst_y;
            const size_t src_y = dst_x;
            memcpy(img_out.RowPtr((int)dst_y) + dst_x * bytes_per_pixel,
                   img_in.RowPtr((int)src_y) + src_x * bytes_per_pixel,
                   bytes_per_pixel);
        }
    }
}
// Rotate img_in 90 degrees counter-clockwise into img_out.
// Common pixel sizes are dispatched to fast tiled kernels; any other
// bytes_per_pixel falls back to a generic per-pixel copy.
void RotateCCW(Image<unsigned char>& img_out, const Image<unsigned char>& img_in, size_t bytes_per_pixel)
{
    switch(bytes_per_pixel) {
    case 1: TiledRotateCCW<1, 160>(img_out, img_in); return;
    case 2: TiledRotateCCW<2, 120>(img_out, img_in); return;
    case 3: TiledRotateCCW<3, 80>(img_out, img_in);  return;
    case 4: TiledRotateCCW<4, 80>(img_out, img_in);  return;
    case 6: TiledRotateCCW<6, 64>(img_out, img_in);  return;
    default: break;
    }
    // Generic fallback: for each destination pixel, gather from the
    // source location that maps onto it under a CCW rotation.
    for(size_t dst_y = 0; dst_y < img_out.h; ++dst_y) {
        for(size_t dst_x = 0; dst_x < img_out.w; ++dst_x) {
            const size_t src_x = img_out.h - 1 - dst_y;
            const size_t src_y = dst_x;
            memcpy(img_out.RowPtr((int)dst_y) + dst_x * bytes_per_pixel,
                   img_in.RowPtr((int)src_y) + src_x * bytes_per_pixel,
                   bytes_per_pixel);
        }
    }
}
//! Apply each stream's configured mirror transform, reading frames from
//! buffer_in and writing the transformed result to buffer_out.
void MirrorVideo::Process(unsigned char* buffer_out, const unsigned char* buffer_in)
{
    for(size_t s=0; s<streams.size(); ++s) {
        // View this stream's region of the raw output / input buffers as images.
        Image<unsigned char> img_out = Streams()[s].StreamImage(buffer_out);
        const Image<unsigned char> img_in = videoin->Streams()[s].StreamImage(buffer_in);
        const size_t bytes_per_pixel = Streams()[s].PixFormat().bpp / 8;
        // Dispatch on the per-stream option selected at construction.
        switch (flips[s]) {
        case MirrorOptionsFlipX:
            FlipX(img_out, img_in, bytes_per_pixel);
            break;
        case MirrorOptionsFlipY:
            FlipY(img_out, img_in, bytes_per_pixel);
            break;
        case MirrorOptionsFlipXY:
            FlipXY(img_out, img_in, bytes_per_pixel);
            break;
        case MirrorOptionsRotateCW:
            RotateCW(img_out, img_in, bytes_per_pixel);
            break;
        case MirrorOptionsRotateCCW:
            RotateCCW(img_out, img_in, bytes_per_pixel);
            break;
        case MirrorOptionsTranspose:
            Transpose(img_out, img_in, bytes_per_pixel);
            break;
        case MirrorOptionsNone:
            // No transform requested: plain row-by-row copy.
            PitchedImageCopy(img_out, img_in, bytes_per_pixel);
            break;
        default:
            pango_print_warn("MirrorVideo::Process(): Invalid enum %i.\n", flips[s]);
        }
    }
}
//! Implement VideoInput::GrabNext(): grab from the child driver into the
//! intermediate buffer, then write the mirrored result into image.
bool MirrorVideo::GrabNext( unsigned char* image, bool wait )
{
    if(!videoin->GrabNext(buffer, wait)) {
        return false;
    }
    Process(image, buffer);
    return true;
}
//! Implement VideoInput::GrabNewest(): grab the newest frame from the child
//! driver into the intermediate buffer, then mirror it into image.
bool MirrorVideo::GrabNewest( unsigned char* image, bool wait )
{
    if(!videoin->GrabNewest(buffer, wait)) {
        return false;
    }
    Process(image, buffer);
    return true;
}
//! Expose the wrapped child video interface(s) for filter-chain traversal.
std::vector<VideoInterface*>& MirrorVideo::InputStreams()
{
    return inputs;
}
unsigned int MirrorVideo::AvailableFrames() const
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin.get());
if(!vpi)
{
pango_print_warn("Mirror: child interface is not buffer aware.");
return 0;
}
else
{
return vpi->AvailableFrames();
}
}
bool MirrorVideo::DropNFrames(uint32_t n)
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin.get());
if(!vpi)
{
pango_print_warn("Mirror: child interface is not buffer aware.");
return false;
}
else
{
return vpi->DropNFrames(n);
}
}
std::istream& operator>> (std::istream &is, MirrorOptions &mirror)
{
std::string str_mirror;
is >> str_mirror;
std::transform(str_mirror.begin(), str_mirror.end(), str_mirror.begin(), toupper);
if(!str_mirror.compare("NONE")) {
mirror = MirrorOptionsNone;
}else if(!str_mirror.compare("FLIPX") || !str_mirror.compare("MIRROR")) {
mirror = MirrorOptionsFlipX;
}else if(!str_mirror.compare("FLIPY") || !str_mirror.compare("FLIP")) {
mirror = MirrorOptionsFlipY;
}else if(!str_mirror.compare("ROTATECW")) {
mirror = MirrorOptionsRotateCW;
}else if(!str_mirror.compare("ROTATECCW")) {
mirror = MirrorOptionsRotateCCW;
}else if(!str_mirror.compare("FLIPXY") || !str_mirror.compare("TRANSPOSE")) {
mirror = MirrorOptionsFlipXY;
}else{
pango_print_warn("Unknown mirror option %s.", str_mirror.c_str());
mirror = MirrorOptionsNone;
}
return is;
}
// Register the mirror filter under its URI schemes. Each scheme implies a
// default transform; per-stream overrides come from stream0=..., stream1=...
// URI options.
PANGOLIN_REGISTER_FACTORY(MirrorVideo)
{
    struct MirrorVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            // Open the wrapped source first; its stream count decides how
            // many per-stream mirror options are needed.
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            // The scheme selects the default transform. Note "rotate" means
            // a 180-degree rotation (FlipXY), not a 90-degree turn.
            MirrorOptions default_opt = MirrorOptionsNone;
            if(uri.scheme == "flip") default_opt = MirrorOptionsFlipY;
            if(uri.scheme == "rotate") default_opt = MirrorOptionsFlipXY;
            if(uri.scheme == "transpose") default_opt = MirrorOptionsTranspose;
            if(uri.scheme == "rotateCW") default_opt = MirrorOptionsRotateCW;
            if(uri.scheme == "rotateCCW") default_opt = MirrorOptionsRotateCCW;
            // NOTE(review): the "mirror" and "transform" schemes have no
            // explicit mapping here and default to MirrorOptionsNone unless
            // per-stream options are given — confirm this is intended.
            std::vector<MirrorOptions> flips;
            for(size_t i=0; i < subvid->Streams().size(); ++i){
                // Allow "streamN=<option>" to override the default per stream.
                std::stringstream ss;
                ss << "stream" << i;
                const std::string key = ss.str();
                flips.push_back(uri.Get<MirrorOptions>(key, default_opt) );
            }
            return std::unique_ptr<VideoInterface> (new MirrorVideo(subvid, flips));
        }
    };
    auto factory = std::make_shared<MirrorVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "mirror");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "flip");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "rotate");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "transpose");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "rotateCW");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "rotateCCW");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "transform");
}
}

View File

@@ -0,0 +1,299 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/openni.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
// Construct an OpenNI (1.x) video source exposing up to two sensor
// streams (s1, s2) at the requested resolution and frame rate.
// Creates only the generator nodes the requested sensors need, optionally
// registers depth to colour, and starts generation before returning.
OpenNiVideo::OpenNiVideo(OpenNiSensorType s1, OpenNiSensorType s2, ImageDim dim, int fps)
{
    sensor_type[0] = s1;
    sensor_type[1] = s2;
    XnStatus nRetVal = XN_STATUS_OK;
    nRetVal = context.Init();
    if (nRetVal != XN_STATUS_OK) {
        // Init failure is reported but not fatal here; node creation below
        // will throw if the context is unusable.
        std::cerr << "context.Init: " << xnGetStatusString(nRetVal) << std::endl;
    }
    // Same output mode is requested for every generator node.
    XnMapOutputMode mapMode;
    mapMode.nXRes = dim.x;
    mapMode.nYRes = dim.y;
    mapMode.nFPS = fps;
    sizeBytes = 0;
    bool use_depth = false;
    bool use_ir = false;
    bool use_rgb = false;
    bool depth_to_color = false;
    for(int i=0; i<2; ++i) {
        PixelFormat fmt;
        // Establish output pixel format for sensor streams
        switch( sensor_type[i] ) {
        case OpenNiDepth_1mm_Registered:
        case OpenNiDepth_1mm:
        case OpenNiIr:
        case OpenNiIrProj:
            fmt = PixelFormatFromString("GRAY16LE");
            break;
        case OpenNiIr8bit:
        case OpenNiIr8bitProj:
            fmt = PixelFormatFromString("GRAY8");
            break;
        case OpenNiRgb:
            fmt = PixelFormatFromString("RGB24");
            break;
        default:
            // Unassigned/unknown sensor slot: no stream is published for it.
            continue;
        }
        // Record which generator nodes this sensor requires.
        switch( sensor_type[i] ) {
        case OpenNiDepth_1mm_Registered:
            depth_to_color = true;
            // intentional fallthrough: registered depth also needs depth
        case OpenNiDepth_1mm:
            use_depth = true;
            break;
        case OpenNiIr:
        case OpenNiIr8bit:
            use_ir = true;
            break;
        case OpenNiIrProj:
        case OpenNiIr8bitProj:
            // IR with projector requires the depth generator to be active.
            use_ir = true;
            use_depth = true;
            break;
        case OpenNiRgb:
            use_rgb = true;
            break;
        default:
            break;
        }
        // Stream offsets are encoded as byte offsets into the concatenated
        // frame buffer, disguised as pointers from address 0.
        const StreamInfo stream(fmt, mapMode.nXRes, mapMode.nYRes, (mapMode.nXRes * fmt.bpp) / 8, (unsigned char*)0 + sizeBytes);
        sizeBytes += stream.SizeBytes();
        streams.push_back(stream);
    }
    if( use_depth ) {
        nRetVal = depthNode.Create(context);
        if (nRetVal != XN_STATUS_OK) {
            throw VideoException( (std::string)"Unable to create DepthNode: " + xnGetStatusString(nRetVal) );
        }else{
            nRetVal = depthNode.SetMapOutputMode(mapMode);
            if (nRetVal != XN_STATUS_OK) {
                throw VideoException( (std::string)"Invalid DepthNode mode: " + xnGetStatusString(nRetVal) );
            }
        }
    }
    if( use_rgb ) {
        nRetVal = imageNode.Create(context);
        if (nRetVal != XN_STATUS_OK) {
            throw VideoException( (std::string)"Unable to create ImageNode: " + xnGetStatusString(nRetVal) );
        }else{
            nRetVal = imageNode.SetMapOutputMode(mapMode);
            if (nRetVal != XN_STATUS_OK) {
                throw VideoException( (std::string)"Invalid ImageNode mode: " + xnGetStatusString(nRetVal) );
            }
        }
    }
    if (depth_to_color && use_rgb) {
        //Registration
        if( depthNode.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT) ) {
            nRetVal = depthNode.GetAlternativeViewPointCap().SetViewPoint( imageNode );
            if (nRetVal != XN_STATUS_OK) {
                // Non-fatal: depth simply remains unregistered.
                std::cerr << "depthNode.GetAlternativeViewPointCap().SetViewPoint(imageNode): " << xnGetStatusString(nRetVal) << std::endl;
            }
        }
        // Frame Sync
        if (depthNode.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
        {
            if (depthNode.GetFrameSyncCap().CanFrameSyncWith(imageNode))
            {
                nRetVal = depthNode.GetFrameSyncCap().FrameSyncWith(imageNode);
                if (nRetVal != XN_STATUS_OK) {
                    // Non-fatal: streams may drift without frame sync.
                    std::cerr << "depthNode.GetFrameSyncCap().FrameSyncWith(imageNode): " << xnGetStatusString(nRetVal) << std::endl;
                }
            }
        }
    }
    if( use_ir ) {
        nRetVal = irNode.Create(context);
        if (nRetVal != XN_STATUS_OK) {
            throw VideoException( (std::string)"Unable to create IrNode: " + xnGetStatusString(nRetVal) );
        }else{
            nRetVal = irNode.SetMapOutputMode(mapMode);
            if (nRetVal != XN_STATUS_OK) {
                throw VideoException( (std::string)"Invalid IrNode mode: " + xnGetStatusString(nRetVal) );
            }
        }
    }
    Start();
}
// Release the OpenNI context; generator nodes are owned by the context.
OpenNiVideo::~OpenNiVideo()
{
    context.Release();
}
// Total size in bytes of one concatenated multi-stream frame.
size_t OpenNiVideo::SizeBytes() const
{
    return sizeBytes;
}
// Descriptions of the streams published by this source, in buffer order.
const std::vector<StreamInfo>& OpenNiVideo::Streams() const
{
    return streams;
}
// Begin generation on all created nodes. Errors are ignored here.
void OpenNiVideo::Start()
{
//    XnStatus nRetVal =
    context.StartGeneratingAll();
}
// Halt generation on all nodes; streams can be restarted with Start().
void OpenNiVideo::Stop()
{
    context.StopGeneratingAll();
}
// Wait for new data and copy each configured sensor's frame into the
// caller's buffer, streams concatenated in declaration order.
// Returns false if the OpenNI update call fails. The wait parameter is
// ignored; WaitAnyUpdateAll always blocks for new data.
bool OpenNiVideo::GrabNext( unsigned char* image, bool /*wait*/ )
{
//    XnStatus nRetVal = context.WaitAndUpdateAll();
    XnStatus nRetVal = context.WaitAnyUpdateAll();
//    nRetVal = context.WaitOneUpdateAll(imageNode);
    if (nRetVal != XN_STATUS_OK) {
        std::cerr << "Failed updating data: " << xnGetStatusString(nRetVal) << std::endl;
        return false;
    }else{
        unsigned char* out_img = image;
        // NOTE(review): streams[i] is indexed by sensor slot, but the
        // constructor only push_backs streams for assigned sensors. If
        // slot 0 were unassigned while slot 1 is not, indices would
        // misalign — presumably callers always fill slots in order; verify.
        for(int i=0; i<2; ++i) {
            switch (sensor_type[i]) {
            case OpenNiDepth_1mm:
            case OpenNiDepth_1mm_Registered:
            {
                const XnDepthPixel* pDepthMap = depthNode.GetDepthMap();
                memcpy(out_img,pDepthMap, streams[i].SizeBytes() );
                break;
            }
            case OpenNiIr:
            case OpenNiIrProj:
            {
                const XnIRPixel* pIrMap = irNode.GetIRMap();
                memcpy(out_img, pIrMap, streams[i].SizeBytes() );
                break;
            }
            case OpenNiIr8bit:
            case OpenNiIr8bitProj:
            {
                const XnIRPixel* pIr16Map = irNode.GetIRMap();
                // rescale from 16-bit (10 effective) to 8-bit
                xn::IRMetaData meta_data;
                irNode.GetMetaData(meta_data);
                int w = meta_data.XRes();
                int h = meta_data.YRes();
                // Copy to out_img with conversion
                XnUInt8* pIrMapScaled = (XnUInt8*)out_img;
                for (int v = 0; v < h; ++v)
                for (int u = 0; u < w; ++u) {
                    int val = *pIr16Map >> 2; // 10bit to 8 bit
                    pIrMapScaled[w * v + u] = val;
                    pIr16Map++;
                }
                break;
            }
            case OpenNiRgb:
            {
                const XnUInt8* pImageMap = imageNode.GetImageMap();
                memcpy(out_img,pImageMap, streams[i].SizeBytes());
                break;
            }
            default:
                // Unassigned slot: also skips the out_img advance below.
                continue;
                break;
            }
            out_img += streams[i].SizeBytes();
        }
        return true;
    }
}
// OpenNI 1.x offers no frame queue to skip, so newest == next.
bool OpenNiVideo::GrabNewest( unsigned char* image, bool wait )
{
    return GrabNext(image,wait);
}
// Register the OpenNI 1.x driver. URI options: size, fps, autoexposure,
// and img1/img2 to select the two sensor streams (defaults: rgb + none).
PANGOLIN_REGISTER_FACTORY(OpenNiVideo)
{
    struct OpenNiVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(640,480));
            const unsigned int fps = uri.Get<unsigned int>("fps", 30);
            const bool autoexposure = uri.Get<bool>("autoexposure", true);
            OpenNiSensorType img1 = OpenNiRgb;
            OpenNiSensorType img2 = OpenNiUnassigned;
            if( uri.Contains("img1") ){
                img1 = openni_sensor(uri.Get<std::string>("img1", "depth"));
            }
            if( uri.Contains("img2") ){
                img2 = openni_sensor(uri.Get<std::string>("img2","rgb"));
            }
            OpenNiVideo* oniv = new OpenNiVideo(img1, img2, dim, fps);
            oniv->SetAutoExposure(autoexposure);
            return std::unique_ptr<VideoInterface>(oniv);
        }
    };
    auto factory = std::make_shared<OpenNiVideoFactory>();
    // "openni1" selects this driver explicitly; the generic scheme names
    // are registered at a different precedence so other OpenNI drivers
    // can also claim them.
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "openni1");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 100, "openni");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 100, "oni");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 100, "kinect");
}
}

View File

@@ -0,0 +1,690 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Richard Newcombe
* 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/drivers/openni2.h>
#include <OniVersion.h>
#include <PS1080.h>
namespace pangolin
{
// Map an OpenNI2 pixel format onto the equivalent Pangolin pixel format.
// Throws VideoException for formats Pangolin cannot represent.
PixelFormat VideoFormatFromOpenNI2(openni::PixelFormat fmt)
{
    const char* pango_fmt = nullptr;
    switch (fmt) {
    case openni::PIXEL_FORMAT_DEPTH_1_MM:   pango_fmt = "GRAY16LE"; break;
    case openni::PIXEL_FORMAT_DEPTH_100_UM: pango_fmt = "GRAY16LE"; break;
    case openni::PIXEL_FORMAT_SHIFT_9_2:    pango_fmt = "GRAY16LE"; break; // ?
    case openni::PIXEL_FORMAT_SHIFT_9_3:    pango_fmt = "GRAY16LE"; break; // ?
    case openni::PIXEL_FORMAT_RGB888:       pango_fmt = "RGB24";    break;
    case openni::PIXEL_FORMAT_GRAY8:        pango_fmt = "GRAY8";    break;
    case openni::PIXEL_FORMAT_GRAY16:       pango_fmt = "GRAY16LE"; break;
    case openni::PIXEL_FORMAT_YUV422:       pango_fmt = "YUYV422";  break;
#if ONI_VERSION_MAJOR >= 2 && ONI_VERSION_MINOR >= 2
    // NOTE(review): YUYV mapped to "Y400A" as in the original table — verify.
    case openni::PIXEL_FORMAT_YUYV:         pango_fmt = "Y400A";    break;
#endif
    default:
        break;
    }
    if(!pango_fmt) {
        throw VideoException("Unknown OpenNI pixel format");
    }
    return PixelFormatFromString(pango_fmt);
}
// Print every video mode the first device supports for the given sensor,
// one line per mode: resolution, fps and (if known) Pangolin format name.
void OpenNi2Video::PrintOpenNI2Modes(openni::SensorType sensorType)
{
    // Query supported modes for device
    const openni::Array<openni::VideoMode>& modes =
            devices[0].getSensorInfo(sensorType)->getSupportedVideoModes();

    switch (sensorType) {
    case openni::SENSOR_COLOR: pango_print_info("OpenNI Colour Modes:\n"); break;
    case openni::SENSOR_DEPTH: pango_print_info("OpenNI Depth Modes:\n"); break;
    case openni::SENSOR_IR:    pango_print_info("OpenNI IR Modes:\n");    break;
    default: break;
    }

    for(int m = 0; m < modes.getSize(); ++m) {
        // Formats Pangolin cannot represent are still listed, just unnamed.
        std::string sfmt = "PangolinUnknown";
        try{
            sfmt = VideoFormatFromOpenNI2(modes[m].getPixelFormat()).format;
        }catch(const VideoException&){}
        pango_print_info( " %dx%d, %d fps, %s\n",
            modes[m].getResolutionX(), modes[m].getResolutionY(),
            modes[m].getFps(), sfmt.c_str()
        );
    }
}
// Search the device's supported modes for one matching the requested
// width/height/fps/format; a value of 0 for any criterion means "any".
// The LAST matching mode in the device's list wins, matching the
// original selection behaviour. Throws if nothing matches.
openni::VideoMode OpenNi2Video::FindOpenNI2Mode(
    openni::Device & device,
    openni::SensorType sensorType,
    int width, int height,
    int fps, openni::PixelFormat fmt
) {
    const openni::Array<openni::VideoMode>& modes =
            device.getSensorInfo(sensorType)->getSupportedVideoModes();

    int chosen = -1;
    for(int m = 0; m < modes.getSize(); ++m) {
        const bool width_ok  = !width  || modes[m].getResolutionX() == width;
        const bool height_ok = !height || modes[m].getResolutionY() == height;
        const bool fps_ok    = !fps    || modes[m].getFps() == fps;
        const bool fmt_ok    = !fmt    || modes[m].getPixelFormat() == fmt;
        if(width_ok && height_ok && fps_ok && fmt_ok) {
            chosen = m;
        }
    }

    if(chosen < 0) {
        throw pangolin::VideoException("Video mode not supported");
    }
    return modes[chosen];
}
// Translate Pangolin's sensor enum into the OpenNI2 sensor category
// (colour / depth / IR). Throws std::invalid_argument for unassigned
// or unknown values.
inline openni::SensorType SensorType(const OpenNiSensorType sensor)
{
    switch (sensor) {
    case OpenNiRgb:
    case OpenNiGrey:
        return openni::SENSOR_COLOR;
    case OpenNiDepth_1mm:
    case OpenNiDepth_100um:
    case OpenNiDepth_1mm_Registered:
        return openni::SENSOR_DEPTH;
    case OpenNiIr:
    case OpenNiIr8bit:
    case OpenNiIr24bit:
    case OpenNiIrProj:
    case OpenNiIr8bitProj:
        return openni::SENSOR_IR;
    default:
        throw std::invalid_argument("OpenNI: Bad sensor type");
    }
}
// Default configuration: open every attached device and give each one a
// depth stream plus an rgb stream at the requested mode.
OpenNi2Video::OpenNi2Video(ImageDim dim, ImageRoi roi, int fps)
{
    InitialiseOpenNI();

    openni::Array<openni::DeviceInfo> deviceList;
    openni::OpenNI::enumerateDevices(&deviceList);

    if (deviceList.getSize() < 1) {
        throw VideoException("No OpenNI Devices available. Ensure your camera is plugged in.");
    }

    for(int d = 0; d < deviceList.getSize(); ++d) {
        const int dev_id = AddDevice(deviceList[d].getUri());
        AddStream(OpenNiStreamMode( OpenNiDepth_1mm, dim, roi, fps, dev_id) );
        AddStream(OpenNiStreamMode( OpenNiRgb, dim, roi, fps, dev_id) );
    }

    SetupStreamModes();
}
// Open a single device (or recording) by URI with a default depth + rgb
// stream pair at 30fps; dimensions/ROI are left default-constructed so
// mode matching accepts any resolution.
OpenNi2Video::OpenNi2Video(const std::string& device_uri)
{
    InitialiseOpenNI();
    const int dev_id = AddDevice(device_uri);
    AddStream(OpenNiStreamMode( OpenNiDepth_1mm, ImageDim(), ImageRoi(), 30, dev_id) );
    AddStream(OpenNiStreamMode( OpenNiRgb,       ImageDim(), ImageRoi(), 30, dev_id) );
    SetupStreamModes();
}
// Open a single named device (or recording) and attach exactly the
// caller-specified stream modes to it.
OpenNi2Video::OpenNi2Video(const std::string& device_uri, std::vector<OpenNiStreamMode> &stream_modes)
{
    InitialiseOpenNI();
    AddDevice(device_uri);

    for(OpenNiStreamMode& mode : stream_modes) {
        AddStream(mode);
    }

    SetupStreamModes();
}
// Open every attached device, then create the caller-specified streams;
// each mode names the device it belongs to via its device index.
OpenNi2Video::OpenNi2Video(std::vector<OpenNiStreamMode>& stream_modes)
{
    InitialiseOpenNI();

    openni::Array<openni::DeviceInfo> deviceList;
    openni::OpenNI::enumerateDevices(&deviceList);

    if (deviceList.getSize() < 1) {
        throw VideoException("OpenNI2: No devices available. Ensure your camera is plugged in.");
    }

    for(int d = 0; d < deviceList.getSize(); ++d) {
        AddDevice(deviceList[d].getUri());
    }

    for(OpenNiStreamMode& mode : stream_modes) {
        AddStream(mode);
    }

    SetupStreamModes();
}
void OpenNi2Video::InitialiseOpenNI()
{
// Initialise member variables
numDevices = 0;
numStreams = 0;
current_frame_index = 0;
total_frames = std::numeric_limits<size_t>::max();
openni::Status rc = openni::STATUS_OK;
rc = openni::OpenNI::initialize();
if (rc != openni::STATUS_OK) {
throw VideoException( "Unable to initialise OpenNI library", openni::OpenNI::getExtendedError() );
}
}
// Open the device at device_uri into the next slot of the devices array
// and return its index. Throws VideoException if the open fails.
int OpenNi2Video::AddDevice(const std::string& device_uri)
{
    const size_t dev_id = numDevices;
    openni::Status rc = devices[dev_id].open(device_uri.c_str());
    if (rc != openni::STATUS_OK) {
        throw VideoException( "OpenNI2: Couldn't open device.", openni::OpenNI::getExtendedError() );
    }
    ++numDevices;
    // dev_id is narrowed size_t -> int; device counts here are tiny.
    return dev_id;
}
// Create an OpenNI stream for the given mode on its target device and
// record it in the next stream slot. Throws VideoException on failure.
void OpenNi2Video::AddStream(const OpenNiStreamMode& mode)
{
    sensor_type[numStreams] = mode;
    openni::Device& device = devices[mode.device];
    openni::VideoStream& stream = video_stream[numStreams];
    openni::Status rc = stream.create(device, SensorType(mode.sensor_type));
    if (rc != openni::STATUS_OK) {
        throw VideoException( "OpenNI2: Couldn't create stream.", openni::OpenNI::getExtendedError() );
    }

    // For playback (file) devices, bound total_frames by the recording
    // length. NOTE(review): only checked for the first stream added
    // (numStreams==0) — presumably all streams share one recording; verify.
    openni::PlaybackControl* control = device.getPlaybackControl();
    if(control && numStreams==0) {
        total_frames = std::min(total_frames, (size_t)control->getNumberOfFrames(stream));
    }

    numStreams++;
}
// Translate each requested Pangolin sensor mode into a concrete OpenNI2
// video mode on its stream, apply cropping, and build the StreamInfo
// table describing the concatenated output buffer layout.
void OpenNi2Video::SetupStreamModes()
{
    streams_properties = &frame_properties["streams"];
    *streams_properties = picojson::value(picojson::array_type,false);
    streams_properties->get<picojson::array>().resize(numStreams);

    use_depth = false;
    use_ir = false;
    use_rgb = false;
    depth_to_color = false;
    use_ir_and_rgb = false;

    sizeBytes =0;
    for(size_t i=0; i<numStreams; ++i) {
        const OpenNiStreamMode& mode = sensor_type[i];
        // Map the Pangolin sensor type to an OpenNI2 sensor + pixel format,
        // and record which sensor categories are in use.
        openni::SensorType nisensortype;
        openni::PixelFormat nipixelfmt;

        switch( mode.sensor_type ) {
        case OpenNiDepth_1mm_Registered:
            depth_to_color = true;
            nisensortype = openni::SENSOR_DEPTH;
            nipixelfmt = openni::PIXEL_FORMAT_DEPTH_1_MM;
            use_depth = true;
            break;
        case OpenNiDepth_1mm:
            nisensortype = openni::SENSOR_DEPTH;
            nipixelfmt = openni::PIXEL_FORMAT_DEPTH_1_MM;
            use_depth = true;
            break;
        case OpenNiDepth_100um:
            nisensortype = openni::SENSOR_DEPTH;
            nipixelfmt = openni::PIXEL_FORMAT_DEPTH_100_UM;
            use_depth = true;
            break;
        case OpenNiIrProj:
        case OpenNiIr:
            nisensortype = openni::SENSOR_IR;
            nipixelfmt = openni::PIXEL_FORMAT_GRAY16;
            use_ir = true;
            break;
        case OpenNiIr24bit:
            nisensortype = openni::SENSOR_IR;
            nipixelfmt = openni::PIXEL_FORMAT_RGB888;
            use_ir = true;
            break;
        case OpenNiIr8bitProj:
        case OpenNiIr8bit:
            nisensortype = openni::SENSOR_IR;
            nipixelfmt = openni::PIXEL_FORMAT_GRAY8;
            use_ir = true;
            break;
        case OpenNiRgb:
            nisensortype = openni::SENSOR_COLOR;
            nipixelfmt = openni::PIXEL_FORMAT_RGB888;
            use_rgb = true;
            break;
        case OpenNiGrey:
            nisensortype = openni::SENSOR_COLOR;
            nipixelfmt = openni::PIXEL_FORMAT_GRAY8;
            use_rgb = true;
            break;
        case OpenNiUnassigned:
        default:
            continue;
        }

        openni::VideoMode onivmode;
        try {
            onivmode = FindOpenNI2Mode(devices[mode.device], nisensortype, mode.dim.x, mode.dim.y, mode.fps, nipixelfmt);
        }catch(const VideoException& e) {
            // Help the user pick a valid mode before propagating the failure.
            pango_print_error("Unable to find compatible OpenNI Video Mode. Please choose from:\n");
            PrintOpenNI2Modes(nisensortype);
            fflush(stdout);
            throw e;
        }

        openni::Status rc;
        if(!devices[mode.device].isFile()){//trying to setVideoMode on a file results in an OpenNI error
            rc = video_stream[i].setVideoMode(onivmode);
            if(rc != openni::STATUS_OK)
                throw VideoException("Couldn't set OpenNI VideoMode", openni::OpenNI::getExtendedError());
        }

        // Optional hardware cropping shrinks the advertised output size.
        int outputWidth = onivmode.getResolutionX();
        int outputHeight = onivmode.getResolutionY();

        if (mode.roi.w && mode.roi.h) {
            rc = video_stream[i].setCropping(mode.roi.x,mode.roi.y,mode.roi.w,mode.roi.h);
            if(rc != openni::STATUS_OK)
                throw VideoException("Couldn't set OpenNI cropping", openni::OpenNI::getExtendedError());

            outputWidth = mode.roi.w;
            outputHeight = mode.roi.h;
        }

        // Stream offsets are byte offsets into the concatenated frame
        // buffer, encoded as pointers from address 0.
        const PixelFormat fmt = VideoFormatFromOpenNI2(nipixelfmt);
        const StreamInfo stream(
            fmt, outputWidth, outputHeight,
            (outputWidth * fmt.bpp) / 8,
            (unsigned char*)0 + sizeBytes
        );

        sizeBytes += stream.SizeBytes();
        streams.push_back(stream);
    }

    SetRegisterDepthToImage(depth_to_color);

    // IR and RGB cannot stream simultaneously on these sensors; GrabNext
    // start/stops streams around each read when both are requested.
    use_ir_and_rgb = use_rgb && use_ir;
}
// Populate the JSON device/stream property trees with the current
// OpenNI2 device parameters (USB interface, mirroring, serial number)
// and per-stream depth-camera calibration/configuration values.
// Properties a device/stream does not expose are silently omitted.
void OpenNi2Video::UpdateProperties()
{
    picojson::value& jsopenni = device_properties["openni"];

    picojson::value& jsdevices = jsopenni["devices"];
    jsdevices = picojson::value(picojson::array_type,false);
    jsdevices.get<picojson::array>().resize(numDevices);

    for (size_t i=0; i<numDevices; ++i) {
        picojson::value& jsdevice = jsdevices[i];
#define SET_PARAM(param_type, param) \
        { \
            param_type val; \
            if(devices[i].getProperty(param, &val) == openni::STATUS_OK) { \
                jsdevice[#param] = val; \
            } \
        }
        SET_PARAM( unsigned long long, XN_MODULE_PROPERTY_USB_INTERFACE );
        SET_PARAM( bool, XN_MODULE_PROPERTY_MIRROR );
        char serialNumber[1024];
        devices[i].getProperty(ONI_DEVICE_PROPERTY_SERIAL_NUMBER, &serialNumber);
        jsdevice["ONI_DEVICE_PROPERTY_SERIAL_NUMBER"] = std::string(serialNumber);
#undef SET_PARAM
    }

    picojson::value& stream = jsopenni["streams"];
    stream = picojson::value(picojson::array_type,false);
    stream.get<picojson::array>().resize(Streams().size());

    for(unsigned int i=0; i<Streams().size(); ++i) {
        if(sensor_type[i].sensor_type != OpenNiUnassigned)
        {
#define SET_PARAM(param_type, param) \
            {\
                param_type val; \
                if(video_stream[i].getProperty(param, &val) == openni::STATUS_OK) { \
                    jsstream[#param] = val; \
                } \
            }
            picojson::value& jsstream = stream[i];
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_INPUT_FORMAT );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_CROPPING_MODE );

            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_CLOSE_RANGE );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_WHITE_BALANCE_ENABLED );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_GAIN );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_HOLE_FILTER );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_REGISTRATION_TYPE );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_CONST_SHIFT );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_PIXEL_SIZE_FACTOR );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_MAX_SHIFT );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_PARAM_COEFF );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_SHIFT_SCALE );
            SET_PARAM( unsigned long long, XN_STREAM_PROPERTY_ZERO_PLANE_DISTANCE );
            SET_PARAM( double, XN_STREAM_PROPERTY_ZERO_PLANE_PIXEL_SIZE );
            SET_PARAM( double, XN_STREAM_PROPERTY_EMITTER_DCMOS_DISTANCE );
            SET_PARAM( double, XN_STREAM_PROPERTY_DCMOS_RCMOS_DISTANCE );
#undef SET_PARAM
        }
    }
}
// Enable/disable horizontal mirroring on every stream.
// Streams that don't support the property silently ignore it.
void OpenNi2Video::SetMirroring(bool enable)
{
    for(unsigned int s = 0; s < Streams().size(); ++s) {
        video_stream[s].setMirroringEnabled(enable);
    }
}
void OpenNi2Video::SetAutoExposure(bool enable)
{
// Set this property on all streams exposing CameraSettings
for(unsigned int i=0; i<Streams().size(); ++i) {
openni::CameraSettings* cam = video_stream[i].getCameraSettings();
if(cam) cam->setAutoExposureEnabled(enable);
}
}
void OpenNi2Video::SetAutoWhiteBalance(bool enable)
{
// Set this property on all streams exposing CameraSettings
for(unsigned int i=0; i<Streams().size(); ++i) {
openni::CameraSettings* cam = video_stream[i].getCameraSettings();
if(cam) cam->setAutoWhiteBalanceEnabled(enable);
}
}
// Toggle the depth sensor's close-range mode on every stream.
// Failures are ignored; the property only applies to depth streams.
void OpenNi2Video::SetDepthCloseRange(bool enable)
{
    for(unsigned int s = 0; s < Streams().size(); ++s) {
        video_stream[s].setProperty(XN_STREAM_PROPERTY_CLOSE_RANGE, enable);
    }
}
// Toggle the driver-side depth hole filter on every stream.
// Failures are ignored; the property only applies to depth streams.
void OpenNi2Video::SetDepthHoleFilter(bool enable)
{
    for(unsigned int s = 0; s < Streams().size(); ++s) {
        video_stream[s].setProperty(XN_STREAM_PROPERTY_HOLE_FILTER, enable);
        // Preserved from the original: gain is unconditionally forced to 50
        // whenever the hole filter is toggled.
        video_stream[s].setProperty(XN_STREAM_PROPERTY_GAIN, 50);
    }
}
// Enable/disable hardware depth/colour frame synchronisation per device.
void OpenNi2Video::SetDepthColorSyncEnabled(bool enable)
{
    for(size_t d = 0; d < numDevices; ++d) {
        devices[d].setDepthColorSyncEnabled(enable);
    }
}
// Toggle "fast zoom crop": when enabled, cropping also switches the
// sensor into its increased-fps cropping mode.
void OpenNi2Video::SetFastCrop(bool enable)
{
    // Vendor property id not exposed by the public headers in use.
    const uint32_t pango_XN_STREAM_PROPERTY_FAST_ZOOM_CROP = 0x1080F009;
    for (unsigned int s = 0; s < Streams().size(); ++s) {
        video_stream[s].setProperty(pango_XN_STREAM_PROPERTY_FAST_ZOOM_CROP, enable);
        video_stream[s].setProperty(XN_STREAM_PROPERTY_CROPPING_MODE, enable ? XN_CROPPING_MODE_INCREASED_FPS : XN_CROPPING_MODE_NORMAL);
    }
}
void OpenNi2Video::SetRegisterDepthToImage(bool enable)
{
if(enable) {
for(size_t i = 0 ; i < numDevices; i++) {
devices[i].setImageRegistrationMode(openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR);
}
}else{
for(size_t i = 0 ; i < numDevices ; i++) {
devices[i].setImageRegistrationMode(openni::IMAGE_REGISTRATION_OFF);
}
}
}
// Set playback speed on every device that supports playback control
// (i.e. recordings); live devices have no control and are skipped.
void OpenNi2Video::SetPlaybackSpeed(float speed)
{
    for(size_t d = 0; d < numDevices; ++d) {
        if(openni::PlaybackControl* control = devices[d].getPlaybackControl()) {
            control->setSpeed(speed);
        }
    }
}
// Enable/disable looping on every device that supports playback control.
void OpenNi2Video::SetPlaybackRepeat(bool enabled)
{
    for(size_t d = 0; d < numDevices; ++d) {
        if(openni::PlaybackControl* control = devices[d].getPlaybackControl()) {
            control->setRepeatEnabled(enabled);
        }
    }
}
// Stop all streams, destroy them, and shut down the OpenNI runtime.
OpenNi2Video::~OpenNi2Video()
{
    Stop();

    for(size_t i=0; i<numStreams; ++i) {
        if( video_stream[i].isValid()) {
            video_stream[i].destroy();
        }
    }

    openni::OpenNI::shutdown();
}
// Total size in bytes of one concatenated multi-stream frame.
size_t OpenNi2Video::SizeBytes() const
{
    return sizeBytes;
}
// Descriptions of the streams published by this source, in buffer order.
const std::vector<StreamInfo>& OpenNi2Video::Streams() const
{
    return streams;
}
// Begin capture on every stream.
void OpenNi2Video::Start()
{
    for(unsigned int i=0; i<Streams().size(); ++i) {
        video_stream[i].start();
    }
}
// Halt capture on every stream; capture can be resumed with Start().
void OpenNi2Video::Stop()
{
    for(unsigned int i=0; i<Streams().size(); ++i) {
        video_stream[i].stop();
    }
}
// Return a pointer to the underlying OpenNI stream at the given index,
// or NULL (with an error message) when that stream was never created.
openni::VideoStream * OpenNi2Video::GetVideoStream(int stream){
    if(!video_stream[stream].isValid()) {
        pango_print_error("Error getting stream: %d \n%s",stream, openni::OpenNI::getExtendedError() );
        return NULL;
    }
    return &video_stream[stream];
}
// Read one frame from each configured stream into the caller's buffer,
// streams concatenated in declaration order, and record per-stream
// device timestamps in the frame properties.
// When both IR and RGB are configured, each stream is started/stopped
// around its read because the sensors cannot stream both at once.
// Returns true only if the LAST processed stream read successfully
// (earlier failures are printed but overwritten in rc).
bool OpenNi2Video::GrabNext( unsigned char* image, bool /*wait*/ )
{
    unsigned char* out_img = image;

    openni::Status rc = openni::STATUS_OK;

    for(unsigned int i=0; i<Streams().size(); ++i) {
        if(sensor_type[i].sensor_type == OpenNiUnassigned) {
            rc = openni::STATUS_OK;
            continue;
        }

        if(!video_stream[i].isValid()) {
            rc = openni::STATUS_NO_DEVICE;
            continue;
        }

        if(use_ir_and_rgb) video_stream[i].start();

        rc = video_stream[i].readFrame(&video_frame[i]);
        // NOTE(review): result of this call is discarded — looks like
        // leftover debug code; the real read happens below the loop.
        video_frame[0].getFrameIndex();
        if(rc != openni::STATUS_OK) {
            pango_print_error("Error reading frame:\n%s", openni::OpenNI::getExtendedError() );
        }

        // Dead branch kept from the original (toGreyscale is always false).
        // NOTE(review): its inner loop shadows the outer stream index i.
        const bool toGreyscale = false;
        if(toGreyscale) {
            const int w = streams[i].Width();
            const int h = streams[i].Height();

            openni::RGB888Pixel* pColour = (openni::RGB888Pixel*)video_frame[i].getData();
            for(int i = 0 ; i  < w*h;i++){
                openni::RGB888Pixel rgb = pColour[i];
                int grey = ((int)(rgb.r&0xFF) +  (int)(rgb.g&0xFF) + (int)(rgb.b&0xFF))/3;
                grey = std::min(255,std::max(0,grey));
                out_img[i] = grey;
            }
        }else{
            memcpy(out_img, video_frame[i].getData(), streams[i].SizeBytes());
        }

        // update frame properties
        (*streams_properties)[i]["devtime_us"] = video_frame[i].getTimestamp();

        if(use_ir_and_rgb) video_stream[i].stop();

        out_img += streams[i].SizeBytes();
    }

    current_frame_index = video_frame[0].getFrameIndex();

    return rc == openni::STATUS_OK;
}
// No frame queue to skip through in this driver: newest == next.
bool OpenNi2Video::GrabNewest( unsigned char* image, bool wait )
{
    return GrabNext(image,wait);
}
// Index of the most recently grabbed frame (from stream 0).
size_t OpenNi2Video::GetCurrentFrameId() const
{
    return current_frame_index;
}
// Recording length in frames; SIZE_MAX for live (unbounded) devices.
size_t OpenNi2Video::GetTotalFrames() const
{
    return total_frames;
}
// Seek a playback device to the given frame (via stream 0).
// Returns the frame id on success; on a live device (no playback
// control) returns -1, which wraps to SIZE_MAX in the size_t return.
size_t OpenNi2Video::Seek(size_t frameid)
{
    openni::PlaybackControl* control = devices[0].getPlaybackControl();
    if(control) {
        control->seek(video_stream[0], frameid);
        return frameid;
    }else{
        return -1;
    }
}
// Register the OpenNI 2.x driver. URI options: size, roi, fps, realtime,
// closerange, holefilter, coloursync, fastcrop, and img1/img2/... stream
// mode specifications. An empty URL opens all attached devices.
PANGOLIN_REGISTER_FACTORY(OpenNi2Video)
{
    struct OpenNI2VideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const bool realtime = uri.Contains("realtime");
            const ImageDim default_dim = uri.Get<ImageDim>("size", ImageDim(640,480));
            const ImageRoi default_roi = uri.Get<ImageRoi>("roi", ImageRoi(0,0,0,0));
            const unsigned int default_fps = uri.Get<unsigned int>("fps", 30);

            // Collect img1, img2, ... stream specs until the first gap.
            std::vector<OpenNiStreamMode> stream_modes;

            int num_streams = 0;
            std::string simg= "img1";
            while(uri.Contains(simg)) {
                OpenNiStreamMode stream = uri.Get<OpenNiStreamMode>(simg, OpenNiStreamMode(OpenNiRgb,default_dim,default_roi,default_fps,0));
                stream_modes.push_back(stream);
                ++num_streams;
                simg = "img" + ToString(num_streams+1);
            }

            // Choose a constructor: explicit file/device URL, explicit
            // stream list, or default depth+rgb on all devices.
            OpenNi2Video* nivid;
            if(!uri.url.empty()) {
                nivid = new OpenNi2Video(pangolin::PathExpand(uri.url));
            }else if(stream_modes.size()) {
                nivid = new OpenNi2Video(stream_modes);
            }else{
                nivid = new OpenNi2Video(default_dim, default_roi, default_fps);
            }

            nivid->SetDepthCloseRange( uri.Get<bool>("closerange",false) );
            nivid->SetDepthHoleFilter( uri.Get<bool>("holefilter",false) );
            nivid->SetDepthColorSyncEnabled( uri.Get<bool>("coloursync",false) );
            nivid->SetFastCrop( uri.Get<bool>("fastcrop",false) );
            // NOTE(review): -1 playback speed presumably means "as fast as
            // possible" for recordings when realtime is not requested; verify
            // against OpenNI PlaybackControl::setSpeed documentation.
            nivid->SetPlaybackSpeed(realtime ? 1.0f : -1.0f);
            nivid->SetAutoExposure(true);
            nivid->SetAutoWhiteBalance(true);
            nivid->SetMirroring(false);

            nivid->UpdateProperties();

            nivid->Start();

            return std::unique_ptr<VideoInterface>(nivid);
        }
    };

    auto factory = std::make_shared<OpenNI2VideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "openni");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "openni2");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "oni");
}
}

View File

@@ -0,0 +1,260 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/pack.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#ifdef DEBUGUNPACK
#include <pangolin/utils/timer.h>
#define TSTART() pangolin::basetime start,last,now; start = pangolin::TimeNow(); last = start;
#define TGRABANDPRINT(...) now = pangolin::TimeNow(); fprintf(stderr,"UNPACK: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " %fms.\n",1000*pangolin::TimeDiff_s(last, now)); last = now;
#define DBGPRINT(...) fprintf(stderr,"UNPACK: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr,"\n");
#else
#define TSTART()
#define TGRABANDPRINT(...)
#define DBGPRINT(...)
#endif
namespace pangolin
{
// Wrap a single-channel source and expose its streams re-packed to 'out_fmt'.
// Takes ownership of 'src_'; allocates a staging buffer sized for one frame
// of the child source.
PackVideo::PackVideo(std::unique_ptr<VideoInterface> &src_, PixelFormat out_fmt)
    : src(std::move(src_)), size_bytes(0), buffer(0)
{
    if( !src || out_fmt.channels != 1) {
        throw VideoException("PackVideo: Only supports single channel input.");
    }

    videoin.push_back(src.get());

    for(size_t s=0; s< src->Streams().size(); ++s) {
        const StreamInfo& stream_in = src->Streams()[s];
        const size_t w = stream_in.Width();
        const size_t h = stream_in.Height();

        // Packing is only defined for one-channel sources of at most 16 bits.
        const PixelFormat in_fmt = stream_in.PixFormat();
        if(in_fmt.channels > 1 || in_fmt.bpp > 16) {
            throw VideoException("PackVideo: Only supports one channel input.");
        }

        // Bytes per packed row, rounded up so a partially-filled final byte
        // still has room (ceiling division).
        const size_t pitch = (w*out_fmt.bpp + 7) / 8;
        streams.push_back(pangolin::StreamInfo( out_fmt, w, h, pitch, (unsigned char*)0 + size_bytes ));
        size_bytes += h*pitch;
    }

    buffer = new unsigned char[src->SizeBytes()];
}
// Release the staging buffer used for frames from the child driver.
PackVideo::~PackVideo()
{
    delete[] buffer;
}

//! Implement VideoInput::Start() - forwarded to the wrapped source.
void PackVideo::Start()
{
    videoin[0]->Start();
}

//! Implement VideoInput::Stop() - forwarded to the wrapped source.
void PackVideo::Stop()
{
    videoin[0]->Stop();
}

//! Implement VideoInput::SizeBytes() - size of one packed frame (all streams).
size_t PackVideo::SizeBytes() const
{
    return size_bytes;
}

//! Implement VideoInput::Streams() - describes the packed output streams.
const std::vector<StreamInfo>& PackVideo::Streams() const
{
    return streams;
}
// Truncate each input pixel (stored in a container of type T, e.g. uint16_t
// for GRAY16LE) to its low 8 bits, one output byte per pixel.
//
// Fix: the original had the pointer roles reversed relative to the 10/12-bit
// packers below -- it walked the *input* byte-by-byte while writing T-sized
// values to the *output*. As invoked from Process with T = uint16_t, that
// wrote 2*in.pitch bytes into an output row of in.pitch/2 bytes: a buffer
// overrun. We now consume one T per pixel and emit exactly one byte.
// NOTE(review): this keeps the low 8 bits, consistent with the masking in
// ConvertTo10bit/ConvertTo12bit; confirm a '>> (bits-8)' rescale isn't
// wanted instead.
template<typename T>
void ConvertTo8bit(
    Image<unsigned char>& out,
    const Image<unsigned char>& in
) {
    for(size_t r=0; r<out.h; ++r) {
        uint8_t* pout = out.ptr + r*out.pitch;
        const T* pin = (const T*)(in.ptr + r*in.pitch);
        const T* pin_end = (const T*)(in.ptr + (r+1)*in.pitch);
        while(pin != pin_end) {
            *(pout++) = static_cast<uint8_t>(*(pin++));
        }
    }
}
// Pack rows of pixels held in containers of type T (uint16_t for GRAY16LE)
// into a dense 10-bit stream: every four input pixels become five bytes.
// NOTE(review): input is consumed in groups of four, so in.pitch is assumed
// to be a multiple of 4*sizeof(T) -- TODO confirm for odd widths.
template<typename T>
void ConvertTo10bit(
    Image<unsigned char>& out,
    const Image<unsigned char>& in
) {
    for(size_t r=0; r<out.h; ++r) {
        uint8_t* pout = out.ptr + r*out.pitch;
        T* pin = (T*)(in.ptr + r*in.pitch);
        const T* pin_end = (T*)(in.ptr + (r+1)*in.pitch);
        while(pin != pin_end) {
            // Accumulate four 10-bit samples into one 40-bit word...
            uint64_t val = (*(pin++) & 0x00000003FF);
            val |= uint64_t(*(pin++) & 0x00000003FF) << 10;
            val |= uint64_t(*(pin++) & 0x00000003FF) << 20;
            val |= uint64_t(*(pin++) & 0x00000003FF) << 30;
            // ...and emit it as five little-endian bytes.
            *(pout++) = uint8_t( val & 0x00000000FF);
            *(pout++) = uint8_t((val & 0x000000FF00) >> 8);
            *(pout++) = uint8_t((val & 0x0000FF0000) >> 16);
            *(pout++) = uint8_t((val & 0x00FF000000) >> 24);
            *(pout++) = uint8_t((val & 0xFF00000000) >> 32);
        }
    }
}
// Pack rows of pixels held in containers of type T (uint16_t for GRAY16LE)
// into a dense 12-bit stream: every two input pixels become three bytes.
// NOTE(review): input is consumed in pairs, so in.pitch is assumed to be a
// multiple of 2*sizeof(T) -- TODO confirm for odd widths.
template<typename T>
void ConvertTo12bit(
    Image<unsigned char>& out,
    const Image<unsigned char>& in
) {
    for(size_t r=0; r<out.h; ++r) {
        uint8_t* pout = out.ptr + r*out.pitch;
        T* pin = (T*)(in.ptr + r*in.pitch);
        const T* pin_end = (T*)(in.ptr + (r+1)*in.pitch);
        while(pin != pin_end) {
            // Two 12-bit samples -> one 24-bit word...
            uint32_t val = (*(pin++) & 0x00000FFF);
            val |= uint32_t(*(pin++) & 0x00000FFF) << 12;
            // ...emitted as three little-endian bytes.
            *(pout++) = uint8_t( val & 0x000000FF);
            *(pout++) = uint8_t((val & 0x0000FF00) >> 8);
            *(pout++) = uint8_t((val & 0x00FF0000) >> 16);
        }
    }
}
// Re-pack one grabbed frame: for each stream, convert the child's GRAY16LE
// pixels in 'buffer' into the configured output bit depth in 'image'.
// Throws for unsupported input formats or output bit depths.
void PackVideo::Process(unsigned char* image, const unsigned char* buffer)
{
    TSTART()
    for(size_t s=0; s<streams.size(); ++s) {
        const Image<unsigned char> img_in  = videoin[0]->Streams()[s].StreamImage(buffer);
        Image<unsigned char> img_out = Streams()[s].StreamImage(image);

        // Only 16-bit little-endian grey input is currently handled.
        if(videoin[0]->Streams()[s].PixFormat().format != "GRAY16LE") {
            throw pangolin::VideoException("Unsupported input pix format.");
        }

        // Dispatch on the requested packed output depth.
        switch(Streams()[s].PixFormat().bpp) {
        case 8:
            ConvertTo8bit<uint16_t>(img_out, img_in);
            break;
        case 10:
            ConvertTo10bit<uint16_t>(img_out, img_in);
            break;
        case 12:
            ConvertTo12bit<uint16_t>(img_out, img_in);
            break;
        default:
            throw pangolin::VideoException("Unsupported bitdepths.");
        }
    }
    TGRABANDPRINT("Packing took ")
}
//! Implement VideoInput::GrabNext()
// Grab from the child into the staging buffer, then pack into 'image'.
bool PackVideo::GrabNext( unsigned char* image, bool wait )
{
    const bool grabbed = videoin[0]->GrabNext(buffer, wait);
    if(grabbed) {
        Process(image, buffer);
    }
    return grabbed;
}
//! Implement VideoInput::GrabNewest()
// Same as GrabNext but asks the child for its most recent frame.
bool PackVideo::GrabNewest( unsigned char* image, bool wait )
{
    const bool grabbed = videoin[0]->GrabNewest(buffer, wait);
    if(grabbed) {
        Process(image, buffer);
    }
    return grabbed;
}
// Expose the wrapped child source (for filter-chain introspection).
std::vector<VideoInterface*>& PackVideo::InputStreams()
{
    return videoin;
}
unsigned int PackVideo::AvailableFrames() const
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin[0]);
if(!vpi)
{
pango_print_warn("Pack: child interface is not buffer aware.");
return 0;
}
else
{
return vpi->AvailableFrames();
}
}
bool PackVideo::DropNFrames(uint32_t n)
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin[0]);
if(!vpi)
{
pango_print_warn("Pack: child interface is not buffer aware.");
return false;
}
else
{
return vpi->DropNFrames(n);
}
}
// Registers the pack:// filter, which wraps another video URI and re-packs
// its single-channel pixels to the bit depth given by the 'fmt' option
// (default GRAY16LE).
PANGOLIN_REGISTER_FACTORY(PackVideo)
{
    struct PackVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            // Open the wrapped source first; PackVideo takes ownership of it.
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            const std::string fmt = uri.Get("fmt", std::string("GRAY16LE") );
            std::unique_ptr<VideoInterface> packed(
                new PackVideo(subvid, PixelFormatFromString(fmt) )
            );
            return packed;
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<PackVideoFactory>(), 10, "pack");
}
}
#undef TSTART
#undef TGRABANDPRINT
#undef DBGPRINT

View File

@@ -0,0 +1,243 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/log/playback_session.h>
#include <pangolin/utils/file_extension.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/utils/signal_slot.h>
#include <pangolin/video/drivers/pango.h>
#include <pangolin/video/iostream_operators.h>
#include <functional>
namespace pangolin
{
const std::string pango_video_type = "raw_video";
// Open a .pango log for playback. The shared playback session coordinates
// timing across multiple log-backed devices; FindPacketStreamSource locates
// the first source recorded by the "raw_video" driver within the log.
PangoVideo::PangoVideo(const std::string& filename, std::shared_ptr<PlaybackSession> playback_session)
    : _filename(filename),
      _playback_session(playback_session),
      _reader(_playback_session->Open(filename)),
      _event_promise(_playback_session->Time()),
      _src_id(FindPacketStreamSource()),
      _source(nullptr)
{
    PANGO_ENSURE(_src_id != -1, "No appropriate video streams found in log.");

    _source = &_reader->Sources()[_src_id];
    SetupStreams(*_source);

    // Make sure we time-seek with other playback devices
    // On a session-wide seek: abandon the pending frame wait, reposition the
    // reader, then start waiting for the packet at the new position.
    session_seek = _playback_session->Time().OnSeek.Connect(
        [&](SyncTime::TimePoint t){
            _event_promise.Cancel();
            _reader->Seek(_src_id, t);
            _event_promise.WaitAndRenew(_source->NextPacketTime());
        }
    );

    // Block until the first packet is due, honouring the session clock.
    _event_promise.WaitAndRenew(_source->NextPacketTime());
}
PangoVideo::~PangoVideo()
{
    // Members (reader, session connection, promise) clean up via RAII.
}

// Size of one complete frame across all streams (fixed for raw logs,
// accumulated in SetupStreams for variable-size logs).
size_t PangoVideo::SizeBytes() const
{
    return _size_bytes;
}

const std::vector<StreamInfo>& PangoVideo::Streams() const
{
    return _streams;
}

// Playback from disk requires no explicit start/stop.
void PangoVideo::Start()
{
}

void PangoVideo::Stop()
{
}
// Read the next frame for this source from the log into 'image'.
// Returns false on any failure, clearing the frame properties. The
// catch-all below is deliberate: NextFrame signals end-of-stream (and other
// read failures) by throwing, and GrabNext reports that as 'false'.
bool PangoVideo::GrabNext(unsigned char* image, bool /*wait*/)
{
    try
    {
        Packet fi = _reader->NextFrame(_src_id);
        _frame_properties = fi.meta;

        if(_fixed_size) {
            // Uncompressed log: the frame is one contiguous blob.
            fi.Stream().read(reinterpret_cast<char*>(image), _size_bytes);
        }else{
            // Variable-size log: decode or copy each stream individually.
            for(size_t s=0; s < _streams.size(); ++s) {
                StreamInfo& si = _streams[s];
                pangolin::Image<unsigned char> dst = si.StreamImage(image);

                if(stream_decoder[s]) {
                    pangolin::TypedImage img = stream_decoder[s](fi.Stream());
                    PANGO_ENSURE(img.IsValid());

                    // TODO: We can avoid this copy by decoding directly into img
                    for(size_t row =0; row < dst.h; ++row) {
                        std::memcpy(dst.RowPtr(row), img.RowPtr(row), si.RowBytes());
                    }
                }else{
                    // Raw stream: copy row by row to respect destination pitch.
                    for(size_t row =0; row < dst.h; ++row) {
                        fi.Stream().read((char*)dst.RowPtr(row), si.RowBytes());
                    }
                }
            }
        }

        // Wait until the next packet is due so playback keeps session pacing.
        _event_promise.WaitAndRenew(_source->NextPacketTime());
        return true;
    }
    catch(...)
    {
        _frame_properties = picojson::value();
        return false;
    }
}
// A log never buffers ahead, so 'newest' is simply the next frame.
bool PangoVideo::GrabNewest( unsigned char* image, bool wait )
{
    return GrabNext(image, wait);
}

// Id of the most recently delivered frame.
// NOTE(review): yields (size_t)-1 before the first frame has been read
// (next_packet_id == 0) -- callers appear to rely on that -1 convention.
size_t PangoVideo::GetCurrentFrameId() const
{
    return (int)(_reader->Sources()[_src_id].next_packet_id) - 1;
}

// Number of frames recorded for this source.
size_t PangoVideo::GetTotalFrames() const
{
    return _source->index.size();
}
// Seek to the requested frame by translating its recorded capture time into
// a session-wide time seek, so sibling playback devices stay in sync (the
// OnSeek handler installed in the constructor then repositions the reader).
// Out-of-range requests are ignored and the current position is returned.
size_t PangoVideo::Seek(size_t next_frame_id)
{
    // Get time for seek
    if(next_frame_id < _source->index.size()) {
        const int64_t capture_time = _source->index[next_frame_id].capture_time;
        _playback_session->Time().Seek(SyncTime::TimePoint(std::chrono::microseconds(capture_time)));
        return next_frame_id;
    }else{
        return _source->next_packet_id;
    }
}
// URI the stream was originally captured from (recorded in the log header).
std::string PangoVideo::GetSourceUri()
{
    return _source_uri;
}
// Return the id of the first source in the log that was recorded with the
// "raw_video" driver, or -1 when the log contains no such source.
int PangoVideo::FindPacketStreamSource()
{
    for(const auto& src : _reader->Sources())
    {
        if (src.driver == pango_video_type)
        {
            return static_cast<int>(src.id);
        }
    }
    return -1;
}
// Populate _streams, _size_bytes, decoders and device properties from the
// source's JSON header. Each stream entry carries its encoding, geometry and
// byte offset; a stream recorded compressed additionally carries a "decoded"
// field naming the pixel format its decoder produces.
void PangoVideo::SetupStreams(const PacketStreamSource& src)
{
    // Read sources header
    _fixed_size = src.data_size_bytes != 0;   // 0 => variable per-frame size
    _size_bytes = src.data_size_bytes;
    _source_uri = src.uri;
    _device_properties = src.info["device"];

    const picojson::value& json_streams = src.info["streams"];
    const size_t num_streams = json_streams.size();

    for (size_t i = 0; i < num_streams; ++i)
    {
        const picojson::value& json_stream = json_streams[i];

        std::string encoding = json_stream["encoding"].get<std::string>();

        // Check if the stream is compressed
        if(json_stream.contains("decoded")) {
            const std::string compressed_encoding = encoding;
            encoding = json_stream["decoded"].get<std::string>();
            const PixelFormat decoded_fmt = PixelFormatFromString(encoding);
            stream_decoder.push_back(StreamEncoderFactory::I().GetDecoder(compressed_encoding, decoded_fmt));
        }else{
            stream_decoder.push_back(nullptr);
        }

        PixelFormat fmt = PixelFormatFromString(encoding);
        fmt.channel_bit_depth = json_stream.get_value<int64_t>("channel_bit_depth", 0);

        StreamInfo si(
            fmt,
            json_stream["width"].get<int64_t>(),
            json_stream["height"].get<int64_t>(),
            json_stream["pitch"].get<int64_t>(),
            (unsigned char*) 0 + json_stream["offset"].get<int64_t>()
        );

        if(!_fixed_size) {
            // Variable-size logs don't record a total, so accumulate it here.
            _size_bytes += si.SizeBytes();
        }

        _streams.push_back(si);
    }
}
// Registers the pango log reader for pango:// URIs (priority 10) and as a
// low-priority handler for plain file:// URIs that look like pango logs.
PANGOLIN_REGISTER_FACTORY(PangoVideo)
{
    // 'final' added for consistency with the other factory structs here.
    struct PangoVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const std::string path = PathExpand(uri.url);

            if( !uri.scheme.compare("pango") || FileType(uri.url) == ImageFileTypePango ) {
                // Pass the std::string directly; the previous c_str() round
                // trip just forced an extra std::string construction.
                return std::unique_ptr<VideoInterface>(new PangoVideo(path, PlaybackSession::ChooseFromParams(uri)));
            }
            return std::unique_ptr<VideoInterface>();
        }
    };

    auto factory = std::make_shared<PangoVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "pango");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory,  5, "file");
}
}

View File

@@ -0,0 +1,303 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/utils/memstreambuf.h>
#include <pangolin/utils/picojson.h>
#include <pangolin/utils/sigstate.h>
#include <pangolin/utils/timer.h>
#include <pangolin/video/drivers/pango_video_output.h>
#include <pangolin/video/iostream_operators.h>
#include <pangolin/video/video_interface.h>
#include <set>
#include <future>
#ifndef _WIN_
# include <unistd.h>
#endif
namespace pangolin
{
const std::string pango_video_type = "raw_video";
// Signal callback: record that SIGPIPE fired so WriteStreams can detect a
// reader that vanished from the other end of a pipe.
void SigPipeHandler(int sig)
{
    SigState::I().sig_callbacks.at(sig).value = true;
}
// Open a .pango log for writing.
// For regular files the packet stream is opened immediately. For named pipes
// opening is deferred to WriteStreams (so we only connect once a reader
// exists) and SIGPIPE is trapped so a disappearing reader does not kill the
// process.
PangoVideoOutput::PangoVideoOutput(const std::string& filename, size_t buffer_size_bytes, const std::map<size_t, std::string> &stream_encoder_uris)
    : filename(filename),
      packetstream_buffer_size_bytes(buffer_size_bytes),
      packetstreamsrcid(-1),
      total_frame_size(0),
      is_pipe(pangolin::IsPipe(filename)),
      fixed_size(true),
      stream_encoder_uris(stream_encoder_uris)
{
    if(!is_pipe)
    {
        packetstream.Open(filename, packetstream_buffer_size_bytes);
    }
    else
    {
        RegisterNewSigCallback(&SigPipeHandler, (void*)this, SIGPIPE);
    }

    // Instantiate encoders
}
PangoVideoOutput::~PangoVideoOutput()
{
    // packetstream closes itself via RAII.
}

// Streams configured via SetStreams().
const std::vector<StreamInfo>& PangoVideoOutput::Streams() const
{
    return streams;
}

// True when writing to a named pipe rather than a regular file.
bool PangoVideoOutput::IsPipe() const
{
    return is_pipe;
}
// Declare the streams this writer will record, plus device properties.
// May only be called once: registers a single PacketStreamSource whose JSON
// header describes every stream (geometry, encoding, buffer offset).
// Throws std::invalid_argument if two streams share a buffer offset, and
// std::runtime_error on a second call.
void PangoVideoOutput::SetStreams(const std::vector<StreamInfo>& st, const std::string& uri, const picojson::value& properties)
{
    // Each stream must map to a distinct region of the frame buffer.
    std::set<unsigned char*> unique_ptrs;
    for (size_t i = 0; i < st.size(); ++i)
    {
        unique_ptrs.insert(st[i].Offset());
    }

    if (unique_ptrs.size() < st.size())
        throw std::invalid_argument("Each image must have unique offset into buffer.");

    if (packetstreamsrcid == -1)
    {
        input_uri = uri;
        streams = st;
        device_properties = properties;

        picojson::value json_header(picojson::object_type, false);
        picojson::value& json_streams = json_header["streams"];
        json_header["device"] = device_properties;

        stream_encoders.resize(streams.size());

        fixed_size = true;
        total_frame_size = 0;
        for (unsigned int i = 0; i < streams.size(); ++i)
        {
            StreamInfo& si = streams[i];
            total_frame_size = std::max(total_frame_size, (size_t) si.Offset() + si.SizeBytes());

            picojson::value& json_stream = json_streams.push_back();

            std::string encoder_name = si.PixFormat().format;
            if(stream_encoder_uris.find(i) != stream_encoder_uris.end() && !stream_encoder_uris[i].empty() ) {
                // instantiate encoder and write it's name to the stream properties
                // A compressed stream records its *decoded* format so playback
                // knows what the decoder will produce; frames then become
                // variable-size.
                json_stream["decoded"] = si.PixFormat().format;
                encoder_name = stream_encoder_uris[i];
                stream_encoders[i] = StreamEncoderFactory::I().GetEncoder(encoder_name, si.PixFormat());
                fixed_size = false;
            }

            json_stream["channel_bit_depth"] = si.PixFormat().channel_bit_depth;
            json_stream["encoding"] = encoder_name;
            json_stream["width"] = si.Width();
            json_stream["height"] = si.Height();
            json_stream["pitch"] = si.Pitch();
            json_stream["offset"] = (size_t) si.Offset();
        }

        PacketStreamSource pss;
        pss.driver = pango_video_type;
        pss.uri = input_uri;
        pss.info = json_header;
        // data_size_bytes == 0 marks a variable-size (compressed) source.
        pss.data_size_bytes = fixed_size ? total_frame_size : 0;
        pss.data_definitions = "struct Frame{ uint8 stream_data[" + pangolin::Convert<std::string, size_t>::Do(total_frame_size) + "];};";

        packetstreamsrcid = (int)packetstream.AddSource(pss);
    } else {
        throw std::runtime_error("Unable to add new streams");
    }
}
// Serialise one frame (all streams) into the packet stream.
// 'frame_properties' travels as packet metadata; its recorded reception
// timestamp is used as the packet time, falling back to 'now'.
// Returns 0 in all cases, including when a pipe has no reader yet.
int PangoVideoOutput::WriteStreams(const unsigned char* data, const picojson::value& frame_properties)
{
    const int64_t host_reception_time_us = frame_properties.get_value(PANGO_HOST_RECEPTION_TIME_US, Time_us(TimeNow()));

#ifndef _WIN_
    if (is_pipe)
    {
        // If there is a reader waiting on the other side of the pipe, open
        // a file descriptor to the file and close it only after the file
        // has been opened by the PacketStreamWriter. This avoids the reader
        // from seeing EOF on its next read because all file descriptors on
        // the write side have been closed.
        //
        // When the stream is already open but the reader has disappeared,
        // opening a file descriptor will fail and errno will be ENXIO.
        int fd = WritablePipeFileDescriptor(filename);

        if (!packetstream.IsOpen())
        {
            if (fd != -1)
            {
                packetstream.Open(filename, packetstream_buffer_size_bytes);
                close(fd);
            }
        }
        else
        {
            if (fd != -1)
            {
                // There's a reader on the other side of the pipe.
                close(fd);
            }
            else
            {
                if (errno == ENXIO)
                {
                    packetstream.ForceClose();
                    SigState::I().sig_callbacks.at(SIGPIPE).value = false;

                    // This should be unnecessary since per the man page,
                    // data should be dropped from the buffer upon closing the
                    // writable file descriptors.
                    pangolin::FlushPipe(filename);
                }
            }
        }

        // No reader connected yet: silently drop this frame.
        if (!packetstream.IsOpen())
            return 0;
    }
#endif

    if(!fixed_size) {
        // TODO: Make this more efficient (without so many allocs and memcpy's)
        std::vector<memstreambuf> encoded_stream_data;

        // Create buffers for compressed data: the first will be reused for all the data later
        encoded_stream_data.emplace_back(total_frame_size);
        for(size_t i=1; i < streams.size(); ++i) {
            encoded_stream_data.emplace_back(streams[i].SizeBytes());
        }

        // lambda encodes frame data i to encoded_stream_data[i]
        auto encode_stream = [&](int i){
            encoded_stream_data[i].clear();
            std::ostream encode_stream(&encoded_stream_data[i]);

            const StreamInfo& si = streams[i];
            const Image<unsigned char> stream_image = si.StreamImage(data);

            if(stream_encoders[i]) {
                // Encode to buffer
                stream_encoders[i](encode_stream, stream_image);
            }else{
                // Uncompressed stream: write raw pixels, row-wise when the
                // image is not contiguous in memory.
                if(stream_image.IsContiguous()) {
                    encode_stream.write((char*)stream_image.ptr, streams[i].SizeBytes());
                }else{
                    for(size_t row=0; row < stream_image.h; ++row) {
                        encode_stream.write((char*)stream_image.RowPtr(row), si.RowBytes());
                    }
                }
            }
            return true;
        };

        // Compress each stream (>0 in another thread)
        std::vector<std::future<bool>> encode_finished;
        for(size_t i=1; i < streams.size(); ++i) {
            encode_finished.emplace_back(std::async(std::launch::async, [&,i](){
                return encode_stream(i);
            }));
        }
        // Encode stream 0 in this thread
        encode_stream(0);

        // Reuse our first compression stream for the rest of the data too.
        std::vector<uint8_t>& encoded = encoded_stream_data[0].buffer;

        // Wait on all threads to finish and copy into data packet
        for(size_t i=1; i < streams.size(); ++i) {
            encode_finished[i-1].get();
            encoded.insert(encoded.end(), encoded_stream_data[i].buffer.begin(), encoded_stream_data[i].buffer.end());
        }

        packetstream.WriteSourcePacket(packetstreamsrcid, reinterpret_cast<const char*>(encoded.data()), host_reception_time_us, encoded.size(), frame_properties);
    }else{
        // Fixed-size frame: write the caller's buffer directly.
        packetstream.WriteSourcePacket(packetstreamsrcid, reinterpret_cast<const char*>(data), host_reception_time_us, total_frame_size, frame_properties);
    }

    return 0;
}
// Registers the pango log writer for pango:// and file:// output URIs.
PANGOLIN_REGISTER_FACTORY(PangoVideoOutput)
{
    struct PangoVideoOutputFactory final : public FactoryInterface<VideoOutputInterface> {
        std::unique_ptr<VideoOutputInterface> Open(const Uri& uri) override {
            const size_t mb = 1024*1024;
            const size_t buffer_size_bytes = uri.Get("buffer_size_mb", 100) * mb;

            std::string filename = uri.url;
            if(uri.Contains("unique_filename")) {
                filename = MakeUniqueFilename(filename);
            }

            // Default encoder applied to every stream unless overridden below
            // (empty string when the option is absent).
            const std::string default_encoder = uri.Get<std::string>("encoder", "");

            // Optional per-stream overrides: encoder1, encoder2, ...
            std::map<size_t, std::string> stream_encoder_uris;
            for(size_t i=0; i<100; ++i)
            {
                const std::string encoder_key = pangolin::FormatString("encoder%",i+1);
                stream_encoder_uris[i] = uri.Get<std::string>(encoder_key, default_encoder);
            }

            return std::unique_ptr<VideoOutputInterface>(
                new PangoVideoOutput(filename, buffer_size_bytes, stream_encoder_uris)
            );
        }
    };

    auto factory = std::make_shared<PangoVideoOutputFactory>();
    FactoryRegistry<VideoOutputInterface>::I().RegisterFactory(factory, 10, "pango");
    FactoryRegistry<VideoOutputInterface>::I().RegisterFactory(factory, 10, "file");
}
}

View File

@@ -0,0 +1,744 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2015 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/pleora.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#include <thread>
#ifdef DEBUGPLEORA
#include <pangolin/utils/timer.h>
#define TSTART() pangolin::basetime start,last,now; start = pangolin::TimeNow(); last = start;
#define TGRABANDPRINT(...) now = pangolin::TimeNow(); fprintf(stderr," PLEORA: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " %fms.\n",1000*pangolin::TimeDiff_s(last, now)); last = now;
#define DBGPRINT(...) fprintf(stderr," PLEORA: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr,"\n");
#else
#define TSTART()
#define TGRABANDPRINT(...)
#define DBGPRINT(...)
#endif
namespace pangolin
{
// Convert a failed PvResult into a std::runtime_error carrying its code
// string; no-op on success.
inline void ThrowOnFailure(const PvResult& res)
{
    if(res.IsFailure()) {
        throw std::runtime_error("Failure: " + std::string(res.GetCodeString().GetAscii()) );
    }
}
// Maps a C++ value type onto the corresponding GenICam parameter wrapper,
// so GetParam/SetParam below can be written once, generically.
template<typename T>
struct PleoraParamTraits;

template<> struct PleoraParamTraits<bool> {
    typedef PvGenBoolean PvType;
};
template<> struct PleoraParamTraits<int64_t> {
    typedef PvGenInteger PvType;
};
template<> struct PleoraParamTraits<float> {
    typedef PvGenFloat PvType;
};
template<> struct PleoraParamTraits<std::string> {
    typedef PvGenString PvType;
};
// Fetch GenICam parameter 'name' from 'params' as type T.
// Throws std::runtime_error when the parameter has a different type or the
// read itself fails.
template<typename T>
T GetParam(PvGenParameterArray* params, const char* name)
{
    typedef typename PleoraParamTraits<T>::PvType PvType;
    PvType* const handle = dynamic_cast<PvType*>( params->Get(name) );
    if(!handle) {
        throw std::runtime_error("Incorrect type");
    }
    T value;
    const PvResult result = handle->GetValue(value);
    if(result.IsFailure()) {
        throw std::runtime_error("Cannot get value: " + std::string(result.GetCodeString().GetAscii()) );
    }
    return value;
}
// Write 'val' to GenICam parameter 'name'. Returns true on success and
// throws std::runtime_error when the handle is missing, read-only, or the
// write fails.
template<typename T>
bool SetParam(PvGenParameterArray* params, const char* name, T val)
{
    typedef typename PleoraParamTraits<T>::PvType PvType;
    PvType* const handle = dynamic_cast<PvType*>( params->Get(name) );
    if(!handle) {
        throw std::runtime_error("Unable to get parameter handle: " + std::string(name) );
    }
    if(!handle->IsWritable()) {
        throw std::runtime_error("Cannot set value for " + std::string(name) );
    }
    const PvResult result = handle->SetValue(val);
    if(result.IsFailure()) {
        throw std::runtime_error("Cannot set value: " + std::string(result.GetCodeString().GetAscii()) );
    }
    return true;
}
// Enumerate all devices visible to 'aSystem' and return the index-th one
// whose model name / serial number match the given filters (null = match
// anything). Returns null when no such device exists.
inline const PvDeviceInfo* SelectDevice( PvSystem& aSystem, const char* model_name = 0, const char* serial_num = 0, size_t index = 0 )
{
    aSystem.Find();

    size_t matches = 0;
    for ( uint32_t i = 0; i < aSystem.GetInterfaceCount(); i++ ) {
        const PvInterface* iface = dynamic_cast<const PvInterface*>( aSystem.GetInterface( i ) );
        if ( !iface ) continue;

        for ( uint32_t j = 0; j < iface->GetDeviceCount(); j++ ) {
            const PvDeviceInfo* info = dynamic_cast<const PvDeviceInfo*>( iface->GetDeviceInfo( j ) );
            if ( !info || !info->IsConfigurationValid() ) continue;

            // Skip candidates failing either filter.
            if( model_name && strcmp(info->GetModelName().GetAscii(), model_name) ) continue;
            if( serial_num && strcmp(info->GetSerialNumber().GetAscii(), serial_num) ) continue;

            if( matches == index ) {
                return info;
            }
            ++matches;
        }
    }

    return 0;
}
// Map the camera's GenICam pixel-format name onto a Pangolin PixelFormat.
// Throws VideoException for names we do not handle.
PixelFormat PleoraFormat(const PvGenEnum* pfmt)
{
    const std::string spfmt = pfmt->ToString().GetAscii();

    if( spfmt == "Mono8" ) {
        return PixelFormatFromString("GRAY8");
    }
    if( spfmt == "Mono10p" ) {
        return PixelFormatFromString("GRAY10");
    }
    if( spfmt == "Mono12p" ) {
        return PixelFormatFromString("GRAY12");
    }
    // Unpacked 10/12-bit formats occupy 16 bits per pixel on the wire.
    if( spfmt == "Mono10" || spfmt == "Mono12" ) {
        return PixelFormatFromString("GRAY16LE");
    }
    if( spfmt == "RGB8" ) {
        return PixelFormatFromString("RGB24");
    }
    if( spfmt == "BGR8" ) {
        return PixelFormatFromString("BGR24");
    }
    // Bayer-tiled formats are exposed as grey; debayering happens downstream.
    if( spfmt == "BayerBG8" ) {
        return PixelFormatFromString("GRAY8");
    }
    if( spfmt == "BayerBG12" ) {
        return PixelFormatFromString("GRAY16LE");
    }

    throw VideoException("Unknown Pleora pixel format", spfmt);
}
// Construct from URI-style parameters.
// Recognised keys: "model", "sn" (serial), "idx" (device index), plus
// "size", "pos" and "roi" which are mapped onto the GenICam
// Width/Height/OffsetX/OffsetY parameters. Any other key/value pair is
// forwarded verbatim to the device as a GenICam parameter.
// NOTE(review): buffer_count always stays at DEFAULT_BUFFER_COUNT -- no
// parameter overrides it here; confirm whether that is intended.
PleoraVideo::PleoraVideo(const Params& p): size_bytes(0), lPvSystem(0), lDevice(0), lStream(0), lDeviceParams(0), lStart(0), lStop(0),
    lTemperatureCelcius(0), getTemp(false), lStreamParams(0), validGrabbedBuffers(0)
{
    std::string sn;
    std::string mn;
    int index = 0;
    size_t buffer_count = PleoraVideo::DEFAULT_BUFFER_COUNT;
    Params device_params;

    for(Params::ParamMap::const_iterator it = p.params.begin(); it != p.params.end(); it++) {
        if(it->first == "model"){
            mn = it->second;
        } else if(it->first == "sn"){
            sn = it->second;
        } else if(it->first == "idx"){
            index = p.Get<int>("idx", 0);
        } else if(it->first == "size") {
            const ImageDim dim = p.Get<ImageDim>("size", ImageDim(0,0) );
            device_params.Set("Width" , dim.x);
            device_params.Set("Height" , dim.y);
        } else if(it->first == "pos") {
            const ImageDim pos = p.Get<ImageDim>("pos", ImageDim(0,0) );
            device_params.Set("OffsetX" , pos.x);
            device_params.Set("OffsetY" , pos.y);
        } else if(it->first == "roi") {
            const ImageRoi roi = p.Get<ImageRoi>("roi", ImageRoi(0,0,0,0) );
            device_params.Set("Width" , roi.w);
            device_params.Set("Height" , roi.h);
            device_params.Set("OffsetX", roi.x);
            device_params.Set("OffsetY", roi.y);
        } else {
            // Unrecognised keys are assumed to be raw GenICam parameters.
            device_params.Set(it->first, it->second);
        }
    }

    // Bring the camera up in dependency order; each step throws on failure.
    InitDevice(mn.empty() ? 0 : mn.c_str(), sn.empty() ? 0 : sn.c_str(), index);
    SetDeviceParams(device_params);
    InitStream();

    InitPangoStreams();
    InitPangoDeviceProperties();
    InitBuffers(buffer_count);
}
// Stop acquisition and tear everything down in reverse construction order.
PleoraVideo::~PleoraVideo()
{
    Stop();
    DeinitBuffers();
    DeinitStream();
    DeinitDevice();
}
std::string PleoraVideo::GetParameter(const std::string& name) {
PvGenParameter* par = lDeviceParams->Get(PvString(name.c_str()));
if(par) {
PvString ret = par->ToString();
return std::string(ret.GetAscii());
} else {
pango_print_error("Parameter %s not recognized\n", name.c_str());
return "";
}
}
void PleoraVideo::SetParameter(const std::string& name, const std::string& value) {
PvGenParameter* par = lDeviceParams->Get(PvString(name.c_str()));
if(par) {
PvResult r = par->FromString(PvString(value.c_str()));
if(!r.IsOK()){
pango_print_error("Error setting parameter %s to:%s Reason:%s\n", name.c_str(), value.c_str(), r.GetDescription().GetAscii());
} else {
pango_print_info("Setting parameter %s to:%s\n", name.c_str(), value.c_str());
}
} else {
pango_print_error("Parameter %s not recognized\n", name.c_str());
}
}
// Locate, connect to, and cache the parameter array of the chosen camera.
// Filters behave as in SelectDevice. Throws VideoException on any failure;
// lPvSystem is deleted before throwing because the destructor will not run
// for a partially-constructed object.
void PleoraVideo::InitDevice(
    const char* model_name, const char* serial_num, size_t index
) {
    lPvSystem = new PvSystem();
    if ( !lPvSystem ) {
        throw pangolin::VideoException("Pleora: Unable to create PvSystem");
    }

    lDeviceInfo = SelectDevice(*lPvSystem, model_name, serial_num, index);
    if ( !lDeviceInfo ) {
        delete lPvSystem;
        throw pangolin::VideoException("Pleora: Unable to select device");
    }

    PvResult lResult;
    lDevice = PvDevice::CreateAndConnect( lDeviceInfo, &lResult );
    if ( !lDevice ) {
        delete lPvSystem;
        throw pangolin::VideoException("Pleora: Unable to connect to device", lResult.GetDescription().GetAscii() );
    }

    lDeviceParams = lDevice->GetParameters();
}
// Disconnect and release the device (if any), then destroy the PvSystem.
// Safe to call repeatedly: handles are nulled after release.
void PleoraVideo::DeinitDevice()
{
    if(lDevice != 0) {
        lDevice->Disconnect();
        PvDevice::Free( lDevice );
        lDevice = 0;
    }

    delete lPvSystem;
    lPvSystem = 0;
}
// Open an acquisition stream on the connected device and cache its
// parameter map. On failure the device is released so the object is not
// left half-initialised.
void PleoraVideo::InitStream()
{
    // Setup Stream
    PvResult lResult;
    lStream = PvStream::CreateAndOpen( lDeviceInfo->GetConnectionID(), &lResult );
    if ( !lStream ) {
        DeinitDevice();
        throw pangolin::VideoException("Pleora: Unable to open stream", lResult.GetDescription().GetAscii() );
    }

    // Cache the stream parameter map (statistics etc.) for later queries.
    lStreamParams = lStream->GetParameters();
}
// Close and release the acquisition stream, if one was opened.
// Safe to call repeatedly: the handle is nulled after release.
void PleoraVideo::DeinitStream()
{
    if(lStream != 0) {
        lStream->Close();
        PvStream::Free( lStream );
        lStream = 0;
    }
}
// Apply URI-supplied options to the device. Values equal to "Execute" are
// treated as GenICam commands (executed and polled for completion); all other
// entries are written as parameters. Also caches handles to the commonly-used
// properties (gain, gamma, exposure, trigger, ...).
void PleoraVideo::SetDeviceParams(Params& p) {
    // Cache the acquisition start/stop commands used by Start()/Stop().
    lStart = dynamic_cast<PvGenCommand*>( lDeviceParams->Get( "AcquisitionStart" ) );
    lStop = dynamic_cast<PvGenCommand*>( lDeviceParams->Get( "AcquisitionStop" ) );

    for(Params::ParamMap::iterator it = p.params.begin(); it != p.params.end(); it++) {
        if(it->first == "get_temperature"){
            getTemp = p.Get<bool>("get_temperature",false);
        } else {
            if (it->second == "Execute") {
                // This is a command, deal with it accordingly.
                PvGenCommand* cmd = dynamic_cast<PvGenCommand*>(lDeviceParams->Get(it->first.c_str()));
                if(cmd) {
                    PvResult r = cmd->Execute();
                    if(!r.IsOK()){
                        pango_print_error("Error executing command %s Reason:%s\n", it->first.c_str(), r.GetDescription().GetAscii());
                    } else {
                        pango_print_info("Executed Command %s\n", it->first.c_str());
                    }
                    // BUGFIX: 'done' was previously uninitialized; if IsDone()
                    // fails to assign its output the loop condition read garbage.
                    bool done = false;
                    int attempts = 20;
                    do {
                        cmd->IsDone(done);
                        std::this_thread::sleep_for(std::chrono::milliseconds(1000));
                        attempts--;
                    } while(!done && (attempts > 0));
                    // BUGFIX: report a timeout whenever the command never
                    // completed. The old 'attempts == 0' test also fired when
                    // the command finished on the final attempt.
                    if(!done) {
                        pango_print_error("Timeout while waiting for command %s done\n", it->first.c_str());
                    }
                } else {
                    pango_print_error("Command %s not recognized\n", it->first.c_str());
                }
            } else {
                try {
                    PvGenParameter* par = lDeviceParams->Get(PvString(it->first.c_str()));
                    if(par) {
                        PvResult r = par->FromString(PvString(it->second.c_str()));
                        if(!r.IsOK()){
                            pango_print_error("Error setting parameter %s to:%s Reason:%s\n", it->first.c_str(), it->second.c_str(), r.GetDescription().GetAscii());
                        } else {
                            pango_print_info("Setting parameter %s to:%s\n", it->first.c_str(), it->second.c_str());
                        }
                    } else {
                        pango_print_error("Parameter %s not recognized\n", it->first.c_str());
                    }
                } catch(const std::runtime_error& e) {
                    // BUGFIX: catch by const reference, not by value (avoids
                    // slicing and a needless copy of the exception object).
                    pango_print_error("Set parameter %s: %s\n", it->first.c_str(), e.what());
                }
            }
        }
    }

    // Get Handles to properties we'll be using.
    lAnalogGain = lDeviceParams->GetInteger("AnalogGain");
    lGamma = lDeviceParams->GetFloat("Gamma");
    lAnalogBlackLevel = lDeviceParams->GetInteger("AnalogBlackLevel");
    lExposure = lDeviceParams->GetFloat("ExposureTime");
    lAquisitionMode = lDeviceParams->GetEnum("AcquisitionMode");
    lTriggerSource = lDeviceParams->GetEnum("TriggerSource");
    lTriggerMode = lDeviceParams->GetEnum("TriggerMode");

    if(getTemp) {
        lTemperatureCelcius = lDeviceParams->GetFloat("DeviceTemperatureCelsius");
        pango_print_warn("Warning: get_temperature might add a blocking call taking several ms to each frame read.");
    }
}
void PleoraVideo::InitBuffers(size_t buffer_count)
{
// Reading payload size from device
const uint32_t lSize = lDevice->GetPayloadSize();
// Use buffer_count or the maximum number of buffers, whichever is smaller
const uint32_t lBufferCount = ( lStream->GetQueuedBufferMaximum() < buffer_count ) ?
lStream->GetQueuedBufferMaximum() :
buffer_count;
// Allocate buffers and queue
for( uint32_t i = 0; i < lBufferCount; i++ ) {
PvBuffer *lBuffer = new PvBuffer;
lBuffer->Alloc( static_cast<uint32_t>( lSize ) );
lBufferList.push_back( lBuffer );
}
}
void PleoraVideo::DeinitBuffers()
{
// Free buffers
for( BufferList::iterator lIt = lBufferList.begin(); lIt != lBufferList.end(); lIt++ ) {
delete *lIt;
}
lBufferList.clear();
}
// Describe the device's single image stream to Pangolin: actual width,
// height and pixel format are read back from the device so any ROI/format
// options applied earlier are reflected.
void PleoraVideo::InitPangoStreams()
{
    // Get actual width, height and payload size
    const int w = DeviceParam<int64_t>("Width");
    const int h = DeviceParam<int64_t>("Height");
    const uint32_t lSize = lDevice->GetPayloadSize();

    // Setup pangolin for stream: one stream with tightly-packed rows.
    PvGenEnum* lpixfmt = dynamic_cast<PvGenEnum*>( lDeviceParams->Get("PixelFormat") );
    const PixelFormat fmt = PleoraFormat(lpixfmt);
    streams.push_back(StreamInfo(fmt, w, h, (w*fmt.bpp)/8));
    // Whole-frame size is the device payload size, not w*h*bpp.
    size_bytes = lSize;
}
// Publish camera identity and the full set of readable GenICam parameters
// through Pangolin's device_properties JSON map.
void PleoraVideo::InitPangoDeviceProperties()
{
    // Store camera details in device properties
    device_properties["SerialNumber"] = std::string(lDeviceInfo->GetSerialNumber().GetAscii());
    device_properties["VendorName"] = std::string(lDeviceInfo->GetVendorName().GetAscii());
    device_properties["ModelName"] = std::string(lDeviceInfo->GetModelName().GetAscii());
    device_properties["ManufacturerInfo"] = std::string(lDeviceInfo->GetManufacturerInfo().GetAscii());
    device_properties["Version"] = std::string(lDeviceInfo->GetVersion().GetAscii());
    device_properties["DisplayID"] = std::string(lDeviceInfo->GetDisplayID().GetAscii());
    device_properties["UniqueID"] = std::string(lDeviceInfo->GetUniqueID().GetAscii());
    device_properties["ConnectionID"] = std::string(lDeviceInfo->GetConnectionID().GetAscii());

    // Snapshot every readable device parameter as a string under "properties".
    picojson::value props(picojson::object_type, true);
    for(size_t i=0; i < lDeviceParams->GetCount(); ++i) {
        PvGenParameter* p = (*lDeviceParams)[i];
        if(p->IsReadable()) {
            props[p->GetName().GetAscii()] = p->ToString().GetAscii();
        }
    }
    device_properties["properties"] = props;
}
// Number of frames already retrieved from the stream and not yet consumed
// by GrabNext()/GrabNewest().
unsigned int PleoraVideo::AvailableFrames() const
{
    return validGrabbedBuffers;
}
// Discard the n oldest grabbed frames, requeuing their buffers onto the
// acquisition stream. Returns false (doing nothing) if fewer than n frames
// are currently held.
bool PleoraVideo::DropNFrames(uint32_t n)
{
    if(n > validGrabbedBuffers) return false;

    for(uint32_t i = 0; i < n; ++i) {
        lStream->QueueBuffer(lGrabbedBuffList.front().buff);
        lGrabbedBuffList.pop_front();
        --validGrabbedBuffers;
        DBGPRINT("DropNFrames: removed 1 frame from the list and requeued it.")
    }

    return true;
}
// Begin acquisition: queue the whole buffer pool, enable streaming on the
// device, then issue AcquisitionStart. A non-empty queued-buffer count is
// used as the "already started" indicator.
void PleoraVideo::Start()
{
    if(lStream->GetQueuedBufferCount() == 0) {
        // Queue all buffers in the stream
        for( BufferList::iterator lIt = lBufferList.begin(); lIt != lBufferList.end(); lIt++ ) {
            lStream->QueueBuffer( *lIt );
        }
        // Order matters: enable device streaming before the start command.
        lDevice->StreamEnable();
        lStart->Execute();
    } else {
        // // It isn't an error to repeatedly start
        // pango_print_warn("PleoraVideo: Already started.\n");
    }
}

// Stop acquisition (reverse of Start): AcquisitionStop, disable streaming,
// then abort and drain every buffer still queued on the stream.
void PleoraVideo::Stop()
{
    // stop grab thread
    if(lStream->GetQueuedBufferCount() > 0) {
        lStop->Execute();
        lDevice->StreamDisable();

        // Abort all buffers from the stream and dequeue
        lStream->AbortQueuedBuffers();
        while ( lStream->GetQueuedBufferCount() > 0 ) {
            PvBuffer *lBuffer = NULL;
            PvResult lOperationResult;
            lStream->RetrieveBuffer( &lBuffer, &lOperationResult );
        }
    } else {
        // // It isn't an error to repeatedly stop
        // pango_print_warn("PleoraVideo: Already stopped.\n");
    }
}
// Size in bytes of one full frame, as reported by the device payload size.
size_t PleoraVideo::SizeBytes() const
{
    return size_bytes;
}

// Stream descriptions published to Pangolin (single image stream).
const std::vector<StreamInfo>& PleoraVideo::Streams() const
{
    return streams;
}
// Copy an image payload out of an SDK buffer into the caller's frame memory
// and attach per-frame metadata (timestamps, optional sensor temperature).
// Returns false for non-image payloads, in which case nothing is copied.
bool PleoraVideo::ParseBuffer(PvBuffer* lBuffer, unsigned char* image)
{
    TSTART()
    if ( lBuffer->GetPayloadType() == PvPayloadTypeImage ) {
        PvImage *lImage = lBuffer->GetImage();
        TGRABANDPRINT("GetImage took ")
        std::memcpy(image, lImage->GetDataPointer(), size_bytes);
        TGRABANDPRINT("memcpy took ")

        // Required frame properties
        frame_properties[PANGO_CAPTURE_TIME_US] = picojson::value(lBuffer->GetTimestamp());
        frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(lBuffer->GetReceptionTime());
        TGRABANDPRINT("Frame properties took ")

        // Optional frame properties
        // NOTE(review): assumes lTemperatureCelcius is null unless enabled via
        // the get_temperature option — confirm it is zero-initialised in the ctor.
        if(lTemperatureCelcius != 0) {
            double val;
            PvResult lResult = lTemperatureCelcius->GetValue(val);
            if(lResult.IsSuccess()) {
                frame_properties[PANGO_SENSOR_TEMPERATURE_C] = picojson::value(val);
            } else {
                pango_print_error("DeviceTemperatureCelsius %f fail\n", val);
            }
        }
        TGRABANDPRINT("GetTemperature took ")
        return true;
    } else {
        return false;
    }
}
// Grab the oldest available frame into 'image'. Blocks up to 1s when 'wait'
// is set and no frame is already held; returns false if nothing was grabbed.
bool PleoraVideo::GrabNext( unsigned char* image, bool wait )
{
    // Timeout in ms; only applied when we hold no frames yet.
    const uint32_t timeout = wait ? 1000 : 0;
    bool good = false;

    TSTART()
    DBGPRINT("GrabNext no thread:")
    RetriveAllAvailableBuffers((validGrabbedBuffers==0) ? timeout : 0);
    TGRABANDPRINT("Retriving all available buffers (valid frames in queue=%d, queue size=%ld) took ",validGrabbedBuffers ,lGrabbedBuffList.size())
    if(validGrabbedBuffers == 0) return false;

    // Retrieve next buffer from list and parse it
    GrabbedBufferList::iterator front = lGrabbedBuffList.begin();
    if ( front->res.IsOK() ) {
        good = ParseBuffer(front->buff, image);
    }
    TGRABANDPRINT("Parsing buffer took ")

    // Return the buffer to the acquisition queue (even if parsing failed).
    lStream->QueueBuffer(front->buff);
    TGRABANDPRINT("\tPLEORA:QueueBuffer: ")

    // Remove used buffer from list.
    lGrabbedBuffList.pop_front();
    --validGrabbedBuffers;
    return good;
}
// Grab the most recent available frame, discarding any older queued frames.
// With 'wait' set this blocks indefinitely for a frame; otherwise it polls.
bool PleoraVideo::GrabNewest( unsigned char* image, bool wait )
{
    // Infinite timeout when waiting, immediate poll otherwise.
    const uint32_t timeout = wait ? 0xFFFFFFFF : 0;
    bool good = false;

    TSTART()
    DBGPRINT("GrabNewest no thread:")
    RetriveAllAvailableBuffers((validGrabbedBuffers==0) ? timeout : 0);
    TGRABANDPRINT("Retriving all available buffers (valid frames in queue=%d, queue size=%ld) took ",validGrabbedBuffers ,lGrabbedBuffList.size())
    if(validGrabbedBuffers == 0) {
        DBGPRINT("No valid buffers, returning.")
        return false;
    }
    // Keep only the newest frame.
    if(validGrabbedBuffers > 1) DropNFrames(validGrabbedBuffers-1);
    TGRABANDPRINT("Dropping %d frames took ", (validGrabbedBuffers-1))

    // Retrieve next buffer from list and parse it
    GrabbedBufferList::iterator front = lGrabbedBuffList.begin();
    if ( front->res.IsOK() ) {
        good = ParseBuffer(front->buff, image);
    }
    TGRABANDPRINT("Parsing buffer took ")

    // Return the buffer to the acquisition queue before forgetting it.
    lStream->QueueBuffer(front->buff);
    TGRABANDPRINT("Requeueing buffer took ")

    // Remove used buffer from list.
    lGrabbedBuffList.pop_front();
    --validGrabbedBuffers;
    return good;
}
// Drain the stream into our grabbed-buffer list: block up to 'timeout' ms for
// the first buffer, then poll (timeout 0) until no more are immediately ready.
void PleoraVideo::RetriveAllAvailableBuffers(uint32_t timeout){
    PvBuffer *lBuffer = NULL;
    PvResult lOperationResult;
    PvResult lResult;
    TSTART()
    do {
        lResult = lStream->RetrieveBuffer( &lBuffer, &lOperationResult, timeout);
        if ( !lResult.IsOK() ) {
            // NOTE(review): relies on PvResult's conversion in boolean context;
            // intent appears to be "warn unless this is just a timeout" — confirm.
            if(lResult && !(lResult.GetCode() == PvResult::Code::TIMEOUT)) {
                pango_print_warn("Pleora error: %s,\n'%s'\n", lResult.GetCodeString().GetAscii(), lResult.GetDescription().GetAscii() );
            }
            return;
        } else if( !lOperationResult.IsOK() ) {
            // Per-buffer (transport) failure: requeue the buffer and stop draining.
            pango_print_warn("Pleora error %s,\n'%s'\n", lOperationResult.GetCodeString().GetAscii(), lResult.GetDescription().GetAscii() );
            lStream->QueueBuffer( lBuffer );
            return;
        }
        lGrabbedBuffList.push_back(GrabbedBuffer(lBuffer,lOperationResult,true));
        ++validGrabbedBuffers;
        TGRABANDPRINT("Attempt retrieving buffer (timeout=%d validbuffer=%d) took ", timeout, validGrabbedBuffers)
        // After the first success, only take buffers that are already waiting.
        timeout = 0;
    } while (lResult.IsOK());
}
// Current AnalogGain value, or 0 if the parameter is unavailable.
int64_t PleoraVideo::GetGain()
{
    // BUGFIX: 'val' was returned uninitialized when the AnalogGain parameter
    // does not exist on this device; default to 0 instead.
    int64_t val = 0;
    if(lAnalogGain) {
        ThrowOnFailure( lAnalogGain->GetValue(val) );
    }
    return val;
}
// Set AnalogGain, recording it in the per-frame metadata.
// Negative values and missing/read-only parameters are silently ignored.
void PleoraVideo::SetGain(int64_t val)
{
    if(val < 0) return;
    if(!lAnalogGain || !lAnalogGain->IsWritable()) return;

    ThrowOnFailure( lAnalogGain->SetValue(val) );
    frame_properties[PANGO_ANALOG_GAIN] = picojson::value(val);
}
// Current AnalogBlackLevel value, or 0 if the parameter is unavailable.
int64_t PleoraVideo::GetAnalogBlackLevel()
{
    // BUGFIX: 'val' was returned uninitialized when the parameter is absent.
    int64_t val = 0;
    if(lAnalogBlackLevel) {
        ThrowOnFailure( lAnalogBlackLevel->GetValue(val) );
    }
    return val;
}
// Set AnalogBlackLevel, recording it in the per-frame metadata.
// Negative values and missing/read-only parameters are silently ignored.
void PleoraVideo::SetAnalogBlackLevel(int64_t val)
{
    if(val < 0) return;
    if(!lAnalogBlackLevel || !lAnalogBlackLevel->IsWritable()) return;

    ThrowOnFailure( lAnalogBlackLevel->SetValue(val) );
    frame_properties[PANGO_ANALOG_BLACK_LEVEL] = picojson::value(val);
}
// Current ExposureTime value, or 0 if the parameter is unavailable.
double PleoraVideo::GetExposure()
{
    // BUGFIX: 'val' was returned uninitialized when the parameter is absent.
    double val = 0.0;
    if( lExposure ) {
        ThrowOnFailure( lExposure->GetValue(val));
    }
    return val;
}
// Set ExposureTime, recording it in the per-frame metadata.
// Non-positive values and missing/read-only parameters are silently ignored.
void PleoraVideo::SetExposure(double val)
{
    if(val <= 0) return;
    if(!lExposure || !lExposure->IsWritable()) return;

    ThrowOnFailure( lExposure->SetValue(val) );
    frame_properties[PANGO_EXPOSURE_US] = picojson::value(val);
}
// Current Gamma value, or 0 if the parameter is unavailable.
double PleoraVideo::GetGamma()
{
    // BUGFIX: 'val' was returned uninitialized when the parameter is absent.
    double val = 0.0;
    if(lGamma) {
        ThrowOnFailure(lGamma->GetValue(val));
    }
    return val;
}
// Set Gamma, recording it in the per-frame metadata.
// Non-positive values and missing/read-only parameters are silently ignored.
void PleoraVideo::SetGamma(double val)
{
    if(val <= 0) return;
    if(!lGamma || !lGamma->IsWritable()) return;

    ThrowOnFailure( lGamma->SetValue(val) );
    frame_properties[PANGO_GAMMA] = picojson::value(val);
}
//use 0,0,1 for line0 hardware trigger.
//use 2,252,0 for software continuous
// Configure TriggerMode/TriggerSource/AcquisitionMode. No-op if any of the
// three parameters is missing or read-only; invalid enum values are reported.
void PleoraVideo::SetupTrigger(bool triggerActive, int64_t triggerSource, int64_t acquisitionMode)
{
    if(lAquisitionMode && lTriggerSource && lTriggerMode &&
       lAquisitionMode->IsWritable() && lTriggerSource->IsWritable() && lTriggerMode->IsWritable() ) {
        // Check input is valid.
        // BUGFIX: initialise to null — GetEntryByValue is not guaranteed to
        // assign its output on failure, which previously left these pointers
        // uninitialised before the validity test below.
        const PvGenEnumEntry* entry_src = 0;
        const PvGenEnumEntry* entry_acq = 0;
        lTriggerSource->GetEntryByValue(triggerSource, &entry_src);
        lAquisitionMode->GetEntryByValue(acquisitionMode, &entry_acq);

        if(entry_src && entry_acq) {
            ThrowOnFailure(lTriggerMode->SetValue(triggerActive ? 1 : 0));
            if(triggerActive) {
                pango_print_debug("Pleora: external trigger active\n");
                ThrowOnFailure(lTriggerSource->SetValue(triggerSource));
                ThrowOnFailure(lAquisitionMode->SetValue(acquisitionMode));
            }
        }else{
            // BUGFIX: terminate the error message with a newline like every
            // other pango_print_error call in this file.
            pango_print_error("Bad values for trigger options.\n");
        }
    }
}
// Convenience accessors: typed get/set of device- and stream-scope GenICam
// parameters, forwarding to the free GetParam/SetParam helpers.
template<typename T>
T PleoraVideo::DeviceParam(const char* name)
{
    return GetParam<T>(lDeviceParams, name);
}

template<typename T>
bool PleoraVideo::SetDeviceParam(const char* name, T val)
{
    return SetParam<T>(lDeviceParams, name, val);
}

template<typename T>
T PleoraVideo::StreamParam(const char* name)
{
    return GetParam<T>(lStreamParams, name);
}

template<typename T>
bool PleoraVideo::SetStreamParam(const char* name, T val)
{
    return SetParam<T>(lStreamParams, name, val);
}
PANGOLIN_REGISTER_FACTORY(PleoraVideo)
{
    struct PleoraVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            return std::unique_ptr<VideoInterface>(new PleoraVideo(uri));
        }
    };

    // One factory instance registered for both URI schemes.
    auto factory = std::make_shared<PleoraVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "pleora");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "u3v");
}
}
#undef TSTART
#undef TGRABANDPRINT
#undef DBGPRINT

View File

@@ -0,0 +1,136 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/utils/file_utils.h>
#include <pangolin/video/drivers/pvn.h>
#include <pangolin/video/iostream_operators.h>
#include <iostream>
using namespace std;
namespace pangolin
{
// Open a PVN video file and parse its header. Throws VideoException if the
// file cannot be opened.
PvnVideo::PvnVideo(const std::string& filename, bool realtime )
    : frame_size_bytes(0), realtime(realtime), last_frame(TimeNow())
{
    const std::string path = PathExpand(filename);
    file.open( path.c_str(), ios::binary );

    if( !file.is_open() ) {
        throw VideoException("Cannot open file - does not exist or bad permissions.");
    }

    ReadFileHeader();
}

PvnVideo::~PvnVideo()
{
    // std::ifstream closes itself on destruction.
}
void PvnVideo::ReadFileHeader()
{
string sfmt;
float framerate;
unsigned w, h;
file >> sfmt;
file >> w;
file >> h;
file >> framerate;
file.get();
if(file.bad() || !(w >0 && h >0) )
throw VideoException("Unable to read video header");
const PixelFormat fmt = PixelFormatFromString(sfmt);
StreamInfo strm0( fmt, w, h, (w*fmt.bpp) / 8, 0);
frame_size_bytes += strm0.Pitch() * strm0.Height();
// frame_interval = TimeFromSeconds( 1.0 / framerate);
streams.push_back(strm0);
}
void PvnVideo::Start()
{
    // Nothing to do: frames are read on demand in GrabNext().
}

void PvnVideo::Stop()
{
    // Nothing to do.
}

// Size in bytes of one complete frame (single stream).
size_t PvnVideo::SizeBytes() const
{
    return frame_size_bytes;
}

const std::vector<StreamInfo>& PvnVideo::Streams() const
{
    return streams;
}
bool PvnVideo::GrabNext( unsigned char* image, bool /*wait*/ )
{
file.read((char*)image, frame_size_bytes);
const basetime next_frame = TimeAdd(last_frame, frame_interval);
if( realtime ) {
WaitUntil(next_frame);
}
last_frame = TimeNow();
return file.good();
}
bool PvnVideo::GrabNewest( unsigned char* image, bool wait )
{
return GrabNext(image,wait);
}
PANGOLIN_REGISTER_FACTORY(PvnVideo)
{
    struct PvnVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const std::string path = PathExpand(uri.url);
            // Accept an explicit pvn:// scheme, or any file whose content is
            // detected as PVN (the factory is also registered for "file").
            if( !uri.scheme.compare("pvn") || FileType(uri.url) == ImageFileTypePvn ) {
                const bool realtime = uri.Contains("realtime");
                return std::unique_ptr<VideoInterface>(new PvnVideo(path.c_str(), realtime));
            }
            // Not a PVN file: return null so another handler may try.
            return std::unique_ptr<VideoInterface>();
        }
    };

    auto factory = std::make_shared<PvnVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "pvn");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "file");
}
}

View File

@@ -0,0 +1,100 @@
#include <librealsense/rs.hpp>
#include <pangolin/video/drivers/realsense.h>
#include <pangolin/factory/factory_registry.h>
namespace pangolin {
// Enumerate all attached RealSense devices and expose, for each, a packed
// pair of streams: 16-bit depth followed by 8-bit RGB, at the requested
// resolution and framerate. Devices are started immediately.
RealSenseVideo::RealSenseVideo(ImageDim dim, int fps)
    : dim_(dim), fps_(fps) {
    ctx_ = new rs::context();
    sizeBytes = 0;
    for (int32_t i=0; i<ctx_->get_device_count(); ++i) {
        devs_.push_back(ctx_->get_device(i));

        devs_[i]->enable_stream(rs::stream::depth, dim_.x, dim_.y, rs::format::z16, fps_);
        StreamInfo streamD(PixelFormatFromString("GRAY16LE"), dim_.x, dim_.y, dim_.x*2, 0);
        streams.push_back(streamD);
        sizeBytes += streamD.SizeBytes();

        devs_[i]->enable_stream(rs::stream::color, dim_.x, dim_.y, rs::format::rgb8, fps_);
        // The last StreamInfo argument encodes this stream's byte offset
        // within the combined frame buffer.
        StreamInfo streamRGB(PixelFormatFromString("RGB24"), dim_.x, dim_.y, dim_.x*3, (uint8_t*)0+sizeBytes);
        streams.push_back(streamRGB);
        sizeBytes += streamRGB.SizeBytes();

        devs_[i]->start();
    }
    // Live source: effectively unbounded number of frames.
    total_frames = std::numeric_limits<int>::max();
}

RealSenseVideo::~RealSenseVideo() {
    // Device pointers are presumably owned by the context — freeing the
    // context releases them (TODO confirm against librealsense docs).
    delete ctx_;
}
// Restart every device (stop + start) and reset the frame counter.
void RealSenseVideo::Start() {
    for (int32_t i=0; i<ctx_->get_device_count(); ++i) {
        devs_[i]->stop();
        devs_[i]->start();
    }
    current_frame_index = 0;
}

void RealSenseVideo::Stop() {
    for (int32_t i=0; i<ctx_->get_device_count(); ++i) {
        devs_[i]->stop();
    }
}

// Combined size of all packed (depth + color) streams across devices.
size_t RealSenseVideo::SizeBytes() const {
    return sizeBytes;
}

const std::vector<StreamInfo>& RealSenseVideo::Streams() const {
    return streams;
}
bool RealSenseVideo::GrabNext(unsigned char* image, bool wait) {
unsigned char* out_img = image;
for (int32_t i=0; i<ctx_->get_device_count(); ++i) {
if (wait) {
devs_[i]->wait_for_frames();
}
memcpy(out_img, devs_[i]->get_frame_data(rs::stream::depth), streams[i*2].SizeBytes());
out_img += streams[i*2].SizeBytes();
memcpy(out_img, devs_[i]->get_frame_data(rs::stream::color), streams[i*2+1].SizeBytes());
out_img += streams[i*2+1].SizeBytes();
}
return true;
}
bool RealSenseVideo::GrabNewest(unsigned char* image, bool wait) {
return GrabNext(image, wait);
}
size_t RealSenseVideo::GetCurrentFrameId() const {
    return current_frame_index;
}

// Live source: reported as effectively unbounded (set in the constructor).
size_t RealSenseVideo::GetTotalFrames() const {
    return total_frames;
}

// Seeking is not supported on a live device.
size_t RealSenseVideo::Seek(size_t frameid) {
    // TODO
    return -1;
}
PANGOLIN_REGISTER_FACTORY(RealSenseVideo)
{
    struct RealSenseVideoFactory : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(640,480));
            // NOTE(review): parsed unsigned, but the constructor takes int.
            const unsigned int fps = uri.Get<unsigned int>("fps", 30);
            return std::unique_ptr<VideoInterface>( new RealSenseVideo(dim, fps) );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<RealSenseVideoFactory>(), 10, "realsense");
}
}

View File

@@ -0,0 +1,114 @@
#include <librealsense2/rs.hpp>
#include <pangolin/video/drivers/realsense2.h>
#include <pangolin/factory/factory_registry.h>
namespace pangolin {
// Configure and immediately start a librealsense2 pipeline exposing two
// packed streams: 16-bit depth followed by 8-bit RGB.
RealSense2Video::RealSense2Video(ImageDim dim, int fps)
    : dim_(dim), fps_(fps) {

    sizeBytes = 0;

    // Create RealSense pipeline, encapsulating the actual device and sensors
    pipe = new rs2::pipeline();

    //Configure the pipeline
    cfg = new rs2::config();

    { //config depth
        cfg->enable_stream(RS2_STREAM_DEPTH, dim_.x, dim_.y, RS2_FORMAT_Z16, fps_);
        StreamInfo streamD(PixelFormatFromString("GRAY16LE"), dim_.x, dim_.y, dim_.x*2, 0);
        streams.push_back(streamD);
        sizeBytes += streamD.SizeBytes();
    }

    { //config color
        cfg->enable_stream(RS2_STREAM_COLOR, dim_.x, dim_.y, RS2_FORMAT_RGB8, fps_);
        // The last argument encodes this stream's byte offset in the frame.
        StreamInfo streamRGB(PixelFormatFromString("RGB24"), dim_.x, dim_.y, dim_.x*3, (uint8_t*)0+sizeBytes);
        streams.push_back(streamRGB);
        sizeBytes += streamRGB.SizeBytes();
    }

    // Start streaming with default recommended configuration
    // NOTE(review): Start() also calls pipe->start(*cfg); confirm the intended
    // start/stop lifecycle to avoid starting an already-running pipeline.
    pipe->start(*cfg);

    rs2::pipeline_profile profile = pipe->get_active_profile();
    auto sensor = profile.get_device().first<rs2::depth_sensor>();
    auto scale = sensor.get_depth_scale();
    std::cout << "Depth scale is: " << scale << std::endl;

    // Live source: effectively unbounded number of frames.
    total_frames = std::numeric_limits<int>::max();
}

RealSense2Video::~RealSense2Video() {
    delete pipe;
    pipe = nullptr;

    delete cfg;
    cfg = nullptr;
}
void RealSense2Video::Start() {
    // NOTE(review): the constructor already calls pipe->start(*cfg);
    // librealsense2 throws if start() is invoked on a running pipeline —
    // confirm Start() is only reached after a preceding Stop().
    pipe->start(*cfg);
    current_frame_index = 0;
}

void RealSense2Video::Stop() {
    pipe->stop();
}

// Combined size of the packed depth + color frame.
size_t RealSense2Video::SizeBytes() const {
    return sizeBytes;
}

const std::vector<StreamInfo>& RealSense2Video::Streams() const {
    return streams;
}
bool RealSense2Video::GrabNext(unsigned char* image, bool /*wait*/) {
unsigned char* out_img = image;
rs2::frameset data = pipe->wait_for_frames(); // Wait for next set of frames from the camera
rs2::frame depth = data.get_depth_frame(); // Get the depth data
rs2::frame color = data.get_color_frame(); // Get the color data
memcpy(out_img, depth.get_data(), streams[0].SizeBytes());
out_img += streams[0].SizeBytes();
memcpy(out_img, color.get_data(), streams[1].SizeBytes());
out_img += streams[1].SizeBytes();
return true;
}
bool RealSense2Video::GrabNewest(unsigned char* image, bool wait) {
return GrabNext(image, wait);
}
size_t RealSense2Video::GetCurrentFrameId() const {
    return current_frame_index;
}

// Live source: reported as effectively unbounded (set in the constructor).
size_t RealSense2Video::GetTotalFrames() const {
    return total_frames;
}

// Seeking is not supported on a live device.
size_t RealSense2Video::Seek(size_t /*frameid*/) {
    // TODO
    return -1;
}
PANGOLIN_REGISTER_FACTORY(RealSense2Video)
{
    struct RealSense2VideoFactory : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(640,480));
            // NOTE(review): parsed unsigned, but the constructor takes int.
            const unsigned int fps = uri.Get<unsigned int>("fps", 30);
            return std::unique_ptr<VideoInterface>( new RealSense2Video(dim, fps) );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<RealSense2VideoFactory>(), 10, "realsense2");
}
}

View File

@@ -0,0 +1,99 @@
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/drivers/shared_memory.h>
#include <pangolin/video/iostream_operators.h>
using namespace std;
namespace pangolin
{
// Wrap a producer-filled shared-memory buffer as a single-stream video
// source. 'buffer_full' (optional) is signalled by the producer when a new
// frame is available.
SharedMemoryVideo::SharedMemoryVideo(size_t w, size_t h, std::string pix_fmt,
                                     const std::shared_ptr<SharedMemoryBufferInterface>& shared_memory,
                                     const std::shared_ptr<ConditionVariableInterface>& buffer_full) :
    _fmt(PixelFormatFromString(pix_fmt)),
    // NOTE: _frame_size reads _fmt, so member declaration order matters.
    _frame_size(w*h*_fmt.bpp/8),
    _shared_memory(shared_memory),
    _buffer_full(buffer_full)
{
    // Single tightly-packed stream spanning the shared-memory frame.
    const size_t pitch = w * _fmt.bpp/8;
    const StreamInfo stream(_fmt, w, h, pitch, 0);
    _streams.push_back(stream);
}

SharedMemoryVideo::~SharedMemoryVideo()
{
}
void SharedMemoryVideo::Start()
{
    // Nothing to do: the producer writes independently of this consumer.
}

void SharedMemoryVideo::Stop()
{
    // Nothing to do.
}

// Size in bytes of one frame in the shared-memory buffer.
size_t SharedMemoryVideo::SizeBytes() const
{
    return _frame_size;
}

const std::vector<StreamInfo>& SharedMemoryVideo::Streams() const
{
    return _streams;
}
// Copy the current frame out of shared memory into 'image'. When a condition
// variable is available, 'wait' blocks for the producer's signal; otherwise a
// non-blocking probe is made and false is returned if no frame is ready.
bool SharedMemoryVideo::GrabNext(unsigned char* image, bool wait)
{
    // If a condition variable exists, try waiting on it.
    if(_buffer_full) {
        if (wait) {
            _buffer_full->wait();
        } else {
            // Non-blocking: probe with an immediate (now) absolute timeout.
            timespec ts;
            clock_gettime(CLOCK_REALTIME, &ts);
            if (!_buffer_full->wait(ts)) {
                return false;
            }
        }
    }

    // Copy the frame out under the shared-memory lock.
    _shared_memory->lock();
    memcpy(image, _shared_memory->ptr(), _frame_size);
    _shared_memory->unlock();

    return true;
}

// Shared memory always holds exactly the latest frame; identical to GrabNext.
bool SharedMemoryVideo::GrabNewest(unsigned char* image, bool wait)
{
    return GrabNext(image,wait);
}
PANGOLIN_REGISTER_FACTORY(SharedMemoryVideo)
{
    struct SharedMemoryVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(0, 0));
            const std::string sfmt = uri.Get<std::string>("fmt", "GRAY8");
            const PixelFormat fmt = PixelFormatFromString(sfmt);
            // POSIX shared-memory names begin with '/'.
            const std::string shmem_name = std::string("/") + uri.url;
            std::shared_ptr<SharedMemoryBufferInterface> shmem_buffer =
                open_named_shared_memory_buffer(shmem_name, true);
            if (dim.x == 0 || dim.y == 0 || !shmem_buffer) {
                throw VideoException("invalid shared memory parameters");
            }

            // Optional producer-signalled "frame ready" condition variable.
            const std::string cond_name = shmem_name + "_cond";
            std::shared_ptr<ConditionVariableInterface> buffer_full =
                open_named_condition_variable(cond_name);

            // NOTE(review): 'fmt' (a PixelFormat) is passed where the ctor takes
            // std::string pix_fmt — relies on an implicit conversion; confirm,
            // or pass 'sfmt' directly.
            return std::unique_ptr<VideoInterface>(
                new SharedMemoryVideo(dim.x, dim.y, fmt, shmem_buffer,buffer_full)
            );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<SharedMemoryVideoFactory>(), 10, "shmem");
}
}

View File

@@ -0,0 +1,159 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/shift.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
// Wrap 'src_' (taking ownership), converting each 16-bit single-channel input
// stream to an 8-bit output stream via (pixel >> shift_right_bits) & mask.
ShiftVideo::ShiftVideo(std::unique_ptr<VideoInterface> &src_, PixelFormat out_fmt, int shift_right_bits, unsigned int mask)
    : src(std::move(src_)), size_bytes(0), buffer(0), shift_right_bits(shift_right_bits), mask(mask)
{
    if(!src) {
        throw VideoException("ShiftVideo: VideoInterface in must not be null");
    }
    videoin.push_back(src.get());

    // Mirror each input stream with an output stream of the narrower format.
    for(size_t s=0; s< src->Streams().size(); ++s) {
        const size_t w = src->Streams()[s].Width();
        const size_t h = src->Streams()[s].Height();

        // Check compatibility of formats
        const PixelFormat in_fmt = src->Streams()[s].PixFormat();
        if(in_fmt.channels != out_fmt.channels) {
            throw VideoException("ShiftVideo: output format is not compatible with input format for shifting.");
        }
        if(out_fmt.channels > 1 || out_fmt.bpp != 8 || in_fmt.bpp != 16) {
            // TODO: Remove restriction
            throw VideoException("ShiftVideo: currently only supports one channel input formats of 16 bits.");
        }

        // The last StreamInfo argument encodes the stream's byte offset in
        // the packed output frame.
        streams.push_back(pangolin::StreamInfo( out_fmt, w, h, w*out_fmt.bpp / 8, (unsigned char*)0 + size_bytes ));
        size_bytes += w*h*out_fmt.bpp / 8;
    }

    // Intermediate buffer holds one full (unshifted) input frame.
    buffer = new unsigned char[src->SizeBytes()];
}
ShiftVideo::~ShiftVideo()
{
    // Free the intermediate input-frame buffer; 'src' is released by unique_ptr.
    delete[] buffer;
}

//! Implement VideoInput::Start()
void ShiftVideo::Start()
{
    videoin[0]->Start();
}

//! Implement VideoInput::Stop()
void ShiftVideo::Stop()
{
    videoin[0]->Stop();
}

//! Implement VideoInput::SizeBytes()
// Size of one packed 8-bit output frame (not the 16-bit input frame).
size_t ShiftVideo::SizeBytes() const
{
    return size_bytes;
}

//! Implement VideoInput::Streams()
const std::vector<StreamInfo>& ShiftVideo::Streams() const
{
    return streams;
}
// Narrow a 16-bit image to 8 bits per pixel: each output value is
// (input >> shift_right_bits) & mask.
void DoShift16to8(
    Image<unsigned char>& out,
    const Image<unsigned char>& in,
    int shift_right_bits,
    unsigned int mask
) {
    for(size_t row = 0; row < out.h; ++row) {
        const unsigned short* src = (const unsigned short*)(in.ptr + row * in.pitch);
        unsigned char* dst = out.ptr + row * out.pitch;
        for(size_t col = 0; col < out.w; ++col) {
            dst[col] = (unsigned char)((src[col] >> shift_right_bits) & mask);
        }
    }
}
//! Implement VideoInput::GrabNext()
// Grab one input frame into the intermediate buffer, then shift every
// stream into the caller's 8-bit output frame.
bool ShiftVideo::GrabNext( unsigned char* image, bool wait )
{
    if(!videoin[0]->GrabNext(buffer,wait)) {
        return false;
    }
    for(size_t s=0; s<streams.size(); ++s) {
        Image<unsigned char> img_in  = videoin[0]->Streams()[s].StreamImage(buffer);
        Image<unsigned char> img_out = Streams()[s].StreamImage(image);
        DoShift16to8(img_out, img_in, shift_right_bits, mask);
    }
    return true;
}

//! Implement VideoInput::GrabNewest()
// As GrabNext, but fetches the most recent frame from the input.
bool ShiftVideo::GrabNewest( unsigned char* image, bool wait )
{
    if(!videoin[0]->GrabNewest(buffer,wait)) {
        return false;
    }
    for(size_t s=0; s<streams.size(); ++s) {
        Image<unsigned char> img_in  = videoin[0]->Streams()[s].StreamImage(buffer);
        Image<unsigned char> img_out = Streams()[s].StreamImage(image);
        DoShift16to8(img_out, img_in, shift_right_bits, mask);
    }
    return true;
}
// Expose the wrapped input for filter-chain introspection.
std::vector<VideoInterface*>& ShiftVideo::InputStreams()
{
    return videoin;
}
PANGOLIN_REGISTER_FACTORY(ShiftVideo)
{
    struct ShiftVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            // shift: bits to shift right; mask: applied after shifting.
            const int shift_right = uri.Get<int>("shift", 0);
            const int mask = uri.Get<int>("mask", 0xffff);

            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            return std::unique_ptr<VideoInterface>(
                new ShiftVideo(subvid, PixelFormatFromString("GRAY8"), shift_right, mask)
            );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<ShiftVideoFactory>(), 10, "shift");
}
}

View File

@@ -0,0 +1,158 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/split.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
// Wraps 'src_' and re-exposes its single frame buffer as the given set of
// (possibly overlapping) streams described by offset/pitch into that buffer.
SplitVideo::SplitVideo(std::unique_ptr<VideoInterface> &src_, const std::vector<StreamInfo>& streams)
    : src(std::move(src_)), streams(streams)
{
    videoin.push_back(src.get());

    // Warn (once) if any requested stream region extends beyond the input buffer.
    const size_t input_size = src->SizeBytes();
    for(const StreamInfo& si : this->streams) {
        if(input_size < (size_t)si.Offset() + si.SizeBytes()) {
            pango_print_warn("VideoSplitter: stream extends past end of input.\n");
            break;
        }
    }
}
// No explicit cleanup: the owned source is released by the unique_ptr member.
SplitVideo::~SplitVideo()
{
}
//! Frame size equals the wrapped input's: splitting only re-describes the buffer.
size_t SplitVideo::SizeBytes() const
{
    return videoin[0]->SizeBytes();
}
//! Returns the split stream descriptions, not the wrapped input's streams.
const std::vector<StreamInfo>& SplitVideo::Streams() const
{
    return streams;
}
//! Delegates to the wrapped input.
void SplitVideo::Start()
{
    videoin[0]->Start();
}
//! Delegates to the wrapped input.
void SplitVideo::Stop()
{
    videoin[0]->Stop();
}
//! Grabs the full input frame; callers interpret it via the split StreamInfos.
bool SplitVideo::GrabNext( unsigned char* image, bool wait )
{
    return videoin[0]->GrabNext(image, wait);
}
//! Grabs the most recent full input frame (dropping older queued frames).
bool SplitVideo::GrabNewest( unsigned char* image, bool wait )
{
    return videoin[0]->GrabNewest(image, wait);
}
//! Expose the wrapped input so the filter chain can be introspected/unwrapped.
std::vector<VideoInterface*>& SplitVideo::InputStreams()
{
    return videoin;
}
PANGOLIN_REGISTER_FACTORY(SplitVideo)
{
    // Registers the 'split' scheme. Streams are specified by numbered URI keys
    // (roi1=..., mem1=..., stream1=..., roi2=..., ...); parsing stops at the
    // first index for which none of the three keys is present. With no keys at
    // all, the first input stream is split in half along its longer dimension.
    struct SplitVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::vector<StreamInfo> streams;

            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            if(subvid->Streams().size() == 0) {
                throw VideoException("VideoSplitter input must have at least one stream");
            }

            while(true) {
                // Keys are 1-based: roi1, roi2, ...
                const size_t n = streams.size() + 1;
                std::string key_roi = std::string("roi") + pangolin::Convert<std::string, size_t>::Do(n);
                std::string key_mem = std::string("mem") + pangolin::Convert<std::string, size_t>::Do(n);
                std::string key_str = std::string("stream") + pangolin::Convert<std::string, size_t>::Do(n);

                if(uri.Contains(key_roi)) {
                    // Rectangular sub-region of input stream 0, sharing its pitch.
                    const StreamInfo& st1 = subvid->Streams()[0];
                    const ImageRoi& roi = uri.Get<ImageRoi>(key_roi, ImageRoi() );
                    if(roi.w == 0 || roi.h == 0) {
                        throw VideoException("split: empty ROI.");
                    }
                    // Byte offset of the ROI's top-left pixel within the frame,
                    // smuggled through the StreamInfo pointer field.
                    const size_t start1 = roi.y * st1.Pitch() + st1.PixFormat().bpp * roi.x / 8;
                    streams.push_back( StreamInfo( st1.PixFormat(), roi.w, roi.h, st1.Pitch(), (unsigned char*)0 + start1 ) );
                }else if(uri.Contains(key_mem)) {
                    // Fully explicit StreamInfo (format/size/pitch/offset).
                    const StreamInfo& info = uri.Get<StreamInfo>(key_mem, subvid->Streams()[0] );
                    streams.push_back(info);
                }else if(uri.Contains(key_str)) {
                    // Pass through an existing input stream (1-based index;
                    // 0 or out-of-range wraps below the size check and throws).
                    const size_t old_stream = uri.Get<size_t>(key_str, 0) -1;
                    if(old_stream >= subvid->Streams().size()) {
                        throw VideoException("split: requesting source stream which does not exist.");
                    }
                    streams.push_back(subvid->Streams()[old_stream]);
                }else{
                    break;
                }
            }

            // Default split if no arguments
            if(streams.size() == 0) {
                const StreamInfo& st1 = subvid->Streams()[0];
                const size_t subw = st1.Width();
                const size_t subh = st1.Height();

                ImageRoi roi1, roi2;

                if(subw > subh) {
                    // split horizontally
                    roi1 = ImageRoi(0,0, subw/2, subh );
                    roi2 = ImageRoi(subw/2,0, subw/2, subh );
                }else{
                    // split vertically
                    roi1 = ImageRoi(0,0, subw, subh/2 );
                    roi2 = ImageRoi(0,subh/2, subw, subh/2 );
                }

                const size_t start1 = roi1.y * st1.Pitch() + st1.PixFormat().bpp * roi1.x / 8;
                const size_t start2 = roi2.y * st1.Pitch() + st1.PixFormat().bpp * roi2.x / 8;
                streams.push_back( StreamInfo( st1.PixFormat(), roi1.w, roi1.h, st1.Pitch(), (unsigned char*)(start1) ) );
                streams.push_back( StreamInfo( st1.PixFormat(), roi2.w, roi2.h, st1.Pitch(), (unsigned char*)(start2) ) );
            }

            return std::unique_ptr<VideoInterface>( new SplitVideo(subvid,streams) );
        }
    };

    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<SplitVideoFactory>(), 10, "split");
}
}

View File

@@ -0,0 +1,550 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2015 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <XmlFeatures.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/drivers/teli.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
// Represents the lifetime of the Teli SDK. Destructed by static deinitialisation.
// Singleton guarding Teli SDK global init/terminate via RAII.
// First call to Instance() initialises the SDK; the process-exit destruction
// of the function-local static terminates it.
class TeliSystem
{
public:
    static TeliSystem& Instance() {
        static TeliSystem sys;
        return sys;
    }

private:
    TeliSystem()
    {
        // ALREADY_INITIALIZED is tolerated so external SDK users don't conflict.
        Teli::CAM_API_STATUS uiStatus = Teli::Sys_Initialize();
        if (uiStatus != Teli::CAM_API_STS_SUCCESS && uiStatus != Teli::CAM_API_STS_ALREADY_INITIALIZED)
            throw pangolin::VideoException("Unable to initialise TeliSDK.");
    }

    ~TeliSystem()
    {
        // Destructor must not throw; failures are only warned about.
        Teli::CAM_API_STATUS uiStatus = Teli::Sys_Terminate();
        if (uiStatus != Teli::CAM_API_STS_SUCCESS) {
            pango_print_warn("TeliSDK: Error uninitialising.");
        }
    }
};
// Reads the current value of an arbitrary GenICam node from the camera and
// returns it formatted as a string.
// @param cam      open camera handle
// @param node     node handle previously obtained via Nd_GetNode
// @param node_str node name, used only for error reporting
// @throws VideoException / std::runtime_error on SDK failure or unsupported node type
std::string GetNodeValStr(Teli::CAM_HANDLE cam, Teli::CAM_NODE_HANDLE node, std::string node_str)
{
    Teli::TC_NODE_TYPE node_type;
    Teli::CAM_API_STATUS st = Teli::Nd_GetType(cam, node, &node_type);
    if(st != Teli::CAM_API_STS_SUCCESS) {
        throw std::runtime_error("TeliSDK: Unable to get Teli node type.");
    }

    Teli::CAM_API_STATUS status;
    switch(node_type) {
    case Teli::TC_NODE_TYPE_INTEGER:
    {
        int64_t val;
        status = Teli::Nd_GetIntValue(cam, node, &val);
        if(status == Teli::CAM_API_STS_SUCCESS) {
            return pangolin::Convert<std::string, int64_t>::Do(val);
        }
        break;
    }
    case Teli::TC_NODE_TYPE_BOOLEAN:
    {
        bool8_t val;
        status = Teli::Nd_GetBoolValue(cam, node, &val);
        if(status == Teli::CAM_API_STS_SUCCESS) {
            return pangolin::Convert<std::string, bool8_t>::Do(val);
        }
        break;
    }
    case Teli::TC_NODE_TYPE_FLOAT:
    {
        float64_t val;
        status = Teli::Nd_GetFloatValue(cam, node, &val);
        if(status == Teli::CAM_API_STS_SUCCESS) {
            return pangolin::Convert<std::string, float64_t>::Do(val);
        }
        break;
    }
    case Teli::TC_NODE_TYPE_STRING:
    {
        // std::string owns the buffer, so it is released on every exit path
        // (the original raw new[] in the enumeration case below leaked).
        uint32_t buffer_size = 10*1024;
        std::string buffer(buffer_size, '\0');
        status = Teli::Nd_GetStrValue(cam, node, &buffer[0], &buffer_size);
        if(status == Teli::CAM_API_STS_SUCCESS) {
            return std::string(buffer.c_str()); // trim at the NUL terminator
        }
        break;
    }
    case Teli::TC_NODE_TYPE_ENUMERATION:
    {
        // BUGFIX: the original allocated with new[] and never freed the buffer.
        uint32_t buffer_size = 10*1024;
        std::string buffer(buffer_size, '\0');
        status = Teli::Nd_GetEnumStrValue(cam, node, &buffer[0], &buffer_size);
        if(status == Teli::CAM_API_STS_SUCCESS) {
            return std::string(buffer.c_str());
        }
        break;
    }
    case Teli::TC_NODE_TYPE_COMMAND:
    case Teli::TC_NODE_TYPE_REGISTER:
    case Teli::TC_NODE_TYPE_CATEGORY:
    case Teli::TC_NODE_TYPE_ENUM_ENTRY:
    case Teli::TC_NODE_TYPE_PORT:
    default:
        // BUGFIX: '"..." + node_type' performed pointer arithmetic on the
        // string literal instead of appending the enum value.
        throw VideoException("TeliSDK: Unsupported node_type: " + ToString<int>(node_type));
    }

    // Reached only when the SDK getter failed for a supported node type.
    if(status != Teli::CAM_API_STS_SUCCESS) {
        Teli::CAM_GENICAM_ERR_MSG psErrMsg;
        Teli::Misc_GetLastGenICamError(&psErrMsg);
        throw VideoException("TeliSDK: Unable to get Teli parameter, " + node_str, psErrMsg.pszDescription);
    }else{
        throw VideoException("TeliSDK: Unable to get Teli parameter, " + node_str);
    }
}
// Writes a value (parsed from val_str) to an arbitrary GenICam node, or
// executes it if the node is a command.
// @throws VideoException on SDK failure or unsupported node type
void TeliVideo::SetNodeValStr(Teli::CAM_HANDLE cam, Teli::CAM_NODE_HANDLE node, std::string node_str, std::string val_str)
{
    Teli::TC_NODE_TYPE node_type;
    Teli::CAM_API_STATUS st = Teli::Nd_GetType(cam, node, &node_type);
    if(st != Teli::CAM_API_STS_SUCCESS) {
        throw VideoException("TeliSDK: Unable to get Teli node type.");
    }

    Teli::CAM_API_STATUS status = Teli::CAM_API_STS_SUCCESS;
    switch(node_type) {
    case Teli::TC_NODE_TYPE_INTEGER:
    {
        const int64_t val = pangolin::Convert<int64_t, std::string>::Do(val_str);
        status = Teli::Nd_SetIntValue(cam, node, val);
        break;
    }
    case Teli::TC_NODE_TYPE_BOOLEAN:
    {
        const bool8_t val = pangolin::Convert<bool8_t, std::string>::Do(val_str);
        status = Teli::Nd_SetBoolValue(cam, node, val);
        break;
    }
    case Teli::TC_NODE_TYPE_FLOAT:
    {
        const float64_t val = pangolin::Convert<float64_t, std::string>::Do(val_str);
        status = Teli::Nd_SetFloatValue(cam, node, val);
        break;
    }
    case Teli::TC_NODE_TYPE_STRING:
    {
        status = Teli::Nd_SetStrValue(cam, node, val_str.c_str());
        break;
    }
    case Teli::TC_NODE_TYPE_ENUMERATION:
    {
        status = Teli::Nd_SetEnumStrValue(cam, node, val_str.c_str());
        break;
    }
    case Teli::TC_NODE_TYPE_COMMAND:
    {
        status = Teli::Nd_CmdExecute(cam, node, true);
        if (status != Teli::CAM_API_STS_SUCCESS) {
            pango_print_error("TeliVideo: Nd_CmdExecute returned error, %u", status);
            break;
        }
        // Poll until the camera reports the command has completed.
        bool done = false;
        for(int attempts=20; attempts > 0; --attempts) {
            status = Teli::Nd_GetCmdIsDone(cam, node, &done);
            if (status != Teli::CAM_API_STS_SUCCESS) {
                pango_print_error("TeliVideo: Nd_GetCmdIsDone returned error, %u", status);
                break;
            }
            if(done) break;
        }
        // BUGFIX: the original reported a timeout unconditionally, even when
        // the command completed successfully.
        if(!done) {
            pango_print_error("Timeout while waiting for command %s done\n", node_str.c_str());
        }
        break;
    }
    case Teli::TC_NODE_TYPE_REGISTER:
    case Teli::TC_NODE_TYPE_CATEGORY:
    case Teli::TC_NODE_TYPE_ENUM_ENTRY:
    case Teli::TC_NODE_TYPE_PORT:
    default:
        // BUGFIX: '"..." + node_type' performed pointer arithmetic on the
        // string literal instead of appending the enum value.
        throw VideoException("TeliSDK: Unsupported node_type: " + ToString<int>(node_type));
    }

    if(status != Teli::CAM_API_STS_SUCCESS) {
        Teli::CAM_GENICAM_ERR_MSG psErrMsg;
        Teli::Misc_GetLastGenICamError(&psErrMsg);
        throw VideoException("TeliSDK: Unable to set Teli parameter, " + node_str, psErrMsg.pszDescription);
    }
}
// Opens a Teli camera selected by URI params ('sn', 'model' or 'idx'),
// translates size/pos/roi shortcuts into GenICam register names, applies the
// remaining params to the device, then sets up the stream via Initialise().
TeliVideo::TeliVideo(const Params& p)
    : cam(0), strm(0), hStrmCmpEvt(0), transfer_bandwidth_gbps(0), exposure_us(0)
{
    // Ensure the SDK is initialised for the lifetime of the process.
    TeliSystem::Instance();

    uint32_t num_cams = 0;
    Teli::CAM_API_STATUS uiStatus = Teli::Sys_GetNumOfCameras(&num_cams);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("Unable to enumerate TeliSDK cameras.");

    if (num_cams == 0)
        throw pangolin::VideoException("No TeliSDK Cameras available.");

    // Default to rogue values
    std::string sn;
    std::string mn;
    int cam_index = 0;

    // Split URI params into selection params (handled here) and device
    // params (forwarded to SetDeviceParams after translation).
    Params device_params;

    for(Params::ParamMap::const_iterator it = p.params.begin(); it != p.params.end(); it++) {
        if(it->first == "model"){
            mn = it->second;
        } else if(it->first == "sn"){
            sn = it->second;
        } else if(it->first == "idx"){
            cam_index = p.Get<int>("idx", 0);
        } else if(it->first == "size") {
            const ImageDim dim = p.Get<ImageDim>("size", ImageDim(0,0) );
            device_params.Set("Width"  , dim.x);
            device_params.Set("Height" , dim.y);
        } else if(it->first == "pos") {
            const ImageDim pos = p.Get<ImageDim>("pos", ImageDim(0,0) );
            device_params.Set("OffsetX"  , pos.x);
            device_params.Set("OffsetY" , pos.y);
        } else if(it->first == "roi") {
            const ImageRoi roi = p.Get<ImageRoi>("roi", ImageRoi(0,0,0,0) );
            device_params.Set("Width"  , roi.w);
            device_params.Set("Height" , roi.h);
            device_params.Set("OffsetX", roi.x);
            device_params.Set("OffsetY", roi.y);
        } else {
            device_params.Set(it->first, it->second);
        }
    }

    // Open by index when neither serial number nor model was given.
    if(sn.empty() && mn.empty()) {
        uiStatus = Teli::Cam_Open(cam_index, &cam, 0, true, 0);
    }else{
        uiStatus = Teli::Cam_OpenFromInfo(
            (sn.empty() ? 0 : sn.c_str()),
            (mn.empty() ? 0 : mn.c_str()),
            0, &cam, 0, true, 0
        );
    }
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException(FormatString("TeliSDK: Error opening camera, sn='%'", sn));

    SetDeviceParams(device_params);
    Initialise();
}
// Looks up a GenICam node by name and reads its value into 'result'.
// Returns false (with a warning) if the node does not exist; may still throw
// from GetNodeValStr on read failure.
bool TeliVideo::GetParameter(const std::string& name, std::string& result)
{
    Teli::CAM_NODE_HANDLE node;
    Teli::CAM_API_STATUS st = Teli::Nd_GetNode(cam, name.c_str(), &node);
    if( st == Teli::CAM_API_STS_SUCCESS) {
        result = GetNodeValStr(cam, node, name);
        return true;
    }else{
        pango_print_warn("TeliSDK: Unable to get reference to node: %s", name.c_str());
        return false;
    }
}
// Looks up a GenICam node by name and writes 'value' to it.
// Returns false (with a warning) if the node does not exist; may still throw
// from SetNodeValStr on write failure.
bool TeliVideo::SetParameter(const std::string& name, const std::string& value)
{
    Teli::CAM_NODE_HANDLE node;
    Teli::CAM_API_STATUS st = Teli::Nd_GetNode(cam, name.c_str(), &node);
    if( st == Teli::CAM_API_STS_SUCCESS) {
        SetNodeValStr(cam, node, name, value);
        return true;
    }else{
        pango_print_warn("TeliSDK: Unable to get reference to node: %s", name.c_str());
        return false;
    }
}
// Opens the capture stream, creates the platform-specific completion event,
// maps the camera pixel format to a Pangolin format and publishes the single
// output StreamInfo plus device properties.
void TeliVideo::Initialise()
{
    Teli::CAM_API_STATUS uiStatus = Teli::CAM_API_STS_SUCCESS;

    // Create completion event object for stream.
#ifdef _WIN_
    hStrmCmpEvt = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (hStrmCmpEvt == NULL)
        throw pangolin::VideoException("TeliSDK: Error creating event.");
#endif
#ifdef _LINUX_
    uiStatus = Teli::Sys_CreateSignal(&hStrmCmpEvt);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("TeliSDK: Error creating event.");
#endif
    uint32_t uiPyldSize = 0;
    uiStatus = Teli::Strm_OpenSimple(cam, &strm, &uiPyldSize, hStrmCmpEvt);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("TeliSDK: Error opening camera stream.");

    // Read pixel format. Bayer formats are published as grayscale; debayering
    // is left to a downstream filter.
    PixelFormat pfmt;
    Teli::CAM_PIXEL_FORMAT teli_fmt;
    uiStatus = Teli::GetCamPixelFormat(cam, &teli_fmt);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("TeliSDK: Error calling GetCamPixelFormat.");

    switch( teli_fmt) {
    case Teli::PXL_FMT_Mono8:
    case Teli::PXL_FMT_BayerGR8:
    case Teli::PXL_FMT_BayerBG8:
        pfmt = pangolin::PixelFormatFromString("GRAY8");
        break;
    case Teli::PXL_FMT_Mono10:
    case Teli::PXL_FMT_Mono12:
    case Teli::PXL_FMT_Mono16:
    case Teli::PXL_FMT_BayerGR10:
    case Teli::PXL_FMT_BayerGR12:
    case Teli::PXL_FMT_BayerBG10:
    case Teli::PXL_FMT_BayerBG12:
        pfmt = pangolin::PixelFormatFromString("GRAY16LE");
        break;
    case Teli::PXL_FMT_RGB8:
        pfmt = pangolin::PixelFormatFromString("RGB24");
        break;
    case Teli::PXL_FMT_BGR8:
        pfmt = pangolin::PixelFormatFromString("BGR24");
        break;
    default:
        throw std::runtime_error("TeliSDK: Unknown pixel format: " + ToString<int>(teli_fmt) );
    }

    size_bytes = 0;

    // Use width and height reported by camera
    uint32_t w = 0;
    uint32_t h = 0;
    if( Teli::GetCamWidth(cam, &w) != Teli::CAM_API_STS_SUCCESS || Teli::GetCamHeight(cam, &h) != Teli::CAM_API_STS_SUCCESS) {
        throw pangolin::VideoException("TeliSDK: Unable to establish stream dimensions.");
    }

    // Single-channel device: one stream, sized by the SDK payload size.
    const int n = 1;
    for(size_t c=0; c < n; ++c) {
        const StreamInfo stream_info(pfmt, w, h, (w*pfmt.bpp) / 8, 0);
        streams.push_back(stream_info);
        size_bytes += uiPyldSize;
    }

    InitPangoDeviceProperties();
}
// Copies camera identification strings from the SDK into the JSON device
// properties exposed via DeviceProperties().
void TeliVideo::InitPangoDeviceProperties()
{
    Teli::CAM_INFO info;
    Teli::Cam_GetInformation(cam, 0, &info);

    // Store camera details in device properties
    device_properties["SerialNumber"] = std::string(info.szSerialNumber);
    device_properties["VendorName"] = std::string(info.szManufacturer);
    device_properties["ModelName"] = std::string(info.szModelName);
    device_properties["ManufacturerInfo"] = std::string(info.sU3vCamInfo.szManufacturerInfo);
    device_properties["Version"] = std::string(info.sU3vCamInfo.szDeviceVersion);
    device_properties[PANGO_HAS_TIMING_DATA] = true;

    // TODO: Enumerate other settings.
}
// Applies URI-derived parameters to the device. 'transfer_bandwidth_gbps' is
// consumed locally (used for capture-time estimation); everything else is
// forwarded to the camera via SetParameter. Per-parameter failures are logged
// and skipped so one bad option does not abort camera setup.
void TeliVideo::SetDeviceParams(const Params& p)
{
    for(Params::ParamMap::const_iterator it = p.params.begin(); it != p.params.end(); it++) {
        if(it->first == "transfer_bandwidth_gbps") {
            transfer_bandwidth_gbps = atof(it->second.c_str());
        } else {
            try{
                if (it->second == "Execute") {
                    // BUGFIX: the original constructed this exception without
                    // 'throw', silently discarding it. It is caught below and
                    // reported like any other parameter failure.
                    throw std::runtime_error("TeliSDK: Execution commands not yet supported.");
                } else {
                    SetParameter(it->first, it->second);
                }
            }catch(std::exception& e) {
                std::cerr << e.what() << std::endl;
            }
        }
    }
}
// Closes stream then camera; destructors must not throw, so failures only warn.
TeliVideo::~TeliVideo()
{
    Teli::CAM_API_STATUS uiStatus = Teli::Strm_Close(strm);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        pango_print_warn("TeliSDK: Error closing camera stream.");

    uiStatus = Teli::Cam_Close(cam);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        pango_print_warn("TeliSDK: Error closing camera.");
}
//! Implement VideoInput::Start() - begin streaming frames from the camera.
void TeliVideo::Start()
{
    Teli::CAM_API_STATUS uiStatus = Teli::Strm_Start(strm);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("TeliSDK: Error starting stream.");
}
//! Implement VideoInput::Stop() - halt streaming; Start() may be called again.
void TeliVideo::Stop()
{
    Teli::CAM_API_STATUS uiStatus = Teli::Strm_Stop(strm);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("TeliSDK: Error stopping stream.");
}
//! Implement VideoInput::SizeBytes() - total payload size set in Initialise().
size_t TeliVideo::SizeBytes() const
{
    return size_bytes;
}
//! Implement VideoInput::Streams() - single stream described in Initialise().
const std::vector<StreamInfo>& TeliVideo::Streams() const
{
    return streams;
}
// Estimates mid-exposure capture time by subtracting half the exposure and the
// frame transfer time from the host reception timestamp. Only populated when
// the user supplied 'transfer_bandwidth_gbps'.
// NOTE(review): the 'gbps * 1E3 / 8' scaling assumes size_bytes measured
// against a MB/s rate yields microseconds — verify the units against callers.
void TeliVideo::PopulateEstimatedCenterCaptureTime(basetime host_reception_time)
{
    if(transfer_bandwidth_gbps) {
        const float transfer_time_us = size_bytes / int64_t((transfer_bandwidth_gbps * 1E3) / 8.0);
        frame_properties[PANGO_ESTIMATED_CENTER_CAPTURE_TIME_US] = picojson::value(int64_t(pangolin::Time_us(host_reception_time) - (exposure_us/2.0) - transfer_time_us));
    }
}
// Waits (up to 2s) for the stream-completion event, then copies the current
// image and records per-frame timing metadata. Note the opening brace of the
// 'if' lives inside the platform #ifdef blocks.
bool TeliVideo::GrabNext(unsigned char* image, bool /*wait*/)
{
#ifdef _WIN_
    unsigned int uiRet = WaitForSingleObject(hStrmCmpEvt, 2000);
    if (uiRet == WAIT_OBJECT_0) {
#endif
#ifdef _LINUX_
    unsigned int uiRet = Teli::Sys_WaitForSignal(hStrmCmpEvt, 2000);
    if (uiRet == Teli::CAM_API_STS_SUCCESS) {
#endif
        Teli::CAM_IMAGE_INFO sImageInfo;
        uint32_t uiPyldSize = (uint32_t)size_bytes;
        Teli::CAM_API_STATUS uiStatus = Teli::Strm_ReadCurrentImage(strm, image, &uiPyldSize, &sImageInfo);
        // Camera timestamp is in nanoseconds; convert to microseconds.
        frame_properties[PANGO_EXPOSURE_US] = picojson::value(exposure_us);
        frame_properties[PANGO_CAPTURE_TIME_US] = picojson::value(sImageInfo.ullTimestamp/1000);
        basetime now = pangolin::TimeNow();
        frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(pangolin::Time_us(now));
        PopulateEstimatedCenterCaptureTime(now);

        return (uiStatus == Teli::CAM_API_STS_SUCCESS);
    }

    return false;
}
//! Implement VideoInput::GrabNewest() - no frame queue here, so identical to GrabNext.
bool TeliVideo::GrabNewest(unsigned char* image, bool wait)
{
    return GrabNext(image,wait);
}
//! Returns number of frames currently buffered by the camera/SDK.
uint32_t TeliVideo::AvailableFrames() const
{
    uint32_t puiCount = 0;

    Teli::CAM_API_STATUS uiStatus = Teli::GetCamImageBufferFrameCount(cam, &puiCount);
    if (uiStatus != Teli::CAM_API_STS_SUCCESS)
        throw pangolin::VideoException("TeliSDK: Error reading frame buffer frame count.");

    return puiCount;
}
//! Drops N frames in the queue starting from the oldest
//! returns false if less than n frames are available
// Discards up to n buffered frames by waiting for each completion event and
// reading the image with a null destination. Returns false on the first
// 2-second wait timeout.
bool TeliVideo::DropNFrames(uint32_t n)
{
    for (uint32_t i=0;i<n;++i) {
#ifdef _WIN_
        unsigned int uiRet = WaitForSingleObject(hStrmCmpEvt, 2000);
        if (uiRet == WAIT_OBJECT_0) {
#endif
#ifdef _LINUX_
        unsigned int uiRet = Teli::Sys_WaitForSignal(hStrmCmpEvt, 2000);
        if (uiRet == Teli::CAM_API_STS_SUCCESS) {
#endif
            Teli::CAM_IMAGE_INFO sImageInfo;
            uint32_t uiPyldSize = 0 ;
            // Null buffer: consume the frame without copying pixel data.
            Teli::Strm_ReadCurrentImage(strm, 0, &uiPyldSize, &sImageInfo);
        } else {
            return false;
        }
    }

    return true;
}
//! Access JSON properties of device (filled once in InitPangoDeviceProperties).
const picojson::value& TeliVideo::DeviceProperties() const
{
    return device_properties;
}
//! Access JSON properties of most recently captured frame (updated in GrabNext).
const picojson::value& TeliVideo::FrameProperties() const
{
    return frame_properties;
}
PANGOLIN_REGISTER_FACTORY(TeliVideo)
{
    struct TeliVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            return std::unique_ptr<VideoInterface>(new TeliVideo(uri));
        }
    };
    // One factory instance serves both URI schemes.
    auto factory = std::make_shared<TeliVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "teli");
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "u3v");
}
}

View File

@@ -0,0 +1,111 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/test.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
// Fill 'arr' with 'size' pseudo-random bytes in [0, 255].
// Uses the C library rand(); the sequence depends on the global seed.
void setRandomData(unsigned char * arr, size_t size){
    unsigned char* const end = arr + size;
    for(unsigned char* p = arr; p != end; ++p) {
        *p = (unsigned char)(rand()/(RAND_MAX/255.0));
    }
}
// Describes 'n' identical, tightly-packed streams of w x h in the requested
// pixel format; total frame size is accumulated into size_bytes.
TestVideo::TestVideo(size_t w, size_t h, size_t n, std::string pix_fmt)
{
    const PixelFormat fmt = PixelFormatFromString(pix_fmt);

    size_bytes = 0;
    for(size_t stream_idx = 0; stream_idx < n; ++stream_idx) {
        streams.push_back( StreamInfo(fmt, w, h, (w*fmt.bpp)/8, 0) );
        size_bytes += w*h*(fmt.bpp)/8;
    }
}
// Nothing to release: this driver owns no external resources.
TestVideo::~TestVideo()
{
}
//! Implement VideoInput::Start() - no-op; frames are synthesised on demand.
void TestVideo::Start()
{
}
//! Implement VideoInput::Stop() - no-op; nothing is running.
void TestVideo::Stop()
{
}
//! Implement VideoInput::SizeBytes() - total bytes across all synthetic streams.
size_t TestVideo::SizeBytes() const
{
    return size_bytes;
}
//! Implement VideoInput::Streams() - stream layouts built in the constructor.
const std::vector<StreamInfo>& TestVideo::Streams() const
{
    return streams;
}
//! Implement VideoInput::GrabNext() - fills the buffer with random noise; never fails.
bool TestVideo::GrabNext( unsigned char* image, bool /*wait*/ )
{
    setRandomData(image, size_bytes);
    return true;
}
//! Implement VideoInput::GrabNewest() - no queue exists, so identical to GrabNext.
bool TestVideo::GrabNewest( unsigned char* image, bool wait )
{
    return GrabNext(image,wait);
}
PANGOLIN_REGISTER_FACTORY(TestVideo)
{
    // Registers the 'test' scheme: a synthetic noise source, useful for
    // exercising downstream filters without hardware.
    struct TestVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const ImageDim dim         = uri.Get<ImageDim>("size", ImageDim(640,480));
            const int num_streams      = uri.Get<int>("n", 1);
            const std::string pix_fmt  = uri.Get<std::string>("fmt","RGB24");
            return std::unique_ptr<VideoInterface>(
                new TestVideo(dim.x, dim.y, num_streams, pix_fmt)
            );
        }
    };

    FactoryRegistry<VideoInterface>::I().RegisterFactory(
        std::make_shared<TestVideoFactory>(), 10, "test");
}
}

View File

@@ -0,0 +1,269 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/drivers/thread.h>
#include <pangolin/video/iostream_operators.h>
#ifdef DEBUGTHREAD
#include <pangolin/utils/timer.h>
#define TSTART() pangolin::basetime start,last,now; start = pangolin::TimeNow(); last = start;
#define TGRABANDPRINT(...) now = pangolin::TimeNow(); fprintf(stderr,"THREAD: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " %fms.\n",1000*pangolin::TimeDiff_s(last, now)); last = now;
#define DBGPRINT(...) fprintf(stderr,"THREAD: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr,"\n");
#else
#define TSTART()
#define TGRABANDPRINT(...)
#define DBGPRINT(...)
#endif
namespace pangolin
{
// Sleep applied by the grab thread after a failed grab or when no free buffer exists.
const uint64_t grab_fail_thread_sleep_us = 1000;
// Maximum time a blocking GrabNext/GrabNewest waits for the grab thread.
// NOTE: 'timout' spelling retained — it is referenced elsewhere in this file.
const uint64_t capture_timout_ms = 5000;
// Wraps 'src_' and pre-allocates 'num_buffers' frame buffers for the
// background grab thread. The thread itself starts in Start().
ThreadVideo::ThreadVideo(std::unique_ptr<VideoInterface> &src_, size_t num_buffers)
    : src(std::move(src_)), quit_grab_thread(true)
{
    if(!src) {
        throw VideoException("ThreadVideo: VideoInterface in must not be null");
    }
    videoin.push_back(src.get());

//    // queue init allocates buffers.
//    queue.init(num_buffers, (unsigned int)videoin[0]->SizeBytes());

    // Seed the queue's free list with pre-sized buffers.
    for(size_t i=0; i < num_buffers; ++i)
    {
        queue.returnOrAddUsedBuffer( GrabResult(videoin[0]->SizeBytes()) );
    }
}
// Stops (and joins) the grab thread before releasing the wrapped source.
ThreadVideo::~ThreadVideo()
{
    Stop();
    src.reset();
}
//! Implement VideoInput::Start() - start the input then launch the grab thread.
void ThreadVideo::Start()
{
    // Only start thread if not already running (quit flag doubles as 'not running').
    if(quit_grab_thread) {
        videoin[0]->Start();
        quit_grab_thread = false;
        grab_thread = std::thread(std::ref(*this));
    }
}
//! Implement VideoInput::Stop() - signal the grab thread, join it, stop the input.
void ThreadVideo::Stop()
{
    quit_grab_thread = true;
    if(grab_thread.joinable()) {
        grab_thread.join();
    }
    videoin[0]->Stop();
}
//! Implement VideoInput::SizeBytes() - same as the wrapped input.
size_t ThreadVideo::SizeBytes() const
{
    return videoin[0]->SizeBytes();
}
//! Implement VideoInput::Streams() - same as the wrapped input.
const std::vector<StreamInfo>& ThreadVideo::Streams() const
{
    return videoin[0]->Streams();
}
// Re-queries the wrapped input's device properties on every call (cached in a
// mutable member so the method can stay const).
const picojson::value& ThreadVideo::DeviceProperties() const
{
    device_properties = GetVideoDeviceProperties(videoin[0]);
    return device_properties;
}
// Properties of the frame most recently copied out by GrabNext/GrabNewest.
const picojson::value& ThreadVideo::FrameProperties() const
{
    return frame_properties;
}
// Number of grabbed-but-unconsumed frames currently sitting in the queue.
uint32_t ThreadVideo::AvailableFrames() const
{
    return (uint32_t)queue.AvailableFrames();
}
// Discards n queued frames; delegated entirely to the queue.
bool ThreadVideo::DropNFrames(uint32_t n)
{
    return queue.DropNFrames(n);
}
//! Implement VideoInput::GrabNext()
//! Returns the oldest queued frame; if 'wait' and the queue is empty, blocks
//! (up to capture_timout_ms) for the grab thread to deliver one.
bool ThreadVideo::GrabNext( unsigned char* image, bool wait )
{
    TSTART()
    if(queue.EmptyBuffers() == 0) {
        pango_print_warn("Thread %12p has run out of %d buffers\n", this, (int)queue.AvailableFrames());
    }
    if(queue.AvailableFrames() == 0 && !wait) {
        // No frames available, no wait, simply return false.
        DBGPRINT("GrabNext no available frames no wait.");
        return false;
    }else{
        if(queue.AvailableFrames() == 0 && wait) {
            // Must return a frame so block on notification from grab thread.
            std::unique_lock<std::mutex> lk(cvMtx);
            DBGPRINT("GrabNext no available frames wait for notification.");
            // Wait with a predicate so a spurious wakeup cannot drop us
            // through to an empty-queue read.
            if(!cv.wait_for(lk, std::chrono::milliseconds(capture_timout_ms),
                            [&]{ return queue.AvailableFrames() > 0; }))
            {
                pango_print_warn("ThreadVideo: GrabNext blocking read for frames reached timeout.");
                return false;
            }
        }
        // At least one valid frame in queue, return it.
        GrabResult grab = queue.getNext();
        // BUGFIX: cache the status before the buffer is handed back; the
        // original read grab.return_status after std::move(grab) below.
        const bool success = grab.return_status;
        if(success) {
            DBGPRINT("GrabNext at least one frame available.");
            std::memcpy(image, grab.buffer.get(), videoin[0]->SizeBytes());
            frame_properties = grab.frame_properties;
        }else{
            DBGPRINT("GrabNext returned false")
        }
        queue.returnOrAddUsedBuffer(std::move(grab));
        TGRABANDPRINT("GrabNext took")
        return success;
    }
}
//! Implement VideoInput::GrabNewest()
//! Returns the most recent queued frame (older ones are recycled by the queue);
//! if 'wait' and the queue is empty, blocks up to capture_timout_ms.
bool ThreadVideo::GrabNewest( unsigned char* image, bool wait )
{
    TSTART()
    if(queue.AvailableFrames() == 0 && !wait) {
        // No frames available, no wait, simply return false.
        DBGPRINT("GrabNext no available frames no wait.");
        return false;
    }else{
        if(queue.AvailableFrames() == 0 && wait) {
            // Must return a frame so block on notification from grab thread.
            std::unique_lock<std::mutex> lk(cvMtx);
            DBGPRINT("GrabNewest no available frames wait for notification.");
            // NOTE(review): wait_for without a predicate can return early on a
            // spurious wakeup — confirm queue.getNewest() tolerates an empty queue.
            if(cv.wait_for(lk, std::chrono::milliseconds(capture_timout_ms)) == std::cv_status::timeout)
            {
                pango_print_warn("ThreadVideo: GrabNext blocking read for frames reached timeout.");
                return false;
            }
        }
        // At least one valid frame in queue, return it.
        DBGPRINT("GrabNewest at least one frame available.");
        GrabResult grab = queue.getNewest();
        // Status is cached before the buffer is moved back to the queue.
        const bool success = grab.return_status;
        if(success) {
            std::memcpy(image, grab.buffer.get(), videoin[0]->SizeBytes());
            frame_properties = grab.frame_properties;
        }
        queue.returnOrAddUsedBuffer(std::move(grab));
        TGRABANDPRINT("GrabNewest memcpy of available frame took")
        return success;
    }
}
// Grab-thread entry point: repeatedly pulls a free buffer, performs a blocking
// grab into it, and publishes the result, notifying any blocked consumer.
// Runs until quit_grab_thread is set by Stop().
void ThreadVideo::operator()()
{
    DBGPRINT("Grab thread Started.")

    // Spinning thread attempting to read from videoin[0] as fast as possible
    // relying on the videoin[0] blocking grab.
    while(!quit_grab_thread) {
        // Get a buffer from the queue;
        if(queue.EmptyBuffers() > 0) {
            GrabResult grab = queue.getFreeBuffer();

            // Blocking grab (i.e. GrabNext with wait = true).
            try{
                grab.return_status = videoin[0]->GrabNext(grab.buffer.get(), true);
            }catch(const VideoException& e) {
                // User doesn't have the opportunity to catch exceptions here.
                pango_print_warn("ThreadVideo caught VideoException (%s)\n", e.what());
                grab.return_status = false;
            }catch(const std::exception& e){
                // User doesn't have the opportunity to catch exceptions here.
                pango_print_warn("ThreadVideo caught exception (%s)\n", e.what());
                grab.return_status = false;
            }

            if(grab.return_status){
                grab.frame_properties = GetVideoFrameProperties(videoin[0]);
            }else{
                // Back off briefly so a persistently-failing source doesn't spin.
                std::this_thread::sleep_for(std::chrono::microseconds(grab_fail_thread_sleep_us) );
            }

            // Failed grabs are published too; consumers check return_status.
            queue.addValidBuffer(std::move(grab));
            DBGPRINT("Grab thread got frame. valid:%d free:%d",queue.AvailableFrames(),queue.EmptyBuffers())

            // Let listening threads know we got a frame in case they are waiting.
            cv.notify_all();
        }else{
            // No free buffer: consumers are behind; sleep rather than busy-wait.
            std::this_thread::sleep_for(std::chrono::microseconds(grab_fail_thread_sleep_us) );
        }
        std::this_thread::yield();
    }

    DBGPRINT("Grab thread Stopped.")

    return;
}
//! Expose the wrapped input so the filter chain can be introspected/unwrapped.
std::vector<VideoInterface*>& ThreadVideo::InputStreams()
{
    return videoin;
}
PANGOLIN_REGISTER_FACTORY(ThreadVideo)
{
    // Registers the 'thread' scheme: decouples capture from consumption via a
    // background grab thread and a ring of 'num_buffers' frame buffers.
    struct ThreadVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            const int num_buffers = uri.Get<int>("num_buffers", 30);
            return std::unique_ptr<VideoInterface>(new ThreadVideo(subvid, num_buffers));
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<ThreadVideoFactory>(), 10, "thread");
}
}
#undef TSTART
#undef TGRABANDPRINT
#undef DBGPRINT

View File

@@ -0,0 +1,110 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/truncate.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
/// Wrap `src_` so that only frames in [begin, end) are exposed to callers.
/// Takes ownership of the child device; seeks to `begin` when the child
/// supports playback seeking.
TruncateVideo::TruncateVideo(std::unique_ptr<VideoInterface> &src_, size_t begin, size_t end)
    : src(std::move(src_)), streams(src->Streams()), begin(begin), end(end), next_frame_to_grab(0)
{
    videoin.push_back(src.get());
    if(VideoPlaybackInterface* v = GetVideoPlaybackInterface()){
        // Guard against the obscure case of nested TruncateVideo filters.
        // BUGFIX: inspect *src, not *src_ — src_ was moved-from in the
        // initializer list, so dereferencing it was undefined behaviour.
        if( !pangolin::FindFirstMatchingVideoInterface<TruncateVideo>(*src) ) {
            next_frame_to_grab = v->Seek(begin);
        }
    }
}
// The owned child device is released by the unique_ptr member.
TruncateVideo::~TruncateVideo()
{
}
//! Frame buffer size is unchanged: truncation only filters which frames pass.
size_t TruncateVideo::SizeBytes() const
{
    return src->SizeBytes();
}
//! Stream layout is copied verbatim from the child at construction time.
const std::vector<StreamInfo>& TruncateVideo::Streams() const
{
    return streams;
}
void TruncateVideo::Start()
{
videoin[0]->Start();
}
void TruncateVideo::Stop()
{
videoin[0]->Stop();
}
//! Grab the next frame, exposing only frames in [begin, end).
//! Frames before `begin` are still grabbed from the child (to advance it)
//! but reported as failure; once `end` is reached this always returns false.
bool TruncateVideo::GrabNext( unsigned char* image, bool wait )
{
    if(next_frame_to_grab < end) {
        bool grab_success = videoin[0]->GrabNext(image, wait);
        // Post-increment: the frame just grabbed counts toward the window test.
        return grab_success && (next_frame_to_grab++) >= begin;
    }
    return false;
}
//! Pass-through to the child device.
//! NOTE(review): unlike GrabNext this neither advances next_frame_to_grab nor
//! honours the [begin, end) window — looks like an oversight; confirm intent.
bool TruncateVideo::GrabNewest( unsigned char* image, bool wait )
{
    return videoin[0]->GrabNewest(image, wait);
}
//! Expose the single wrapped child device for filter-chain introspection.
std::vector<VideoInterface*>& TruncateVideo::InputStreams()
{
    return videoin;
}
PANGOLIN_REGISTER_FACTORY(TruncateVideo)
{
    // Factory for the "truncate:" URI scheme: exposes only frames in
    // [begin, end) of the wrapped source.
    // `final` added for consistency with the other factory structs in this
    // library (ThreadVideoFactory, UnpackVideoFactory, UvcVideoFactory, ...).
    struct TruncateVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::unique_ptr<VideoInterface> subvid = pangolin::OpenVideo(uri.url);
            if(subvid->Streams().size() == 0) {
                throw VideoException("VideoTruncater input must have at least one stream");
            }
            // Defaults: keep everything from the first frame onwards.
            const size_t start = uri.Get<size_t>("begin", 0);
            const size_t end = uri.Get<size_t>("end", std::numeric_limits<size_t>::max());
            return std::unique_ptr<VideoInterface>( new TruncateVideo(subvid,start,end) );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<TruncateVideoFactory>(), 10, "truncate");
}
}

View File

@@ -0,0 +1,268 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2014 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/drivers/unpack.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/iostream_operators.h>
#ifdef DEBUGUNPACK
#include <pangolin/utils/timer.h>
#define TSTART() pangolin::basetime start,last,now; start = pangolin::TimeNow(); last = start;
#define TGRABANDPRINT(...) now = pangolin::TimeNow(); fprintf(stderr,"UNPACK: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " %fms.\n",1000*pangolin::TimeDiff_s(last, now)); last = now;
#define DBGPRINT(...) fprintf(stderr,"UNPACK: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr,"\n");
#else
#define TSTART()
#define TGRABANDPRINT(...)
#define DBGPRINT(...)
#endif
namespace pangolin
{
/// Wrap a packed, single-channel (<=16bpp) source and expose it unpacked
/// into `out_fmt` (one channel, wider samples). Takes ownership of src_.
/// @throws VideoException on null source or unsupported formats.
UnpackVideo::UnpackVideo(std::unique_ptr<VideoInterface> &src_, PixelFormat out_fmt)
    : src(std::move(src_)), size_bytes(0), buffer(0)
{
    if( !src || out_fmt.channels != 1) {
        throw VideoException("UnpackVideo: Only supports single channel output.");
    }
    videoin.push_back(src.get());
    for(size_t s=0; s< src->Streams().size(); ++s) {
        const size_t w = src->Streams()[s].Width();
        const size_t h = src->Streams()[s].Height();
        // Check compatibility of formats
        const PixelFormat in_fmt = src->Streams()[s].PixFormat();
        if(in_fmt.channels > 1 || in_fmt.bpp > 16) {
            throw VideoException("UnpackVideo: Only supports one channel input.");
        }
        const size_t pitch = (w*out_fmt.bpp)/ 8;
        // Stream offsets are encoded as byte offsets from a null base pointer.
        streams.push_back(pangolin::StreamInfo( out_fmt, w, h, pitch, (unsigned char*)0 + size_bytes ));
        size_bytes += h*pitch;
    }
    // Scratch buffer holding one packed frame from the child device.
    buffer = new unsigned char[src->SizeBytes()];
}
// Release the packed-frame scratch buffer allocated in the constructor.
UnpackVideo::~UnpackVideo()
{
    delete[] buffer;
}
//! Implement VideoInput::Start() — forwarded to the wrapped device.
void UnpackVideo::Start()
{
    videoin[0]->Start();
}
//! Implement VideoInput::Stop() — forwarded to the wrapped device.
void UnpackVideo::Stop()
{
    videoin[0]->Stop();
}
//! Implement VideoInput::SizeBytes() — size of one *unpacked* frame,
//! accumulated over all streams in the constructor.
size_t UnpackVideo::SizeBytes() const
{
    return size_bytes;
}
//! Implement VideoInput::Streams() — the unpacked output stream layout.
const std::vector<StreamInfo>& UnpackVideo::Streams() const
{
    return streams;
}
// Widen each 8-bit input sample to the output sample type T, row by row so
// that independent input/output row pitches are honoured.
template<typename T>
void ConvertFrom8bit(
    Image<unsigned char>& out,
    const Image<unsigned char>& in
) {
    for(size_t row = 0; row < out.h; ++row) {
        T* dst = (T*)(out.ptr + row * out.pitch);
        const uint8_t* src = in.ptr + row * in.pitch;
        const uint8_t* const src_end = in.ptr + (row + 1) * in.pitch;
        for(; src != src_end; ++src, ++dst) {
            *dst = T(*src);
        }
    }
}
// Unpack little-endian 10-bit samples: every 5 input bytes hold four 10-bit
// values, emitted as four output samples of type T.
// Assumes each input row's pitch is a multiple of 5 bytes — TODO confirm.
template<typename T>
void ConvertFrom10bit(
    Image<unsigned char>& out,
    const Image<unsigned char>& in
) {
    for(size_t r=0; r<out.h; ++r) {
        T* pout = (T*)(out.ptr + r*out.pitch);
        uint8_t* pin = in.ptr + r*in.pitch;
        const uint8_t* pin_end = in.ptr + (r+1)*in.pitch;
        while(pin != pin_end) {
            // Assemble 40 bits (5 bytes, little-endian) into one word.
            uint64_t val = *(pin++);
            val |= uint64_t(*(pin++)) << 8;
            val |= uint64_t(*(pin++)) << 16;
            val |= uint64_t(*(pin++)) << 24;
            val |= uint64_t(*(pin++)) << 32;
            // Slice out the four consecutive 10-bit fields.
            *(pout++) = T( val & 0x00000003FF);
            *(pout++) = T((val & 0x00000FFC00) >> 10);
            *(pout++) = T((val & 0x003FF00000) >> 20);
            *(pout++) = T((val & 0xFFC0000000) >> 30);
        }
    }
}
// Unpack little-endian 12-bit samples: every 3 input bytes hold two 12-bit
// values, emitted as two output samples of type T.
// Assumes each input row's pitch is a multiple of 3 bytes — TODO confirm.
template<typename T>
void ConvertFrom12bit(
    Image<unsigned char>& out,
    const Image<unsigned char>& in
) {
    for(size_t r=0; r<out.h; ++r) {
        T* pout = (T*)(out.ptr + r*out.pitch);
        uint8_t* pin = in.ptr + r*in.pitch;
        const uint8_t* pin_end = in.ptr + (r+1)*in.pitch;
        while(pin != pin_end) {
            // Assemble 24 bits (3 bytes, little-endian) into one word.
            uint32_t val = *(pin++);
            val |= uint32_t(*(pin++)) << 8;
            val |= uint32_t(*(pin++)) << 16;
            // Slice out the two consecutive 12-bit fields.
            *(pout++) = T( val & 0x000FFF);
            *(pout++) = T((val & 0xFFF000) >> 12);
        }
    }
}
// Unpack one grabbed frame: for each stream, dispatch on the requested output
// format and the child's input bit depth.
void UnpackVideo::Process(unsigned char* image, const unsigned char* buffer)
{
    TSTART()
    for(size_t s=0; s<streams.size(); ++s) {
        const Image<unsigned char> img_in = videoin[0]->Streams()[s].StreamImage(buffer);
        Image<unsigned char> img_out = Streams()[s].StreamImage(image);
        const int bits_in = videoin[0]->Streams()[s].PixFormat().bpp;
        if(Streams()[s].PixFormat().format == "GRAY32F") {
            if( bits_in == 8) {
                ConvertFrom8bit<float>(img_out, img_in);
            }else if( bits_in == 10) {
                ConvertFrom10bit<float>(img_out, img_in);
            }else if( bits_in == 12){
                ConvertFrom12bit<float>(img_out, img_in);
            }else{
                throw pangolin::VideoException("Unsupported bitdepths.");
            }
        }else if(Streams()[s].PixFormat().format == "GRAY16LE") {
            if( bits_in == 8) {
                ConvertFrom8bit<uint16_t>(img_out, img_in);
            }else if( bits_in == 10) {
                ConvertFrom10bit<uint16_t>(img_out, img_in);
            }else if( bits_in == 12){
                ConvertFrom12bit<uint16_t>(img_out, img_in);
            }else{
                throw pangolin::VideoException("Unsupported bitdepths.");
            }
        }else{
            // NOTE(review): unknown output formats are silently skipped here
            // (frame data left untouched) — confirm this is intentional.
        }
    }
    TGRABANDPRINT("Unpacking took ")
}
//! Implement VideoInput::GrabNext() — grab a packed frame into the scratch
//! buffer, then unpack it into `image`.
bool UnpackVideo::GrabNext( unsigned char* image, bool wait )
{
    const bool got_frame = videoin[0]->GrabNext(buffer, wait);
    if(got_frame) {
        Process(image, buffer);
    }
    return got_frame;
}
//! Implement VideoInput::GrabNewest() — grab the newest packed frame into the
//! scratch buffer, then unpack it into `image`.
bool UnpackVideo::GrabNewest( unsigned char* image, bool wait )
{
    const bool got_frame = videoin[0]->GrabNewest(buffer, wait);
    if(got_frame) {
        Process(image, buffer);
    }
    return got_frame;
}
//! Expose the single wrapped child device for filter-chain introspection.
std::vector<VideoInterface*>& UnpackVideo::InputStreams()
{
    return videoin;
}
unsigned int UnpackVideo::AvailableFrames() const
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin[0]);
if(!vpi)
{
pango_print_warn("Unpack: child interface is not buffer aware.");
return 0;
}
else
{
return vpi->AvailableFrames();
}
}
bool UnpackVideo::DropNFrames(uint32_t n)
{
BufferAwareVideoInterface* vpi = dynamic_cast<BufferAwareVideoInterface*>(videoin[0]);
if(!vpi)
{
pango_print_warn("Unpack: child interface is not buffer aware.");
return false;
}
else
{
return vpi->DropNFrames(n);
}
}
PANGOLIN_REGISTER_FACTORY(UnpackVideo)
{
    // Factory for the "unpack:" URI scheme. The target pixel format is given
    // by the "fmt" option (default GRAY16LE).
    struct UnpackVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            std::unique_ptr<VideoInterface> inner = pangolin::OpenVideo(uri.url);
            const std::string out_fmt = uri.Get("fmt", std::string("GRAY16LE") );
            return std::unique_ptr<VideoInterface>(
                new UnpackVideo(inner, PixelFormatFromString(out_fmt) )
            );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<UnpackVideoFactory>(), 10, "unpack");
}
}
#undef TSTART
#undef TGRABANDPRINT
#undef DBGPRINT

364
thirdparty/Pangolin/src/video/drivers/uvc.cpp vendored Executable file
View File

@@ -0,0 +1,364 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/drivers/uvc.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
/// Open a libuvc device matching the given vendor/product/serial/index and
/// negotiate the requested mode. Streaming is started immediately.
/// @throws VideoException if the context or device cannot be opened.
UvcVideo::UvcVideo(int vendor_id, int product_id, const char* sn, int device_id, int width, int height, int fps)
    : ctx_(NULL),
      dev_(NULL),
      devh_(NULL),
      frame_(NULL),
      is_streaming(false)
{
    uvc_init(&ctx_, NULL);
    if(!ctx_) {
        throw VideoException("Unable to open UVC Context");
    }
    InitDevice(vendor_id, product_id, sn, device_id, width, height, fps);
    InitPangoDeviceProperties();
    // FIX: CRASHING IF WE DON'T START STREAMING STRAIGHT AWAY
    Start();
}
// Teardown order matters: stop streaming / free the frame first, then close
// the device handle, drop the device reference, and finally exit the context.
UvcVideo::~UvcVideo()
{
    DeinitDevice();
    if(devh_) uvc_close(devh_);
    if(dev_) uvc_unref_device(dev_);
    if (ctx_) {
        uvc_exit(ctx_);
        ctx_ = 0;
    }
}
/// Locate the `device_id`-th device matching the (optional) vendor id,
/// product id and serial number. On success stores a referenced device in
/// *dev and returns UVC_SUCCESS; otherwise returns a libuvc error code.
/// A zero vid/pid or null sn acts as a wildcard.
uvc_error_t UvcVideo::FindDevice(
    uvc_context_t *ctx, uvc_device_t **dev,
    int vid, int pid, const char *sn, int device_id) {
    uvc_device_t **list;
    uvc_device_t *test_dev;

    uvc_error_t ret = uvc_get_device_list(ctx, &list);
    // BUGFIX: check the result BEFORE touching `list` — the original counted
    // the NULL-terminated list first, dereferencing a possibly invalid pointer
    // when uvc_get_device_list() had failed.
    if (ret != UVC_SUCCESS) {
        return ret;
    }

    int cnt = 0;
    while(list[cnt++]!=NULL);
    pango_print_info("UVC Descriptor list contains %d devices.\n", (cnt-1));

    int dev_idx = 0;
    int num_found = 0;
    bool found_dev = false;
    while (!found_dev && (test_dev = list[dev_idx++]) != NULL) {
        uvc_device_descriptor_t *desc;
        if (uvc_get_device_descriptor(test_dev, &desc) != UVC_SUCCESS)
            continue;
        const bool matches = (!vid || desc->idVendor == vid)
                && (!pid || desc->idProduct == pid)
                && (!sn || (desc->serialNumber && !strcmp(desc->serialNumber, sn)));
        uvc_free_device_descriptor(desc);
        if (matches) {
            // Only the device_id-th matching device is accepted.
            if(device_id == num_found) {
                found_dev = true;
                break;
            }
            num_found++;
        }
    }
    if (found_dev)
        uvc_ref_device(test_dev);     // Keep the device alive past the list free.
    uvc_free_device_list(list, 1);
    if (found_dev) {
        *dev = test_dev;
        return UVC_SUCCESS;
    } else {
        return UVC_ERROR_NO_DEVICE;
    }
}
/// Find, open and configure the device, negotiate the stream mode, and set up
/// the single output stream description.
/// @throws VideoException on any failure (resources opened so far are freed).
void UvcVideo::InitDevice(int vid, int pid, const char* sn, int device_id, int width, int height, int fps)
{
    uvc_error_t find_err = FindDevice(ctx_, &dev_, vid, pid, sn, device_id );
    if (find_err != UVC_SUCCESS) {
        uvc_perror(find_err, "uvc_find_device");
        throw VideoException("Unable to open UVC Device");
    }
    if(!dev_) {
        throw VideoException("Unable to open UVC Device - no pointer returned.");
    }

    uvc_error_t open_err = uvc_open(dev_, &devh_);
    if (open_err != UVC_SUCCESS) {
        uvc_perror(open_err, "uvc_open");
        uvc_unref_device(dev_);
        throw VideoException("Unable to open device");
    }

    //uvc_print_diag(devh_, stderr);
    uvc_error_t mode_err = uvc_get_stream_ctrl_format_size(
                devh_, &ctrl_,
                UVC_FRAME_FORMAT_ANY,
                width, height,
                fps);
    //uvc_print_stream_ctrl(&ctrl_, stderr);
    if (mode_err != UVC_SUCCESS) {
        uvc_perror(mode_err, "uvc_get_stream_ctrl_format_size");
        uvc_close(devh_);
        uvc_unref_device(dev_);
        throw VideoException("Unable to set device mode.");
    }

    uvc_error_t strm_err = uvc_stream_open_ctrl(devh_, &strm_, &ctrl_);
    if(strm_err != UVC_SUCCESS) {
        uvc_perror(strm_err, "uvc_stream_open_ctrl");
        uvc_close(devh_);
        uvc_unref_device(dev_);
        throw VideoException("Unable to open device stream.");
    }

    // Default to greyscale.
    PixelFormat pfmt = PixelFormatFromString("GRAY8");

    // Walk the format descriptor list for the negotiated format index.
    // BUGFIX: test `uvc_fmt` for null BEFORE dereferencing it — the original
    // condition read uvc_fmt->bFormatIndex first, crashing when the list ends.
    const uvc_format_desc_t* uvc_fmt = uvc_get_format_descs(devh_);
    while( uvc_fmt && uvc_fmt->bFormatIndex != ctrl_.bFormatIndex ) {
        uvc_fmt = uvc_fmt->next;
    }
    if(uvc_fmt) {
        // TODO: Use uvc_fmt->fourccFormat
        if( uvc_fmt->bBitsPerPixel == 16 ) {
            pfmt = PixelFormatFromString("GRAY16LE");
        }
    }

    const StreamInfo stream_info(pfmt, width, height, (width*pfmt.bpp)/8, 0);
    streams.push_back(stream_info);
}
// Populate the JSON device-property blob exposed via DeviceProperties().
void UvcVideo::InitPangoDeviceProperties()
{
    // Store camera details in device properties
    device_properties["BusNumber"] = std::to_string(uvc_get_bus_number(dev_));
    device_properties["DeviceAddress"] = std::to_string(uvc_get_device_address(dev_));
    // Frames carry a host reception timestamp (see GrabNext).
    device_properties[PANGO_HAS_TIMING_DATA] = true;
}
// Stop streaming and release the staging frame; device/context teardown is
// handled by the destructor.
void UvcVideo::DeinitDevice()
{
    Stop();
    if (frame_) {
        uvc_free_frame(frame_);
        frame_ = 0;
    }
}
// Begin streaming (idempotent) and allocate the staging frame sized to the
// negotiated maximum video frame size.
void UvcVideo::Start()
{
    if(!is_streaming) {
        uvc_error_t stream_err = uvc_stream_start(strm_, NULL, this, 0);
        if (stream_err != UVC_SUCCESS) {
            uvc_perror(stream_err, "uvc_stream_start");
            uvc_close(devh_);
            uvc_unref_device(dev_);
            throw VideoException("Unable to start streaming.");
        }else{
            is_streaming = true;
        }
        // (Re)allocate the staging frame after the mode is known.
        if (frame_) {
            uvc_free_frame(frame_);
        }
        size_bytes = ctrl_.dwMaxVideoFrameSize;
        frame_ = uvc_allocate_frame(size_bytes);
        if(!frame_) {
            throw VideoException("Unable to allocate frame.");
        }
    }
}
// Stop streaming if active; safe to call repeatedly.
void UvcVideo::Stop()
{
    if(is_streaming && devh_) {
        uvc_stop_streaming(devh_);
    }
    is_streaming = false;
}
//! Maximum size of one frame in bytes (set from dwMaxVideoFrameSize in Start()).
size_t UvcVideo::SizeBytes() const
{
    return size_bytes;
}
//! The single stream configured in InitDevice().
const std::vector<StreamInfo>& UvcVideo::Streams() const
{
    return streams;
}
// Fetch the next frame from the libuvc stream and copy it into `image`.
// Timeout argument: 0 blocks indefinitely, -1 returns immediately —
// NOTE(review): assumed libuvc semantics for uvc_stream_get_frame; confirm.
bool UvcVideo::GrabNext( unsigned char* image, bool wait )
{
    uvc_frame_t* frame = NULL;
    uvc_error_t err = uvc_stream_get_frame(strm_, &frame, wait ? 0 : -1);
    if(err!= UVC_SUCCESS) {
        pango_print_error("UvcVideo Error: %s", uvc_strerror(err) );
        return false;
    }else{
        if(frame) {
            memcpy(image, frame->data, frame->data_bytes );
            // This is a hack, this ts sould come from the device.
            frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(pangolin::Time_us(pangolin::TimeNow()));
            return true;
        }else{
            if(wait) {
                pango_print_debug("UvcVideo: No frame data");
            }
            return false;
        }
    }
}
// No dedicated "newest" path for this device: simply forwards to GrabNext.
bool UvcVideo::GrabNewest( unsigned char* image, bool wait )
{
    return GrabNext(image, wait);
}
// Raw UVC control transfer: SET_CUR writes `data`, any other request code
// reads into `data`. Returns the libuvc result (negative on error).
int UvcVideo::IoCtrl(uint8_t unit, uint8_t ctrl, unsigned char* data, int len, UvcRequestCode req_code)
{
    if(req_code == UVC_SET_CUR) {
        return uvc_set_ctrl(devh_, unit, ctrl, data, len);
    }else{
        return uvc_get_ctrl(devh_, unit, ctrl, data, len, (uvc_req_code)req_code);
    }
}
//! Set the absolute exposure time; returns false (with a warning) on failure.
bool UvcVideo::SetExposure(int exp_us)
{
    const uint32_t exposure = uint32_t(exp_us);
    const bool ok = uvc_set_exposure_abs(devh_, exposure) >= 0;
    if (!ok) {
        pango_print_warn("UvcVideo::setExposure() ioctl error: %s\n", strerror(errno));
    }
    return ok;
}
//! Query the current absolute exposure time into exp_us; returns false
//! (with a warning) on failure.
bool UvcVideo::GetExposure(int& exp_us)
{
    uint32_t e;
    const bool ok = uvc_get_exposure_abs(devh_, &e, uvc_req_code::UVC_GET_CUR) >= 0;
    if (!ok) {
        pango_print_warn("UvcVideo::GetExposure() ioctl error: %s\n", strerror(errno));
    } else {
        exp_us = e;
    }
    return ok;
}
//! Set the sensor gain (truncated to uint16); returns false (with a warning)
//! on failure.
bool UvcVideo::SetGain(float gain)
{
    const uint16_t g = uint16_t(gain);
    const bool ok = uvc_set_gain(devh_, g) >= 0;
    if (!ok) {
        pango_print_warn("UvcVideo::setGain() ioctl error: %s\n", strerror(errno));
    }
    return ok;
}
//! Query the current sensor gain into `gain`; returns false (with a warning)
//! on failure.
bool UvcVideo::GetGain(float& gain)
{
    uint16_t g;
    const bool ok = uvc_get_gain(devh_, &g, uvc_req_code::UVC_GET_CUR) >= 0;
    if (!ok) {
        pango_print_warn("UvcVideo::GetGain() ioctl error: %s\n", strerror(errno));
    } else {
        gain = g;
    }
    return ok;
}
//! Access JSON properties of device (populated in InitPangoDeviceProperties).
const picojson::value& UvcVideo::DeviceProperties() const
{
    return device_properties;
}
//! Access JSON properties of most recently captured frame (host reception
//! timestamp set in GrabNext).
const picojson::value& UvcVideo::FrameProperties() const
{
    return frame_properties;
}
PANGOLIN_REGISTER_FACTORY(UvcVideo)
{
    // Factory for the "uvc:" URI scheme. vid/pid are hex strings (e.g. "0x05a9");
    // zero values act as wildcards.
    struct UvcVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            int vendor_id = 0;
            int product_id = 0;
            std::istringstream(uri.Get<std::string>("vid","0x0000")) >> std::hex >> vendor_id;
            std::istringstream(uri.Get<std::string>("pid","0x0000")) >> std::hex >> product_id;
            const unsigned int device_num = uri.Get<int>("num",0);
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(640,480));
            const unsigned int fps = uri.Get<unsigned int>("fps", 0); // 0 means unspecified.
            // Serial number is not exposed via the URI, hence nullptr.
            return std::unique_ptr<VideoInterface>( new UvcVideo(vendor_id,product_id,nullptr,device_num,dim.x,dim.y,fps) );
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<UvcVideoFactory>(), 10, "uvc");
}

View File

@@ -0,0 +1,660 @@
#ifndef NOMINMAX
# define NOMINMAX
#endif
#include <mfapi.h>
#include <mferror.h>
#include <mfidl.h>
#include <mfreadwrite.h>
#include <dshow.h>
#include <ks.h>
#include <ksmedia.h>
#include <ksproxy.h>
#include <vidcap.h>
#include <pangolin/factory/factory_registry.h>
#include <pangolin/utils/timer.h>
#include <pangolin/video/drivers/uvc_mediafoundation.h>
#include <pangolin/video/iostream_operators.h>
namespace pangolin
{
static constexpr DWORD KS_CONTROL_NODE_ID_INVALID = ~0;
const GUID GUID_EXTENSION_UNIT_DESCRIPTOR_OV580{
0x2ccb0bda, 0x6331, 0x4fdb, 0x85, 0x0e, 0x79, 0x05, 0x4d, 0xbd, 0x56, 0x71};
/// Open a Media Foundation UVC capture device matching vendor/product/device
/// index and configure it for the requested resolution and frame rate.
/// Initialises COM (apartment-threaded) and Media Foundation for this object.
/// @throws VideoException if initialisation or device lookup fails.
UvcMediaFoundationVideo::UvcMediaFoundationVideo(int vendorId, int productId, int deviceId, size_t width, size_t height, int fps)
    : size_bytes(0),
      mediaSource(nullptr),
      sourceReader(nullptr),
      baseFilter(nullptr),
      ksControl(nullptr),
      ksControlNodeId(KS_CONTROL_NODE_ID_INVALID)
{
    if(FAILED(CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED)))
    {
        throw VideoException("CoInitializeEx failed");
    }
    if(FAILED(MFStartup(MF_VERSION)))
    {
        throw VideoException("MfStartup failed");
    }
    if(!FindDevice(vendorId, productId, deviceId))
    {
        throw VideoException("Unable to open UVC media source");
    }
    InitDevice(width, height, fps);
    // Frames carry a host reception timestamp (see GrabNext).
    device_properties[PANGO_HAS_TIMING_DATA] = true;
}
// Release all COM interfaces, then shut down Media Foundation and COM — the
// reverse order of the constructor.
UvcMediaFoundationVideo::~UvcMediaFoundationVideo()
{
    DeinitDevice();
    HRESULT hr = MFShutdown();
    if(FAILED(hr))
    {
        pango_print_warn("MFShutdown failed with result %X", hr);
    }
    CoUninitialize();
}
// No-op: the source reader delivers frames on demand via ReadSample.
void UvcMediaFoundationVideo::Start()
{
}
// No-op: teardown happens in DeinitDevice()/destructor.
void UvcMediaFoundationVideo::Stop()
{
}
//! Size of one frame in bytes (stride * height, computed in InitDevice).
size_t UvcMediaFoundationVideo::SizeBytes() const
{
    return size_bytes;
}
//! The single stream configured in InitDevice().
const std::vector<pangolin::StreamInfo>& UvcMediaFoundationVideo::Streams() const
{
    return streams;
}
// Read one sample from the source reader and copy its (contiguous) buffer
// into `image`. Returns false on end-of-stream, missing sample, or any COM
// failure. NOTE(review): `wait` is ignored — ReadSample is used synchronously.
bool UvcMediaFoundationVideo::GrabNext(unsigned char* image, bool wait)
{
    IMFSample* sample = nullptr;
    DWORD streamIndex = 0;
    DWORD flags = 0;
    LONGLONG timeStamp;
    HRESULT hr = sourceReader->ReadSample(
        (DWORD)MF_SOURCE_READER_FIRST_VIDEO_STREAM, 0, &streamIndex, &flags, &timeStamp, &sample);
    if(SUCCEEDED(hr))
    {
        if((flags & MF_SOURCE_READERF_ENDOFSTREAM) != 0)
        {
            return false;
        }
        if(!sample)
        {
            return false;
        }
    }
    IMFMediaBuffer* mediaBuffer = nullptr;
    if(SUCCEEDED(hr))
    {
        hr = sample->ConvertToContiguousBuffer(&mediaBuffer);
    }
    if(SUCCEEDED(hr))
    {
        // Use the 2D buffer interface if it's available
        IMF2DBuffer* mediaBuffer2d = nullptr;
        hr = mediaBuffer->QueryInterface(&mediaBuffer2d);
        if(SUCCEEDED(hr))
        {
            hr = mediaBuffer2d->ContiguousCopyTo(image, (DWORD)size_bytes);
            mediaBuffer2d->Release();
            mediaBuffer2d = nullptr;
        }
        else
        {
            // No 2D buffer is available
            byte* buffer;
            DWORD bufferSize = 0;
            hr = mediaBuffer->Lock(&buffer, nullptr, &bufferSize);
            if(SUCCEEDED(hr))
            {
                // Guard against a device buffer smaller than one frame.
                size_t copySize = std::min((size_t)bufferSize, size_bytes);
                memcpy(image, buffer, copySize);
                mediaBuffer->Unlock();
            }
        }
    }
    if(SUCCEEDED(hr))
    {
        using namespace std::chrono;
        // Timestamp with host reception time (device timestamp not used).
        frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(pangolin::Time_us(pangolin::TimeNow()));
    }
    // Release COM objects regardless of success.
    if(mediaBuffer)
    {
        mediaBuffer->Release();
        mediaBuffer = nullptr;
    }
    if(sample)
    {
        sample->Release();
        sample = nullptr;
    }
    return SUCCEEDED(hr);
}
// No dedicated "newest" path: simply forwards to GrabNext.
bool UvcMediaFoundationVideo::GrabNewest(unsigned char* image, bool wait)
{
    return GrabNext(image, wait);
}
// Issue a vendor extension-unit property request (OV580 GUID) through the
// kernel-streaming topology node discovered in FindDevice. Returns 0 on
// success, -1 on failure or when no extension node is available.
// NOTE(review): `unit` and `ctrl` are not forwarded — the property Id is
// hard-coded to 2; confirm whether that is device-specific by design.
int UvcMediaFoundationVideo::IoCtrl(uint8_t unit, uint8_t ctrl, unsigned char* data, int len, UvcRequestCode req_code)
{
    if(!ksControl || ksControlNodeId == KS_CONTROL_NODE_ID_INVALID)
    {
        return -1;
    }
    HRESULT hr;
    KSP_NODE s = {};
    ULONG ulBytesReturned;
    s.Property.Set = GUID_EXTENSION_UNIT_DESCRIPTOR_OV580;
    s.Property.Id = 2;
    s.NodeId = ksControlNodeId;
    s.Property.Flags = KSPROPERTY_TYPE_TOPOLOGY;
    if(req_code == UVC_GET_CUR)
    {
        s.Property.Flags |= KSPROPERTY_TYPE_GET;
    }
    else if(req_code == UVC_SET_CUR)
    {
        s.Property.Flags |= KSPROPERTY_TYPE_SET;
    }
    hr = ksControl->KsProperty((PKSPROPERTY)&s, sizeof(s), &data[0], len, &ulBytesReturned);
    if(FAILED(hr))
    {
        pango_print_error("KsProperty failed on UVC device");
        return -1;
    }
    return 0;
}
// Stub: exposure readback is not supported by this backend yet.
bool UvcMediaFoundationVideo::GetExposure(int& exp_us)
{
    pango_print_warn("GetExposure not implemented for UvcMediaFoundationVideo");
    return false;
}
// Stub: exposure control is not supported by this backend yet.
bool UvcMediaFoundationVideo::SetExposure(int exp_us)
{
    pango_print_warn("SetExposure not implemented for UvcMediaFoundationVideo");
    return false;
}
// Stub: gain readback is not supported by this backend yet.
bool UvcMediaFoundationVideo::GetGain(float& gain)
{
    pango_print_warn("GetGain not implemented for UvcMediaFoundationVideo");
    return false;
}
// Stub: gain control is not supported by this backend yet.
bool UvcMediaFoundationVideo::SetGain(float gain)
{
    pango_print_warn("SetGain not implemented for UvcMediaFoundationVideo");
    return false;
}
//! Access JSON properties of the device (timing flag set in the constructor).
const picojson::value& UvcMediaFoundationVideo::DeviceProperties() const
{
    return device_properties;
}
//! Access JSON properties of the most recently captured frame (host
//! reception timestamp set in GrabNext).
const picojson::value& UvcMediaFoundationVideo::FrameProperties() const
{
    return frame_properties;
}
/// Enumerate Media Foundation video capture devices, activate the
/// `deviceId`-th one matching vendorId/productId (0 = wildcard), then locate
/// the matching DirectShow filter to obtain the kernel-streaming extension
/// node used by IoCtrl. Returns true on success.
bool UvcMediaFoundationVideo::FindDevice(int vendorId, int productId, int deviceId)
{
    // Create attributes for finding UVC devices
    IMFAttributes* searchAttributes = nullptr;
    HRESULT hr = MFCreateAttributes(&searchAttributes, 1);
    if(FAILED(hr))
    {
        pango_print_error("Unable to create UVC device search attributes");
    }
    // Request video capture devices
    if(SUCCEEDED(hr))
    {
        hr = searchAttributes->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
                                       MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID);
        if(FAILED(hr))
        {
            pango_print_error("Unable to set UVC source type attribute");
        }
    }
    // Enumerate the devices
    IMFActivate** devices = nullptr;
    UINT32 deviceCount = 0;
    if(SUCCEEDED(hr))
    {
        hr = MFEnumDeviceSources(searchAttributes, &devices, &deviceCount);
        if(FAILED(hr))
        {
            pango_print_error("Unable to enumerate UVC device sources");
        }
    }
    std::wstring symLink;
    bool activatedDevice = false;
    if(SUCCEEDED(hr))
    {
        for(UINT32 i = 0; i < deviceCount; ++i)
        {
            // Get this device's sym link
            WCHAR* symLinkWCStr = nullptr;
            UINT32 symLinkLength = 0;
            hr = devices[i]->GetAllocatedString(
                MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &symLinkWCStr, &symLinkLength);
            if(FAILED(hr))
            {
                hr = S_OK;
                continue;
            }
            std::wstring checkSymLink(symLinkWCStr);
            // Check if this device matches the requested vendor ID and product ID
            if(!DeviceMatches(checkSymLink, vendorId, productId))
            {
                continue;
            }
            if(deviceId == i)
            {
                hr = devices[i]->ActivateObject(IID_PPV_ARGS(&mediaSource));
                activatedDevice = SUCCEEDED(hr);
                if(activatedDevice)
                {
                    symLink = std::move(checkSymLink);
                }
                break;
            }
        }
        if(!activatedDevice)
        {
            pango_print_error("Unable to activate UVC device source");
            hr = E_FAIL;
        }
    }
    for(UINT32 i = 0; i < deviceCount; ++i)
    {
        devices[i]->Release();
        devices[i] = nullptr;
    }
    // BUGFIX: free the array BEFORE nulling the pointer — the original set
    // `devices = nullptr;` first, so CoTaskMemFree always received nullptr
    // and the activation array leaked.
    CoTaskMemFree(devices);
    devices = nullptr;
    if(searchAttributes != nullptr)
    {
        searchAttributes->Release();
        searchAttributes = nullptr;
    }
    // Find the DirectShow device
    ICreateDevEnum* dshowDevices = nullptr;
    if(SUCCEEDED(hr))
    {
        hr = CoCreateInstance(CLSID_SystemDeviceEnum, 0, CLSCTX_INPROC, IID_ICreateDevEnum, (void**)&dshowDevices);
    }
    IEnumMoniker* videoInputEnumerator = nullptr;
    if(SUCCEEDED(hr))
    {
        hr = dshowDevices->CreateClassEnumerator(CLSID_VideoInputDeviceCategory, &videoInputEnumerator, 0);
    }
    if(SUCCEEDED(hr))
    {
        IMoniker* moniker = nullptr;
        while((hr = videoInputEnumerator->Next(1, &moniker, 0)) == S_OK)
        {
            IPropertyBag* propertyBag = nullptr;
            hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, (void**)&propertyBag);
            if(FAILED(hr))
            {
                moniker->Release();
                moniker = nullptr;
                continue;
            }
            VARIANT variantPath;
            VariantInit(&variantPath);
            hr = propertyBag->Read(L"DevicePath", &variantPath, nullptr);
            bool pathMatches;
            if(SUCCEEDED(hr) && variantPath.vt == VT_BSTR)
            {
                // Determine if this is the correct device by comparing its path against the source's symbolic link
                // This breaks the rules, but it seems to be the only way to make sure it's the correct device
                // DirectShow and MediaFoundation appear to each create their own symbolic link which contains a GUID
                // Ignore the GUID portion of the link, leaving just the path information
                size_t braceOffset = symLink.find(L'{');
                pathMatches = 0 == std::wcsncmp(symLink.c_str(), variantPath.bstrVal, braceOffset);
            }
            else
            {
                pathMatches = false;
            }
            VARIANT variantFriendlyName;
            VariantInit(&variantFriendlyName);
            hr = propertyBag->Read(L"FriendlyName", &variantFriendlyName, nullptr);
            // BUGFIX: release per-iteration COM/VARIANT resources — the
            // original never called VariantClear or propertyBag->Release(),
            // leaking a BSTR and a property bag per enumerated device.
            VariantClear(&variantFriendlyName);
            VariantClear(&variantPath);
            propertyBag->Release();
            propertyBag = nullptr;
            if(!pathMatches)
            {
                moniker->Release();
                moniker = nullptr;
                continue;
            }
            // Found the correct video input
            break;
        }
        if(moniker)
        {
            hr = moniker->BindToObject(0, 0, IID_IBaseFilter, (void**)&baseFilter);
        }
        else
        {
            hr = E_FAIL;
        }
        // Walk the filter's kernel-streaming topology looking for the
        // device-specific (extension unit) node used by IoCtrl.
        IKsTopologyInfo* ksTopologyInfo = nullptr;
        if(SUCCEEDED(hr))
        {
            hr = baseFilter->QueryInterface(__uuidof(IKsTopologyInfo), (void**)&ksTopologyInfo);
        }
        DWORD numberOfNodes = 0;
        if(SUCCEEDED(hr))
        {
            hr = ksTopologyInfo->get_NumNodes(&numberOfNodes);
        }
        GUID nodeGuid;
        for(DWORD nodeIndex = 0; nodeIndex < numberOfNodes; ++nodeIndex)
        {
            if(FAILED(ksTopologyInfo->get_NodeType(nodeIndex, &nodeGuid)))
            {
                continue;
            }
            if(nodeGuid == KSNODETYPE_DEV_SPECIFIC)
            {
                // This is the extension node
                IKsNodeControl* pUnknown = nullptr;
                hr = ksTopologyInfo->CreateNodeInstance(nodeIndex, __uuidof(IUnknown), (void**)&pUnknown);
                if(SUCCEEDED(hr) && pUnknown != nullptr)
                {
                    hr = pUnknown->QueryInterface(__uuidof(IKsControl), (void**)&ksControl);
                }
                if(SUCCEEDED(hr))
                {
                    ksControlNodeId = nodeIndex;
                }
                if(pUnknown)
                {
                    pUnknown->Release();
                    pUnknown = nullptr;
                }
            }
        }
        if(ksTopologyInfo)
        {
            ksTopologyInfo->Release();
            ksTopologyInfo = nullptr;
        }
        if(moniker)
        {
            moniker->Release();
            moniker = nullptr;
        }
    }
    if(videoInputEnumerator)
    {
        videoInputEnumerator->Release();
    }
    if(dshowDevices)
    {
        dshowDevices->Release();
    }
    return SUCCEEDED(hr);
}
/// Create a source reader for the activated media source and pick the native
/// media type whose resolution is closest to the requested width/height.
/// Updates width/height/stride to the chosen mode and registers one stream.
void UvcMediaFoundationVideo::InitDevice(size_t width, size_t height, int fps)
{
    HRESULT hr = MFCreateSourceReaderFromMediaSource(mediaSource, nullptr, &sourceReader);
    if(FAILED(hr))
    {
        pango_print_error("Unable to create source reader from UVC media source");
    }
    // Find the closest supported resolution.
    // BUGFIX: initialise `stride` — the original left it uninitialised, so
    // `size_bytes = stride * height` below read an indeterminate value when
    // no media type was selected.
    UINT32 stride = 0;
    PixelFormat pixelFormat;
    if(SUCCEEDED(hr))
    {
        IMFMediaType* bestMediaType = nullptr;
        int bestError = std::numeric_limits<int>::max();
        UINT32 bestWidth = 0;
        UINT32 bestHeight = 0;
        UINT32 bestStride = 0;
        GUID bestGuid;
        for(DWORD i = 0;; ++i)
        {
            IMFMediaType* checkMediaType;
            hr = sourceReader->GetNativeMediaType(MF_SOURCE_READER_FIRST_VIDEO_STREAM, i, &checkMediaType);
            if(FAILED(hr))
            {
                if(hr == MF_E_NO_MORE_TYPES)
                {
                    // Reached the end of the available media types
                    hr = S_OK;
                }
                else
                {
                    pango_print_error("Failed to get UVC native media type");
                }
                break;
            }
            UINT32 checkWidth;
            UINT32 checkHeight;
            if(FAILED(MFGetAttributeSize(checkMediaType, MF_MT_FRAME_SIZE, &checkWidth, &checkHeight)))
            {
                checkWidth = 0;
                checkHeight = 0;
            }
            // Manhattan distance to the requested resolution.
            int checkError = abs(int(checkWidth) - int(width)) + abs(int(checkHeight) - int(height));
            if(bestError > checkError)
            {
                // Release the previous best
                if(bestMediaType)
                {
                    bestMediaType->Release();
                }
                bestError = checkError;
                bestMediaType = checkMediaType;
                bestWidth = checkWidth;
                bestHeight = checkHeight;
                if(FAILED(checkMediaType->GetGUID(MF_MT_SUBTYPE, &bestGuid)))
                {
                    bestGuid = {0};
                }
                bestStride = MFGetAttributeUINT32(checkMediaType, MF_MT_DEFAULT_STRIDE, checkWidth);
            }
            else
            {
                checkMediaType->Release();
            }
        }
        if(bestMediaType)
        {
            if(SUCCEEDED(hr))
            {
                sourceReader->SetCurrentMediaType(MF_SOURCE_READER_FIRST_VIDEO_STREAM, nullptr, bestMediaType);
                width = bestWidth;
                height = bestHeight;
                stride = bestStride;
                if(bestGuid == MFVideoFormat_YUY2)
                {
                    // Only the luma channel is exposed; see GrabNext copy path.
                    pixelFormat = PixelFormatFromString("GRAY8");
                }
                else
                {
                    pango_print_warn("Unexpected MFVideoFormat with FOURCC %c%c%c%c",
                                     (unsigned char)((bestGuid.Data1 >> 0) & 0xff),
                                     (unsigned char)((bestGuid.Data1 >> 8) & 0xff),
                                     (unsigned char)((bestGuid.Data1 >> 16) & 0xff),
                                     (unsigned char)((bestGuid.Data1 >> 24) & 0xff));
                }
            }
            bestMediaType->Release();
        }
        else
        {
            width = 0;
            height = 0;
        }
    }
    size_bytes = stride * height;
    streams.emplace_back(pixelFormat, width, height, stride);
}
// Release all COM interfaces acquired in FindDevice/InitDevice, in reverse
// acquisition order, and clear the stream list.
void UvcMediaFoundationVideo::DeinitDevice()
{
    if(ksControl)
    {
        ksControl->Release();
        ksControl = nullptr;
    }
    ksControlNodeId = KS_CONTROL_NODE_ID_INVALID;
    if(baseFilter)
    {
        baseFilter->Release();
        baseFilter = nullptr;
    }
    if(sourceReader)
    {
        sourceReader->Release();
        sourceReader = nullptr;
    }
    if(mediaSource)
    {
        // Media sources must be shut down before the final Release.
        mediaSource->Shutdown();
        mediaSource->Release();
        mediaSource = nullptr;
    }
    streams.clear();
}
// Test whether a device symbolic link carries the requested vendor and
// product IDs. A zero ID acts as a wildcard and matches anything.
// Example symlink:
// \\?\usb#vid_05a9&pid_0581&mi_00#6&2ff327a4&2&0000#{e5323777-f976-4f5b-9b55-b94699c46e44}\global
//          ^^^^^^^^ ^^^^^^^^
bool UvcMediaFoundationVideo::DeviceMatches(const std::wstring& symLink, int vendorId, int productId)
{
    const bool vendorOk = (vendorId == 0) || SymLinkIDMatches(symLink, L"vid_", vendorId);
    const bool productOk = (productId == 0) || SymLinkIDMatches(symLink, L"pid_", productId);
    return vendorOk && productOk;
}
// Check whether the hexadecimal number following the tag `idStr` (e.g.
// L"vid_") inside the symbolic link equals `id`.
bool UvcMediaFoundationVideo::SymLinkIDMatches(const std::wstring& symLink, const wchar_t* idStr, int id)
{
    // Locate the tag inside the symbolic link.
    const size_t tagPos = symLink.find(idStr);
    if(tagPos == std::wstring::npos)
    {
        return false;
    }
    // The hexadecimal ID immediately follows the tag.
    const wchar_t* idText = &symLink[tagPos + std::wcslen(idStr)];
    return std::wcstol(idText, nullptr, 16) == id;
}
PANGOLIN_REGISTER_FACTORY(UvcMediaFoundationVideo)
{
    // Factory for the "uvc:" URI scheme (Media Foundation backend).
    // vid/pid are hex strings (e.g. "0x05a9"); zero values act as wildcards.
    struct UvcVideoFactory final : public FactoryInterface<VideoInterface>
    {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override
        {
            int vendor_id = 0;
            int product_id = 0;
            std::istringstream(uri.Get<std::string>("vid", "0x0000")) >> std::hex >> vendor_id;
            std::istringstream(uri.Get<std::string>("pid", "0x0000")) >> std::hex >> product_id;
            const unsigned int device_num = uri.Get<int>("num", 0);
            const ImageDim dim = uri.Get<ImageDim>("size", ImageDim(640, 480));
            const unsigned int fps = uri.Get<unsigned int>("fps", 0); // 0 means unspecified
            return std::unique_ptr<VideoInterface>(new UvcMediaFoundationVideo(vendor_id, product_id, device_num, dim.x, dim.y, fps));
        }
    };
    FactoryRegistry<VideoInterface>::I().RegisterFactory(std::make_shared<UvcVideoFactory>(), 10, "uvc");
}

798
thirdparty/Pangolin/src/video/drivers/v4l.cpp vendored Executable file
View File

@@ -0,0 +1,798 @@
/* This file is part of the Pangolin Project,
* http://github.com/stevenlovegrove/Pangolin
* Copyright (c) 2011 Steven Lovegrove
*
* adapted from V4L2 video capture example
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/utils/timer.h>
#include <pangolin/video/drivers/v4l.h>
#include <pangolin/video/iostream_operators.h>
#include <assert.h>
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>
#include <malloc.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#define CLEAR(x) memset (&(x), 0, sizeof (x))
using namespace std;
namespace pangolin
{
// ioctl wrapper that transparently retries when the call is interrupted by a
// signal (EINTR), as recommended by the V4L2 capture example.
static int xioctl(int fd, int request, void* arg)
{
    int result = ioctl(fd, request, arg);
    while (result == -1 && errno == EINTR) {
        result = ioctl(fd, request, arg);
    }
    return result;
}
// Decode a V4L2 FourCC code (four characters packed little-endian into an
// int32) into its readable 4-character string, e.g. V4L2_PIX_FMT_YUYV -> "YUYV".
inline std::string V4lToString(int32_t v)
{
    // v = ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
    std::string fourcc;
    fourcc.reserve(4);
    for (int byte = 0; byte < 4; ++byte) {
        fourcc.push_back(static_cast<char>((v >> (8 * byte)) & 0xff));
    }
    return fourcc;
}
// Open and configure the V4L2 device at dev_name, then begin streaming.
// io selects the capture transport (read / mmap / userptr).
// iwidth/iheight of 0 keep the driver's currently configured format.
V4lVideo::V4lVideo(const char* dev_name, io_method io, unsigned iwidth, unsigned iheight)
    : io(io), fd(-1), buffers(0), n_buffers(0), running(false)
{
    open_device(dev_name);
    init_device(dev_name,iwidth,iheight,0);
    InitPangoDeviceProperties();
    Start();
}
// Stop streaming and release all device resources.
// Stop()/uninit_device()/close_device() can throw VideoException; a destructor
// must never propagate an exception (it would call std::terminate during stack
// unwinding), so teardown failures are reported as warnings instead.
V4lVideo::~V4lVideo()
{
    try {
        if(running)
        {
            Stop();
        }
        uninit_device();
        close_device();
    } catch(const std::exception& e) {
        pango_print_warn("V4lVideo::~V4lVideo() ignoring error during teardown: %s\n", e.what());
    }
}
// Populate the device-properties JSON advertised to Pangolin consumers.
void V4lVideo::InitPangoDeviceProperties()
{
    // Store camera details in device properties.
    // Frames carry a host reception timestamp (see ReadFrame).
    device_properties[PANGO_HAS_TIMING_DATA] = true;
}
// Description of the single stream this device produces (built in init_device).
const std::vector<StreamInfo>& V4lVideo::Streams() const
{
    return streams;
}
// Size in bytes of one captured frame (driver-reported sizeimage).
size_t V4lVideo::SizeBytes() const
{
    return image_size;
}
// Block (up to 2 seconds) until the next frame is available and copy it into
// image. Returns false on timeout; throws VideoException on terminal errors.
// The wait parameter is ignored: this implementation always waits via select.
bool V4lVideo::GrabNext( unsigned char* image, bool /*wait*/ )
{
    for (;;) {
        fd_set fds;
        struct timeval tv;
        int r;

        FD_ZERO (&fds);
        FD_SET (fd, &fds);

        /* Timeout. */
        tv.tv_sec = 2;
        tv.tv_usec = 0;

        r = select (fd + 1, &fds, NULL, NULL, &tv);

        if (-1 == r) {
            if (EINTR == errno)
                continue;

            // This is a terminal condition that must be propagated up.
            throw VideoException ("select", strerror(errno));
        }

        if (0 == r) {
            // Timeout has occurred - This is longer than any reasonable frame interval,
            // but not necessarily terminal, so return false to indicate that no frame was captured.
            return false;
        }

        if (ReadFrame(image))
            break;

        /* EAGAIN - continue select loop. */
    }
    return true;
}
// Grab the most recent frame. Currently identical to GrabNext: no queued
// frames are skipped.
bool V4lVideo::GrabNewest( unsigned char* image, bool wait )
{
    // TODO: Implement
    return GrabNext(image,wait);
}
// Attempt to read/dequeue one frame into image using the configured io method.
// Returns 1 on success, 0 when no data is ready yet (EAGAIN); throws
// VideoException on other errors.
int V4lVideo::ReadFrame(unsigned char* image)
{
    struct v4l2_buffer buf;
    unsigned int i;

    switch (io) {
    case IO_METHOD_READ:
        if (-1 == read (fd, buffers[0].start, buffers[0].length)) {
            switch (errno) {
            case EAGAIN:
                return 0;

            case EIO:
                /* Could ignore EIO, see spec. */
                /* fall through */

            default:
                throw VideoException("read", strerror(errno));
            }
        }
        // This is a hack, this ts should come from the device.
        frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(pangolin::Time_us(pangolin::TimeNow()));
        // process_image(buffers[0].start);
        memcpy(image,buffers[0].start,buffers[0].length);
        break;

    case IO_METHOD_MMAP:
        CLEAR (buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;

        // Dequeue a filled buffer from the driver.
        if (-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {
            switch (errno) {
            case EAGAIN:
                return 0;

            case EIO:
                /* Could ignore EIO, see spec. */
                /* fall through */

            default:
                throw VideoException("VIDIOC_DQBUF", strerror(errno));
            }
        }
        // This is a hack, this ts should come from the device.
        frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(pangolin::Time_us(pangolin::TimeNow()));

        assert (buf.index < n_buffers);
        // process_image (buffers[buf.index].start);
        memcpy(image,buffers[buf.index].start,buffers[buf.index].length);

        // Re-queue the buffer so the driver can refill it.
        if (-1 == xioctl (fd, VIDIOC_QBUF, &buf))
            throw VideoException("VIDIOC_QBUF", strerror(errno));
        break;

    case IO_METHOD_USERPTR:
        CLEAR (buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_USERPTR;

        if (-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {
            switch (errno) {
            case EAGAIN:
                return 0;

            case EIO:
                /* Could ignore EIO, see spec. */
                /* fall through */

            default:
                throw VideoException("VIDIOC_DQBUF", strerror(errno));
            }
        }
        // This is a hack, this ts should come from the device.
        frame_properties[PANGO_HOST_RECEPTION_TIME_US] = picojson::value(pangolin::Time_us(pangolin::TimeNow()));

        // Map the user pointer returned by the driver back to our buffer index.
        for (i = 0; i < n_buffers; ++i)
            if (buf.m.userptr == (unsigned long) buffers[i].start
                    && buf.length == buffers[i].length)
                break;

        assert (i < n_buffers);
        // process_image ((void *) buf.m.userptr);
        memcpy(image,(void *)buf.m.userptr,buf.length);

        if (-1 == xioctl (fd, VIDIOC_QBUF, &buf))
            throw VideoException("VIDIOC_QBUF", strerror(errno));
        break;
    }

    return 1;
}
// Stop streaming. For mmap/userptr io this issues VIDIOC_STREAMOFF; read io
// needs no action. Idempotent: does nothing when not currently running.
void V4lVideo::Stop()
{
    if(running) {
        enum v4l2_buf_type type;

        switch (io) {
        case IO_METHOD_READ:
            /* Nothing to do. */
            break;

        case IO_METHOD_MMAP:
        case IO_METHOD_USERPTR:
            type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            if (-1 == xioctl (fd, VIDIOC_STREAMOFF, &type))
                throw VideoException("VIDIOC_STREAMOFF", strerror(errno));
            break;
        }

        running = false;
    }
}
// Begin streaming. For mmap/userptr io all buffers are queued with the driver
// and VIDIOC_STREAMON is issued. Idempotent: does nothing when already running.
void V4lVideo::Start()
{
    if(!running) {
        unsigned int i;
        enum v4l2_buf_type type;

        switch (io) {
        case IO_METHOD_READ:
            /* Nothing to do. */
            break;

        case IO_METHOD_MMAP:
            // Queue every mmap'd buffer before enabling the stream.
            for (i = 0; i < n_buffers; ++i) {
                struct v4l2_buffer buf;
                CLEAR (buf);

                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_MMAP;
                buf.index = i;

                if (-1 == xioctl (fd, VIDIOC_QBUF, &buf))
                    throw VideoException("VIDIOC_QBUF", strerror(errno));
            }

            type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            if (-1 == xioctl (fd, VIDIOC_STREAMON, &type))
                throw VideoException("VIDIOC_STREAMON", strerror(errno));
            break;

        case IO_METHOD_USERPTR:
            // Queue every user-pointer buffer before enabling the stream.
            for (i = 0; i < n_buffers; ++i) {
                struct v4l2_buffer buf;
                CLEAR (buf);

                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
                buf.memory = V4L2_MEMORY_USERPTR;
                buf.index = i;
                buf.m.userptr = (unsigned long) buffers[i].start;
                buf.length = buffers[i].length;

                if (-1 == xioctl (fd, VIDIOC_QBUF, &buf))
                    throw VideoException("VIDIOC_QBUF", strerror(errno));
            }

            type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            if (-1 == xioctl (fd, VIDIOC_STREAMON, &type))
                throw VideoException ("VIDIOC_STREAMON", strerror(errno));
            break;
        }

        running = true;
    }
}
// Release the capture buffers allocated by init_read/init_mmap/init_userp.
// NOTE(review): `buffers` is freed but not reset to nullptr - this appears to
// assume a single call from the destructor; confirm before calling elsewhere.
void V4lVideo::uninit_device()
{
    unsigned int i;

    switch (io) {
    case IO_METHOD_READ:
        free (buffers[0].start);
        break;

    case IO_METHOD_MMAP:
        for (i = 0; i < n_buffers; ++i)
            if (-1 == munmap (buffers[i].start, buffers[i].length))
                throw VideoException ("munmap");
        break;

    case IO_METHOD_USERPTR:
        for (i = 0; i < n_buffers; ++i)
            free (buffers[i].start);
        break;
    }

    free (buffers);
}
// Allocate the single heap buffer used by read() io.
void V4lVideo::init_read(unsigned int buffer_size)
{
    buffers = (buffer*)calloc (1, sizeof (buffer));

    if (!buffers) {
        throw VideoException("Out of memory\n");
    }

    buffers[0].length = buffer_size;
    buffers[0].start = malloc (buffer_size);

    if (!buffers[0].start) {
        throw VideoException("Out of memory\n");
    }
}
// Request kernel-allocated capture buffers and mmap each one into this
// process. The driver may grant fewer buffers than the 4 requested; at least
// 2 are required for streaming.
void V4lVideo::init_mmap(const char* /*dev_name*/)
{
    struct v4l2_requestbuffers req;

    CLEAR (req);

    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;

    if (-1 == xioctl (fd, VIDIOC_REQBUFS, &req)) {
        if (EINVAL == errno) {
            throw VideoException("does not support memory mapping", strerror(errno));
        } else {
            throw VideoException ("VIDIOC_REQBUFS", strerror(errno));
        }
    }

    if (req.count < 2) {
        throw VideoException("Insufficient buffer memory");
    }

    buffers = (buffer*)calloc(req.count, sizeof(buffer));

    if (!buffers) {
        throw VideoException( "Out of memory\n");
    }

    // Query each granted buffer's offset/length and map it.
    for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
        struct v4l2_buffer buf;

        CLEAR (buf);

        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffers;

        if (-1 == xioctl (fd, VIDIOC_QUERYBUF, &buf))
            throw VideoException ("VIDIOC_QUERYBUF", strerror(errno));

        buffers[n_buffers].length = buf.length;
        buffers[n_buffers].start =
                mmap (NULL /* start anywhere */,
                      buf.length,
                      PROT_READ | PROT_WRITE /* required */,
                      MAP_SHARED /* recommended */,
                      fd, buf.m.offset);

        if (MAP_FAILED == buffers[n_buffers].start)
            throw VideoException ("mmap");
    }
}
// Allocate page-aligned user-pointer buffers and register the request with the
// driver. Fix: VIDIOC_REQBUFS may adjust req.count (grant fewer or more than
// the 4 requested); the previous code hardcoded 4 in the allocation and loop,
// which could queue buffers the driver never accepted. Honour req.count, and
// require at least 2 buffers, consistent with init_mmap.
void V4lVideo::init_userp(const char* /*dev_name*/, unsigned int buffer_size)
{
    struct v4l2_requestbuffers req;
    unsigned int page_size;

    page_size = getpagesize ();
    // Round the buffer size up to a whole number of pages.
    buffer_size = (buffer_size + page_size - 1) & ~(page_size - 1);

    CLEAR (req);

    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_USERPTR;

    if (-1 == xioctl (fd, VIDIOC_REQBUFS, &req)) {
        if (EINVAL == errno) {
            throw VideoException( "Does not support user pointer i/o", strerror(errno));
        } else {
            throw VideoException ("VIDIOC_REQBUFS", strerror(errno));
        }
    }

    if (req.count < 2) {
        throw VideoException("Insufficient buffer memory");
    }

    buffers = (buffer*)calloc(req.count, sizeof(buffer));

    if (!buffers) {
        throw VideoException( "Out of memory\n");
    }

    for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
        buffers[n_buffers].length = buffer_size;
        buffers[n_buffers].start = memalign (/* boundary */ page_size,
                                             buffer_size);

        if (!buffers[n_buffers].start) {
            throw VideoException( "Out of memory\n");
        }
    }
}
// Query device capabilities, configure cropping, pixel format and frame rate,
// allocate capture buffers for the selected io method, and publish the
// resulting StreamInfo. Sets width, height, image_size and fps members.
// Fixes: (1) VIDIOC_S_PARM was passed &fmt (the v4l2_format struct) instead of
// &strm (the v4l2_streamparm struct), so the requested frame rate was never
// applied; (2) the "format not recognised" message had an unterminated quote.
void V4lVideo::init_device(const char* dev_name, unsigned iwidth, unsigned iheight, unsigned ifps, unsigned v4l_format, v4l2_field field)
{
    struct v4l2_capability cap;
    struct v4l2_cropcap cropcap;
    struct v4l2_crop crop;
    struct v4l2_format fmt;
    struct v4l2_streamparm strm;
    unsigned int min;

    if (-1 == xioctl (fd, VIDIOC_QUERYCAP, &cap)) {
        if (EINVAL == errno) {
            throw VideoException("Not a V4L2 device", strerror(errno));
        } else {
            throw VideoException ("VIDIOC_QUERYCAP", strerror(errno));
        }
    }

    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        throw VideoException("Not a video capture device");
    }

    // Verify the device supports the requested io method.
    switch (io) {
    case IO_METHOD_READ:
        if (!(cap.capabilities & V4L2_CAP_READWRITE)) {
            throw VideoException("Does not support read i/o");
        }
        break;

    case IO_METHOD_MMAP:
    case IO_METHOD_USERPTR:
        if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
            throw VideoException("Does not support streaming i/o");
        }
        break;
    }

    /* Select video input, video standard and tune here. */
    CLEAR (cropcap);

    cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    if (0 == xioctl (fd, VIDIOC_CROPCAP, &cropcap)) {
        crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        crop.c = cropcap.defrect; /* reset to default */

        if (-1 == xioctl (fd, VIDIOC_S_CROP, &crop)) {
            switch (errno) {
            case EINVAL:
                /* Cropping not supported. */
                break;
            default:
                /* Errors ignored. */
                break;
            }
        }
    } else {
        /* Errors ignored. */
    }

    CLEAR (fmt);

    if(iwidth!=0 && iheight!=0) {
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width = iwidth;
        fmt.fmt.pix.height = iheight;
        fmt.fmt.pix.pixelformat = v4l_format;
        fmt.fmt.pix.field = field;

        if (-1 == xioctl (fd, VIDIOC_S_FMT, &fmt))
            throw VideoException("VIDIOC_S_FMT", strerror(errno));
    }else{
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        /* Preserve original settings as set by v4l2-ctl for example */
        if (-1 == xioctl(fd, VIDIOC_G_FMT, &fmt))
            throw VideoException("VIDIOC_G_FMT", strerror(errno));
    }

    /* Buggy driver paranoia. */
    min = fmt.fmt.pix.width * 2;
    if (fmt.fmt.pix.bytesperline < min)
        fmt.fmt.pix.bytesperline = min;
    min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
    if (fmt.fmt.pix.sizeimage < min)
        fmt.fmt.pix.sizeimage = min;

    /* Note VIDIOC_S_FMT may change width and height. */
    width = fmt.fmt.pix.width;
    height = fmt.fmt.pix.height;
    image_size = fmt.fmt.pix.sizeimage;

    if(ifps!=0)
    {
        CLEAR(strm);
        strm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        strm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        strm.parm.capture.timeperframe.numerator = 1;
        strm.parm.capture.timeperframe.denominator = ifps;

        // BUGFIX: pass the streamparm struct (was incorrectly &fmt).
        if (-1 == xioctl (fd, VIDIOC_S_PARM, &strm))
            throw VideoException("VIDIOC_S_PARM", strerror(errno));

        // The driver may adjust the requested rate; report what it chose.
        fps = (float)strm.parm.capture.timeperframe.denominator / strm.parm.capture.timeperframe.numerator;
    }else{
        fps = 0;
    }

    switch (io) {
    case IO_METHOD_READ:
        init_read (fmt.fmt.pix.sizeimage);
        break;

    case IO_METHOD_MMAP:
        init_mmap (dev_name );
        break;

    case IO_METHOD_USERPTR:
        init_userp (dev_name, fmt.fmt.pix.sizeimage);
        break;
    }

    // Map the negotiated V4L2 pixel format onto a Pangolin pixel format name.
    uint32_t bit_depth = 0;
    std::string spix="GRAY8";
    if(fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_GREY) {
        spix="GRAY8";
        bit_depth = 8;
    }else if(fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
        spix="YUYV422";
        bit_depth = 8;
    } else if(fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY) {
        spix="UYVY422";
        bit_depth = 8;
    }else if(fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_Y16) {
        spix="GRAY16LE";
        bit_depth = 16;
    }else if(fmt.fmt.pix.pixelformat == V4L2_PIX_FMT_Y10) {
        spix="GRAY10";
        bit_depth = 10;
    }else{
        // TODO: Add method to translate from V4L to FFMPEG type.
        std::cerr << "V4L Format " << V4lToString(fmt.fmt.pix.pixelformat)
                  << " not recognised. Defaulting to '" << spix << "'" << std::endl;
    }

    PixelFormat pfmt = PixelFormatFromString(spix);
    pfmt.channel_bit_depth = bit_depth;
    const StreamInfo stream_info(pfmt, width, height, (width*pfmt.bpp)/8, 0);
    streams.push_back(stream_info);
}
// Request an absolute exposure time for the camera.
// V4L2 expresses exposure in 100us units, so exposure_us is converted.
// Returns false (with a warning printed) if the ioctl fails.
bool V4lVideo::SetExposure(int exposure_us)
{
    struct v4l2_ext_control ctrl = {};
    ctrl.id = V4L2_CID_EXPOSURE_ABSOLUTE;
    // v4l specifies exposure in 100us units
    ctrl.value = int(exposure_us / 100.0);

    struct v4l2_ext_controls ctrls = {};
    ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
    ctrls.count = 1;
    ctrls.controls = &ctrl;

    const bool ok = (xioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) != -1);
    if (!ok) {
        pango_print_warn("V4lVideo::SetExposure() ioctl error: %s\n", strerror(errno));
    }
    return ok;
}
// Read the camera's current absolute exposure time into exposure_us (in us).
// Returns false (with a warning printed) if the ioctl fails; exposure_us is
// left untouched in that case.
bool V4lVideo::GetExposure(int& exposure_us)
{
    struct v4l2_ext_control ctrl = {};
    ctrl.id = V4L2_CID_EXPOSURE_ABSOLUTE;

    struct v4l2_ext_controls ctrls = {};
    ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
    ctrls.count = 1;
    ctrls.controls = &ctrl;

    if (xioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == -1) {
        pango_print_warn("V4lVideo::GetExposure() ioctl error: %s\n", strerror(errno));
        return false;
    }

    // v4l specifies exposure in 100us units
    exposure_us = ctrls.controls->value * 100;
    return true;
}
// Set the camera gain control. Returns false (with a warning) on ioctl error.
bool V4lVideo::SetGain(float gain)
{
    struct v4l2_control control;
    control.id = V4L2_CID_GAIN;
    control.value = gain;

    const bool ok = (xioctl(fd, VIDIOC_S_CTRL, &control) != -1);
    if (!ok) {
        pango_print_warn("V4lVideo::SetGain() ioctl error: %s\n", strerror(errno));
    }
    return ok;
}
// Read the camera gain control into gain. Returns false (with a warning) on
// ioctl error; gain is left untouched in that case.
bool V4lVideo::GetGain(float& gain)
{
    struct v4l2_control control;
    control.id = V4L2_CID_GAIN;

    if (xioctl(fd, VIDIOC_G_CTRL, &control) == -1) {
        pango_print_warn("V4lVideo::GetGain() ioctl error: %s\n", strerror(errno));
        return false;
    }

    gain = control.value;
    return true;
}
// Close the device file descriptor if one is open.
// Fix: safe to call when the device was never opened (fd == -1) - previously
// close(-1) would fail and throw during teardown after a failed open.
// Error detail (strerror) is now included, consistent with the other throws.
void V4lVideo::close_device()
{
    if (fd == -1)
        return;

    if (-1 == close (fd))
        throw VideoException("close", strerror(errno));

    fd = -1;
}
// Validate that dev_name is a character device and open it in non-blocking
// mode. Fix: the open-failure exception now carries the errno description,
// consistent with the other error paths in this file.
void V4lVideo::open_device(const char* dev_name)
{
    struct stat st;

    if (-1 == stat (dev_name, &st)) {
        throw VideoException("Cannot stat device", strerror(errno));
    }

    if (!S_ISCHR (st.st_mode)) {
        throw VideoException("Not device");
    }

    // O_NONBLOCK lets GrabNext multiplex with select() instead of blocking reads.
    fd = open (dev_name, O_RDWR /* required */ | O_NONBLOCK, 0);

    if (-1 == fd) {
        throw VideoException("Cannot open device", strerror(errno));
    }
}
int V4lVideo::IoCtrl(uint8_t unit, uint8_t ctrl, unsigned char* data, int len, UvcRequestCode req_code)
{
struct uvc_xu_control_query xu;
xu.unit = unit;
xu.selector = ctrl;
xu.size = len;
xu.data = data;
xu.query = req_code;
int ret = ioctl(fd, UVCIOC_CTRL_QUERY, &xu);
if (ret == -1) {
pango_print_warn("V4lVideo::IoCtrl() ioctl error: %d\n", errno);
return ret;
}
return 0;
}
//! Access JSON properties of device (set up in InitPangoDeviceProperties).
const picojson::value& V4lVideo::DeviceProperties() const
{
    return device_properties;
}
//! Access JSON properties of most recently captured frame
//! (host reception timestamp, written in ReadFrame).
const picojson::value& V4lVideo::FrameProperties() const
{
    return frame_properties;
}
// Registers the "v4l" URI scheme, e.g. v4l:///dev/video0?size=640x480&method=mmap
// Fixes: the raw pointer is wrapped in unique_ptr immediately (so a throw from
// SetExposure/SetGain cannot leak it), and the redundant static_casts of an
// already-typed V4lVideo* are removed. A non-null check on the result of `new`
// was also redundant (new throws on failure).
PANGOLIN_REGISTER_FACTORY(V4lVideo)
{
    struct V4lVideoFactory final : public FactoryInterface<VideoInterface> {
        std::unique_ptr<VideoInterface> Open(const Uri& uri) override {
            const std::string smethod = uri.Get<std::string>("method","mmap");
            const ImageDim desired_dim = uri.Get<ImageDim>("size", ImageDim(0,0));

            // Map the method name onto the io_method enum (default: mmap).
            io_method method = IO_METHOD_MMAP;
            if(smethod == "read" ) {
                method = IO_METHOD_READ;
            }else if(smethod == "mmap" ) {
                method = IO_METHOD_MMAP;
            }else if(smethod == "userptr" ) {
                method = IO_METHOD_USERPTR;
            }

            std::unique_ptr<V4lVideo> video(
                new V4lVideo(uri.url.c_str(), method, desired_dim.x, desired_dim.y));

            if(uri.Contains("ExposureTime")) {
                video->SetExposure(uri.Get<int>("ExposureTime", 10000));
            }
            if(uri.Contains("Gain")) {
                video->SetGain(uri.Get<int>("Gain", 1));
            }
            return video;
        }
    };

    auto factory = std::make_shared<V4lVideoFactory>();
    FactoryRegistry<VideoInterface>::I().RegisterFactory(factory, 10, "v4l");
}
}

View File

@@ -0,0 +1,62 @@
#include <pangolin/video/stream_encoder_factory.h>
#include <cctype>
#include <pangolin/utils/file_utils.h>
#include <pangolin/utils/type_convert.h>
namespace pangolin {
// Meyers-singleton accessor for the process-wide encoder factory.
StreamEncoderFactory& StreamEncoderFactory::I()
{
    static StreamEncoderFactory instance;
    return instance;
}
// Parsed components of an encoder spec string such as "jpg80" or "png".
struct EncoderDetails
{
    std::string encoder_name;   // e.g. "png", "jpg" (lower-cased)
    ImageFileType file_type;    // resolved from encoder_name
    float quality;              // lossy quality; 100 when unspecified
};
// Split an encoder spec like "jpg80" into its name ("jpg") and optional
// trailing quality (80). Specs without trailing digits get quality 100.
// Fixes: the loop dereferenced *rit BEFORE checking rit != rend(), which is
// undefined behaviour for an empty or all-digit spec; also std::isdigit on a
// raw (possibly negative) char is UB, so the char is cast to unsigned char.
inline EncoderDetails EncoderDetailsFromString(const std::string& encoder_spec)
{
    // Walk backwards over the trailing digits (if any).
    std::string::const_reverse_iterator rit = encoder_spec.rbegin();
    for(; rit != encoder_spec.rend() && std::isdigit(static_cast<unsigned char>(*rit)); ++rit );

    // png, tga, ...
    std::string encoder_name(encoder_spec.begin(), rit.base());
    ToLower(encoder_name);

    // Quality of encoding for lossy encoders [0..100]
    float quality = 100.0;
    if(rit != encoder_spec.rbegin()) {
        quality = pangolin::Convert<float,std::string>::Do(std::string(rit.base(),encoder_spec.end()));
    }

    return { encoder_name, NameToImageFileType(encoder_name), quality};
}
// Build a callable that encodes an image of pixel format fmt to a stream,
// using the file type and quality parsed from encoder_spec (e.g. "jpg80").
// Throws std::invalid_argument for an unrecognised encoder name.
ImageEncoderFunc StreamEncoderFactory::GetEncoder(const std::string& encoder_spec, const PixelFormat& fmt)
{
    const EncoderDetails encdet = EncoderDetailsFromString(encoder_spec);
    if(encdet.file_type == ImageFileTypeUnknown)
        throw std::invalid_argument("Unsupported encoder format: " + encoder_spec);

    // Capture by value: the returned closure must outlive this call.
    return [fmt,encdet](std::ostream& os, const Image<unsigned char>& img){
        SaveImage(img,fmt,os,encdet.file_type,true,encdet.quality);
    };
}
// Build a callable that decodes an image of the format described by
// encoder_spec from a stream.
// NOTE(review): failure here is asserted via PANGO_ENSURE while GetEncoder
// throws std::invalid_argument - confirm whether this asymmetry is intended.
ImageDecoderFunc StreamEncoderFactory::GetDecoder(const std::string& encoder_spec, const PixelFormat& fmt)
{
    const EncoderDetails encdet = EncoderDetailsFromString(encoder_spec);
    PANGO_ENSURE(encdet.file_type != ImageFileTypeUnknown);

    return [fmt,encdet](std::istream& is){
        return LoadImage(is,encdet.file_type);
    };
}
}

80
thirdparty/Pangolin/src/video/video.cpp vendored Normal file
View File

@@ -0,0 +1,80 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/factory/factory_registry.h>
#include <pangolin/video/video.h>
#include <pangolin/video/video_output.h>
#include <pangolin/video_drivers.h>
namespace pangolin
{
// Tracks whether the built-in video drivers have been registered yet.
bool one_time_init = false;
// Convenience overload: parse the URI string then open the video source.
std::unique_ptr<VideoInterface> OpenVideo(const std::string& str_uri)
{
    return OpenVideo( ParseUri(str_uri) );
}
// Open a video source for the given URI via the factory registry.
// Built-in drivers are registered lazily on first use.
// Throws VideoExceptionNoKnownHandler when no factory accepts the scheme.
std::unique_ptr<VideoInterface> OpenVideo(const Uri& uri)
{
    if(!one_time_init) {
        one_time_init = LoadBuiltInVideoDrivers();
    }

    std::unique_ptr<VideoInterface> video =
            FactoryRegistry<VideoInterface>::I().Open(uri);

    if(!video) {
        throw VideoExceptionNoKnownHandler(uri.scheme);
    }

    return video;
}
// Convenience overload: parse the URI string then open the video output.
std::unique_ptr<VideoOutputInterface> OpenVideoOutput(const std::string& str_uri)
{
    return OpenVideoOutput( ParseUri(str_uri) );
}
// Open a video output (recorder) for the given URI via the factory registry.
// Built-in drivers are registered lazily on first use.
// NOTE(review): throws a plain VideoException here whereas OpenVideo throws
// VideoExceptionNoKnownHandler - confirm whether this should be unified.
std::unique_ptr<VideoOutputInterface> OpenVideoOutput(const Uri& uri)
{
    if(!one_time_init) {
        one_time_init = LoadBuiltInVideoDrivers();
    }

    std::unique_ptr<VideoOutputInterface> video =
            FactoryRegistry<VideoOutputInterface>::I().Open(uri);

    if(!video) {
        throw VideoException("No known video handler for URI '" + uri.scheme + "'");
    }

    return video;
}
}

View File

@@ -0,0 +1,220 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/video_input.h>
#include <pangolin/video/video_output.h>
namespace pangolin
{
// Default-construct with no source open; call Open() before use.
VideoInput::VideoInput()
    : frame_num(0), record_frame_skip(1), record_once(false), record_continuous(false)
{
}
// Construct and immediately open the input source; output_uri names the log
// destination used when recording is started.
VideoInput::VideoInput(
        const std::string& input_uri,
        const std::string& output_uri
        ) : frame_num(0), record_frame_skip(1), record_once(false), record_continuous(false)
{
    Open(input_uri, output_uri);
}
// Open the input source and remember the output URI for later recording.
// A "file" output scheme is promoted to "pango" (the default log format).
void VideoInput::Open(
        const std::string& input_uri,
        const std::string& output_uri
        )
{
    uri_input = ParseUri(input_uri);
    uri_output = ParseUri(output_uri);

    if (uri_output.scheme == "file") {
        // Default to pango output
        uri_output.scheme = "pango";
    }

    // Start off playing from video_src
    video_src = OpenVideo(input_uri);

    // Reset state
    frame_num = 0;
    videos.resize(1);
    videos[0] = video_src.get();
}
// Release the recorder first, then the source and the sub-video list.
void VideoInput::Close()
{
    // Reset this first so that recording data gets written out to disk ASAP.
    video_recorder.reset();

    video_src.reset();
    videos.clear();
}
// Ensure the recorder is flushed and the source released on destruction.
VideoInput::~VideoInput()
{
    Close();
}
// Path component of the output (recording) URI.
const std::string& VideoInput::LogFilename() const
{
    return uri_output.url;
}
// Mutable access to the output URI path, allowing the log target to be
// changed before recording starts.
std::string& VideoInput::LogFilename()
{
    return uri_output.url;
}
// Grab a frame into buffer and populate images with per-stream views into it.
// newest selects GrabNewest over GrabNext; wait is forwarded to the source.
// Returns true if a frame was captured; images is only modified on success.
// Improvement: reserve the view vector up front to avoid reallocation.
bool VideoInput::Grab( unsigned char* buffer, std::vector<Image<unsigned char> >& images, bool wait, bool newest)
{
    if( !video_src ) throw VideoException("No video source open");

    bool success;

    if(newest) {
        success = GrabNewest(buffer, wait);
    }else{
        success = GrabNext(buffer, wait);
    }

    if(success) {
        images.clear();
        images.reserve(Streams().size());
        for(size_t s=0; s < Streams().size(); ++s) {
            images.push_back(Streams()[s].StreamImage(buffer));
        }
    }

    return success;
}
// (Re)create the recorder for the stored output URI and configure it with the
// source's stream layout and device properties.
void VideoInput::InitialiseRecorder()
{
    video_recorder.reset();

    video_recorder = OpenVideoOutput(uri_output);
    video_recorder->SetStreams(
                video_src->Streams(), uri_input.full_uri,
                GetVideoDeviceProperties(video_src.get())
                );
}
// Begin continuous recording: every record_frame_skip-th grabbed frame is
// written to the recorder until Stop() is called.
void VideoInput::Record()
{
    // Switch sub-video
    videos.resize(1);
    videos[0] = video_src.get();

    // Initialise recorder and ensure src is started
    InitialiseRecorder();
    video_src->Start();

    frame_num = 0;
    record_continuous = true;
}
// Record just the next grabbed frame, appending to the existing log if one is
// already open. Cancels continuous recording mode.
void VideoInput::RecordOneFrame()
{
    // Append to existing video.
    if(!video_recorder) {
        InitialiseRecorder();
    }
    record_continuous = false;
    record_once = true;

    // Switch sub-video
    videos.resize(1);
    videos[0] = video_src.get();
}
// Frame buffer size of the underlying source. Throws if no source is open.
size_t VideoInput::SizeBytes() const
{
    if( !video_src ) throw VideoException("No video source open");
    return video_src->SizeBytes();
}
const std::vector<StreamInfo>& VideoInput::Streams() const
{
return video_src->Streams();
}
// Start (or resume) the underlying video source.
void VideoInput::Start()
{
    video_src->Start();
}
// If recording, finish the recording (flushing it to disk) but keep playing;
// otherwise stop the underlying source.
void VideoInput::Stop()
{
    if(IsRecording()) {
        video_recorder.reset();
    }else{
        video_src->Stop();
    }
}
// Grab the next frame from the source, writing it to the recorder when a
// one-shot or (skip-filtered) continuous recording is active.
bool VideoInput::GrabNext( unsigned char* image, bool wait )
{
    ++frame_num;
    const bool record_this_frame =
        record_once || (record_continuous && (frame_num % record_frame_skip) == 0);

    const bool success = video_src->GrabNext(image, wait);

    if(success && record_this_frame && video_recorder) {
        video_recorder->WriteStreams(image, GetVideoFrameProperties(video_src.get()) );
        record_once = false;
    }

    return success;
}
// Grab the most recent frame from the source, writing it to the recorder when
// a one-shot or (skip-filtered) continuous recording is active.
bool VideoInput::GrabNewest( unsigned char* image, bool wait )
{
    ++frame_num;
    const bool record_this_frame =
        record_once || (record_continuous && (frame_num % record_frame_skip) == 0);

    const bool success = video_src->GrabNewest(image, wait);

    if(success && record_this_frame && video_recorder) {
        video_recorder->WriteStreams(image, GetVideoFrameProperties(video_src.get()) );
        record_once = false;
    }

    return success;
}
// Record only every one_in_n_frames-th frame during continuous recording.
// Fix: a value of 0 is clamped to 1 (record every frame) - record_frame_skip
// is used as a modulus in GrabNext/GrabNewest, so 0 would divide by zero.
void VideoInput::SetTimelapse(size_t one_in_n_frames)
{
    record_frame_skip = (one_in_n_frames > 0) ? one_in_n_frames : 1;
}
// True while a recorder is attached (continuous or pending one-shot recording).
bool VideoInput::IsRecording() const
{
    return static_cast<bool>(video_recorder);
}
}

View File

@@ -0,0 +1,42 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011-2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/video_interface.h>
#include <pangolin/factory/factory_registry.h>
namespace pangolin
{
// Explicit specialization providing the process-wide factory registry for
// video input drivers.
template<>
FactoryRegistry<VideoInterface>& FactoryRegistry<VideoInterface>::I()
{
    // Singleton instance
    static FactoryRegistry instance;
    return instance;
}
}

View File

@@ -0,0 +1,137 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011-2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/video.h>
#include <pangolin/video/video_output.h>
#include <pangolin/video/drivers/pango_video_output.h>
#include <pangolin/utils/file_utils.h>
namespace pangolin
{
// Default-construct with no output driver open; call Open() before use.
VideoOutput::VideoOutput()
{
}
// Construct and immediately open the output driver for the given URI.
VideoOutput::VideoOutput(const std::string& uri)
{
    Open(uri);
}
// The recorder unique_ptr releases the underlying driver automatically.
VideoOutput::~VideoOutput()
{
}
// True when an output driver has been opened.
bool VideoOutput::IsOpen() const
{
    return recorder != nullptr;
}
// Close any current driver, then open a new one for the given URI.
void VideoOutput::Open(const std::string& str_uri)
{
    Close();
    uri = ParseUri(str_uri);
    recorder = OpenVideoOutput(uri);
}
// Release the output driver, flushing any pending data it holds.
void VideoOutput::Close()
{
    recorder.reset();
}
// Streams configured on the open driver.
// NOTE(review): dereferences recorder unchecked - requires IsOpen().
const std::vector<StreamInfo>& VideoOutput::Streams() const
{
    return recorder->Streams();
}
// Forward an explicit stream layout (plus source URI and device properties)
// to the open driver.
void VideoOutput::SetStreams(const std::vector<StreamInfo>& streams,
                             const std::string& uri,
                             const picojson::value& properties)
{
    recorder->SetStreams(streams, uri, properties);
}
// Write one frame (all streams packed into data) with optional per-frame
// JSON properties. Returns the driver's result code.
int VideoOutput::WriteStreams(const unsigned char* data, const picojson::value& frame_properties)
{
    return recorder->WriteStreams(data, frame_properties);
}
// True when the underlying driver writes to a pipe rather than a seekable file.
bool VideoOutput::IsPipe() const
{
    return recorder->IsPipe();
}
// Queue a stream description (with explicit row pitch) to be registered with
// the driver by the two-argument SetStreams overload.
void VideoOutput::AddStream(const PixelFormat& pf, size_t w, size_t h, size_t pitch)
{
    streams.emplace_back(pf, w, h, pitch, nullptr);
}
// Queue a stream description with a tightly-packed pitch (w * bytes-per-pixel).
void VideoOutput::AddStream(const PixelFormat& pf, size_t w, size_t h)
{
    AddStream(pf, w, h, w * pf.bpp / 8);
}
// Finalise the streams queued via AddStream: assign each a byte offset into
// the packed frame buffer, then register them with the driver.
void VideoOutput::SetStreams(const std::string& uri, const picojson::value& properties)
{
    size_t offset = 0;
    for(size_t i = 0; i < streams.size(); i++)
    {
        // The 'pointer' stored in StreamInfo is really the stream's byte
        // offset within the frame buffer, smuggled through the pointer field.
        streams[i] = StreamInfo(streams[i].PixFormat(),
                                streams[i].Width(),
                                streams[i].Height(),
                                streams[i].Pitch(),
                                (unsigned char*)offset);
        offset += streams[i].SizeBytes();
    }
    SetStreams(streams, uri, properties);
}
size_t VideoOutput::SizeBytes(void) const
{
size_t total = 0;
for(const StreamInfo& si : recorder->Streams())
total += si.SizeBytes();
return total;
}
std::vector<Image<unsigned char>> VideoOutput::GetOutputImages(unsigned char* buffer) const
{
std::vector<Image<unsigned char>> images;
for(size_t s = 0; s < recorder->Streams().size(); ++s)
{
images.push_back(recorder->Streams()[s].StreamImage(buffer));
}
return images;
}
// Resize the caller's buffer to hold one full frame, then return per-stream
// views into it. The views alias buffer and are invalidated by its reallocation.
std::vector<Image<unsigned char>> VideoOutput::GetOutputImages(std::vector<unsigned char>& buffer) const
{
    buffer.resize(SizeBytes());
    return GetOutputImages(buffer.data());
}
}

View File

@@ -0,0 +1,42 @@
/* This file is part of the Pangolin Project.
* http://github.com/stevenlovegrove/Pangolin
*
* Copyright (c) 2011-2013 Steven Lovegrove
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <pangolin/video/video_output_interface.h>
#include <pangolin/factory/factory_registry.h>
namespace pangolin
{
// Explicit specialization providing the process-wide factory registry for
// video output drivers.
template<>
FactoryRegistry<VideoOutputInterface>& FactoryRegistry<VideoOutputInterface>::I()
{
    // Singleton instance
    static FactoryRegistry instance;
    return instance;
}
}