I'm trying to display a mesh resource in Rviz. It should be pretty straightforward, as it was in ROS1, but I'm not sure why it is not working.
Here is a sample code I tried:
// meshpub.cpp
#include <chrono>
#include <rclcpp/rclcpp.hpp>
#include <std_msgs/msg/color_rgba.hpp>
#include <geometry_msgs/msg/pose.hpp>
#include <visualization_msgs/msg/marker.hpp>
#include <ament_index_cpp/get_package_share_directory.hpp>

using namespace std;
using namespace std::chrono_literals; // for the 200ms literal below

int main(int argc, char ** argv)
{
    (void) argc;
    (void) argv;
    rclcpp::init(argc, argv);
    rclcpp::Node node("shape_publisher");
    string base_frame = "base_link";
    string topic = "/marker";

    // Getting the model file path:
    auto package_share_directory = ament_index_cpp::get_package_share_directory("package");
    auto file_name = package_share_directory.append("/meshes/model.obj");

    auto qos = rclcpp::QoS(1000);
    auto publisher = node.create_publisher<visualization_msgs::msg::Marker>(topic, qos);

    RCLCPP_INFO(node.get_logger(), "Waiting for Rviz to load...");
    while (node.get_node_graph_interface()->count_subscribers(topic) == 0) {
        rclcpp::sleep_for(200ms);
    }

    // Creating the marker and initialising its fields
    geometry_msgs::msg::Pose pose;
    pose.position.x = 0;
    pose.position.y = 0;
    pose.position.z = 0;
    pose.orientation.x = 0;
    pose.orientation.y = 0;
    pose.orientation.z = 0;
    pose.orientation.w = 1; // identity quaternion (w must be 1, not 0)

    std_msgs::msg::ColorRGBA colour;
    colour.a = 1;
    colour.r = 1;
    colour.g = 0;
    colour.b = 0;

    visualization_msgs::msg::Marker marker;
    marker.header.frame_id = base_frame;
    marker.header.stamp = node.now();
    marker.action = visualization_msgs::msg::Marker::ADD;
    marker.type = visualization_msgs::msg::Marker::MESH_RESOURCE;
    marker.pose = pose;
    marker.id = 0;
    marker.mesh_resource = file_name;
    marker.scale.x = 2;
    marker.scale.y = 2;
    marker.scale.z = 2;
    marker.color = colour;

    RCLCPP_INFO(node.get_logger(), "Attempting to publish mesh");
    publisher->publish(marker);
    while (rclcpp::ok()); // busy-wait so the process stays alive after publishing
    return 0;
}
I have installed the meshes folder, which contains the 3D model files, into the package share directory. I have tried different model file formats like .obj, .fbx, .blend and .dae (all were supported in ROS1 Rviz), but Rviz refuses to display the mesh, and all I get is this heartwarming congratulatory message:
[INFO] [launch]: Default logging verbosity is set to INFO
[INFO] [rviz2-1]: process started with pid [4733]
[INFO] [meshpub-2]: process started with pid [4735]
[meshpub-2] [INFO] [1664444431.191595500] [meshpub]: Waiting for Rviz to load...
[rviz2-1] QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-amirint'
[rviz2-1] [INFO] [1664444431.789824400] [rviz2]: Stereo is NOT SUPPORTED
[rviz2-1] [INFO] [1664444431.790017200] [rviz2]: OpenGl version: 3.1 (GLSL 1.4)
[rviz2-1] [INFO] [1664444431.863061600] [rviz2]: Stereo is NOT SUPPORTED
[meshpub-2] [INFO] [1664444433.394043100] [meshpub]: Attempting to publish mesh
[rviz2-1] [ERROR] [1664444434.023035400] [rviz2]: Could not load resource [/mnt/e/avnv/ros2_visually/install/package/share/package/meshes/model.obj]: Unable to open file "/mnt/e/avnv/ros2_visually/install/package/share/package/meshes/model.obj".
Although /mnt/e/avnv/ros2_visually/install/package/share/package/meshes/model.obj is the correct path to the resource, the error message does not specify whether this is a "no such file or directory" or a "file format not supported" sort of problem. It only says Unable to open file ....
I'm using Ubuntu 20.04 on wsl2 with ROS_DISTRO=galactic.
File paths have to be in the form file:///path/to/file or package://path/to/file. I mistakenly assumed
ament_index_cpp::get_package_share_directory("package") would return the path in that form by default.
So just manually prepending file:// to the returned path string fixed the issue.
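In code, the fix is a one-line change. A minimal sketch, keeping the placeholder package and mesh names from above:

    // get_package_share_directory() returns a plain filesystem path,
    // so prepend the "file://" scheme to turn it into a URI Rviz can resolve.
    auto package_share_directory = ament_index_cpp::get_package_share_directory("package");
    auto file_name = "file://" + package_share_directory + "/meshes/model.obj";
    marker.mesh_resource = file_name;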
Related
I'm trying to run TensorRT inference in C++. Sometimes the code crashes when trying to build a new engine or to load the engine from file; it happens occasionally (sometimes it runs without any problem). I follow the steps below to prepare the network:
initLibNvInferPlugins(&gLogger.getTRTLogger(), "");
if (mParams.loadEngine.size() > 0)
{
    // Load a previously serialized engine from disk.
    std::vector<char> trtModelStream;
    size_t size{0};
    std::ifstream file(mParams.loadEngine, std::ios::binary);
    if (file.good())
    {
        file.seekg(0, file.end);
        size = file.tellg();
        file.seekg(0, file.beg);
        trtModelStream.resize(size);
        file.read(trtModelStream.data(), size);
        file.close();
    }

    IRuntime* infer_Runtime = nvinfer1::createInferRuntime(gLogger);
    if (mParams.dlaCore >= 0)
    {
        infer_Runtime->setDLACore(mParams.dlaCore);
    }

    mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
        infer_Runtime->deserializeCudaEngine(trtModelStream.data(), size, nullptr),
        samplesCommon::InferDeleter());

    gLogInfo << "TRT Engine loaded from: " << mParams.loadEngine << endl;
    infer_Runtime->destroy();
    if (!mEngine)
    {
        return false;
    }
    else
    {
        return true;
    }
}

// Otherwise, build a new engine from the ONNX model.
auto builder = SampleUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(gLogger.getTRTLogger()));
const auto explicitBatch = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
auto network = SampleUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
auto config = SampleUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
auto parser = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, gLogger.getTRTLogger()));

mEngine = nullptr;
parser->parseFromFile(
    locateFile(mParams.onnxFileName, mParams.dataDirs).c_str(), static_cast<int>(gLogger.getReportableSeverity()));

// Calibrator life time needs to last until after the engine is built.
std::unique_ptr<IInt8Calibrator> calibrator;

config->setAvgTimingIterations(1);
config->setMinTimingIterations(1);
config->setMaxWorkspaceSize(4_GiB);
builder->setMaxBatchSize(mParams.batchSize);

mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
    builder->buildEngineWithConfig(*network, *config), samplesCommon::InferDeleter());
The error occurs here:
[05/12/2021-16:46:42] [I] [TRT] Detected 1 inputs and 1 output network tensors.
16:46:42: The program has unexpectedly finished.
This line crashes when loading an existing engine:
mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
    infer_Runtime->deserializeCudaEngine(trtModelStream.data(), size, nullptr), samplesCommon::InferDeleter());
Or when building the engine:
mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
    builder->buildEngineWithConfig(*network, *config), samplesCommon::InferDeleter());
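Since the crash is intermittent, one way to narrow it down is to fail fast on every step of the load path instead of handing TensorRT a possibly empty buffer. A minimal sketch of a more defensive deserialize, reusing the sample's names (mParams, trtModelStream, gLogger, gLogError) and the TensorRT 7 three-argument deserializeCudaEngine; treat it as a debugging aid, not the author's fix:

    // Bail out early if the engine file could not be read at all.
    if (trtModelStream.empty())
    {
        gLogError << "Engine file is empty or unreadable: " << mParams.loadEngine << std::endl;
        return false;
    }
    IRuntime* infer_Runtime = nvinfer1::createInferRuntime(gLogger.getTRTLogger());
    if (infer_Runtime == nullptr)
    {
        return false;
    }
    // Check the raw pointer before wrapping it in a shared_ptr.
    nvinfer1::ICudaEngine* rawEngine =
        infer_Runtime->deserializeCudaEngine(trtModelStream.data(), trtModelStream.size(), nullptr);
    if (rawEngine == nullptr)
    {
        gLogError << "deserializeCudaEngine returned null" << std::endl;
        infer_Runtime->destroy();
        return false;
    }
    mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(rawEngine, samplesCommon::InferDeleter());
    infer_Runtime->destroy();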
More info:
TensorRT 7.2.3
Ubuntu 18.04
cuDNN 8.1.1
CUDA 11.1 update1
ONNX 1.6.0
Pytorch 1.5.0
Finally got it!
I rewrote the CMakeLists.txt, added all the required libraries and paths, and removed duplicate ones. It might have been a library conflict in cuBLAS.
I'm trying to run a FasterRCNN model scripted with jit.script in TorchScript from C++.
I installed CUDA 10.1, a compatible cuDNN, LibTorch (C++) 1.7.1 and TorchVision 0.8.2.
I followed the instructions for both torchscript and vision and I have the following:
--- CMakeLists.txt ---
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(load_and_run_model_proj)
list(APPEND CMAKE_PREFIX_PATH "/home/fstrati/libtorch_shared_cuda_10.1/libtorch")
list(APPEND CMAKE_PREFIX_PATH "/opt/vision_0.8.2")
find_package(Torch REQUIRED)
find_package(TorchVision REQUIRED)
add_executable(load_and_run_model src/load_and_run_model.cpp)
# target_link_libraries(load_and_run_model "${TORCH_LIBRARIES}")
target_link_libraries(load_and_run_model PUBLIC TorchVision::TorchVision)
set_property(TARGET load_and_run_model PROPERTY CXX_STANDARD 14)
--- CMakeLists.txt ---
and
--- src/load_and_run_model.cpp ---
#include <torch/script.h> // One-stop header.
#include <torchvision/vision.h>
#include <torchvision/nms.h>
#include <iostream>
#include <memory>
int main(int argc, const char* argv[])
{
if (argc != 2)
{
std::cerr << "usage: example-app <path-to-exported-script-module>\n";
return -1;
}
torch::jit::script::Module module;
try
{
// Deserialize the ScriptModule from a file using torch::jit::load().
module = torch::jit::load(argv[1]);
}
catch (const c10::Error& e)
{
std::cerr << e.what() << std::endl;
std::cerr << "error loading the model\n";
return -1;
}
std::cout << "ok\n";
return 0;
}
--- src/load_and_run_model.cpp ---
It compiles and links fine.
When I try to run it with the TorchScript for FasterRCNN created with jit.script,
I get the following error:
terminate called after throwing an instance of 'torch::jit::ErrorReport'
what():
Unknown builtin op: torchvision::nms.
Could not find any similar ops to torchvision::nms. This op may not exist or may not be currently supported in TorchScript.
:
File "C:\Users\andre\anaconda3\envs\pytorch\lib\site-packages\torchvision\ops\boxes.py", line 42
"""
_assert_has_ops()
return torch.ops.torchvision.nms(boxes, scores, iou_threshold)
~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
Serialized File "code/__torch__/torchvision/ops/boxes.py", line 93
_42 = __torch__.torchvision.extension._assert_has_ops
_43 = _42()
_44 = ops.torchvision.nms(boxes, scores, iou_threshold)
~~~~~~~~~~~~~~~~~~~ <--- HERE
return _44
'nms' is being compiled since it was called from 'batched_nms'
File "C:\Users\andre\anaconda3\envs\pytorch\lib\site-packages\torchvision\ops\boxes.py", line 88
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None]
keep = nms(boxes_for_nms, scores, iou_threshold)
~~~ <--- HERE
return keep
Serialized File "code/__torch__/torchvision/ops/boxes.py", line 50
_18 = torch.slice(offsets, 0, 0, 9223372036854775807, 1)
boxes_for_nms = torch.add(boxes, torch.unsqueeze(_18, 1), alpha=1)
keep = __torch__.torchvision.ops.boxes.nms(boxes_for_nms, scores, iou_threshold, )
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
_11 = keep
return _11
'batched_nms' is being compiled since it was called from 'RegionProposalNetwork.filter_proposals'
Serialized File "code/__torch__/torchvision/models/detection/rpn.py", line 64
_11 = __torch__.torchvision.ops.boxes.clip_boxes_to_image
_12 = __torch__.torchvision.ops.boxes.remove_small_boxes
_13 = __torch__.torchvision.ops.boxes.batched_nms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
num_images = (torch.size(proposals))[0]
device = ops.prim.device(proposals)
'RegionProposalNetwork.filter_proposals' is being compiled since it was called from 'RegionProposalNetwork.forward'
File "C:\Users\andre\anaconda3\envs\pytorch\lib\site-packages\torchvision\models\detection\rpn.py", line 344
proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
proposals = proposals.view(num_images, -1, 4)
boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)
~~~~~~~~~~~~~~~~~~~~~ <--- HERE
losses = {}
Serialized File "code/__torch__/torchvision/models/detection/rpn.py", line 37
proposals = (self.box_coder).decode(torch.detach(pred_bbox_deltas0), anchors, )
proposals0 = torch.view(proposals, [num_images, -1, 4])
_8 = (self).filter_proposals(proposals0, objectness0, images.image_sizes, num_anchors_per_level, )
~~~~~~~~~~~~~~~~~~~~~ <--- HERE
boxes, scores, = _8
losses = annotate(Dict[str, Tensor], {})
Any ideas or suggestions on how to cure this error? It seems the operator nms is not registered.
By the way, the master branch of torchvision with CUDA does not compile, so I report the error
for the tag v0.8.2 of torchvision.
After researching this issue I came across
this commit in master that is not in
v0.8.2 of torchvision:
https://github.com/pytorch/vision/pull/2798/commits/fb893e7ba390d1b668efb4b84b3376cf634bd043
Applying the commit to v0.8.2 solved the problem:
the operators are now correctly registered in TorchScript.
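To verify that the patched build actually registers the operator before handing the model to torch::jit::load, one option is to query the dispatcher at runtime. A minimal sketch, assuming the same LibTorch 1.7.1 / torchvision 0.8.2 setup as the question:

    #include <torch/script.h>
    #include <torchvision/vision.h> // linking TorchVision in pulls in its op registration
    #include <iostream>

    int main()
    {
        // Ask the dispatcher for the schema of torchvision::nms.
        auto schema = c10::Dispatcher::singleton().findSchema({"torchvision::nms", ""});
        std::cout << (schema.has_value() ? "torchvision::nms is registered"
                                         : "torchvision::nms is NOT registered")
                  << std::endl;
        return 0;
    }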
I'm trying to read in a gpkg file to extract geo information like streets and buildings.
Therefore I started with this code:
#include "gdal_priv.h"
#include <iostream>
int main() {
GDALDataset* poDataset;
GDALAllRegister();
std::cout << "driver# " << GetGDALDriverManager()->GetDriverCount()
<< std::endl;
for (int i = 0; i < GetGDALDriverManager()->GetDriverCount(); i++) {
auto driver = GetGDALDriverManager()->GetDriver(i);
auto info = driver->GetDescription();
std::cout << "driver " << i << ": " << info << std::endl;
}
auto driver = GetGDALDriverManager()->GetDriverByName("GPKG");
poDataset = (GDALDataset*)GDALOpen("Building_LoD1.gpkg", GA_ReadOnly);
if (poDataset == NULL) {
// ...;
}
return 0;
}
The driver list contains GPKG, but the reading fails with an error saying that the file is not recognized as a supported file format.
Running gdalinfo Building_LoD1.gpkg leads to the same error in the console, but I can open the file in QGIS.
And gdalsrsinfo Building_LoD1.gpkg reports:
PROJ.4 : +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=2600000 +y_0=1200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs
OGC WKT :
PROJCS["CH1903+ / LV95",
GEOGCS["CH1903+",
DATUM["CH1903+",
SPHEROID["Bessel 1841",6377397.155,299.1528128,
AUTHORITY["EPSG","7004"]],
TOWGS84[674.374,15.056,405.346,0,0,0,0],
AUTHORITY["EPSG","6150"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4150"]],
PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],
PARAMETER["latitude_of_center",46.95240555555556],
PARAMETER["longitude_of_center",7.439583333333333],
PARAMETER["azimuth",90],
PARAMETER["rectified_grid_angle",90],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",2600000],
PARAMETER["false_northing",1200000],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","2056"]]
Does anyone know why a gpkg file might be reported as not supported?
The gdal version is 2.3.2.
I figured out the problem. The reason for the message is not that the file format is unsupported by gdal, but that I used the wrong function to open the file.
If I want to read in a file that has vector information, I need to use:
GDALDataset* poDS;
poDS = (GDALDataset*)GDALOpenEx("Building_LoD1.gpkg", GDAL_OF_VECTOR, NULL, NULL, NULL);
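Put together, a minimal sketch of the vector path (same file name as above; GDAL 2.3 API), which also lists the layers so you can find the streets and buildings:

    #include "gdal_priv.h"
    #include "ogrsf_frmts.h"
    #include <iostream>

    int main() {
        GDALAllRegister();
        // GDAL_OF_VECTOR selects the vector side of the driver, which is what GPKG needs here.
        GDALDataset* poDS = (GDALDataset*)GDALOpenEx(
            "Building_LoD1.gpkg", GDAL_OF_VECTOR, NULL, NULL, NULL);
        if (poDS == NULL) {
            std::cerr << "open failed" << std::endl;
            return 1;
        }
        // Enumerate the layers contained in the GeoPackage.
        for (int i = 0; i < poDS->GetLayerCount(); i++) {
            OGRLayer* layer = poDS->GetLayer(i);
            std::cout << "layer " << i << ": " << layer->GetName() << std::endl;
        }
        GDALClose(poDS);
        return 0;
    }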
Is it possible to use the live555 lib with armv7s? Because I tried to compile it with this config:
# Change the following version number, if necessary, before running "genMakefiles iphoneos"
IOS_VERSION = 7.0
DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer
TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
SDK_PATH = $(DEVELOPER_PATH)/SDKs
SDK = $(SDK_PATH)/iPhoneOS$(IOS_VERSION).sdk
COMPILE_OPTS = $(INCLUDES) -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch armv7s --sysroot=$(SDK)
C = c
C_COMPILER = $(TOOL_PATH)/gcc
C_FLAGS = $(COMPILE_OPTS)
CPP = cpp
CPLUSPLUS_COMPILER = $(TOOL_PATH)/g++
CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
OBJ = o
LINK = $(TOOL_PATH)/g++ -o
LINK_OPTS = -L. -arch armv7s --sysroot=$(SDK) -L$(SDK)/usr/lib/system
CONSOLE_LINK_OPTS = $(LINK_OPTS)
LIBRARY_LINK = libtool -s -o
LIBRARY_LINK_OPTS =
LIB_SUFFIX = a
LIBS_FOR_CONSOLE_APPLICATION =
LIBS_FOR_GUI_APPLICATION =
EXE =
Then I tried to copy & paste the testMP3Receiver program into my iOS project (yes, I use .mm as the suffix instead of .m, and include every header that is needed), but I still got 14 errors: Undefined symbols for architecture armv7s.
My code:
#import "TViewController.h"
#include "liveMedia.hh"
#include "GroupsockHelper.hh"
#include "BasicUsageEnvironment.hh"
@interface TViewController ()
@end
@implementation TViewController
UsageEnvironment* env;
- (void)viewDidLoad
{
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
}
- (void)didReceiveMemoryWarning
{
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
struct sessionState_t {
FramedSource* source;
FileSink* sink;
RTCPInstance* rtcpInstance;
} sessionState;
- (IBAction)start:(id)sender {
if (!wasClicked) {
//start
[self startButton];
wasClicked = true;
} else {
//stop
[self stopButton];
wasClicked = false;
}
}
-(void)startButton{
// Begin by setting up our usage environment:
TaskScheduler* scheduler = BasicTaskScheduler::createNew();
env = BasicUsageEnvironment::createNew(*scheduler);
// Create the data sink for 'stdout':
sessionState.sink = FileSink::createNew(*env, "stdout");
// Note: The string "stdout" is handled as a special case.
// A real file name could have been used instead.
// Create 'groupsocks' for RTP and RTCP:
char const* sessionAddressStr
#ifdef USE_SSM
= "232.255.42.42";
#else
= "239.255.42.42";
// Note: If the session is unicast rather than multicast,
// then replace this string with "0.0.0.0"
#endif
const unsigned short rtpPortNum = 6666;
const unsigned short rtcpPortNum = rtpPortNum+1;
#ifndef USE_SSM
const unsigned char ttl = 1; // low, in case routers don't admin scope
#endif
struct in_addr sessionAddress;
sessionAddress.s_addr = our_inet_addr(sessionAddressStr);
const Port rtpPort(rtpPortNum);
const Port rtcpPort(rtcpPortNum);
#ifdef USE_SSM
char* sourceAddressStr = "aaa.bbb.ccc.ddd";
// replace this with the real source address
struct in_addr sourceFilterAddress;
sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort);
Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort);
rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0);
// our RTCP "RR"s are sent back using unicast
#else
Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl);
Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl);
#endif
RTPSource* rtpSource;
#ifndef STREAM_USING_ADUS
// Create the data source: a "MPEG Audio RTP source"
rtpSource = MPEG1or2AudioRTPSource::createNew(*env, &rtpGroupsock);
#else
// Create the data source: a "MP3 *ADU* RTP source"
unsigned char rtpPayloadFormat = 96; // a dynamic payload type
rtpSource
= MP3ADURTPSource::createNew(*env, &rtpGroupsock, rtpPayloadFormat);
#endif
// Create (and start) a 'RTCP instance' for the RTP source:
const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share
const unsigned maxCNAMElen = 100;
unsigned char CNAME[maxCNAMElen+1];
gethostname((char*)CNAME, maxCNAMElen);
CNAME[maxCNAMElen] = '\0'; // just in case
sessionState.rtcpInstance
= RTCPInstance::createNew(*env, &rtcpGroupsock,
estimatedSessionBandwidth, CNAME,
NULL /* we're a client */, rtpSource);
// Note: This starts RTCP running automatically
sessionState.source = rtpSource;
#ifdef STREAM_USING_ADUS
// Add a filter that deinterleaves the ADUs after depacketizing them:
sessionState.source
= MP3ADUdeinterleaver::createNew(*env, sessionState.source);
if (sessionState.source == NULL) {
*env << "Unable to create an ADU deinterleaving filter for the source\n";
exit(1);
}
// Add another filter that converts these ADUs to MP3s:
sessionState.source
= MP3FromADUSource::createNew(*env, sessionState.source);
if (sessionState.source == NULL) {
*env << "Unable to create an ADU->MP3 filter for the source\n";
exit(1);
}
#endif
// Finally, start receiving the multicast stream:
*env << "Beginning receiving multicast stream...\n";
sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
env->taskScheduler().doEventLoop(); // does not return
}
void afterPlaying(void* /*clientData*/) {
*env << "...done receiving\n";
// End by closing the media:
Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE
Medium::close(sessionState.sink);
Medium::close(sessionState.source);
}
-(void)stopButton{
    afterPlaying(NULL); // actually invoke the callback; a bare 'afterPlaying;' is a no-op
}
@end
So I ask again: is it even possible to use live555 on armv7s? Maybe I should use another lib?
Solution/Update
I had to drop armv7s; instead I use this project to create a fat lib for iOS.
Step-by-step solution:
Clone this project: git clone git@github.com:weevilgenius/live555-ios.git
Build the lib and copy it to your project. At first I had trouble finding the compiled lib, so for those who don't know where it is: after building the lib, open the Products folder in the Xcode file tree, right-click on libLive555.a and select "Show in Finder".
Add every .hh file to your project. Those files are in the include folder of every subfolder of the Live555 folder. Now everything should be just fine :)
I'm working on a C++ project using Visual Studio 2010 on Windows. I'm linking dynamically against x264, which I built myself as a shared library using MinGW following the guide at
http://www.ayobamiadewole.com/Blog/Others/x264compilation.aspx
The strange thing is that my x264 code sometimes works perfectly. Then, when I change some line of code (or even change the comments in the file!) and recompile, everything crashes on the line
encoder_ = x264_encoder_open(&param);
With the message
Access violation reading location 0x00000000
I'm not doing anything funky at all, so it's probably not my code that is wrong; I guess something is going wrong with the linking, or maybe something is wrong with how I compiled x264.
The full initialization code:
x264_param_t param = { 0 };
if (x264_param_default_preset(&param, "ultrafast", "zerolatency") < 0) {
    throw KStreamerException("x264_param_default_preset failed");
}
param.i_threads = 1;
param.i_width = 640;
param.i_height = 480;
param.i_fps_num = 10;
param.i_fps_den = 1;
encoder_ = x264_encoder_open(&param); // <-----
if (encoder_ == 0) {
    throw KStreamerException("x264_encoder_open failed");
}
x264_picture_alloc(&pic_, X264_CSP_I420, 640, 480);
Edit: It turns out that it always works in Release mode, and when using superfast instead of ultrafast it also works 100% of the time in Debug mode. Could it be that the ultrafast mode is doing some crazy optimizations that the debugger doesn't like?
I've met this problem too, with libx264-120.
libx264-120 was built with MinGW using the configuration options below.
$ ./configure --disable-cli --enable-shared --extra-ldflags=-Wl,--output-def=libx264-120.def --enable-debug --enable-win32thread
platform: X86
system: WINDOWS
cli: no
libx264: internal
shared: yes
static: no
asm: yes
interlaced: yes
avs: yes
lavf: no
ffms: no
gpac: no
gpl: yes
thread: win32
filters: crop select_every
debug: yes
gprof: no
strip: no
PIC: no
visualize: no
bit depth: 8
chroma format: all
$ make -j8
lib /def:libx264-120.def /machine:x86
#include "stdafx.h"
#include <iostream>
#include <cassert>
using namespace std;
#include <stdint.h>
extern "C"{
#include <x264.h>
}
int _tmain(int argc, _TCHAR* argv[])
{
int width(640);
int height(480);
int err(-1);
x264_param_t x264_param = {0};
//x264_param_default(&x264_param);
err =
x264_param_default_preset(&x264_param, "veryfast", "zerolatency");
assert(0==err);
x264_param.i_threads = 8;
x264_param.i_width = width;
x264_param.i_height = height;
x264_param.i_fps_num = 60;//fps;
x264_param.i_fps_den = 1;
// Intra refres:
x264_param.i_keyint_max = 60;//fps;
x264_param.b_intra_refresh = 1;
//Rate control:
x264_param.rc.i_rc_method = X264_RC_CRF;
x264_param.rc.f_rf_constant = 25;
x264_param.rc.f_rf_constant_max = 35;
//For streaming:
x264_param.b_repeat_headers = 1;
x264_param.b_annexb = 1;
err = x264_param_apply_profile(&x264_param, "baseline");
assert(0==err);
x264_t *x264_encoder = x264_encoder_open(&x264_param);
x264_encoder = x264_encoder;
x264_encoder_close( x264_encoder );
getchar();
return 0;
}
This program sometimes succeeds, but often fails in x264_encoder_open with the access violation.
There is no information about this on Google, and how to initialize x264_param_t and how to use x264_encoder_open are unclear.
The behavior seems to depend on x264's settings values, but I can't tell without reading some open-source programs that use libx264.
Also, this access violation doesn't seem to occur on the first execution, nor when compiling with MinGW's gcc (e.g. gcc -o test test.c -lx264; ./test).
Given this behavior, I think libx264 does some strange resource handling in the DLL version of libx264 built with MinGW's gcc.
I had the same problem. The only way I was able to fix it was to build the x264 DLL without the asm option (i.e. to configure with --disable-asm).