I am writing a GStreamer application (GStreamer uses DirectShow under the hood on Windows) that captures the computer's microphone and video camera. It works fine, but it requires me to specify the device names manually. I would like my program to detect these automatically. Does anyone know how to do that?
It would surprise me if GStreamer didn't have the capability to enumerate devices, but DirectShow definitely does.
See the article on using the System Device Enumerator and use it with the correct filter categories; in your case, CLSID_AudioInputDeviceCategory and CLSID_VideoInputDeviceCategory.
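For reference, a minimal sketch of that approach (error handling mostly omitted; assumes COM has already been initialized with CoInitialize):
#include <stdio.h>
#include <windows.h>
#include <dshow.h>

// List the friendly names of all devices in one category, e.g.
// CLSID_AudioInputDeviceCategory or CLSID_VideoInputDeviceCategory.
void EnumDeviceNames(const CLSID& category)
{
    ICreateDevEnum* devEnum = NULL;
    if (FAILED(CoCreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER,
                                IID_ICreateDevEnum, (void**)&devEnum)))
        return;

    IEnumMoniker* enumMoniker = NULL;
    // CreateClassEnumerator returns S_FALSE (and a NULL enumerator) if the
    // category is empty, so compare against S_OK explicitly.
    if (devEnum->CreateClassEnumerator(category, &enumMoniker, 0) == S_OK) {
        IMoniker* moniker = NULL;
        while (enumMoniker->Next(1, &moniker, NULL) == S_OK) {
            IPropertyBag* props = NULL;
            if (SUCCEEDED(moniker->BindToStorage(NULL, NULL, IID_IPropertyBag,
                                                 (void**)&props))) {
                VARIANT name;
                VariantInit(&name);
                if (SUCCEEDED(props->Read(L"FriendlyName", &name, NULL)))
                    wprintf(L"%s\n", name.bstrVal);  // the device name
                VariantClear(&name);
                props->Release();
            }
            moniker->Release();
        }
        enumMoniker->Release();
    }
    devEnum->Release();
}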
You should use GStreamer's probing interface (GstPropertyProbe), which lets you list all possible values for a given property; in your case, 'device-name'.
Here is an example:
GList*
gst_camera_capturer_enum_devices(gchar* factory_name)
{
    GstElement* device;
    GstPropertyProbe* probe;
    GValueArray* va;
    GList* list = NULL;
    guint i = 0;

    device = gst_element_factory_make(factory_name, "source");
    /* Check the element exists and supports probing before touching it */
    if (!device || !GST_IS_PROPERTY_PROBE(device))
        goto finish;

    /* Some sources only fill in their device list in the READY state */
    gst_element_set_state(device, GST_STATE_READY);
    gst_element_get_state(device, NULL, NULL, 5 * GST_SECOND);

    probe = GST_PROPERTY_PROBE(device);
    va = gst_property_probe_get_values_name(probe, "device-name");
    if (!va)
        goto finish;

    for (i = 0; i < va->n_values; ++i) {
        GValue* v = g_value_array_get_nth(va, i);
        list = g_list_append(list, g_string_new(g_value_get_string(v)));
    }
    g_value_array_free(va);

finish:
    if (device) {
        gst_element_set_state(device, GST_STATE_NULL);
        gst_object_unref(GST_OBJECT(device));
    }
    return list;
}
GList*
gst_camera_capturer_enum_video_devices(void)
{
return gst_camera_capturer_enum_devices("dshowvideosrc");
}
GList*
gst_camera_capturer_enum_audio_devices(void)
{
return gst_camera_capturer_enum_devices("dshowaudiosrc");
}
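Note that GstPropertyProbe only exists in GStreamer 0.10; it was removed in GStreamer 1.0. If you are on 1.x, the replacement is GstDeviceMonitor. A minimal sketch of the same idea, listing source devices and their display names:
GstDeviceMonitor* monitor = gst_device_monitor_new();
/* "Audio/Source" works the same way for microphones */
gst_device_monitor_add_filter(monitor, "Video/Source", NULL);
gst_device_monitor_start(monitor);

GList* devices = gst_device_monitor_get_devices(monitor);
for (GList* l = devices; l != NULL; l = l->next) {
    GstDevice* device = GST_DEVICE(l->data);
    gchar* name = gst_device_get_display_name(device);
    g_print("%s\n", name);
    g_free(name);
}
g_list_free_full(devices, gst_object_unref);
gst_device_monitor_stop(monitor);
gst_object_unref(monitor);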
I've been trying to set up a simple send-only WebRTC client with GStreamer, but I'm having issues getting the actual video to display on the WebRTC receiver side. I am new to both GStreamer and WebRTC.
I'm using the examples from https://gitlab.freedesktop.org/gstreamer/gst-examples/-/tree/master/webrtc to try to combine certain parts of them. I've had 1:1 communication working, but I wanted to introduce rooms so that more clients can view the "view-only" stream from GStreamer.
My current code is based on the multiparty-sendrecv example, where I swapped out the audio for video. Furthermore, I'm using a modified version of the signalling server and a modified version of the JavaScript WebRTC client. If necessary I could provide code for all of the above, but to keep things simple I won't, because I don't think the problem lies in either the signalling server or the WebRTC client: the ICE candidates were successfully negotiated, along with the SDP offer and answer, according to chrome://webrtc-internals/. See the image below.
In order to figure out what's going on, I've exported a graph that shows the GStreamer pipeline after a user has joined the room and been added to the pipeline. See the graph below.
As far as I can tell I should be receiving video data on my frontend, but I'm not. I've had a single odd case where the videotestsrc did show up, but I haven't been able to reproduce it. Because of this, I suspect the pipeline itself isn't necessarily wrong, and that we may instead be dealing with some kind of race condition.
I've added the modified multiparty-sendrecv example below; please take a look at it. Most of the methods have been left out on purpose due to Stack Overflow's character limit.
Main functions
static void
handle_media_stream(GstPad* pad, GstElement* pipe, const char* convert_name,
const char* sink_name)
{
GstPad* qpad;
GstElement* q, * conv, * sink;
GstPadLinkReturn ret;
q = gst_element_factory_make("queue", NULL);
g_assert_nonnull(q);
conv = gst_element_factory_make(convert_name, NULL);
g_assert_nonnull(conv);
sink = gst_element_factory_make(sink_name, NULL);
g_assert_nonnull(sink);
gst_bin_add_many(GST_BIN(pipe), q, conv, sink, NULL);
gst_element_sync_state_with_parent(q);
gst_element_sync_state_with_parent(conv);
gst_element_sync_state_with_parent(sink);
gst_element_link_many(q, conv, sink, NULL);
qpad = gst_element_get_static_pad(q, "sink");
ret = gst_pad_link(pad, qpad);
g_assert_cmpint(ret, ==, GST_PAD_LINK_OK);
}
static void
on_incoming_decodebin_stream(GstElement* decodebin, GstPad* pad,
GstElement* pipe)
{
GstCaps* caps;
const gchar* name;
if (!gst_pad_has_current_caps(pad)) {
g_printerr("Pad '%s' has no caps, can't do anything, ignoring\n",
GST_PAD_NAME(pad));
return;
}
caps = gst_pad_get_current_caps(pad);
name = gst_structure_get_name(gst_caps_get_structure(caps, 0));
if (g_str_has_prefix(name, "video")) {
handle_media_stream(pad, pipe, "videoconvert", "autovideosink");
}
else if (g_str_has_prefix(name, "audio")) {
handle_media_stream(pad, pipe, "audioconvert", "autoaudiosink");
}
else {
g_printerr("Unknown pad %s, ignoring", GST_PAD_NAME(pad));
}
}
static void
on_incoming_stream(GstElement* webrtc, GstPad* pad, GstElement* pipe)
{
GstElement* decodebin;
GstPad* sinkpad;
if (GST_PAD_DIRECTION(pad) != GST_PAD_SRC)
return;
decodebin = gst_element_factory_make("decodebin", NULL);
g_signal_connect(decodebin, "pad-added",
G_CALLBACK(on_incoming_decodebin_stream), pipe);
gst_bin_add(GST_BIN(pipe), decodebin);
gst_element_sync_state_with_parent(decodebin);
sinkpad = gst_element_get_static_pad(decodebin, "sink");
gst_pad_link(pad, sinkpad);
gst_object_unref(sinkpad);
}
static void
add_peer_to_pipeline(const gchar* peer_id, gboolean offer)
{
int ret;
gchar* tmp;
GstElement* tee, * webrtc, * q;
GstPad* srcpad, * sinkpad;
tmp = g_strdup_printf("queue-%s", peer_id);
q = gst_element_factory_make("queue", tmp);
g_free(tmp);
webrtc = gst_element_factory_make("webrtcbin", peer_id);
g_object_set(webrtc, "bundle-policy", GST_WEBRTC_BUNDLE_POLICY_MAX_BUNDLE, NULL);
gst_bin_add_many(GST_BIN(pipeline), q, webrtc, NULL);
srcpad = gst_element_get_static_pad(q, "src");
g_assert_nonnull(srcpad);
sinkpad = gst_element_get_request_pad(webrtc, "sink_%u");
g_assert_nonnull(sinkpad);
ret = gst_pad_link(srcpad, sinkpad);
g_assert_cmpint(ret, ==, GST_PAD_LINK_OK);
gst_object_unref(srcpad);
gst_object_unref(sinkpad);
tee = gst_bin_get_by_name(GST_BIN(pipeline), "videotee");
g_assert_nonnull(tee);
srcpad = gst_element_get_request_pad(tee, "src_%u");
g_assert_nonnull(srcpad);
gst_object_unref(tee);
sinkpad = gst_element_get_static_pad(q, "sink");
g_assert_nonnull(sinkpad);
ret = gst_pad_link(srcpad, sinkpad);
g_assert_cmpint(ret, ==, GST_PAD_LINK_OK);
gst_object_unref(srcpad);
gst_object_unref(sinkpad);
/* This is the gstwebrtc entry point where we create the offer and so on. It
* will be called when the pipeline goes to PLAYING.
* XXX: We must connect this after webrtcbin has been linked to a source via
* get_request_pad() and before we go from NULL->READY otherwise webrtcbin
* will create an SDP offer with no media lines in it. */
if (offer)
g_signal_connect(webrtc, "on-negotiation-needed",
G_CALLBACK(on_negotiation_needed), (gpointer)peer_id);
/* We need to transmit this ICE candidate to the browser via the websockets
* signalling server. Incoming ice candidates from the browser need to be
* added by us too, see on_server_message() */
g_signal_connect(webrtc, "on-ice-candidate",
G_CALLBACK(send_ice_candidate_message), (gpointer)peer_id);
/* Incoming streams will be exposed via this signal */
g_signal_connect(webrtc, "pad-added", G_CALLBACK(on_incoming_stream),
pipeline);
/* Set the pipeline branch to PLAYING */
ret = gst_element_sync_state_with_parent(q);
g_assert_true(ret);
ret = gst_element_sync_state_with_parent(webrtc);
g_assert_true(ret);
GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline");
}
static gboolean
start_pipeline(void)
{
GstStateChangeReturn ret;
GError* error = NULL;
/* NOTE: webrtcbin currently does not support dynamic addition/removal of
* streams, so we use a separate webrtcbin for each peer, but all of them are
* inside the same pipeline. We start by connecting it to a fakesink so that
* we can preroll early. */
pipeline = gst_parse_launch("tee name=videotee ! queue ! fakesink "
"videotestsrc is-live=true pattern=ball ! videoconvert ! queue ! vp8enc deadline=1 ! rtpvp8pay ! "
"queue ! " RTP_CAPS_VP8 "96 ! videotee. ", &error);
if (error) {
g_printerr("Failed to parse launch: %s\n", error->message);
g_error_free(error);
goto err;
}
g_print("Starting pipeline, not transmitting yet\n");
ret = gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE)
goto err;
return TRUE;
err:
g_print("State change failure\n");
if (pipeline)
g_clear_object(&pipeline);
return FALSE;
}
/*
 * When we join a room, we are responsible for initiating the calls: we start
 * negotiation with each peer already in it by sending an SDP offer and ICE
 * candidates.
 */
static void
do_join_room(const gchar* text)
{
gint ii, len;
gchar** peer_ids;
if (app_state != ROOM_JOINING) {
cleanup_and_quit_loop("ERROR: Received ROOM_OK when not calling",
ROOM_JOIN_ERROR);
return;
}
app_state = ROOM_JOINED;
g_print("Room joined\n");
/* Start recording, but not transmitting */
if (!start_pipeline()) {
cleanup_and_quit_loop("ERROR: Failed to start pipeline", ROOM_CALL_ERROR);
return;
}
peer_ids = g_strsplit(text, " ", -1);
g_assert_cmpstr(peer_ids[0], ==, "ROOM_OK");
len = g_strv_length(peer_ids);
/* There are peers in the room already. We need to start negotiation
* (exchange SDP and ICE candidates) and transmission of media. */
if (len > 1 && strlen(peer_ids[1]) > 0) {
g_print("Found %i peers already in room\n", len - 1);
app_state = ROOM_CALL_OFFERING;
for (ii = 1; ii < len; ii++) {
gchar* peer_id = g_strdup(peer_ids[ii]);
g_print("Negotiating with peer %s\n", peer_id);
/* This might fail asynchronously */
call_peer(peer_id);
peers = g_list_prepend(peers, peer_id);
}
}
g_strfreev(peer_ids);
return;
}
int
main(int argc, char* argv[])
{
GOptionContext* context;
GError* error = NULL;
context = g_option_context_new("- gstreamer webrtc sendrecv demo");
g_option_context_add_main_entries(context, entries, NULL);
g_option_context_add_group(context, gst_init_get_option_group());
if (!g_option_context_parse(context, &argc, &argv, &error)) {
g_printerr("Error initializing: %s\n", error->message);
return -1;
}
if (!check_plugins())
return -1;
if (!room_id) {
g_printerr("--room-id is a required argument\n");
return -1;
}
if (!local_id)
local_id = g_strdup_printf("%s-%i", g_get_user_name(),
g_random_int_range(10, 10000));
/* Sanitize by removing whitespace, modifies string in-place */
g_strdelimit(local_id, " \t\n\r", '-');
g_print("Our local id is %s\n", local_id);
if (!server_url)
server_url = g_strdup(default_server_url);
/* Don't use strict ssl when running a localhost server, because
* it's probably a test server with a self-signed certificate */
{
GstUri* uri = gst_uri_from_string(server_url);
if (g_strcmp0("localhost", gst_uri_get_host(uri)) == 0 ||
g_strcmp0("127.0.0.1", gst_uri_get_host(uri)) == 0)
strict_ssl = FALSE;
gst_uri_unref(uri);
}
loop = g_main_loop_new(NULL, FALSE);
connect_to_websocket_server_async();
g_main_loop_run(loop);
gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_NULL);
g_print("Pipeline stopped\n");
gst_object_unref(pipeline);
g_free(server_url);
g_free(local_id);
g_free(room_id);
return 0;
}
I am trying to build an NDK-based C++ low-latency audio player that must support three operations for multiple audio sources:
Play from assets.
Stream from an online source.
Play from local device storage.
Starting from one of the Oboe samples provided by Google, I added another function to the class NDKExtractor.cpp to extract audio from a URL and render it to the audio device while reading from the source at the same time.
int32_t NDKExtractor::decode(char *file, uint8_t *targetData, AudioProperties targetProperties) {
LOGD("Using NDK decoder: %s",file);
// Extract the audio frames
AMediaExtractor *extractor = AMediaExtractor_new();
//using this method instead of AMediaExtractor_setDataSourceFd() as used for asset files in the rhythm game example
media_status_t amresult = AMediaExtractor_setDataSource(extractor, file);
if (amresult != AMEDIA_OK) {
LOGE("Error setting extractor data source, err %d", amresult);
return 0;
}
// Specify our desired output format by creating it from our source
AMediaFormat *format = AMediaExtractor_getTrackFormat(extractor, 0);
int32_t sampleRate;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &sampleRate)) {
LOGD("Source sample rate %d", sampleRate);
if (sampleRate != targetProperties.sampleRate) {
LOGE("Input (%d) and output (%d) sample rates do not match. "
"NDK decoder does not support resampling.",
sampleRate,
targetProperties.sampleRate);
return 0;
}
} else {
LOGE("Failed to get sample rate");
return 0;
}
int32_t channelCount;
if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &channelCount)) {
LOGD("Got channel count %d", channelCount);
if (channelCount != targetProperties.channelCount) {
LOGE("NDK decoder does not support different "
"input (%d) and output (%d) channel counts",
channelCount,
targetProperties.channelCount);
}
} else {
LOGE("Failed to get channel count");
return 0;
}
const char *formatStr = AMediaFormat_toString(format);
LOGD("Output format %s", formatStr);
const char *mimeType;
if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mimeType)) {
LOGD("Got mime type %s", mimeType);
} else {
LOGE("Failed to get mime type");
return 0;
}
// Obtain the correct decoder
AMediaCodec *codec = nullptr;
AMediaExtractor_selectTrack(extractor, 0);
codec = AMediaCodec_createDecoderByType(mimeType);
AMediaCodec_configure(codec, format, nullptr, nullptr, 0);
AMediaCodec_start(codec);
// DECODE
bool isExtracting = true;
bool isDecoding = true;
int32_t bytesWritten = 0;
while (isExtracting || isDecoding) {
if (isExtracting) {
// Obtain the index of the next available input buffer
ssize_t inputIndex = AMediaCodec_dequeueInputBuffer(codec, 2000);
//LOGV("Got input buffer %d", inputIndex);
// The input index acts as a status code when it is negative
if (inputIndex < 0) {
if (inputIndex == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
// LOGV("Codec.dequeueInputBuffer try again later");
} else {
LOGE("Codec.dequeueInputBuffer unknown error status");
}
} else {
// Obtain the actual buffer and read the encoded data into it
size_t inputSize;
uint8_t *inputBuffer = AMediaCodec_getInputBuffer(codec, inputIndex,
&inputSize);
//LOGV("Sample size is: %d", inputSize);
ssize_t sampleSize = AMediaExtractor_readSampleData(extractor, inputBuffer,
inputSize);
auto presentationTimeUs = AMediaExtractor_getSampleTime(extractor);
if (sampleSize > 0) {
// Enqueue the encoded data
AMediaCodec_queueInputBuffer(codec, inputIndex, 0, sampleSize,
presentationTimeUs,
0);
AMediaExtractor_advance(extractor);
} else {
LOGD("End of extractor data stream");
isExtracting = false;
// We need to tell the codec that we've reached the end of the stream
AMediaCodec_queueInputBuffer(codec, inputIndex, 0, 0,
presentationTimeUs,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
}
}
}
if (isDecoding) {
// Dequeue the decoded data
AMediaCodecBufferInfo info;
ssize_t outputIndex = AMediaCodec_dequeueOutputBuffer(codec, &info, 0);
if (outputIndex >= 0) {
// Check whether this is set earlier
if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
LOGD("Reached end of decoding stream");
isDecoding = false;
} else {
// Valid index, acquire buffer
size_t outputSize;
uint8_t *outputBuffer = AMediaCodec_getOutputBuffer(codec, outputIndex,
&outputSize);
/*LOGV("Got output buffer index %d, buffer size: %d, info size: %d writing to pcm index %d",
outputIndex,
outputSize,
info.size,
m_writeIndex);*/
// copy the data out of the buffer
memcpy(targetData + bytesWritten, outputBuffer, info.size);
bytesWritten += info.size;
AMediaCodec_releaseOutputBuffer(codec, outputIndex, false);
}
} else {
// The outputIndex doubles as a status return if its value is < 0
switch (outputIndex) {
case AMEDIACODEC_INFO_TRY_AGAIN_LATER:
LOGD("dequeueOutputBuffer: try again later");
break;
case AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED:
LOGD("dequeueOutputBuffer: output buffers changed");
break;
case AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED:
LOGD("dequeueOutputBuffer: output outputFormat changed");
format = AMediaCodec_getOutputFormat(codec);
LOGD("outputFormat changed to: %s", AMediaFormat_toString(format));
break;
}
}
}
}
// Clean up
AMediaFormat_delete(format);
AMediaCodec_delete(codec);
AMediaExtractor_delete(extractor);
return bytesWritten;
}
Now the problem I am facing is that this code first extracts all the audio data and saves it into a buffer, which then becomes part of AFileDataSource, a class I derived from the DataSource class in the same sample.
Only after it is done extracting the whole file does playback start, through the onAudioReady() callback of Oboe's AudioStreamBuilder.
What I need is to start playing while the chunks of audio buffer are still streaming in.
Optional query: aside from the question above, this code blocks the UI even though I created a foreground service to communicate with the NDK functions that execute it. Any thoughts on this?
You probably solved this already, but for future readers...
You need a FIFO buffer to store the decoded audio. You can use Oboe's FIFO buffer, e.g. oboe::FifoBuffer.
You can have low/high watermarks for the buffer and a state machine, so that you start decoding when the buffer is almost empty and stop decoding when it's full (you'll figure out the other states you need).
As a side note, I implemented such a player only to find out later that the AAC codec is broken on some devices (Xiaomi and Amazon come to mind), so I had to throw away the AMediaCodec/AMediaExtractor parts and use an AAC library instead.
You have to implement a ring buffer (or use the one implemented in the Oboe example, LockFreeQueue.h) and, from the extracting thread, copy the decoded data into buffers that you push onto the ring buffer. On the other end of the ring buffer, the audio thread will pop that data from the queue and copy it into the audio buffer. This happens in the onAudioReady(oboe::AudioStream *oboeStream, void *audioData, int32_t numFrames) callback that you have to implement in your class (see the Oboe docs). Be sure to follow all the good practices on the audio thread (don't allocate/deallocate memory there, no mutexes, no file I/O, etc.). A rough sketch of this follows below.
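A minimal single-producer/single-consumer sketch, assuming 16-bit PCM output; the names here (RingBuffer, Player) are hypothetical, and in practice you would reach for oboe::FifoBuffer or the samples' LockFreeQueue.h instead of rolling your own:
#include <algorithm>
#include <atomic>
#include <cstring>
#include <oboe/Oboe.h>

// Hypothetical lock-free SPSC ring buffer: the decoder thread writes,
// the audio callback reads. Sizes are in samples (int16_t) for simplicity.
class RingBuffer {
public:
    static constexpr size_t kCapacity = 1 << 16;  // must be a power of two

    // Called from the decoder thread; returns how many samples were accepted.
    size_t write(const int16_t* src, size_t n) {
        size_t w = mWrite.load(std::memory_order_relaxed);
        size_t free = kCapacity - (w - mRead.load(std::memory_order_acquire));
        n = std::min(n, free);
        for (size_t i = 0; i < n; ++i)
            mData[(w + i) & (kCapacity - 1)] = src[i];
        mWrite.store(w + n, std::memory_order_release);
        return n;
    }

    // Called from the audio callback; returns how many samples were read.
    size_t read(int16_t* dst, size_t n) {
        size_t r = mRead.load(std::memory_order_relaxed);
        size_t avail = mWrite.load(std::memory_order_acquire) - r;
        n = std::min(n, avail);
        for (size_t i = 0; i < n; ++i)
            dst[i] = mData[(r + i) & (kCapacity - 1)];
        mRead.store(r + n, std::memory_order_release);
        return n;
    }

private:
    int16_t mData[kCapacity];
    std::atomic<size_t> mRead{0};
    std::atomic<size_t> mWrite{0};
};

// The audio-thread side: drain the FIFO, pad with silence on underrun, and
// never block. Depending on your Oboe version this callback lives on
// oboe::AudioStreamDataCallback or oboe::AudioStreamCallback.
class Player : public oboe::AudioStreamDataCallback {
public:
    RingBuffer fifo;

    oboe::DataCallbackResult onAudioReady(oboe::AudioStream* stream,
                                          void* audioData,
                                          int32_t numFrames) override {
        auto* out = static_cast<int16_t*>(audioData);
        size_t wanted = static_cast<size_t>(numFrames) * stream->getChannelCount();
        size_t got = fifo.read(out, wanted);
        if (got < wanted)  // underrun: output silence instead of blocking
            std::memset(out + got, 0, (wanted - got) * sizeof(int16_t));
        return oboe::DataCallbackResult::Continue;
    }
};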
Optional query: a Service does not run in a separate thread, so obviously if you call the NDK functions from it on the UI thread, it blocks the UI. Look at the other types of services: an IntentService, or a service with a Messenger that launches a separate thread on the Java side; alternatively, you can create threads on the C++ side using std::thread, as in the sketch below.
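A minimal std::thread sketch for the C++ route; startDecodeAsync and Player are hypothetical names, and the body stands in for the question's NDKExtractor::decode reworked to feed the FIFO above:
#include <string>
#include <thread>

// Run extraction/decoding off the UI thread; the audio callback keeps
// draining player->fifo while this thread fills it.
void startDecodeAsync(Player* player, std::string url) {
    std::thread([player, url = std::move(url)]() {
        // Call NDKExtractor::decode(url.c_str(), ...) here, reworked to
        // write each decoded chunk into player->fifo as it arrives.
    }).detach();
}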
In Linux, I have a USB hub with the 'register' shown in the image below. This register is supposed to disable power on a certain port of the hub.
I tried to use libusb (my code is shown below) to write the register, 0x0A, with all zeros to disable all ports. The problem is that the hub is controlled by the standard Linux USB hub driver, and so the kernel driver gets detached. The write also fails. The failure messages are shown below.
Error messages:
$ /mnt/apps/UsbPowerControl
5 Devices in list.
Vendor:Device = 1908:1320
Vendor:Device = 0403:6001
Vendor:Device = 289d:0010
Vendor:Device = 0424:2513
Vendor:Device = 1d6b:0002
Opening Device = 0424:2513
Device Opened
Kernel Driver Active
Kernel Driver Detached!
Claimed Interface
Data-><-
Writing Data...
libusb: error [submit_bulk_transfer] submiturb failed error -1 errno=2
Write Error
Released Interface
How can I use LibUSB to write this Hub register, to dynamically disable and enable the ports, without unregistering the Linux driver and having my write fail?
#include <iostream>
#include <cassert>
#include <libusb-1.0/libusb.h>
using namespace std;
#define VENDOR_ID 0x0424
#define PRODUCT_ID 0x2513
int main() {
libusb_device **devs; //pointer to pointer of device, used to retrieve a list of devices
libusb_device_handle *dev_handle; //a device handle
libusb_context *ctx = NULL; //a libusb session
int r; //for return values
ssize_t cnt; //holding number of devices in list
r = libusb_init(&ctx); //initialize the library for the session we just declared
if(r < 0) {
cout<<"Init Error "<<r<<endl; //there was an error
return 1;
}
libusb_set_debug(ctx, 3); //set verbosity level to 3, as suggested in the documentation
cnt = libusb_get_device_list(ctx, &devs); //get the list of devices
if(cnt < 0) {
cout<<"Get Device Error"<<endl; //there was an error
return 1;
}
cout<<cnt<<" Devices in list."<<endl;
for (size_t idx = 0; idx < cnt; ++idx) {
libusb_device *device = devs[idx];
libusb_device_descriptor desc = {0};
int rc = libusb_get_device_descriptor(device, &desc);
assert(rc == 0);
printf("Vendor:Device = %04x:%04x\n", desc.idVendor, desc.idProduct);
}
printf("Opening Device = %04x:%04x\n", VENDOR_ID, PRODUCT_ID);
dev_handle = libusb_open_device_with_vid_pid(ctx, VENDOR_ID, PRODUCT_ID); //these are vendorID and productID I found for my usb device
if(dev_handle == NULL)
cout<<"Cannot open device"<<endl;
else
cout<<"Device Opened"<<endl;
libusb_free_device_list(devs, 1); //free the list, unref the devices in it
unsigned char *data = new unsigned char[1]; //data to write
data[0]=0b00000000;
int actual; //used to find out how many bytes were written
if(libusb_kernel_driver_active(dev_handle, 0) == 1) { //find out if kernel driver is attached
cout<<"Kernel Driver Active"<<endl;
if(libusb_detach_kernel_driver(dev_handle, 0) == 0) //detach it
cout<<"Kernel Driver Detached!"<<endl;
}
r = libusb_claim_interface(dev_handle, 0); //claim interface 0 (the first) of the device (mine had just 1)
if(r < 0) {
cout<<"Cannot Claim Interface"<<endl;
return 1;
}
cout<<"Claimed Interface"<<endl;
cout<<"Data->"<<data<<"<-"<<endl; //just to see the data we want to write : abcd
cout<<"Writing Data..."<<endl;
r = libusb_bulk_transfer(dev_handle, (0x0A | LIBUSB_ENDPOINT_OUT), data, 1, &actual, 0); //attempt a 1-byte bulk write to endpoint 0x0A, the register address from the datasheet
if(r == 0 && actual == 1) //we wrote the 1 bytes successfully
cout<<"Writing Successful!"<<endl;
else
cout<<"Write Error"<<endl;
r = libusb_release_interface(dev_handle, 0); //release the claimed interface
if(r!=0) {
cout<<"Cannot Release Interface"<<endl;
return 1;
}
cout<<"Released Interface"<<endl;
libusb_close(dev_handle); //close the device we opened
libusb_exit(ctx); //needs to be called to end the
delete[] data; //delete the allocated memory for data
return 0;
}
int libusb_detach_kernel_driver(libusb_device_handle *dev, int interface_number)
...
If successful, you will then be able to claim the interface and perform I/O.
...
int libusb_kernel_driver_active(libusb_device_handle *dev, int interface_number)
...
If a kernel driver is active, you cannot claim the interface, and libusb will be unable to perform I/O.
...
Given what is written above, the short answer to the question "How do I do I/O without detaching the driver?" is "You can't."
Why does the write fail? That is another matter. I'd suggest looking into a few things:
Check the value returned from libusb_bulk_transfer; maybe it will give you an idea of what is happening.
It sounds obvious, but I always check it before anything else: process privileges.
Also, I can suggest another way of approaching the solution, namely sysfs.
I assume that your device (am I right?) supports EEPROM and SMBus access. That means this support should be manifested in the kernel somewhere around /sys/bus/i2c/devices/[some_device_id]/eeprom (probably another device number, directory position, etc., because it is all driver-related). If that file can be found and read just like any other file (which is likely, unless something is wrong with the device), then it can probably be written to as well. If the read works, I suggest comparing a hexdump -C of the file against the datasheet, and if the data looks legit, try writing directly into your register (file offset).
Anyway, accessing character device files and sysfs files is the general way of accessing drivers' data in Linux. You probably don't even need libusb's API to write that single byte.
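One more avenue worth considering: on hubs that implement per-port power switching, port power is normally toggled with a standard hub class request (SetPortFeature/ClearPortFeature with the PORT_POWER feature) on the default control endpoint, not with a bulk transfer to the register's address. A hedged sketch of that approach; whether it works depends on the hub actually supporting per-port power switching:
#include <libusb-1.0/libusb.h>

// Hub class requests, USB 2.0 spec chapter 11.
#define USB_RT_PORT (LIBUSB_REQUEST_TYPE_CLASS | LIBUSB_RECIPIENT_OTHER) /* 0x23 */
#define USB_PORT_FEAT_POWER 8

// Enable or disable power on one hub port (1-based port numbering).
// Returns 0 on success or a negative libusb error code.
int set_port_power(libusb_device_handle* hub, int port, bool on)
{
    return libusb_control_transfer(
        hub,
        USB_RT_PORT,                          // bmRequestType
        on ? LIBUSB_REQUEST_SET_FEATURE       // bRequest
           : LIBUSB_REQUEST_CLEAR_FEATURE,
        USB_PORT_FEAT_POWER,                  // wValue: the feature selector
        port,                                 // wIndex: the port number
        NULL, 0,                              // no data stage
        1000);                                // timeout in ms
}
Control transfers go to endpoint 0, so this typically works without claiming the hub's interface or detaching the kernel driver; this is essentially what the well-known hub-ctrl tool does.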
I am trying to initialize a GStreamer pipeline via the C API. The code looks similar to this:
[...]
pipeline = (GstPipeline*)gst_pipeline_new(nullptr);
if(!pipeline) {
//error
return;
}
[...]
sink = gst_element_factory_make("autoaudiosink", nullptr);
if(!sink) {
//error
return;
}
if(!gst_bin_add((GstBin*)pipeline, sink)) {
//error
return;
}
[...]
GstBus* bus = gst_pipeline_get_bus(pipeline);
gst_bus_add_watch(bus, (GstBusFunc)bus_callback, this);
gst_object_unref(bus);
GstStateChangeReturn state = gst_element_set_state((GstElement*)pipeline, GST_STATE_PLAYING);
if(state == GST_STATE_CHANGE_FAILURE) {
//error2
return;
}
[...]
Although the errors appear on the console, I would like to retrieve them as a C string (or any format that I can process). How do I do that?
I have read that errors are posted to the bus of the pipeline, yet when calling gst_element_set_state my bus callback isn't called. Maybe because the GLib main loop is not running yet?
Sorry, I am pretty new to the GStreamer/GLib libraries and I couldn't find a clear answer in the (overwhelming) documentation.
One solution is to set up a listener which receives messages and checks for errors; see the hello-world example: http://gstreamer.freedesktop.org/data/doc/gstreamer/head/manual/html/chapter-helloworld.html. (GError is the data type used for errors.) Your suspicion is right: a watch added with gst_bus_add_watch only fires once the GLib main loop is running, but you can also pop messages off the bus yourself without a main loop, as sketched below.
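A minimal sketch of pulling the error off the bus directly, reusing the names from the question's snippet:
GstBus* bus = gst_pipeline_get_bus(pipeline);
/* Block until an error or EOS arrives; a finite timeout works as well */
GstMessage* msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
    (GstMessageType)(GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
if (msg != NULL && GST_MESSAGE_TYPE(msg) == GST_MESSAGE_ERROR) {
    GError* err = NULL;
    gchar* debug = NULL;
    gst_message_parse_error(msg, &err, &debug);
    /* err->message is the plain C string you are after */
    g_printerr("Error from %s: %s\n", GST_OBJECT_NAME(msg->src), err->message);
    g_printerr("Debug info: %s\n", debug ? debug : "none");
    g_error_free(err);
    g_free(debug);
}
if (msg != NULL)
    gst_message_unref(msg);
gst_object_unref(bus);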
I am using DirectShow to develop a program on Windows Embedded CE 6.0.
I am writing the program in C/C++.
The program needs to deal with multiple audio input devices.
I am able to get the available audio input devices in DirectShow,
but I don't know how to specify an input device and capture audio from it.
Is there any way to do it?
Thanks!
// firstly, use the following code to create the audio capture filter
IBaseFilter * pDevice = NULL;
CoCreateInstance(CLSID_AudioCapture, NULL, CLSCTX_INPROC, IID_IBaseFilter, (void**)&pDevice);
// then, enumerate the pins to get the audio input names from the filter
IEnumPins * pinEnum = NULL;
IPin * pin = NULL;
ULONG fetchCount = 0;
PIN_INFO pinInfo;
pDevice->EnumPins(&pinEnum);
while (SUCCEEDED(pinEnum->Next(1, &pin, &fetchCount)) && fetchCount)
{
    pin->QueryPinInfo(&pinInfo);
    if (pinInfo.dir == PINDIR_INPUT)
    {
        // get the name from pinInfo.achName
    }
    // QueryPinInfo AddRefs the owning filter, so release it to avoid a leak
    if (pinInfo.pFilter)
        pinInfo.pFilter->Release();
    pin->Release();
}
pinEnum->Release();
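To actually capture from one of those inputs, each input pin can be queried for IAMAudioInputMixer and switched on or off. That is how it works on desktop DirectShow; I have not verified it on the CE 6.0 capture filter, so treat this as an assumption. Inside the loop above, for an input pin you want to select:
IAMAudioInputMixer* pMixer = NULL;
if (SUCCEEDED(pin->QueryInterface(IID_IAMAudioInputMixer, (void**)&pMixer)))
{
    // enable this input (and disable the others the same way with FALSE)
    pMixer->put_Enable(TRUE);
    pMixer->Release();
}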