Here is my audio init code. My app responds when queue buffers are ready, but all the data in the buffers is zero. Checking Sound in System Preferences shows that the USB Audio CODEC is active in the sound input pane. audioInit() is called right after the app launches.
#pragma mark user data struct
typedef struct MyRecorder
{
AudioFileID recordFile;
SInt64 recordPacket;
Float32 *pSampledData;
MorseDecode *pMorseDecoder;
} MyRecorder;
#pragma mark utility functions
void CheckError(OSStatus error, const char *operation)
{
if(error == noErr) return;
char errorString[20];
// see if it appears to be a 4 char code
*(UInt32*)(errorString + 1) = CFSwapInt32HostToBig(error);
if (isprint(errorString[1]) && isprint(errorString[2]) &&
isprint(errorString[3]) && isprint(errorString[4]))
{
errorString[0] = errorString[5] = '\'';
errorString[6] = '\0';
}
else
{
sprintf(errorString, "%d", (int)error);
}
fprintf(stderr, "Error: %s (%s)\n", operation, errorString);
}
OSStatus MyGetDefaultInputDeviceSampleRate(Float64 *outSampleRate)
{
OSStatus error;
AudioDeviceID deviceID = 0;
AudioObjectPropertyAddress propertyAddress;
UInt32 propertySize;
propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = 0;
propertySize = sizeof(AudioDeviceID);
error = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&propertyAddress,
0,
NULL,
&propertySize,
&deviceID);
if(error)
return error;
propertyAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
propertyAddress.mElement = 0;
propertySize = sizeof(Float64);
error = AudioObjectGetPropertyData(deviceID,
&propertyAddress,
0,
NULL,
&propertySize,
outSampleRate);
return error;
}
static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format,
AudioQueueRef queue,
float seconds)
{
int packets, frames, bytes;
frames = (int)ceil(seconds * format->mSampleRate);
if(format->mBytesPerFrame > 0)
{
bytes = frames * format->mBytesPerFrame;
}
else
{
UInt32 maxPacketSize;
if(format->mBytesPerPacket > 0)
{
// constant packet size
maxPacketSize = format->mBytesPerPacket;
}
else
{
// get the largest single packet size possible
UInt32 propertySize = sizeof(maxPacketSize);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterPropertyMaximumOutputPacketSize,
&maxPacketSize,
&propertySize),
"Couldn't get queues max output packet size");
}
if(format->mFramesPerPacket > 0)
packets = frames / format->mFramesPerPacket;
else
// worst case scenario: 1 frame in a packet
packets = frames;
// sanity check
if(packets == 0)
packets = 1;
bytes = packets * maxPacketSize;
}
return bytes;
}
extern void bridgeToMainThread(MorseDecode *pDecode);
static int callBacks = 0;
// ---------------------------------------------
static void MyAQInputCallback(void *inUserData,
AudioQueueRef inQueue,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp *inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription *inPacketDesc)
{
MyRecorder *recorder = (MyRecorder*)inUserData;
Float32 *pAudioData = (Float32*)(inBuffer->mAudioData);
recorder->pMorseDecoder->pBuffer = pAudioData;
recorder->pMorseDecoder->bufferSize = inNumPackets;
bridgeToMainThread(recorder->pMorseDecoder);
CheckError(AudioQueueEnqueueBuffer(inQueue,
inBuffer,
0,
NULL),
"AudioQueueEnqueueBuffer failed");
printf("packets = %ld, bytes = %ld\n",(long)inNumPackets,(long)inBuffer->mAudioDataByteSize);
callBacks++;
//printf("\ncallBacks = %d\n",callBacks);
//if(callBacks == 0)
//audioStop();
}
static AudioQueueRef queue = {0};
static MyRecorder recorder = {0};
static AudioStreamBasicDescription recordFormat;
void audioInit()
{
// set up format
memset(&recordFormat,0,sizeof(recordFormat));
recordFormat.mFormatID = kAudioFormatLinearPCM;
recordFormat.mChannelsPerFrame = 2;
recordFormat.mBitsPerChannel = 32;
recordFormat.mBytesPerPacket = recordFormat.mBytesPerFrame = recordFormat.mChannelsPerFrame * sizeof(Float32);
recordFormat.mFramesPerPacket = 1;
//recordFormat.mFormatFlags = kAudioFormatFlagsCanonical;
recordFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked;
MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);
UInt32 propSize = sizeof(recordFormat);
CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
0,
NULL,
&propSize,
&recordFormat),
"AudioFormatProperty failed");
recorder.pMorseDecoder = MorseDecode::pInstance();
recorder.pMorseDecoder->m_sampleRate = recordFormat.mSampleRate;
// recorder.pMorseDecoder->setCircularBuffer();
//set up queue
CheckError(AudioQueueNewInput(&recordFormat,
MyAQInputCallback,
&recorder,
NULL,
kCFRunLoopCommonModes,
0,
&queue),
"AudioQueueNewInput failed");
UInt32 size = sizeof(recordFormat);
CheckError(AudioQueueGetProperty(queue,
kAudioConverterCurrentOutputStreamDescription,
&recordFormat,
&size), "Couldn't get queue's format");
// set up buffers and enqueue
const int kNumberRecordBuffers = 3;
int bufferByteSize = MyComputeRecordBufferSize(&recordFormat, queue, AUDIO_BUFFER_DURATION);
for(int bufferIndex = 0; bufferIndex < kNumberRecordBuffers; bufferIndex++)
{
AudioQueueBufferRef buffer;
CheckError(AudioQueueAllocateBuffer(queue,
bufferByteSize,
&buffer),
"AudioQueueAllocateBuffer failed");
CheckError(AudioQueueEnqueueBuffer(queue,
buffer,
0,
NULL),
"AudioQueueEnqueueBuffer failed");
}
}
void audioRun()
{
CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");
}
void audioStop()
{
CheckError(AudioQueuePause(queue), "AudioQueuePause failed");
}
This sounds like the new macOS 'microphone privacy' setting, which, if set to 'no access' for your app, will cause precisely this behaviour. So:
Open the System Preferences pane.
Click on 'Security and Privacy'.
Select the Privacy tab.
Click on 'Microphone' in the left-hand pane.
Locate your app in the right-hand pane and tick the checkbox next to it.
Then restart your app and test it.
Tedious, no?
Edit: As stated in the comments, you can't directly request microphone access, but you can detect whether it has been granted to your app or not by calling [AVCaptureDevice authorizationStatusForMediaType: AVMediaTypeAudio].
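For reference, a minimal Objective-C++ sketch of that check (assuming AVFoundation is linked; the helper name is mine) might be:
#import <AVFoundation/AVFoundation.h>

// Returns true only if the user has already granted this app microphone access.
static bool microphoneAccessGranted(void)
{
    AVAuthorizationStatus status =
        [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeAudio];
    return status == AVAuthorizationStatusAuthorized;
}
You could call this before audioRun() and show a hint to the user when it returns false.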
I am trying to create fragmented MP4 from raw H264 video data so I can play it in an internet browser's player. My goal is to create a live streaming system, where a media server would send fragmented MP4 pieces to the browser. The server would buffer input data from a Raspberry Pi camera, which sends video as H264 frames. It would then mux that video data and make it available to the client. The browser would play the media data (muxed by the server and sent e.g. through a websocket) using Media Source Extensions.
For test purposes I wrote the following pieces of code (using many examples I found on the internet):
C++ application using avcodec which muxes raw H264 video to fragmented MP4 and saves it to a file:
#define READBUFSIZE 4096
#define IOBUFSIZE 4096
#define ERRMSGSIZE 128
#include <cstdint>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
extern "C"
{
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
}
enum NalType : uint8_t
{
//NALs containing stream metadata
SEQ_PARAM_SET = 0x7,
PIC_PARAM_SET = 0x8
};
std::vector<uint8_t> outputData;
int mediaMuxCallback(void *opaque, uint8_t *buf, int bufSize)
{
outputData.insert(outputData.end(), buf, buf + bufSize);
return bufSize;
}
std::string getAvErrorString(int errNr)
{
char errMsg[ERRMSGSIZE];
av_strerror(errNr, errMsg, ERRMSGSIZE);
return std::string(errMsg);
}
int main(int argc, char **argv)
{
if(argc < 2)
{
std::cout << "Missing file name" << std::endl;
return 1;
}
std::fstream file(argv[1], std::ios::in | std::ios::binary);
if(!file.is_open())
{
std::cout << "Couldn't open file " << argv[1] << std::endl;
return 2;
}
std::vector<uint8_t> inputMediaData;
do
{
char buf[READBUFSIZE];
file.read(buf, READBUFSIZE);
int size = file.gcount();
if(size > 0)
inputMediaData.insert(inputMediaData.end(), buf, buf + size);
} while(!file.eof());
file.close();
//Initialize avcodec
av_register_all();
uint8_t *ioBuffer;
AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
AVCodecContext *codecCtxt = avcodec_alloc_context3(codec);
AVCodecParserContext *parserCtxt = av_parser_init(AV_CODEC_ID_H264);
AVOutputFormat *outputFormat = av_guess_format("mp4", nullptr, nullptr);
AVFormatContext *formatCtxt;
AVIOContext *ioCtxt;
AVStream *videoStream;
int res = avformat_alloc_output_context2(&formatCtxt, outputFormat, nullptr, nullptr);
if(res < 0)
{
std::cout << "Couldn't initialize format context; the error was: " << getAvErrorString(res) << std::endl;
return 3;
}
if((videoStream = avformat_new_stream( formatCtxt, avcodec_find_encoder(formatCtxt->oformat->video_codec) )) == nullptr)
{
std::cout << "Couldn't initialize video stream" << std::endl;
return 4;
}
else if(!codec)
{
std::cout << "Couldn't initialize codec" << std::endl;
return 5;
}
else if(codecCtxt == nullptr)
{
std::cout << "Couldn't initialize codec context" << std::endl;
return 6;
}
else if(parserCtxt == nullptr)
{
std::cout << "Couldn't initialize parser context" << std::endl;
return 7;
}
else if((ioBuffer = (uint8_t*)av_malloc(IOBUFSIZE)) == nullptr)
{
std::cout << "Couldn't allocate I/O buffer" << std::endl;
return 8;
}
else if((ioCtxt = avio_alloc_context(ioBuffer, IOBUFSIZE, 1, nullptr, nullptr, mediaMuxCallback, nullptr)) == nullptr)
{
std::cout << "Couldn't initialize I/O context" << std::endl;
return 9;
}
//Set video stream data
videoStream->id = formatCtxt->nb_streams - 1;
videoStream->codec->width = 1280;
videoStream->codec->height = 720;
videoStream->time_base.den = 60; //FPS
videoStream->time_base.num = 1;
videoStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
formatCtxt->pb = ioCtxt;
//Retrieve SPS and PPS for codec extdata
const uint32_t synchMarker = 0x01000000;
unsigned int i = 0;
int spsStart = -1, ppsStart = -1;
uint16_t spsSize = 0, ppsSize = 0;
while(spsSize == 0 || ppsSize == 0)
{
uint32_t *curr = (uint32_t*)(inputMediaData.data() + i);
if(*curr == synchMarker)
{
unsigned int currentNalStart = i;
i += sizeof(uint32_t);
uint8_t nalType = inputMediaData.data()[i] & 0x1F;
if(nalType == SEQ_PARAM_SET)
spsStart = currentNalStart;
else if(nalType == PIC_PARAM_SET)
ppsStart = currentNalStart;
if(spsStart >= 0 && spsSize == 0 && spsStart != i)
spsSize = currentNalStart - spsStart;
else if(ppsStart >= 0 && ppsSize == 0 && ppsStart != i)
ppsSize = currentNalStart - ppsStart;
}
++i;
}
videoStream->codec->extradata = inputMediaData.data() + spsStart;
videoStream->codec->extradata_size = ppsStart + ppsSize;
//Write main header
AVDictionary *options = nullptr;
av_dict_set(&options, "movflags", "frag_custom+empty_moov", 0);
res = avformat_write_header(formatCtxt, &options);
if(res < 0)
{
std::cout << "Couldn't write container main header; the error was: " << getAvErrorString(res) << std::endl;
return 10;
}
//Retrieve frames from input video and wrap them in container
int currentInputIndex = 0;
int framesInSecond = 0;
while(currentInputIndex < inputMediaData.size())
{
uint8_t *frameBuffer;
int frameSize;
res = av_parser_parse2(parserCtxt, codecCtxt, &frameBuffer, &frameSize, inputMediaData.data() + currentInputIndex,
inputMediaData.size() - currentInputIndex, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if(frameSize == 0) //No more frames while some data still remains (is that even possible?)
{
std::cout << "Some data left unparsed: " << std::to_string(inputMediaData.size() - currentInputIndex) << std::endl;
break;
}
//Prepare packet with video frame to be dumped into container
AVPacket packet;
av_init_packet(&packet);
packet.data = frameBuffer;
packet.size = frameSize;
packet.stream_index = videoStream->index;
currentInputIndex += frameSize;
//Write packet to the video stream
res = av_write_frame(formatCtxt, &packet);
if(res < 0)
{
std::cout << "Couldn't write packet with video frame; the error was: " << getAvErrorString(res) << std::endl;
return 11;
}
if(++framesInSecond == 60) //We want 1 segment per second
{
framesInSecond = 0;
res = av_write_frame(formatCtxt, nullptr); //Flush segment
}
}
res = av_write_frame(formatCtxt, nullptr); //Flush if something has been left
//Write media data in container to file
file.open("my_mp4.mp4", std::ios::out | std::ios::binary);
if(!file.is_open())
{
std::cout << "Couldn't open output file " << std::endl;
return 12;
}
file.write((char*)outputData.data(), outputData.size());
if(file.fail())
{
std::cout << "Couldn't write to file" << std::endl;
return 13;
}
std::cout << "Media file muxed successfully" << std::endl;
return 0;
}
(I hardcoded a few values, such as the video dimensions and framerate, but as I said, this is just test code.)
Simple HTML webpage using MSE to play my fragmented MP4:
<!DOCTYPE html>
<html>
<head>
<title>Stream test</title>
</head>
<body>
<video width="1280" height="720" controls>
</video>
</body>
<script>
var vidElement = document.querySelector('video');
if (window.MediaSource) {
var mediaSource = new MediaSource();
vidElement.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener('sourceopen', sourceOpen);
} else {
console.log("The Media Source Extensions API is not supported.")
}
function sourceOpen(e) {
URL.revokeObjectURL(vidElement.src);
var mime = 'video/mp4; codecs="avc1.640028"';
var mediaSource = e.target;
var sourceBuffer = mediaSource.addSourceBuffer(mime);
var videoUrl = 'my_mp4.mp4';
fetch(videoUrl)
.then(function(response) {
return response.arrayBuffer();
})
.then(function(arrayBuffer) {
sourceBuffer.addEventListener('updateend', function(e) {
if (!sourceBuffer.updating && mediaSource.readyState === 'open') {
mediaSource.endOfStream();
}
});
sourceBuffer.appendBuffer(arrayBuffer);
});
}
</script>
</html>
The output MP4 file generated by my C++ application can be played e.g. in MPC, but it doesn't play in any web browser I tested it with. It also doesn't have any duration (MPC keeps showing 00:00).
To compare with the output MP4 file I got from my C++ application described above, I also used FFMPEG to create a fragmented MP4 file from the same source file with the raw H264 stream. I used the following command:
ffmpeg -r 60 -i input.h264 -c:v copy -f mp4 -movflags empty_moov+default_base_moof+frag_keyframe test.mp4
The file generated by FFMPEG plays correctly in every web browser I used for the tests. It also has the correct duration (though it also has a trailing atom, which wouldn't be present in my live stream anyway; and since I need a live stream, it won't have any fixed duration in the first place).
The MP4 atoms of both files look very similar (they have an identical avcc section for sure). What's interesting (though I'm not sure if it's of any importance) is that both files have a different NAL format than the input file (the RPi camera produces a video stream in Annex-B format, while the output MP4 files contain NALs in AVCC format... or at least that seems to be the case when I compare the mdat atoms with the input H264 data).
I assume there is some field (or a few fields) I need to set for avcodec to make it produce a video stream that browsers' players can properly decode and play. But which field(s) do I need to set? Or maybe the problem lies somewhere else? I have run out of ideas.
EDIT 1:
As suggested, I investigated the binary content of both MP4 files (generated by my app and by the FFMPEG tool) with a hex editor. What I can confirm:
both files have an identical avcc section (they match perfectly and are in AVCC format; I analyzed it byte by byte and there's no mistake about it)
both files have NALs in AVCC format (I looked closely at the mdat atoms and they don't differ between the two MP4 files)
So I guess there's nothing wrong with the extradata creation in my code - avcodec takes care of it properly, even if I just feed it with SPS and PPS NALs. It converts them by itself, so there's no need for me to do it by hand. Still, my original problem remains.
EDIT 2: I achieved partial success - the MP4 generated by my app now plays in Firefox. I added this line to the code (along with the rest of the stream initialization):
videoStream->codec->time_base = videoStream->time_base;
So now this section of my code looks like this:
//Set video stream data
videoStream->id = formatCtxt->nb_streams - 1;
videoStream->codec->width = 1280;
videoStream->codec->height = 720;
videoStream->time_base.den = 60; //FPS
videoStream->time_base.num = 1;
videoStream->codec->time_base = videoStream->time_base;
videoStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
formatCtxt->pb = ioCtxt;
I finally found the solution. My MP4 now plays in Chrome (while still playing in the other tested browsers).
In Chrome, chrome://media-internals/ shows MSE logs (of a sort). When I looked there, I found a few of the following warnings for my test player:
ISO-BMFF container metadata for video frame indicates that the frame is not a keyframe, but the video frame contents indicate the opposite.
That made me think and encouraged me to set AV_PKT_FLAG_KEY for packets containing keyframes. I added the following code to the section that fills the AVPacket structure:
//Check if keyframe field needs to be set
int allowedNalsCount = 3; //In one packet there would be at most three NALs: SPS, PPS and video frame
packet.flags = 0;
for(int i = 0; i < frameSize && allowedNalsCount > 0; ++i)
{
uint32_t *curr = (uint32_t*)(frameBuffer + i);
if(*curr == synchMarker)
{
uint8_t nalType = frameBuffer[i + sizeof(uint32_t)] & 0x1F;
if(nalType == KEYFRAME)
{
std::cout << "Keyframe detected at frame nr " << framesTotal << std::endl;
packet.flags = AV_PKT_FLAG_KEY;
break;
}
else
i += sizeof(uint32_t) + 1; //We parsed this already, no point in doing it again
--allowedNalsCount;
}
}
The KEYFRAME constant turns out to be 0x5 in my case (IDR slice).
The MP4 atoms of both files look very similar (they have an identical avcc section for sure)
Double-check that; the code supplied suggests otherwise to me.
What's interesting (though I'm not sure if it's of any importance) is that both files have a different NAL format than the input file (the RPi camera produces a video stream in Annex-B format, while the output MP4 files contain NALs in AVCC format... or at least that seems to be the case when I compare the mdat atoms with the input H264 data).
It is very important: MP4 will not work with Annex B.
You need to fill in extradata with an AVC Decoder Configuration Record, not just the SPS/PPS.
Here's how the record should look:
AVCDCR
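For illustration, here is a rough sketch (my own helper, not part of the question's code) of how such a record can be assembled by hand from one SPS and one PPS NAL, both passed without their Annex-B start codes; it assumes exactly one parameter set of each kind and ignores the optional extension fields that High-profile streams may carry:
#include <cstdint>
#include <vector>

// Build an AVCDecoderConfigurationRecord ("avcC") from one SPS and one PPS NAL
// (raw NAL bytes, no start codes). Field layout follows ISO/IEC 14496-15.
std::vector<uint8_t> makeAvcC(const std::vector<uint8_t> &sps,
                              const std::vector<uint8_t> &pps)
{
    std::vector<uint8_t> avcc;
    avcc.push_back(1);                           // configurationVersion
    avcc.push_back(sps[1]);                      // AVCProfileIndication
    avcc.push_back(sps[2]);                      // profile_compatibility
    avcc.push_back(sps[3]);                      // AVCLevelIndication
    avcc.push_back(0xFF);                        // 6 reserved bits + lengthSizeMinusOne = 3 (4-byte NAL lengths)
    avcc.push_back(0xE1);                        // 3 reserved bits + numOfSequenceParameterSets = 1
    avcc.push_back(uint8_t(sps.size() >> 8));    // sequenceParameterSetLength, big endian
    avcc.push_back(uint8_t(sps.size() & 0xFF));
    avcc.insert(avcc.end(), sps.begin(), sps.end());
    avcc.push_back(1);                           // numOfPictureParameterSets
    avcc.push_back(uint8_t(pps.size() >> 8));    // pictureParameterSetLength, big endian
    avcc.push_back(uint8_t(pps.size() & 0xFF));
    avcc.insert(avcc.end(), pps.begin(), pps.end());
    return avcc;                                 // copy into an av_malloc'd buffer and assign to extradata
}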
We can find this explanation in the [Chrome source](https://chromium.googlesource.com/chromium/src/+/refs/heads/master/media/formats/mp4/mp4_stream_parser.cc#799):
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Use |analysis.is_keyframe|, if it was actually determined, for logging
// if the analysis mismatches the container's keyframe metadata for
// |frame_buf|.
if (analysis.is_keyframe.has_value() &&
is_keyframe != analysis.is_keyframe.value()) {
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_video_keyframe_mismatches_,
kMaxVideoKeyframeMismatchLogs)
<< "ISO-BMFF container metadata for video frame indicates that the "
"frame is "
<< (is_keyframe ? "" : "not ")
<< "a keyframe, but the video frame contents indicate the "
"opposite.";
// As of September 2018, it appears that all of Edge, Firefox, Safari
// work with content that marks non-avc-keyframes as a keyframe in the
// container. Encoders/muxers/old streams still exist that produce
// all-keyframe mp4 video tracks, though many of the coded frames are
// not keyframes (likely workaround due to the impact on low-latency
// live streams until https://crbug.com/229412 was fixed). We'll trust
// the AVC frame's keyframe-ness over the mp4 container's metadata if
// they mismatch. If other out-of-order codecs in mp4 (e.g. HEVC, DV)
// implement keyframe analysis in their frame_bitstream_converter, we'll
// similarly trust that analysis instead of the mp4.
is_keyframe = analysis.is_keyframe.value();
}
As the code comment shows, Chrome trusts the AVC frame's keyframe-ness over the MP4 container's metadata. So the NALU type in H264/HEVC matters more than the MP4 container's sdtp and trun box descriptions.
I bought an 'AVerMedia Capture Card (C985 LITE)' last week, and I connected a video camera to the capture card's HDMI input.
When I tested with AVerMedia's RECentral software, Amcap, and ffmpeg, it worked.
But when I tested with AVerMedia's AVerCapSDKDemo, VLC, Windows Movie Maker, and Windows DirectShow, it didn't work.
Then I tried to grab camera frames (in real time) with sample code from the internet and my own C++ code (with and without OpenCV). All of the code works with a generic USB webcam, but not with this capture card.
In every case the C++ code can see the capture card, but can't see the camera connected to the card.
The conditions under which I tested (and it didn't work) are below:
1st PC spec: Intel Core i5, 16 GB RAM, 1 TB HDD, DirectX 11, Windows 10 64-bit
2nd PC spec: Intel Core i7, 8 GB RAM, 1 TB HDD, DirectX 11, Windows 7 64-bit
IDE: Visual Studio 2015
Camera: GoPro and SONY Handycam, both full HD with HDMI output
About my project: I want to track cars on the road in real time, which is why I decided to use the C985 capture card, since it supports full HD.
Does anyone have any advice?
Thank you very much.
Edit: Add Example Code
1. My code with OpenCV: this code always shows "error: frame not read from webcam\n".
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
#include<conio.h>
int main() {
cv::VideoCapture capWebcam(0); // declare a VideoCapture object and associate to webcam, 0 => use 1st webcam
if (capWebcam.isOpened() == false) { // check if VideoCapture object was associated to webcam successfully
std::cout << "error: capWebcam not accessed successfully\n\n"; // if not, print error message to std out
_getch(); // may have to modify this line if not using Windows
return(0); // and exit program
}
cv::Mat imgOriginal; // current frame from the capture device
char charCheckForEscKey = 0;
while (charCheckForEscKey != 27 && capWebcam.isOpened()) { // until the Esc key is pressed or webcam connection is lost
bool blnFrameReadSuccessfully = capWebcam.read(imgOriginal); // get next frame
if (!blnFrameReadSuccessfully || imgOriginal.empty()) { // if frame not read successfully
std::cout << "error: frame not read from webcam\n"; // print error message to std out
continue; // and skip to the next iteration of the loop
}
cv::namedWindow("imgOriginal", CV_WINDOW_NORMAL); // note: you can use CV_WINDOW_NORMAL which allows resizing the window
cv::imshow("imgOriginal", imgOriginal); // show windows
charCheckForEscKey = cv::waitKey(1); // delay (in ms) and get key press, if any
} // end while
return(0);
}
2. My code without OpenCV (using AForge): for this code, the image shows nothing.
private void Form1_Load(object sender, EventArgs e)
{
FilterInfoCollection videoDevices = new FilterInfoCollection(FilterCategory.VideoInputDevice);
for (int i = 0; i< videoDevices.Count; i++)
{
comboBox1.Items.Add(videoDevices[i].MonikerString);
}
// create video source
}
private void video_NewFrame(object sender, NewFrameEventArgs eventArgs)
{
Bitmap img = (Bitmap)eventArgs.Frame.Clone();
pictureBox1.Image = img;
}
private void button1_Click(object sender, EventArgs e)
{
VideoCaptureDeviceForm xx = new VideoCaptureDeviceForm();
xx.ShowDialog();
VideoCaptureDevice videoSource = new VideoCaptureDevice(xx.VideoDeviceMoniker);
//videoSource.Source = "AVerMedia HD Capture C985 Bus 2";
VideoInput input = videoSource.CrossbarVideoInput;
MessageBox.Show("" + videoSource.CheckIfCrossbarAvailable());
MessageBox.Show(" " + input.Index + " " + input.Type);
// set NewFrame event handler
videoSource.NewFrame += video_NewFrame;
foreach(var x in videoSource.AvailableCrossbarVideoInputs)
{
MessageBox.Show("AvailableCrossbarVideoInputs > " + x.Index);
}
videoSource.VideoSourceError += VideoSource_VideoSourceError;
// start the video source
videoSource.Start();
// signal to stop when you no longer need capturing
videoSource.SignalToStop();
videoSource.Start();
MessageBox.Show("AvailableCrossbarVideoInputs length :" + videoSource.AvailableCrossbarVideoInputs.Length);
input = videoSource.CrossbarVideoInput;
MessageBox.Show(" " + input.Index + " " + input.Type);
videoSource.SignalToStop();
videoSource.Start();
}
3. Code from the internet: I used the code from the CodeProject article (Capture Live Video from Various Video Devices) at the link below. It showed "can't detect Webcam".
https://www.codeproject.com/articles/7123/capture-live-video-from-various-video-devices
Hope my code can help. (I use the AVerMedia SDK + OpenCV 3; I use the DirectShow API to open the device, then get the video into Mat format.)
#include "stdafx.h"
#include "atlstr.h"
#include <iostream>
#include "AVerCapAPI_Pro.h"
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <windows.h>
using namespace std;
using namespace cv;
void ErrorMsg(DWORD ErrorCode)
{
printf("ErrorCode = %d\n", ErrorCode);
if (ErrorCode == CAP_EC_SUCCESS)
{
printf("CAP_EC_SUCCESS\n");
}
if (ErrorCode == CAP_EC_INIT_DEVICE_FAILED)
{
printf("CAP_EC_INIT_DEVICE_FAILED\n");
}
if (ErrorCode == CAP_EC_DEVICE_IN_USE)
{
printf("CAP_EC_DEVICE_IN_USE\n");
}
if (ErrorCode == CAP_EC_NOT_SUPPORTED)
{
printf("CAP_EC_NOT_SUPPORTED\n");
}
if (ErrorCode == CAP_EC_INVALID_PARAM)
{
printf("CAP_EC_INVALID_PARAM\n");
}
if (ErrorCode == CAP_EC_TIMEOUT)
{
printf("CAP_EC_TIMEOUT\n");
}
if (ErrorCode == CAP_EC_NOT_ENOUGH_MEMORY)
{
printf("CAP_EC_NOT_ENOUGH_MEMORY\n");
}
if (ErrorCode == CAP_EC_UNKNOWN_ERROR)
{
printf("CAP_EC_UNKNOWN_ERROR\n");
}
if (ErrorCode == CAP_EC_ERROR_STATE)
{
printf("CAP_EC_ERROR_STATE\n");
}
if (ErrorCode == CAP_EC_HDCP_PROTECTED_CONTENT)
{
printf("CAP_EC_HDCP_PROTECTED_CONTENT\n");
}
}
BOOL WINAPI CaptureVideo(VIDEO_SAMPLE_INFO VideoInfo, BYTE *pbData, LONG lLength, __int64 tRefTime, LONG lUserData);
BOOL bGetData = FALSE;
Mat ans2;
int main(int argc, char** argv)
{
LONG lRetVal;
DWORD dwDeviceNum;
DWORD dwDeviceIndex = 0;
HANDLE hAverCapturedevice[10];
//Device Control
//1. Get Device Number
lRetVal = AVerGetDeviceNum(&dwDeviceNum);
if (lRetVal != CAP_EC_SUCCESS) {
printf("\nAVerGetDeviceNum Fail");
ErrorMsg(lRetVal);
system("pause");
}
if (dwDeviceNum == 0) {
printf("NO device found\n");
system("pause");
}
else {
printf("Device Number = %d\n", dwDeviceNum);
}
//2. Create device representative object handle
for (DWORD dwDeviceIndex = 0; dwDeviceIndex < dwDeviceNum; dwDeviceIndex++) {
lRetVal = AVerCreateCaptureObjectEx(dwDeviceIndex, DEVICETYPE_ALL, NULL, &hAverCapturedevice[dwDeviceIndex]);
if (lRetVal != CAP_EC_SUCCESS) {
printf("\nAVerCreateCaptureObjectEx Fail\n");
ErrorMsg(lRetVal);
system("pause");
}
else
printf("\nAVerCreateCaptureObjectEx Success\n");
}
//3. Start Streaming//
//3.1 set video source
//lRetVal = AVerSetVideoSource(hAverCapturedevice[0], 3);
lRetVal = AVerSetVideoSource(hAverCapturedevice[0], 3);
//3.2 set Video Resolution & FrameRate
VIDEO_RESOLUTION VideoResolution = { 0 };
INPUT_VIDEO_INFO InputVideoInfo;
ZeroMemory(&InputVideoInfo, sizeof(InputVideoInfo));
InputVideoInfo.dwVersion = 2;
Sleep(500);
lRetVal = AVerGetVideoInfo(hAverCapturedevice[0], &InputVideoInfo);
VideoResolution.dwVersion = 1;
VideoResolution.dwVideoResolution = VIDEORESOLUTION_1280X720;
lRetVal = AVerSetVideoResolutionEx(hAverCapturedevice[0], &VideoResolution);
lRetVal = AVerSetVideoInputFrameRate(hAverCapturedevice[0], 6000);
//3.3 Start Streaming
lRetVal = AVerStartStreaming(hAverCapturedevice[0]);
if (lRetVal != CAP_EC_SUCCESS) {
printf("\AVerStartStreaming Fail\n");
ErrorMsg(lRetVal);
//system("pause");
}
else
{
printf("\AVerStartStreaming Success\n");
//system("pause");
}
//4. Capture Single Image
#if 0
CAPTURE_IMAGE_INFO m_CaptureImageInfo = { 0 };
char text[] = "E:\Lena.bmp";
wchar_t wtext[20];
#define _CRT_SECURE_NO_WARNINGS
#pragma warning( disable : 4996 )
mbstowcs(wtext, text, strlen(text) + 1);//Plus null
LPWSTR m_strSavePath = wtext;
CAPTURE_SINGLE_IMAGE_INFO pCaptureSingleImageInfo = { 0 };
pCaptureSingleImageInfo.dwVersion = 1;
pCaptureSingleImageInfo.dwImageType = 2;
pCaptureSingleImageInfo.bOverlayMix = FALSE;
pCaptureSingleImageInfo.lpFileName = m_strSavePath;
//pCaptureSingleImageInfo.rcCapRect = 0;
lRetVal = AVerCaptureSingleImage(hAverCapturedevice[0], &pCaptureSingleImageInfo);
printf("\AVerCaptureSingleImage\n");
ErrorMsg(lRetVal);
#endif
#if 1
//video capture
VIDEO_CAPTURE_INFO VideoCaptureInfo;
ZeroMemory(&VideoCaptureInfo, sizeof(VIDEO_CAPTURE_INFO));
VideoCaptureInfo.bOverlayMix = FALSE;
VideoCaptureInfo.dwCaptureType = CT_SEQUENCE_FRAME;
VideoCaptureInfo.dwSaveType = ST_CALLBACK_RGB24;
VideoCaptureInfo.lpCallback = CaptureVideo;
VideoCaptureInfo.lCallbackUserData = NULL;
lRetVal = AVerCaptureVideoSequenceStart(hAverCapturedevice[0], VideoCaptureInfo);
if (FAILED(lRetVal))
{
return lRetVal;
}
//system("pause");// hange up
#endif
int i;
scanf_s("%d", &i, 4); //must input any number in console !!
//5. Stop Streaming
lRetVal = AVerCaptureVideoSequenceStop(hAverCapturedevice[0]);
lRetVal = AVerStopStreaming(hAverCapturedevice[0]);
//printf("\AVerStopStreaming Success\n");
ErrorMsg(lRetVal);
return 0;
}
BOOL WINAPI CaptureVideo(VIDEO_SAMPLE_INFO VideoInfo, BYTE *pbData, LONG lLength, __int64 tRefTime, LONG lUserData)
{
if (!bGetData)
{
ans2 = Mat(VideoInfo.dwHeight, VideoInfo.dwWidth, CV_8UC3, (uchar*)pbData).clone();//single capture image
//ans2 = Mat(VideoInfo.dwHeight, VideoInfo.dwWidth, CV_8UC3, (uchar*)pbData); //sequence capture image
bGetData = TRUE;
}
imshow("ans2", ans2);
waitKey(1);
return TRUE;
}
Now it's solved: I formatted the computer and installed Windows 10 without updates.
I also wrote a program to call GraphEdit, which sets up the following filters:
GraphEdit's filter graph
Everything seemed to work fine until I updated Windows by mistake.
I am trying to build an application that converts my old custom Ethernet logs (bin files) to standard WinPcap-style logs.
The problem is that I can't seem to find an example of how to open a pcap_t* without using an adapter (network card). The temp.pkt file never gets created.
I have looked through the examples provided with WinPcap, and all of them use a live adapter when dumping packets. The closest one is \WpdPack\Examples-pcap\savedump\savedump.c; see the example below, slightly modified.
#ifdef _MSC_VER
/*
* we do not want the warnings about the old deprecated and unsecure CRT functions
* since these examples can be compiled under *nix as well
*/
#define _CRT_SECURE_NO_WARNINGS
#endif
#include "pcap.h"
int main(int argc, char **argv)
{
pcap_if_t *alldevs;
pcap_if_t *d;
int inum;
int i=0;
pcap_t *adhandle;
char errbuf[PCAP_ERRBUF_SIZE];
pcap_dumper_t *dumpfile;
/* Open the adapter */
if ((adhandle= pcap_open(??????, // name of the device
65536, // portion of the packet to capture.
// 65536 grants that the whole packet will be captured on all the MACs.
1, // promiscuous mode (nonzero means promiscuous)
1000, // read timeout
errbuf // error buffer
)) == NULL)
{
fprintf(stderr,"\nUnable to open the adapter. %s is not supported by WinPcap\n", d->name);
/* Free the device list */
pcap_freealldevs(alldevs);
return -1;
}
/* Open the dump file */
dumpfile = pcap_dump_open(adhandle, argv[1]);
if(dumpfile==NULL) {
fprintf(stderr,"\nError opening output file\n");
return -1;
}
// ---------------------------
struct pcap_pkthdr header;
header.ts.tv_sec = 1 ; /* seconds */
header.ts.tv_usec = 1; /* and microseconds */
header.caplen = 100; /* length of portion present */
header.len = 100 ; /* length this packet (off wire) */
u_char pkt_data[100];
for( int i = 0 ; i < 100 ; i++ ) {
pkt_data[i] = i ;
}
pcap_dump( (u_char *) dumpfile, &header, (u_char *) &pkt_data);
// ---------------------------
/* start the capture */
// pcap_loop(adhandle, 0, packet_handler, (unsigned char *)dumpfile);
pcap_close(adhandle);
return 0;
}
I suggest doing it with a pcap_t, since letting WinPcap write the file is better than writing it yourself.
The following steps show how to do it:
Use the pcap_open_dead() function to create a pcap_t. Read the function description here. The linktype for Ethernet is 1.
Use the pcap_dump_open() function to create a pcap_dumper_t.
Use the pcap_dump() function to write each packet to the dump file.
I hope this helps you.
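Putting those three calls together, a rough sketch (reusing the dummy 100-byte packet from your savedump variant; in your converter the bytes and timestamps would come from your .bin log) could look like this:
#include <pcap.h>

int convert_to_pcap(const char *outfile)
{
    pcap_t *pd = pcap_open_dead(DLT_EN10MB, 65535);       /* 1. dead handle, Ethernet link type */
    pcap_dumper_t *dumper = pcap_dump_open(pd, outfile);  /* 2. open the dump file */
    if (dumper == NULL) {
        pcap_close(pd);
        return -1;
    }

    u_char pkt_data[100];
    struct pcap_pkthdr header;
    for (int i = 0; i < 100; i++)
        pkt_data[i] = (u_char)i;
    header.ts.tv_sec = 1;
    header.ts.tv_usec = 1;
    header.caplen = header.len = sizeof(pkt_data);
    pcap_dump((u_char *)dumper, &header, pkt_data);        /* 3. write one record */

    pcap_dump_close(dumper);
    pcap_close(pd);
    return 0;
}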
If all you're doing is converting your own file format to .pcap, you don't need a pcap_t*; you can just use something like:
#include <stdio.h>
#include <pcap.h>

#define TCPDUMP_MAGIC 0xa1b2c3d4

FILE* create_pcap_file(const char *filename, int linktype)
{
struct pcap_file_header fh;
fh.magic = TCPDUMP_MAGIC;
fh.sigfigs = 0;
fh.version_major = 2;
fh.version_minor = 4;
fh.snaplen = 2<<15;
fh.thiszone = 0;
fh.linktype = linktype;
FILE *file = fopen(filename, "wb");
if(file != NULL) {
if(fwrite(&fh, sizeof(fh), 1, file) != 1) {
fclose(file);
file = NULL;
}
}
return file;
}
int write_pcap_packet(FILE *file, size_t length, const unsigned char *data, const struct timeval *tval)
{
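/* Note: writing struct pcap_pkthdr directly assumes it matches the on-disk
   record header layout (two 32-bit timestamp fields, then caplen and len). */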
struct pcap_pkthdr pkhdr;
pkhdr.caplen = length;
pkhdr.len = length;
pkhdr.ts = *tval;
if(fwrite(&pkhdr, sizeof(pkhdr), 1, file) != 1) {
return 1;
}
if(fwrite(data, 1, length, file) != length) {
return 2;
}
return 0;
}
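A caller (hypothetical, just to show the intended use) would then look something like:
int convert_one(void)
{
    FILE *f = create_pcap_file("temp.pkt", 1 /* Ethernet */);
    if (f == NULL)
        return -1;

    unsigned char pkt[100];
    struct timeval tv = { 1, 1 };
    for (int i = 0; i < 100; i++)
        pkt[i] = (unsigned char)i;

    int rc = write_pcap_packet(f, sizeof(pkt), pkt, &tv);
    fclose(f);
    return rc;
}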
How do I write bitmaps as frames to Ogg Theora in C/C++?
Some examples with source would be great!
The entire solution is a little lengthy to post here as a code sample, but if you download libtheora from Xiph.org, there is an example, png2theora. All of the library functions I am about to mention can be found in the documentation on Xiph.org for theora and ogg.
Call th_info_init() to initialise a th_info structure, then set up your output parameters by assigning the appropriate members in it.
Use that structure in a call to th_encode_alloc() to get an encoder context.
Initialise an ogg stream with ogg_stream_init().
Initialise a blank th_comment structure using th_comment_init().
Iterate through the following:
Call th_encode_flushheader() with the encoder context, the blank comment structure and an ogg_packet.
Send the resulting packet to the ogg stream with ogg_stream_packetin().
Repeat until th_encode_flushheader() returns 0 (or an error code).
Now, repeatedly call ogg_stream_pageout(), every time writing page.header and then page.body to the output file, until it returns 0. Then call ogg_stream_flush() and write the resulting page to the file; a sketch of this setup follows below.
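Here is that sketch, under some assumptions of mine: error handling is omitted, the frame size, frame rate and quality are placeholder values, output goes through stdio rather than the raw file descriptor used below, and the static encoderContext and theoraStreamState are the same names theora_write_frame() uses:
#include <stdio.h>
#include <stdlib.h>
#include <ogg/ogg.h>
#include <theora/theoraenc.h>

/* Condensed setup: create the encoder, flush the Theora headers into the ogg
   stream, and write the resulting pages before any video data. */
static th_enc_ctx *encoderContext;
static ogg_stream_state theoraStreamState;

int theora_open(FILE *out, unsigned int w, unsigned int h)
{
    th_info ti;
    th_comment tc;
    ogg_packet op;
    ogg_page og;

    th_info_init(&ti);
    ti.frame_width  = (w + 15) & ~15;   /* encoded frame size: multiple of 16 */
    ti.frame_height = (h + 15) & ~15;
    ti.pic_width  = w;                  /* visible picture size */
    ti.pic_height = h;
    ti.pic_x = ti.pic_y = 0;
    ti.fps_numerator = 30;
    ti.fps_denominator = 1;
    ti.pixel_fmt = TH_PF_420;
    ti.colorspace = TH_CS_UNSPECIFIED;
    ti.quality = 48;                    /* 0..63 */

    encoderContext = th_encode_alloc(&ti);
    th_info_clear(&ti);
    if(encoderContext == NULL)
        return -1;

    ogg_stream_init(&theoraStreamState, rand());
    th_comment_init(&tc);

    /* Keep calling th_encode_flushheader() until it returns 0, feeding each
       header packet into the ogg stream. */
    while(th_encode_flushheader(encoderContext, &tc, &op) > 0)
        ogg_stream_packetin(&theoraStreamState, &op);
    th_comment_clear(&tc);

    /* Write out the header pages, then flush so no header data is left
       buffered before the first video packet. */
    while(ogg_stream_pageout(&theoraStreamState, &og) > 0) {
        fwrite(og.header, 1, og.header_len, out);
        fwrite(og.body, 1, og.body_len, out);
    }
    while(ogg_stream_flush(&theoraStreamState, &og) > 0) {
        fwrite(og.header, 1, og.header_len, out);
        fwrite(og.body, 1, og.body_len, out);
    }
    return 0;
}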
You can now write frames to the encoder. Here is how I did it:
int theora_write_frame(int outputFd, unsigned long w, unsigned long h, unsigned char *yuv_y, unsigned char *yuv_u, unsigned char *yuv_v, int last)
{
th_ycbcr_buffer ycbcr;
ogg_packet op;
ogg_page og;
unsigned long yuv_w;
unsigned long yuv_h;
/* Must hold: yuv_w >= w */
yuv_w = (w + 15) & ~15;
/* Must hold: yuv_h >= h */
yuv_h = (h + 15) & ~15;
//Fill out the ycbcr buffer
ycbcr[0].width = yuv_w;
ycbcr[0].height = yuv_h;
ycbcr[0].stride = yuv_w;
ycbcr[1].width = yuv_w;
ycbcr[1].stride = ycbcr[1].width;
ycbcr[1].height = yuv_h;
ycbcr[2].width = ycbcr[1].width;
ycbcr[2].stride = ycbcr[1].stride;
ycbcr[2].height = ycbcr[1].height;
if(encoderInfo->pixel_fmt == TH_PF_420)
{
//Chroma is decimated by 2 in both directions
ycbcr[1].width = yuv_w >> 1;
ycbcr[2].width = yuv_w >> 1;
ycbcr[1].height = yuv_h >> 1;
ycbcr[2].height = yuv_h >> 1;
}else if(encoderInfo->pixel_fmt == TH_PF_422)
{
ycbcr[1].width = yuv_w >> 1;
ycbcr[2].width = yuv_w >> 1;
}else if(encoderInfo->pixel_fmt != TH_PF_422)
{
//Then we have an unknown pixel format
//We don't know how long the arrays are!
fprintf(stderr, "[theora_write_frame] Unknown pixel format in writeFrame!\n");
return -1;
}
ycbcr[0].data = yuv_y;
ycbcr[1].data = yuv_u;
ycbcr[2].data = yuv_v;
/* Theora is a one-frame-in,one-frame-out system; submit a frame
for compression and pull out the packet */
if(th_encode_ycbcr_in(encoderContext, ycbcr)) {
fprintf(stderr, "[theora_write_frame] Error: could not encode frame\n");
return -1;
}
if(!th_encode_packetout(encoderContext, last, &op)) {
fprintf(stderr, "[theora_write_frame] Error: could not read packets\n");
return -1;
}
ogg_stream_packetin(&theoraStreamState, &op);
ssize_t bytesWritten = 0;
int pagesOut = 0;
while(ogg_stream_pageout(&theoraStreamState, &og)) {
pagesOut ++;
bytesWritten = write(outputFd, og.header, og.header_len);
if(bytesWritten != og.header_len)
{
fprintf(stderr, "[theora_write_frame] Error: Could not write to file\n");
return -1;
}
bytesWritten = write(outputFd, og.body, og.body_len);
if(bytesWritten != og.body_len)
{
bytesWritten = fprintf(stderr, "[theora_write_frame] Error: Could not write to file\n");
return -1;
}
}
return pagesOut;
}
Where encoderInfo is the th_info structure used to initialise the encoder (static in the data section for me).
On your last frame, passing a non-zero last flag to th_encode_packetout() will make sure the stream terminates properly.
Once you're done, just make sure to clean up (closing fds mainly). th_info_clear() will clear the th_info structure, and th_encode_free() will free your encoder context.
Obviously, you'll need to convert your bitmap into YUV planes before you can pass them to theora_write_frame().
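If it helps, here is a rough sketch of one way to do that conversion for TH_PF_420, using the common BT.601 integer approximation (video-range Y'CbCr). It simply takes the top-left sample of each 2x2 block for chroma instead of averaging, assumes an even width and height, and ignores the 16-pixel padding that theora_write_frame() expects:
/* Convert packed 24-bit RGB into planar Y, Cb (U) and Cr (V) for TH_PF_420.
   Caller allocates the planes: Y is w*h bytes, U and V are (w/2)*(h/2) each. */
static void rgb24_to_yuv420(const unsigned char *rgb, unsigned int w, unsigned int h,
                            unsigned char *y, unsigned char *u, unsigned char *v)
{
    for (unsigned int row = 0; row < h; row++) {
        for (unsigned int col = 0; col < w; col++) {
            const unsigned char *p = rgb + 3 * (row * w + col);
            int r = p[0], g = p[1], b = p[2];
            y[row * w + col] = (unsigned char)(((66 * r + 129 * g + 25 * b + 128) >> 8) + 16);
            if ((row & 1) == 0 && (col & 1) == 0) {
                unsigned int ci = (row / 2) * (w / 2) + (col / 2);
                u[ci] = (unsigned char)(((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128);
                v[ci] = (unsigned char)(((112 * r - 94 * g - 18 * b + 128) >> 8) + 128);
            }
        }
    }
}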
Hope this is of some help. Good luck!
Here's the libtheora API and example code.
Here's a micro howto that shows how to use the theora binaries. As the encoder reads raw, uncompressed 'yuv4mpeg' data for video, you could use that from your app too, by piping the video frames to the encoder.