I'm trying to embed V8, but I have some memory leaks. The following minimal code reproduces the leaks.
// Minimal V8 embedding lifecycle: init ICU/startup data, platform, isolate,
// then tear everything down in reverse order.
int main(int argc, char* argv[])
{
// V8 version 7.1.302.33
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
v8::Isolate::CreateParams create_params;
// The allocator is owned by this code, not by the isolate; it must outlive
// the isolate and be deleted manually (done below after ShutdownPlatform).
create_params.array_buffer_allocator = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* pIsolate = v8::Isolate::New(create_params); // If I remove this line and the next one, memory leak disappears
pIsolate->Dispose();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete create_params.array_buffer_allocator;
// NOTE(review): the CRT still reports a few blocks after this sequence;
// per the discussion below they come from V8-internal process-wide state
// (pointer-compression cage, ICU data) that V8 deliberately never frees.
}
With this code, Visual Studio 2017 prints these leaks in the output:
Detected memory leaks!
Dumping objects ->
{5565} normal block at 0x000001BA6F417950, 8 bytes long.
Data: < i > 00 00 E8 69 18 00 00 00
{5564} normal block at 0x000001BA6F416960, 16 bytes long.
Data : <8 >o > 38 D7 3E 6F BA 01 00 00 00 00 00 00 00 00 00 00
{5563} normal block at 0x000001BA6F3ED720, 56 bytes long.
Data: < >o > o > A0 D6 3E 6F BA 01 00 00 A0 D6 3E 6F BA 01 00 00
{989} normal block at 0x000001BA6F4194E0, 128 bytes long.
Data: < >o > o > A0 D6 3E 6F BA 01 00 00 A0 D6 3E 6F BA 01 00 00
{988} normal block at 0x000001BA6F416CD0, 16 bytes long.
Data: < p ` > 70 B4 60 0A FF 7F 00 00 00 00 00 00 00 00 00 00
{987} normal block at 0x000001BA6F417270, 16 bytes long.
Data : < X ` > 58 B4 60 0A FF 7F 00 00 00 00 00 00 00 00 00 00
{986} normal block at 0x000001BA6F3ED6A0, 56 bytes long.
Data : < >o > o > 20 D7 3E 6F BA 01 00 00 20 D7 3E 6F BA 01 00 00
Object dump complete.
So do you know what I forgot?
Thanks in advance for your help ;)
I copied your code and ran it on a Linux environment with the -fsanitize=address flag set, and didn't get any memory leak error.
Here is a dirty workaround to remove most of the memory leaks.
You have to redefine the internal class IsolateAllocator to call private function FreeProcessWidePtrComprCageForTesting().
A first way by adding a new function Free() to be able to call the private function.
// HACK: re-declare V8's internal IsolateAllocator so application code can
// reach its private static cleanup function. This works only because the
// added member is a static function (it does not change object layout), but
// it is technically an ODR violation and is tied to a specific V8 build —
// any mismatch with the real class definition is undefined behavior.
namespace v8::internal
{
class IsolateAllocator
{
public:
// Public trampoline exposing the private test-only cleanup below.
static void Free() { FreeProcessWidePtrComprCageForTesting(); }
private:
// Declaration must match the symbol exported by the V8 library.
static void FreeProcessWidePtrComprCageForTesting();
};
} // namespace v8::internal
or, without adding a function to IsolateAllocator, by using a friend class as is done in the V8 unit tests (I just renamed the friend class).
// Alternative to the hack above: keep IsolateAllocator's declaration closer
// to the real one and reuse the friend class that V8's own unit tests use
// (SequentialUnmapperTest) to reach the private cleanup function. Still an
// ODR trick — the re-declared class must stay in sync with the V8 build.
namespace v8::internal
{
class IsolateAllocator
{
private:
// Same friend declaration as in V8's sources, so this class name is allowed
// to call the private static function.
friend class SequentialUnmapperTest;
static void FreeProcessWidePtrComprCageForTesting();
};
class SequentialUnmapperTest
{
public:
// Public entry point for application code.
static void Free() { IsolateAllocator::FreeProcessWidePtrComprCageForTesting(); }
};
} // namespace v8::internal
Call the Free() function before v8::V8::Dispose();:
// Shutdown order: dispose the isolate, free the process-wide cage, then tear
// down V8 and the platform, and finally delete the caller-owned allocator.
// NOTE(review): 'isolate' here corresponds to 'pIsolate' in the question's code.
isolate->Dispose();
// Must run after the last isolate is disposed and before V8::Dispose().
v8::internal::IsolateAllocator::Free();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete create_params.array_buffer_allocator;
There are still a few memory leaks: one due to calling v8::V8::InitializeICUDefaultLocation() (which makes some allocations when calling udata_setCommonData()), and others due to base::LazyInstance<std::weak_ptr<CodeRange>>::type process_wide_code_range_ in code-range.cc, which stores a std::weak_ptr<> that is never destroyed...
Related
This is very strange! I look around and find nothing. My test code is below:
#include "pch.h"
#include "Windows.h"
#include "openssl/ssl.h"
#pragma comment(lib,"../Include/lib/libssl.lib")
#pragma comment(lib,"../Include/lib/libcrypto.lib")
#pragma comment(lib,"Crypt32.lib")
#pragma comment(lib,"ws2_32.lib")
#include <stdlib.h>
#include <crtdbg.h>
#define _CRTDBG_MAP_ALLOC
// Hex-encoded 2048-bit RSA key material for the repro. Per the call
// rsa_test(pubKey, priKey) and its signature rsa_test(n, d):
// priKey is used as the private exponent d, pubKey as the modulus n.
const char* priKey = "607BC8BA457EC0A1B5ABAD88061502BEA5844E17C7DD247345CD57E501B3300B4B8889D3DFCF5017847606508DF8B283C701F35007D5F4DBA96023DD3B0CCE062D8F63BCC16021B944A1E88861B70D456BAA1E0E69C90DFCA13B4D4EA5403DA25FEBF94B0515644A7D2DF88299189688DA5D8951512ADC3B1A35BAEEC147F69AB101048B9029E65A77A438A05AE30337E82B0571B80A955C72EA0DB3B457ECC8B81F346624E3D22755FEB3D84F810431974803E13E26BF95AF7AB590E17C0113BFE9B36BE12BE16D89273348E0CC094526CAF54ABF8044565EC9500EBF657265474BC362EBDFD78F513282AAF0EEF9BA0BB00F7FF9C7E61F00465BBD379D6201";
const char* pubKey = "AE7DF3EB95DF1F864F86DA9952BB44E760152986731253C96C135E5391AEFF68F5C1640552F1CCC2BA2C12C0C68C051E343B786F13215CEFC8221D9FA97D50E895EAF50D1AF32DC5EB40C9F1F8CA5145B43CEF83F2A89C9661AFA73A70D32951271C6BEFE1B5F24B512520DA7FD4EEC982F2D8057FE1938FA2FB54D8D282A25D8397298B75E154739EF16B8E2F18368F5BEEAD3D18528B9B1A63C731A71735CDB6102E187EF3377B72B58A124FA280891A79A2BD789D5DBA3618BBD74367F5C50A220204D90A59828C3C81FDBD9D2A91CBF6C8563C6FE987BE21B19BBC340DE4D42290D63909AD5D856D13B8CDC91D5701570045CE3609D4F8884F69120AD9A3";
// Builds an RSA key from hex strings (n = modulus, d = private exponent,
// e fixed to 65537), SHA-256-hashes a fixed message and signs the digest.
// Ownership: RSA_set0_key transfers ownership of bn/be/bd to 'rsa', so the
// single RSA_free at the end releases all of them — no direct leak here.
// NOTE(review): return values of BN_hex2bn / RSA_set0_key / RSA_sign are
// unchecked; on RSA_set0_key failure the BIGNUMs would leak — acceptable for
// a repro, not for production code.
void rsa_test(const char* n,const char* d)
{
RSA* rsa = RSA_new();
BIGNUM* bn = BN_new();
BIGNUM* bd = BN_new();
BIGNUM* be = BN_new();
// Public exponent e = 65537 (F4).
BN_set_word(be, 0x10001);
if (n) BN_hex2bn(&bn, n);
if (d) BN_hex2bn(&bd, d);
// Transfers ownership of bn, be, bd into 'rsa'.
RSA_set0_key(rsa, bn, be, bd);
// calc hash: SHA-256 of the fixed message
const char* msg = "hello,rsa!!";
BYTE shaResult[SHA256_DIGEST_LENGTH] = { 0 };
SHA256((unsigned char*)msg, strlen(msg), shaResult);
// sign the digest; 256 bytes fits a 2048-bit key's signature
unsigned int olen;
unsigned char sign[256] = { 0 };
RSA_sign(NID_sha256, shaResult, SHA256_DIGEST_LENGTH, sign, &olen,rsa);
// Frees rsa and the BIGNUMs it owns.
RSA_free(rsa);
}
// Thread entry point exercising rsa_test on a worker thread.
// @param lpParam unused Win32 thread parameter.
// @return thread exit code (always 0).
//
// Fix for the reported leaks: OpenSSL lazily allocates per-thread state
// (error queues etc.) on first use. On threads that OpenSSL did not create
// it cannot know when the thread ends, so that state must be released
// explicitly with OPENSSL_thread_stop() before the thread returns —
// otherwise the CRT debug heap reports it as leaked (see the linked
// OPENSSL_thread_stop man page).
DWORD thread_test(LPVOID lpParam)
{
    rsa_test(pubKey, priKey);
    OPENSSL_thread_stop(); // release OpenSSL's thread-local state
    return 0;
}
// Repro driver: enables CRT leak checking, then (depending on which line is
// uncommented) runs rsa_test either on the main thread or on a worker
// thread. The commented-out lines are the toggles described in the question.
int main()
{
//_CrtSetBreakAlloc(159);
// Report all still-allocated CRT debug-heap blocks at process exit.
_CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
//CreateThread(nullptr, 0, thread_test, nullptr, 0, nullptr);
//rsa_test(pubKey,priKey);
// Keeps the console open (and gives the worker thread time to run).
system("pause");
}
Calling rsa_test(pubKey, priKey) directly in the main thread DOES NOT cause memory leaks!
Calling CreateThread(nullptr, 0, thread_test, nullptr, 0, nullptr) produces memory leaks!!!
Console output as follows:
Detected memory leaks!
Dumping objects ->
{173} normal block at 0x000002BDE6D44000, 264 bytes long.
Data: < _W- :_ s ! 6> D8 89 FD 5F 57 2D C5 3A 5F 82 73 F1 00 21 FA 36
{162} normal block at 0x000002BDE6D43AC0, 264 bytes long.
Data: < > 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
{161} normal block at 0x000002BDE6D2E2A0, 160 bytes long.
Data: <` > 60 F1 0F 91 F6 7F 00 00 00 00 00 00 00 00 00 00
{160} normal block at 0x000002BDE6D2DBA0, 160 bytes long.
Data: <` > 60 F1 0F 91 F6 7F 00 00 00 00 00 00 00 00 00 00
{159} normal block at 0x000002BDE6D48230, 352 bytes long.
Data: < P > 00 00 00 00 00 00 00 00 50 AB D3 E6 BD 02 00 00
{158} normal block at 0x000002BDE6D286B0, 12 bytes long.
Data: < > 00 00 00 00 00 00 00 00 01 00 00 00
Object dump complete.
Then I use _CrtSetBreakAlloc(159) (or another memory id) to locate the allocation, and here is my call stack screenshot:
my vs2017 showing the break point at CRYPTO_zalloc(crypto/mem.c)
So my question is: how do I free that leaked memory in the thread?
Thanks a lot!!
Download my test code(build with visual studio 2017 x64)
https://www.openssl.org/docs/man1.1.0/man3/OPENSSL_thread_stop.html
OPENSSL_thread_stop();
will do it for you. You can call it like below
// Corrected thread entry point: OPENSSL_thread_stop() frees the per-thread
// state OpenSSL allocated during rsa_test, which is what the CRT was
// reporting as leaked.
DWORD thread_test(LPVOID lpParam)
{
rsa_test(pubKey, priKey);
OPENSSL_thread_stop();
return 0;
}
My goal is to use the OSGEarth library to make an MFC project that can display the model "openstreetmap.earth". I finished this and can see the earth. But every time I close the project, the output window in VS2015 says there are memory leaks in the program.
Here is the window output:
Detected memory leaks!
Dumping objects ->
{306240} normal block at 0x00000000076902F0, 16 bytes long.
Data: <0,i > 30 2C 69 07 00 00 00 00 00 00 00 00 00 00 00 00
{306239} normal block at 0x0000000007692C30, 9 bytes long.
Data: <Pragma: > 50 72 61 67 6D 61 3A 20 00
{303648} normal block at 0x0000000007693040, 16 bytes long.
Data: < 5i > 90 35 69 07 00 00 00 00 00 00 00 00 00 00 00 00
{303647} normal block at 0x0000000007693590, 9 bytes long.
Data: <Pragma: > 50 72 61 67 6D 61 3A 20 00
{301180} normal block at 0x00000000076938B0, 16 bytes long.
Data: <`8i > 60 38 69 07 00 00 00 00 00 00 00 00 00 00 00 00
{301179} normal block at 0x0000000007693860, 9 bytes long.
Data: <Pragma: > 50 72 61 67 6D 61 3A 20 00
{297799} normal block at 0x0000000007691060, 16 bytes long.
Data: < i > 10 10 69 07 00 00 00 00 00 00 00 00 00 00 00 00
I examined the program and found that when I delete this code m_Model = osgDB::readNodeFile(m_strModelName); there is no more memory leaks.
// Builds the scene graph: creates the root group, loads the model named by
// m_strModelName, optimizes it and attaches it under the root.
// m_Root and m_Model are osg::ref_ptr members, so the nodes are reference-
// counted and released when the owning object goes away — the "leaks"
// reported at exit presumably come from readNodeFile's plugin/registry
// internals, not from this function (see the linked forum thread).
void COSGEarth::InitSceneGraph(void)
{
// Init the main Root Node/Group
m_Root = new osg::Group;
// Load the Model from the model name,
//delete below line, no memory leak
m_Model = osgDB::readNodeFile(m_strModelName);
// Bail out early if the file could not be read; m_Root stays empty.
if (!m_Model) return;
// Optimize the model
osgUtil::Optimizer optimizer;
optimizer.optimize(m_Model.get());
optimizer.reset();
// Add the model to the scene
m_Root->addChild(m_Model.get());
}
I defined m_Model as osg::ref_ptr<osg::Node> m_Model. This is Intelligent pointer.
Why there are memory leaks and how I can solve this issue?
Here is source code :http://bbs.osgchina.org/forum.php?mod=attachment&aid=NzIwNnwzZWYxZDIyZjlhOGY1MWFjZjhiNGFiMWYwMTc5YmJlNXwxNTEyMzc5ODE2&request=yes&_f=.zip
I believe these reported "leaks" are false positives. Refer to this thread that explains why:
http://forum.openscenegraph.org/viewtopic.php?t=1475
Detected memory leaks!
Dumping objects ->
{9370} normal block at 0x000000C16B24C480, 24 bytes long.
Data: <`h= > 60 68 3D FB F6 7F 00 00 01 00 00 00 01 00 00 00
{8549} normal block at 0x000000C16B25CC30, 21627 bytes long.
Data: < 0 %k > FA FA FA FA FA FA FA FA 30 CC 25 6B C1 00 00 00
{5196} normal block at 0x000000C16B253320, 12839 bytes long.
Data: < > CD CD CD CD CD CD CD CD CD CD CD CD CD CD CD CD
{192} normal block at 0x000000C16B24CE40, 24 bytes long.
Data: < m= > 20 6D 3D FB F6 7F 00 00 02 00 00 00 01 00 00 00
{191} normal block at 0x000000C16B251780, 16 bytes long.
Data: < $k > 10 DB 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{190} normal block at 0x000000C16B251410, 16 bytes long.
Data: < $k > F0 DA 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{189} normal block at 0x000000C16B2514B0, 16 bytes long.
Data: < $k > D0 DA 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{188} normal block at 0x000000C16B2516E0, 16 bytes long.
Data: < $k > B0 DA 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{187} normal block at 0x000000C16B251690, 16 bytes long.
Data: < $k > 90 DA 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{186} normal block at 0x000000C16B251370, 16 bytes long.
Data: <p $k > 70 DA 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{185} normal block at 0x000000C16B251230, 16 bytes long.
Data: <P $k > 50 DA 24 6B C1 00 00 00 00 00 00 00 00 00 00 00
{184} normal block at 0x000000C16B24DA50, 224 bytes long.
Data: <0 %k #3%k > 30 12 25 6B C1 00 00 00 40 33 25 6B C1 00 00 00
{156} normal block at 0x000000C16B24C4E0, 24 bytes long.
Data: <P $k # $k > 50 DA 24 6B C1 00 00 00 40 CE 24 6B C1 00 00 00
{155} normal block at 0x000000C16B24C300, 32 bytes long.
Data: <../dataset/refer> 2E 2E 2F 64 61 74 61 73 65 74 2F 72 65 66 65 72
{154} normal block at 0x000000C16B250AB0, 16 bytes long.
Data: < k > A8 F4 09 6B C1 00 00 00 00 00 00 00 00 00 00 00
Object dump complete.
'3DMM_1st.exe' (Win32): Loaded 'C:\Windows\System32\kernel.appcore.dll'. Cannot find or open the PDB file.
The program '[36392] 3DMM_1st.exe' has exited with code 1 (0x1).
Can anyone help me? I got a problem relating to memory leaks. I don't know how to solve it, can anyone can give some suggestions, it will greatly be appreciated.
Here are some info about my code. I created a struct named ObjectData and a class named ObjectLoader just as follows:
// Plain data holder for a parsed model: per-vertex attributes plus the
// index lists referencing them (vIndices/uIndices/nIndices index into
// vertices, texCoords and normals respectively — TODO confirm against the
// parser code, which is not shown here).
struct ObjectData {
std::vector <glm::vec3> vertices, normals, colors;
std::vector <glm::vec2> texCoords;
std::vector <unsigned int> vIndices, uIndices, nIndices;
};
// Parses an OBJ-style model file into a shared ObjectData.
// NOTE(review): the constructor shown below initializes only indexChecked
// and colorChecked — hasUV, hasNormals and hasColor start uninitialized;
// reading them before assignment is undefined behavior.
class ObjectLoader {
private:
// Parsed result, shared with callers via getModel().
std::tr1::shared_ptr<ObjectData> object;
bool hasUV, hasNormals, hasColor, colorChecked, indexChecked;
std::string parseString(std::string src, std::string code);
std::vector<glm::vec3> parseVerColor(std::string src, std::string code);
glm::vec2 parseVec2(std::string src, std::string code);
glm::vec3 parseVec3(std::string src, std::string code);
void addIndices(std::string str);
void checkIndices(std::string str);
void checkColors(std::string str);
void loadObjects(std::string objPath);
public:
ObjectLoader(std::string objName);
~ObjectLoader();
// Returns the shared model data (shares ownership with this loader).
std::tr1::shared_ptr<ObjectData> getModel();
};
Here is the getModel() and ObjectLoader() implementation code:
// Returns a shared_ptr copy, so the model stays alive as long as either the
// loader or any caller holds it.
std::tr1::shared_ptr<ObjectData> ObjectLoader::getModel() {
return object;
}
// Constructs a loader for the given object file and allocates the shared
// model storage.
// @param objName path/name of the model file (stored use not shown here —
//                presumably consumed by loadObjects; TODO confirm).
//
// Fix: the original initialized only indexChecked/colorChecked, leaving
// hasUV/hasNormals/hasColor indeterminate (reading them is UB), and kept a
// dead local copy of objName ('fileName') that was never used.
ObjectLoader::ObjectLoader(std::string objName) {
    hasUV = false;
    hasNormals = false;
    hasColor = false;
    indexChecked = false;
    colorChecked = false;
    object = std::tr1::shared_ptr<ObjectData>(new ObjectData());
}
When I test my code I get the problem related to the memory leaks.
Here is my test code:
// Both smart pointers are still alive when the dump runs, so their heap
// blocks are (falsely) reported as leaks — see the answer below.
std::tr1::shared_ptr<ObjectLoader> loader = std::tr1::shared_ptr<ObjectLoader>(new ObjectLoader(fileName));
std::tr1::shared_ptr<ObjectData> data = loader->getModel();
_CrtDumpMemoryLeaks();
You have a problem detecting the leaks because of the scope of the std::shared_ptr.
In the code;
std::tr1::shared_ptr<ObjectLoader> loader = std::tr1::shared_ptr<ObjectLoader>(new ObjectLoader(fileName));
std::tr1::shared_ptr<ObjectData> data = loader->getModel();
_CrtDumpMemoryLeaks();
The loader and data destructors, and hence the deletions, do not run until after the _CrtDumpMemoryLeaks(); function reports the leaks.
Adding an extra scope can help with this, else the code needs to be restructured.
// Inner scope forces both shared_ptr destructors (and the deletions they
// trigger) to run before the leak dump.
{
std::tr1::shared_ptr<ObjectLoader> loader = std::tr1::shared_ptr<ObjectLoader>(new ObjectLoader(fileName));
std::tr1::shared_ptr<ObjectData> data = loader->getModel();
} // destructors run here...
// Now the dump reflects only allocations that are genuinely still live.
_CrtDumpMemoryLeaks();
I am using openssl in my project. When I exit my application I get "Detected memory leaks!" in Visual Studio 2013.
Detected memory leaks!
Dumping objects ->
{70202} normal block at 0x056CB738, 12 bytes long.
Data: <8 j > 38 E8 6A 05 00 00 00 00 04 00 00 00
{70201} normal block at 0x056CB6E8, 16 bytes long.
Data: < > 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
{70200} normal block at 0x056CB698, 20 bytes long.
Data: < l > 00 00 00 00 E8 B6 6C 05 00 00 00 00 04 00 00 00
{70199} normal block at 0x056AE838, 12 bytes long.
Data: < l > 04 00 00 00 98 B6 6C 05 00 00 00 00
{70198} normal block at 0x056CB618, 64 bytes long.
Data: < > 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
{70197} normal block at 0x056CB578, 96 bytes long.
Data: < l 3 3 > 18 B6 6C 05 00 FE C0 33 C0 FD C0 33 08 00 00 00
Object dump complete.
When I add
_CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
_CrtSetBreakAlloc(70202);
to the main function, I always get a breakpoint at the allocation of the X509 store, no matter which of the 6 numbers (70202, ...) I set the breakpoint for.
I initialize and uninitialize the x509 store in a class' constructor and destructor (see below).
Is there anything else I need to look out for when using the x509_STORE?
// Allocates the X509 certificate store owned by this object.
// NOTE(review): X509_STORE_new's return value is not checked for NULL, and
// the class's copy behavior is not visible here — if CSCACerts is copyable,
// two objects would share m_store and double-free it; verify the class
// declaration.
Foo::CSCACerts::CSCACerts(void)
{
m_store = X509_STORE_new();
}
// Releases the store allocated in the constructor. The blocks the CRT
// reports at exit are therefore presumably OpenSSL's lazily-allocated
// global/per-thread state rather than this store itself — TODO confirm with
// the library's cleanup routines.
Foo::CSCACerts::~CSCACerts(void)
{
X509_STORE_free( m_store );
}
// Demonstrates CRT leak checking around a std::map.
// Fix (per the accepted answer): the map must be destroyed BEFORE
// _CrtDumpMemoryLeaks() runs, otherwise its still-live internal nodes are
// reported as leaks. An inner scope makes the destructor run first;
// x.clear() alone is not enough because the map object itself (and any
// retained capacity) still exists at the dump point.
int main()
{
    {
        map<string,string> x;
        x["Haitham"]="000#hotmail.com";
        x.clear();
    } // map destructor runs here
    _CrtDumpMemoryLeaks();
    return 0;
}
When I test this program for memory leaks, the output is:
Detected memory leaks!
{152} normal block at 0x0070ABD8, 8 bytes long.
Data: <4 p > 34 AB 70 00 00 00 00 00
{151} normal block at 0x0070AB90, 8 bytes long.
{150} normal block at 0x0070AB08, 72 bytes long.
Data: <p p ` p > 60 A9 70 00 60 A9 70 00 60 A9 70 00 01 00 CD
CD
{145} normal block at 0x00704C40, 8 bytes long.
Data: < ^ > E4 FE 5E 00 00 00 00 00
{144} normal block at 0x0070A960, 72 bytes long.
Data: < p p p > 08 AB 70 00 08 AB 70 00 08 AB 70 00 01 01 CD
CD
Data: < p > 18 AB 70 00 00 00 00 00
Object dump complete.
The issue is that you're calling the function that checks for memory leaks too soon. The map hasn't been destroyed at the point you call _CrtDumpMemoryLeaks.
Change your code to this:
int main()
{
{
map<string,string> x;
x["Haitham"]="000#hotmail.com";
x.clear();
}
_CrtDumpMemoryLeaks();
return 0;
}
This should now show that the map is destroyed, since it is local to the { } block.