VERS_1.0 {
    local:
        _Znwm;                  # operator new(unsigned long);
        _ZnwmRKSt9nothrow_t;    # operator new(unsigned long, std::nothrow_t const&);
        _ZdlPv;                 # operator delete(void*);
        _ZdlPvRKSt9nothrow_t;   # operator delete(void*, std::nothrow_t const&);
        _Znam;                  # operator new[](unsigned long);
        _ZnamRKSt9nothrow_t;    # operator new[](unsigned long, std::nothrow_t const&);
        _ZdaPv;                 # operator delete[](void*);
        _ZdaPvRKSt9nothrow_t;   # operator delete[](void*, std::nothrow_t const&);
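        #
        # The demangled signatures in the comments above can be double-checked
        # with c++filt from GNU binutils, e.g.:
        #
        #   echo _Znwm | c++filt    # prints: operator new(unsigned long)
        #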
extern "C++" {
            #
            # Hide everything in the std and boost namespaces (C++ STL and Boost
            # symbols):
            #
            # This fixes several intermittent crashes that were happening in the
            # deep_learning module, in different forms on different platforms,
            # caused by TensorFlow calling madlib's instantiations of STL template
            # functions. That circumvents the hiding of madlib's global new and
            # delete symbols above, which are intended to override the libstdc++
            # versions of those symbols for all of libmadlib.so, but not for
            # anything outside of it.
            #
            # The main crash we looked at was due to the use of std::set<string>
            # in madlib/src/modules/linalg/metric.cpp. TensorFlow is a Python
            # library, but it loads Google's C++ protobuf library _message.so.
            # The crash was happening because _message.so also uses
            # std::set<string>, and it ended up calling madlib's instantiations
            # of its member functions. We were able to fix that crash by hiding
            # all madlib symbols associated with the std::set<string> class, but
            # a similar crash then happened because of a different STL symbol
            # exposed by madlib.
            #
            # In the long term, we should move to hiding all symbols by default
            # and explicitly make exceptions for the madlib API functions (see
            # the sketch at the end of this file). But that would be a bigger
            # change and would require more extensive testing to make sure it
            # doesn't cause any obscure issues on any platform. So for now we
            # just hide everything in the std and boost namespaces.
            #
            std::*;
            boost::*;
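            #
            # A rough post-build sanity check (assuming GNU binutils): once this
            # script is applied, the new/delete overrides and the std/boost
            # instantiations should no longer appear among the library's defined
            # dynamic symbols. For example, the demangled listing
            #
            #   nm -CD --defined-only libmadlib.so
            #
            # should show no entries whose names start with "std::" or "boost::",
            # and no plain "operator new"/"operator delete" definitions.
            #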
        };
};
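
#
# Sketch of the long-term approach mentioned above (not active; the export
# pattern below is hypothetical, and the real list of public entry points
# would have to be worked out and tested on every supported platform):
#
#   VERS_1.0 {
#       global:
#           madlib_*;   # hypothetical glob for the madlib API functions
#       local:
#           *;          # hide everything else by default
#   };
#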