Fix an object lifetime bug in net load tests

The commands handler must not be destroyed before the config
object, or we'll be accessing freed memory.

An earlier attempt at using boost::shared_ptr to control object
lifetime turned out to be very invasive, though would be a
better solution in theory.
This commit is contained in:
moneromooo-monero 2017-10-09 16:46:42 +01:00
parent 86e9de588c
commit 7dbf76d0da
No known key found for this signature in database
GPG key ID: 686F07454D6CEFC3
10 changed files with 45 additions and 16 deletions

View file

@@ -87,6 +87,7 @@ namespace levin
virtual void on_connection_new(t_connection_context& context){}; virtual void on_connection_new(t_connection_context& context){};
virtual void on_connection_close(t_connection_context& context){}; virtual void on_connection_close(t_connection_context& context){};
virtual ~levin_commands_handler(){}
}; };
#define LEVIN_OK 0 #define LEVIN_OK 0

View file

@@ -52,6 +52,7 @@ namespace levin
class levin_client_async class levin_client_async
{ {
levin_commands_handler* m_pcommands_handler; levin_commands_handler* m_pcommands_handler;
void (*commands_handler_destroy)(levin_commands_handler*);
volatile uint32_t m_is_stop; volatile uint32_t m_is_stop;
volatile uint32_t m_threads_count; volatile uint32_t m_threads_count;
::critical_section m_send_lock; ::critical_section m_send_lock;
@@ -85,9 +86,9 @@ namespace levin
::critical_section m_connection_lock; ::critical_section m_connection_lock;
net_utils::blocked_mode_client m_transport; net_utils::blocked_mode_client m_transport;
public: public:
levin_client_async():m_pcommands_handler(NULL), m_is_stop(0), m_threads_count(0), m_invoke_data_ready(0), m_invoke_is_active(0) levin_client_async():m_pcommands_handler(NULL), commands_handler_destroy(NULL), m_is_stop(0), m_threads_count(0), m_invoke_data_ready(0), m_invoke_is_active(0)
{} {}
levin_client_async(const levin_client_async& /*v*/):m_pcommands_handler(NULL), m_is_stop(0), m_threads_count(0), m_invoke_data_ready(0), m_invoke_is_active(0) levin_client_async(const levin_client_async& /*v*/):m_pcommands_handler(NULL), commands_handler_destroy(NULL), m_is_stop(0), m_threads_count(0), m_invoke_data_ready(0), m_invoke_is_active(0)
{} {}
~levin_client_async() ~levin_client_async()
{ {
@@ -97,11 +98,16 @@ namespace levin
while(boost::interprocess::ipcdetail::atomic_read32(&m_threads_count)) while(boost::interprocess::ipcdetail::atomic_read32(&m_threads_count))
::Sleep(100); ::Sleep(100);
set_handler(NULL);
} }
void set_handler(levin_commands_handler* phandler) void set_handler(levin_commands_handler* phandler, void (*destroy)(levin_commands_handler*) = NULL)
{ {
if (commands_handler_destroy && m_pcommands_handler)
(*commands_handler_destroy)(m_pcommands_handler);
m_pcommands_handler = phandler; m_pcommands_handler = phandler;
commands_handler_destroy = destroy;
} }
bool connect(uint32_t ip, uint32_t port, uint32_t timeout) bool connect(uint32_t ip, uint32_t port, uint32_t timeout)

View file

@@ -43,6 +43,8 @@ namespace levin
struct protocl_handler_config struct protocl_handler_config
{ {
levin_commands_handler<t_connection_context>* m_pcommands_handler; levin_commands_handler<t_connection_context>* m_pcommands_handler;
void (*m_pcommands_handler_destroy)(levin_commands_handler<t_connection_context>*);
~protocl_handler_config() { if (m_pcommands_handler && m_pcommands_handler_destroy) (*m_pcommands_handler_destroy)(m_pcommands_handler); }
}; };
template<class t_connection_context = net_utils::connection_context_base> template<class t_connection_context = net_utils::connection_context_base>

View file

@@ -72,9 +72,11 @@ class async_protocol_handler_config
friend class async_protocol_handler<t_connection_context>; friend class async_protocol_handler<t_connection_context>;
levin_commands_handler<t_connection_context>* m_pcommands_handler;
void (*m_pcommands_handler_destroy)(levin_commands_handler<t_connection_context>*);
public: public:
typedef t_connection_context connection_context; typedef t_connection_context connection_context;
levin_commands_handler<t_connection_context>* m_pcommands_handler;
uint64_t m_max_packet_size; uint64_t m_max_packet_size;
uint64_t m_invoke_timeout; uint64_t m_invoke_timeout;
@@ -91,8 +93,9 @@ public:
template<class callback_t> template<class callback_t>
bool for_connection(const boost::uuids::uuid &connection_id, callback_t cb); bool for_connection(const boost::uuids::uuid &connection_id, callback_t cb);
size_t get_connections_count(); size_t get_connections_count();
void set_handler(levin_commands_handler<t_connection_context>* handler, void (*destroy)(levin_commands_handler<t_connection_context>*) = NULL);
async_protocol_handler_config():m_pcommands_handler(NULL), m_max_packet_size(LEVIN_DEFAULT_MAX_PACKET_SIZE) async_protocol_handler_config():m_pcommands_handler(NULL), m_pcommands_handler_destroy(NULL), m_max_packet_size(LEVIN_DEFAULT_MAX_PACKET_SIZE)
{} {}
void del_out_connections(size_t count); void del_out_connections(size_t count);
}; };
@@ -832,6 +835,15 @@ size_t async_protocol_handler_config<t_connection_context>::get_connections_coun
} }
//------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------
template<class t_connection_context> template<class t_connection_context>
void async_protocol_handler_config<t_connection_context>::set_handler(levin_commands_handler<t_connection_context>* handler, void (*destroy)(levin_commands_handler<t_connection_context>*))
{
if (m_pcommands_handler && m_pcommands_handler_destroy)
(*m_pcommands_handler_destroy)(m_pcommands_handler);
m_pcommands_handler = handler;
m_pcommands_handler_destroy = destroy;
}
//------------------------------------------------------------------------------------------
template<class t_connection_context>
int async_protocol_handler_config<t_connection_context>::notify(int command, const std::string& in_buff, boost::uuids::uuid connection_id) int async_protocol_handler_config<t_connection_context>::notify(int command, const std::string& in_buff, boost::uuids::uuid connection_id)
{ {
async_protocol_handler<t_connection_context>* aph; async_protocol_handler<t_connection_context>* aph;

View file

@@ -155,7 +155,7 @@ namespace tests
bool init(const std::string& bind_port = "", const std::string& bind_ip = "0.0.0.0") bool init(const std::string& bind_port = "", const std::string& bind_ip = "0.0.0.0")
{ {
m_net_server.get_config_object().m_pcommands_handler = this; m_net_server.get_config_object().set_handler(this);
m_net_server.get_config_object().m_invoke_timeout = 1000; m_net_server.get_config_object().m_invoke_timeout = 1000;
LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port); LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port);
return m_net_server.init_server(bind_port, bind_ip); return m_net_server.init_server(bind_port, bind_ip);

View file

@@ -560,7 +560,7 @@ namespace nodetool
//configure self //configure self
m_net_server.set_threads_prefix("P2P"); m_net_server.set_threads_prefix("P2P");
m_net_server.get_config_object().m_pcommands_handler = this; m_net_server.get_config_object().set_handler(this);
m_net_server.get_config_object().m_invoke_timeout = P2P_DEFAULT_INVOKE_TIMEOUT; m_net_server.get_config_object().m_invoke_timeout = P2P_DEFAULT_INVOKE_TIMEOUT;
m_net_server.set_connection_filter(this); m_net_server.set_connection_filter(this);

View file

@@ -193,7 +193,7 @@ namespace
{ {
m_thread_count = (std::max)(min_thread_count, boost::thread::hardware_concurrency() / 2); m_thread_count = (std::max)(min_thread_count, boost::thread::hardware_concurrency() / 2);
m_tcp_server.get_config_object().m_pcommands_handler = &m_commands_handler; m_tcp_server.get_config_object().set_handler(&m_commands_handler);
m_tcp_server.get_config_object().m_invoke_timeout = CONNECTION_TIMEOUT; m_tcp_server.get_config_object().m_invoke_timeout = CONNECTION_TIMEOUT;
ASSERT_TRUE(m_tcp_server.init_server(clt_port, "127.0.0.1")); ASSERT_TRUE(m_tcp_server.init_server(clt_port, "127.0.0.1"));
@@ -238,9 +238,10 @@ namespace
static void TearDownTestCase() static void TearDownTestCase()
{ {
// Stop server // Stop server
test_levin_commands_handler commands_handler; test_levin_commands_handler *commands_handler_ptr = new test_levin_commands_handler();
test_tcp_server tcp_server(epee::net_utils::e_connection_type_NET); test_levin_commands_handler &commands_handler = *commands_handler_ptr;
tcp_server.get_config_object().m_pcommands_handler = &commands_handler; test_tcp_server tcp_server(epee::net_utils::e_connection_type_RPC);
tcp_server.get_config_object().set_handler(commands_handler_ptr, [](epee::levin::levin_commands_handler<test_connection_context> *handler)->void { delete handler; });
tcp_server.get_config_object().m_invoke_timeout = CONNECTION_TIMEOUT; tcp_server.get_config_object().m_invoke_timeout = CONNECTION_TIMEOUT;
if (!tcp_server.init_server(clt_port, "127.0.0.1")) return; if (!tcp_server.init_server(clt_port, "127.0.0.1")) return;

View file

@@ -151,6 +151,11 @@ namespace net_load_tests
bool handle_new_connection(const boost::uuids::uuid& connection_id, bool ignore_close_fails = false) bool handle_new_connection(const boost::uuids::uuid& connection_id, bool ignore_close_fails = false)
{ {
size_t idx = m_next_opened_conn_idx.fetch_add(1, std::memory_order_relaxed); size_t idx = m_next_opened_conn_idx.fetch_add(1, std::memory_order_relaxed);
if (idx >= m_connections.size())
{
LOG_PRINT_L0("ERROR: connections overflow");
exit(1);
}
m_connections[idx] = connection_id; m_connections[idx] = connection_id;
size_t prev_connection_count = m_opened_connection_count.fetch_add(1, std::memory_order_relaxed); size_t prev_connection_count = m_opened_connection_count.fetch_add(1, std::memory_order_relaxed);

View file

@@ -224,8 +224,8 @@ int main(int argc, char** argv)
if (!tcp_server.init_server(srv_port, "127.0.0.1")) if (!tcp_server.init_server(srv_port, "127.0.0.1"))
return 1; return 1;
srv_levin_commands_handler commands_handler(tcp_server); srv_levin_commands_handler *commands_handler = new srv_levin_commands_handler(tcp_server);
tcp_server.get_config_object().m_pcommands_handler = &commands_handler; tcp_server.get_config_object().set_handler(commands_handler, [](epee::levin::levin_commands_handler<test_connection_context> *handler) { delete handler; });
tcp_server.get_config_object().m_invoke_timeout = 10000; tcp_server.get_config_object().m_invoke_timeout = 10000;
//tcp_server.get_config_object().m_max_packet_size = max_packet_size; //tcp_server.get_config_object().m_max_packet_size = max_packet_size;

View file

@@ -187,9 +187,11 @@ namespace
typedef std::unique_ptr<test_connection> test_connection_ptr; typedef std::unique_ptr<test_connection> test_connection_ptr;
async_protocol_handler_test() async_protocol_handler_test():
m_pcommands_handler(new test_levin_commands_handler()),
m_commands_handler(*m_pcommands_handler)
{ {
m_handler_config.m_pcommands_handler = &m_commands_handler; m_handler_config.set_handler(m_pcommands_handler, [](epee::levin::levin_commands_handler<test_levin_connection_context> *handler) { delete handler; });
m_handler_config.m_invoke_timeout = invoke_timeout; m_handler_config.m_invoke_timeout = invoke_timeout;
m_handler_config.m_max_packet_size = max_packet_size; m_handler_config.m_max_packet_size = max_packet_size;
} }
@@ -212,7 +214,7 @@ namespace
protected: protected:
boost::asio::io_service m_io_service; boost::asio::io_service m_io_service;
test_levin_protocol_handler_config m_handler_config; test_levin_protocol_handler_config m_handler_config;
test_levin_commands_handler m_commands_handler; test_levin_commands_handler *m_pcommands_handler, &m_commands_handler;
}; };
class positive_test_connection_to_levin_protocol_handler_calls : public async_protocol_handler_test class positive_test_connection_to_levin_protocol_handler_calls : public async_protocol_handler_test