id:         6800
summary:    endless loop in dev_poll_reactor on Solaris when using async_write with more than 65536 bytes
reporter:   p_ranadheer@…
owner:      chris_kohlhoff
type:       Bugs
status:     new
milestone:  To Be Determined
component:  asio
version:    Boost 1.49.0
severity:   Problem
resolution:
keywords:   solaris
cc:         samm@…

description:

The following example program puts the io_service thread into a tight endless loop on Solaris when the server is trying to send more than 65536 bytes while the client is running (the server and client have to be running on different hosts). truss on the server process shows the following calls continuously:

{{{
/6:     recvmsg(57, 0xFFFFFD7FEB9FDDF0, 0)          Err#11 EAGAIN
/6:     write(13, " 9\0\0\019\0\0\0", 8)            = 8
/6:     ioctl(13, DP_POLL, 0xFFFFFD7FEB9FE460)      = 1
/6:     recvmsg(57, 0xFFFFFD7FEB9FDDF0, 0)          Err#11 EAGAIN
/6:     write(13, " 9\0\0\019\0\0\0", 8)            = 8
/6:     ioctl(13, DP_POLL, 0xFFFFFD7FEB9FE460)      = 1
/6:     recvmsg(57, 0xFFFFFD7FEB9FDDF0, 0)          Err#11 EAGAIN
/6:     write(13, " 9\0\0\019\0\0\0", 8)            = 8
/6:     ioctl(13, DP_POLL, 0xFFFFFD7FEB9FE460)      = 1
/6:     recvmsg(57, 0xFFFFFD7FEB9FDDF0, 0)          Err#11 EAGAIN
/6:     write(13, " 9\0\0\019\0\0\0", 8)            = 8
/6:     ioctl(13, DP_POLL, 0xFFFFFD7FEB9FE460)      = 1
/6:     recvmsg(57, 0xFFFFFD7FEB9FDDF0, 0)          Err#11 EAGAIN
/6:     write(13, " 9\0\0\019\0\0\0", 8)            = 8
/6:     ioctl(13, DP_POLL, 0xFFFFFD7FEB9FE460)      = 1
}}}

The busy-wait loop continues until we stop the client.

Here is the server code. I've just changed the Boost asynchronous TCP daytime server sample to return a message of the specified number of bytes instead of the daytime string, and to put the client connection in read mode.

{{{
#include <iostream>
#include <string>
#include <boost/array.hpp>
#include <boost/bind.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/asio.hpp>

using boost::asio::ip::tcp;

std::string make_message(unsigned int message_size)
{
  std::string data(message_size, 'A');
  return data;
}

class tcp_connection
  : public boost::enable_shared_from_this<tcp_connection>
{
public:
  typedef boost::shared_ptr<tcp_connection> pointer;

  static pointer create(boost::asio::io_service& io_service,
      unsigned int message_size)
  {
    return pointer(new tcp_connection(io_service, message_size));
  }

  tcp::socket& socket()
  {
    return socket_;
  }

  // NOTE: this part of the class was garbled in the ticket text; start(),
  // the constructor and handle_write below are reconstructed from the
  // daytime tutorial that this example is based on.
  void start()
  {
    // Send the requested number of bytes, then put the client connection
    // in read mode by starting an asynchronous header read.
    boost::asio::async_write(socket_, boost::asio::buffer(message_),
        boost::bind(&tcp_connection::handle_write, shared_from_this(),
          boost::asio::placeholders::error,
          boost::asio::placeholders::bytes_transferred));

    boost::asio::async_read(socket_, boost::asio::buffer(_header),
        boost::bind(&tcp_connection::handleMessage, shared_from_this(),
          boost::asio::placeholders::error));
  }

  void handleMessage(const boost::system::error_code& message_error)
  {
    if (message_error)
    {
      std::cout << "Error while reading the message from client" << std::endl;
    }
  }

private:
  tcp_connection(boost::asio::io_service& io_service,
      unsigned int message_size)
    : socket_(io_service),
      message_(make_message(message_size)),
      message_size_(message_size)
  {
  }

  void handle_write(const boost::system::error_code& /*error*/,
      size_t /*bytes_transferred*/)
  {
  }

  tcp::socket socket_;
  std::string message_;
  boost::array<char, 8> _header; // buffer size was lost in the ticket text
  unsigned int message_size_;
};

class tcp_server
{
public:
  tcp_server(boost::asio::io_service& io_service, unsigned int port,
      unsigned int message_size)
    : acceptor_(io_service, tcp::endpoint(tcp::v4(), port)),
      message_size_(message_size)
  {
    start_accept();
  }

private:
  void start_accept()
  {
    tcp_connection::pointer new_connection =
      tcp_connection::create(acceptor_.get_io_service(), message_size_);

    acceptor_.async_accept(new_connection->socket(),
        boost::bind(&tcp_server::handle_accept, this, new_connection,
          boost::asio::placeholders::error));
  }

  void handle_accept(tcp_connection::pointer new_connection,
      const boost::system::error_code& error)
  {
    if (!error)
    {
      new_connection->start();
      start_accept();
    }
  }

  tcp::acceptor acceptor_;
  unsigned int message_size_;
};

int main(int argc, char* argv[])
{
  if (argc != 3)
  {
    std::cerr << "Usage: server port message_size" << std::endl;
    return 1;
  }

  unsigned int port = boost::lexical_cast<unsigned int>(argv[1]);
  unsigned int message_size = boost::lexical_cast<unsigned int>(argv[2]);

  try
  {
    boost::asio::io_service io_service;
    tcp_server server(io_service, port, message_size);
    io_service.run();
  }
  catch (std::exception& e)
  {
    std::cerr << e.what() << std::endl;
  }

  return 0;
}
}}}
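As an aside (my own observation, not part of the original report): the 65536-byte threshold is suggestive of the socket send buffer size, since a single send can no longer take the whole message once it exceeds SO_SNDBUF, and async_write then has to wait for write readiness. A minimal check of the actual buffer size on the affected host, assuming it is run on a connected socket such as socket_ above:

{{{
// Diagnostic sketch (an assumption, not from the original report): print the
// kernel send buffer size so it can be compared with the 65536-byte
// threshold at which the busy loop first appears.
boost::asio::socket_base::send_buffer_size option;
socket_.get_option(option);
std::cout << "SO_SNDBUF: " << option.value() << std::endl;
}}}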
For the client we can use the Boost synchronous TCP daytime client sample (changed to accept the port as an argument).

{{{
#include <iostream>
#include <boost/array.hpp>
#include <boost/asio.hpp>

using boost::asio::ip::tcp;

int main(int argc, char* argv[])
{
  try
  {
    if (argc != 3)
    {
      std::cerr << "Usage: client <host> <port>" << std::endl;
      return 1;
    }

    boost::asio::io_service io_service;

    tcp::resolver resolver(io_service);
    tcp::resolver::query query(argv[1], argv[2]);
    tcp::resolver::iterator endpoint_iterator = resolver.resolve(query);
    tcp::resolver::iterator end;

    tcp::socket socket(io_service);
    boost::system::error_code error = boost::asio::error::host_not_found;
    while (error && endpoint_iterator != end)
    {
      socket.close();
      socket.connect(*endpoint_iterator++, error);
    }
    if (error)
      throw boost::system::system_error(error);

    for (;;)
    {
      boost::array<char, 128> buf; // buffer size was lost in the ticket text
      boost::system::error_code error;

      size_t len = socket.read_some(boost::asio::buffer(buf), error);

      if (error == boost::asio::error::eof)
        break; // Connection closed cleanly by peer.
      else if (error)
        throw boost::system::system_error(error); // Some other error.

      std::cout.write(buf.data(), len);
    }
  }
  catch (std::exception& e)
  {
    std::cerr << e.what() << std::endl;
  }

  return 0;
}
}}}

Here is how I ran the server and client on two different Solaris hosts:

{{{
server 9081 200000
client <host> 9081
}}}

After running the server and client, run truss on the server process, which will show the tight polling loop mentioned above.

The program works fine on Linux, and also works fine on Solaris when compiled with the flag -DBOOST_ASIO_DISABLE_DEV_POLL.
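For reference, the workaround can also be expressed in source form rather than on the compile line; this is a sketch on my part (not from the original report), assuming the macro is defined before the first asio header is included in every translation unit:

{{{
// Equivalent to compiling with -DBOOST_ASIO_DISABLE_DEV_POLL: prevents asio
// from selecting the /dev/poll reactor on Solaris so that it falls back to
// another backend, avoiding the busy loop described above.
#define BOOST_ASIO_DISABLE_DEV_POLL
#include <boost/asio.hpp>
}}}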