diff -rNu epoll_reactor.ipp.orig epoll_reactor.ipp > /mnt/builder/3rdParty/boost/srcs/epoll_reactor.ipp.patch
|
old
|
new
|
|
| 205 | 205 | epoll_reactor::per_descriptor_data& descriptor_data, |
| 206 | 206 | reactor_op* op, bool allow_speculative) |
| 207 | 207 | { |
| 208 | | if (!descriptor_data) |
| | 208 | // LG |
| | 209 | epoll_reactor::per_descriptor_data local_descriptor_data = descriptor_data; |
| | 210 | if (!local_descriptor_data) |
| | 211 | // LG |
| 209 | 212 | { |
| 210 | 213 | op->ec_ = boost::asio::error::bad_descriptor; |
| 211 | 214 | post_immediate_completion(op); |
| 212 | 215 | return; |
| 213 | 216 | } |
| 214 | 217 | |
| 215 | | mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); |
| | 218 | // LG - use local_descriptor_data instead of descriptor_data to access mutex, as descriptor_data might have been reset to 0 by another thread |
| | 219 | mutex::scoped_lock descriptor_lock(local_descriptor_data->mutex_); |
| | 220 | |
| | 221 | // LG - have to check descriptor_data again, as another thread might have set it to 0 |
| | 222 | if (!descriptor_data) |
| | 223 | { |
| | 224 | op->ec_ = boost::asio::error::bad_descriptor; |
| | 225 | post_immediate_completion(op); |
| | 226 | return; |
| | 227 | } |
| | 228 | // LG |
| 216 | 229 | |
| 217 | 230 | if (descriptor_data->shutdown_) |
| 218 | 231 | { |
| … |
… |
|
| 251 | 264 | void epoll_reactor::cancel_ops(socket_type, |
| 252 | 265 | epoll_reactor::per_descriptor_data& descriptor_data) |
| 253 | 266 | { |
| 254 | | if (!descriptor_data) |
| | 267 | // LG |
| | 268 | epoll_reactor::per_descriptor_data local_descriptor_data = descriptor_data; |
| | 269 | if (!local_descriptor_data) |
| | 270 | // LG |
| 255 | 271 | return; |
| 256 | 272 | |
| 257 | | mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); |
| | 273 | // LG - use local_descriptor_data instead of descriptor_data to access mutex, as descriptor_data might have been reset to 0 by another thread |
| | 274 | mutex::scoped_lock descriptor_lock(local_descriptor_data->mutex_); |
| | 275 | |
| | 276 | // LG - have to check descriptor_data again, as another thread might have set it to 0 |
| | 277 | if (!descriptor_data) |
| | 278 | return; |
| | 279 | // LG |
| 258 | 280 | |
| 259 | 281 | op_queue<operation> ops; |
| 260 | 282 | for (int i = 0; i < max_ops; ++i) |
| … |
… |
|
| 275 | 297 | void epoll_reactor::deregister_descriptor(socket_type descriptor, |
| 276 | 298 | epoll_reactor::per_descriptor_data& descriptor_data, bool closing) |
| 277 | 299 | { |
| 278 | | if (!descriptor_data) |
| | 300 | // LG |
| | 301 | epoll_reactor::per_descriptor_data local_descriptor_data = descriptor_data; |
| | 302 | if (!local_descriptor_data) |
| | 303 | // LG |
| 279 | 304 | return; |
| 280 | 305 | |
| 281 | | mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); |
| | 306 | // LG - use local_descriptor_data instead of descriptor_data to access mutex, as descriptor_data might have been reset to 0 by another thread |
| | 307 | mutex::scoped_lock descriptor_lock(local_descriptor_data->mutex_); |
| | 308 | // LG - have to check descriptor_data again, as another thread might have set it to 0 |
| | 309 | if (!descriptor_data) |
| | 310 | return; |
| | 311 | // LG |
| 282 | 312 | |
| 283 | 313 | if (!descriptor_data->shutdown_) |
| 284 | 314 | { |
| … |
… |
|
| 307 | 337 | descriptor_data->descriptor_ = -1; |
| 308 | 338 | descriptor_data->shutdown_ = true; |
| 309 | 339 | |
| 310 | | descriptor_lock.unlock(); |
| 311 | | |
| 312 | | free_descriptor_state(descriptor_data); |
| | 340 | //LG |
| | 341 | // ensure that descriptor_data is reset INSIDE the protected block (before descriptor_lock.unlock()) |
| | 342 | // code is not straightforward here because descriptor_lock locks a mutex that resides inside *descriptor_data |
| | 343 | epoll_reactor::per_descriptor_data descriptor_data_ = descriptor_data; |
| 313 | 344 | descriptor_data = 0; |
| | 345 | descriptor_lock.unlock(); |
| | 346 | free_descriptor_state(descriptor_data_); |
| | 347 | //LG |
| 314 | 348 | |
| 315 | 349 | io_service_.post_deferred_completions(ops); |
| 316 | 350 | } |
| … |
… |
|
| 319 | 353 | void epoll_reactor::deregister_internal_descriptor(socket_type descriptor, |
| 320 | 354 | epoll_reactor::per_descriptor_data& descriptor_data) |
| 321 | 355 | { |
| 322 | | if (!descriptor_data) |
| | 356 | // LG |
| | 357 | epoll_reactor::per_descriptor_data local_descriptor_data = descriptor_data; |
| | 358 | if (!local_descriptor_data) |
| | 359 | // LG |
| 323 | 360 | return; |
| 324 | 361 | |
| 325 | | mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); |
| | 362 | // LG - use local_descriptor_data instead of descriptor_data to access mutex, as descriptor_data might have been reset to 0 by another thread |
| | 363 | mutex::scoped_lock descriptor_lock(local_descriptor_data->mutex_); |
| | 364 | // LG - have to check descriptor_data again, as another thread might have set it to 0 |
| | 365 | if (!descriptor_data) |
| | 366 | return; |
| | 367 | // LG |
| 326 | 368 | |
| 327 | 369 | if (!descriptor_data->shutdown_) |
| 328 | 370 | { |
| … |
… |
|
| 336 | 378 | descriptor_data->descriptor_ = -1; |
| 337 | 379 | descriptor_data->shutdown_ = true; |
| 338 | 380 | |
| 339 | | descriptor_lock.unlock(); |
| 340 | | |
| 341 | | free_descriptor_state(descriptor_data); |
| | 381 | //LG |
| | 382 | // ensure that descriptor_data is reset INSIDE the protected block (before descriptor_lock.unlock()) |
| | 383 | // code is not straightforward here because descriptor_lock locks a mutex that resides inside *descriptor_data |
| | 384 | epoll_reactor::per_descriptor_data descriptor_data_ = descriptor_data; |
| 342 | 385 | descriptor_data = 0; |
| | 386 | descriptor_lock.unlock(); |
| | 387 | free_descriptor_state(descriptor_data_); |
| | 388 | // LG |
| 343 | 389 | } |
| 344 | 390 | } |
| 345 | 391 | |