Mirror of https://github.com/mastodon/mastodon.git, synced 2024-12-25 12:34:58 +01:00
Merge pull request from GHSA-9pxv-6qvf-pjwc
* Fix timeout handling of outbound HTTP requests
* Use CLOCK_MONOTONIC instead of Time.now
This commit is contained in:
parent 0aa0b71f2c
commit e75ad1de0f
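
The second bullet refers to how the read deadline is computed: from Process.clock_gettime(Process::CLOCK_MONOTONIC) rather than Time.now, so that wall-clock adjustments (NTP steps, manual clock changes) can no longer stretch or shrink the timeout window. A minimal sketch of the difference, using a hypothetical 10-second budget rather than Mastodon's actual configuration:

# Wall-clock deadline: breaks if the system clock jumps while we are waiting.
deadline  = Time.now + 10
remaining = deadline - Time.now

# Monotonic deadline: the clock only moves forward, so elapsed time is reliable.
deadline  = Process.clock_gettime(Process::CLOCK_MONOTONIC) + 10
remaining = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
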
@@ -7,11 +7,48 @@ require 'resolv'
 # Monkey-patch the HTTP.rb timeout class to avoid using a timeout block
 # around the Socket#open method, since we use our own timeout blocks inside
 # that method
+#
+# Also changes how the read timeout behaves so that it is cumulative (closer
+# to HTTP::Timeout::Global, but still having distinct timeouts for other
+# operation types)
 class HTTP::Timeout::PerOperation
   def connect(socket_class, host, port, nodelay = false)
     @socket = socket_class.open(host, port)
     @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if nodelay
   end
+
+  # Reset deadline when the connection is re-used for different requests
+  def reset_counter
+    @deadline = nil
+  end
+
+  # Read data from the socket
+  def readpartial(size, buffer = nil)
+    @deadline ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + @read_timeout
+
+    timeout = false
+    loop do
+      result = @socket.read_nonblock(size, buffer, exception: false)
+
+      return :eof if result.nil?
+
+      remaining_time = @deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      raise HTTP::TimeoutError, "Read timed out after #{@read_timeout} seconds" if timeout || remaining_time <= 0
+      return result if result != :wait_readable
+
+      # Marking the socket for timeout. Why is this not raised immediately?
+      # There seems to be a race condition at the network level between calling
+      # #read_nonblock and #wait_readable, in which #read_nonblock signals waiting
+      # for reads, and when waiting for x seconds, it returns nil suddenly without completing
+      # the x seconds. In a normal case this would be a timeout on wait/read, but it can
+      # also mean that the socket has been closed by the server. Therefore we "mark" the
+      # socket for timeout and try to read more bytes. If it returns :eof, it's all good, no
+      # timeout. Else, the first timeout was a proper timeout.
+      # This hack has to be done because io/wait#wait_readable doesn't provide a value for when
+      # the socket is closed by the server, and HTTP::Parser doesn't provide the limit for the chunks.
+      timeout = true unless @socket.to_io.wait_readable(remaining_time)
+    end
+  end
 end
 
 class Request