@@ -142,14 +142,28 @@ def image_source_last_modified_by_file_metadata(path):
142142 )
143143
144144
@contextmanager
def urlopen_retry(url):
    """Context manager: open *url*, retrying on IOError with quadratic backoff.

    Tries up to 4 times, sleeping retry**2 seconds between attempts
    (1s, 4s, 9s).  Yields the open response object and always closes it
    when the caller's ``with`` block exits.  If every attempt fails, the
    last IOError is re-raised.

    Only the ``urlopen`` call itself is retried.  Exceptions raised by the
    caller's ``with`` body are NOT caught here: catching them (as a
    try/except around the ``yield`` would) makes the generator yield a
    second time, which @contextmanager rejects with a RuntimeError.
    """
    last_error = None
    for retry in range(1, 5):
        try:
            response = urlopen(url)  # nosec
        except IOError as e:
            last_error = e
            logging.warning("Opening %s failed, retry #%i: %s", url, retry, e)
            time.sleep(retry * retry)
            continue
        try:
            yield response
            return
        finally:
            # Close the response even if the caller's body raises.
            response.close()
    raise last_error
157+
158+
def origurl(path):
    """Look up the URL a downloaded file originally came from.

    Reads the URL_XATTR metadata key stored on *path* and returns it.
    """
    source_url = get_metadata_from_file(path, URL_XATTR)
    return source_url
148162
149163
def get_metadata_from_url(url, metadata_key):
    """Fetch *url* (with retries) and return the *metadata_key* response header."""
    with urlopen_retry(url) as response:  # nosec
        header_value = response.getheader(metadata_key)
    return header_value
154168
155169
@@ -160,7 +174,7 @@ def get_inventory_script(inventory):
160174 os .environ ["TOX_WORK_DIR" ], "standard-inventory-qcow2"
161175 )
162176 try :
163- with urlopen (inventory ) as url_response : # nosec
177+ with urlopen_retry (inventory ) as url_response : # nosec
164178 with open (inventory_tempfile , "wb" ) as inf :
165179 shutil .copyfileobj (url_response , inf )
166180 os .chmod (inventory_tempfile , 0o777 ) # nosec
@@ -205,9 +219,8 @@ def fetch_image(url, cache, label):
205219
206220 image_tempfile = tempfile .NamedTemporaryFile (dir = cache , delete = False )
207221 try :
208- request = urlopen (url ) # nosec
209- shutil .copyfileobj (request , image_tempfile )
210- request .close ()
222+ with urlopen_retry (url ) as request : # nosec
223+ shutil .copyfileobj (request , image_tempfile )
211224 except Exception : # pylint: disable=broad-except
212225 logging .warning (traceback .format_exc ())
213226 os .unlink (image_tempfile .name )
@@ -286,8 +299,8 @@ def centoshtml2image(url, desiredarch):
286299 logging .error ("Could not determine CentOS version from %s" , url )
287300 return ""
288301
289- page = urlopen (url ) # nosec
290- tree = BeautifulSoup (page .read (), "html.parser" )
302+ with urlopen_retry (url ) as page : # nosec
303+ tree = BeautifulSoup (page .read (), "html.parser" )
291304 imagelist = [
292305 td .a ["href" ]
293306 for td in tree .find_all ("td" , class_ = "indexcolname" )
0 commit comments