@@ -142,14 +142,29 @@ def image_source_last_modified_by_file_metadata(path):
142142 )
143143
144144
@contextmanager
def urlopen_retry(url):
    """Open *url*, retrying transient failures, and yield the response.

    Makes up to 5 attempts (the original ``range(1, 5)`` only made 4,
    contradicting its own docstring), sleeping ``attempt ** 2`` seconds
    between failed attempts. Yields the open response object and always
    closes it on exit — the previous version leaked it. Re-raises the
    last ``IOError`` if every attempt fails.
    """
    last_error = None
    for attempt in range(1, 6):
        try:
            response = urlopen(url)  # nosec
            break
        except IOError as e:
            last_error = e
            logging.warning("Opening %s failed, retry #%i: %s", url, attempt, e)
            if attempt < 5:  # no point sleeping after the final failure
                time.sleep(attempt * attempt)
    else:
        raise last_error

    # Yield OUTSIDE the retry try-block: an IOError raised in the caller's
    # `with` body must propagate, not be caught here — catching it would make
    # the generator yield a second time and @contextmanager would raise
    # RuntimeError("generator didn't stop after throw()").
    try:
        yield response
    finally:
        response.close()
158+
159+
def origurl(path):
    """Return the original URL that a given file was downloaded from."""
    source_url = get_metadata_from_file(path, URL_XATTR)
    return source_url
148163
149164
def get_metadata_from_url(url, metadata_key):
    """Get metadata from given url."""
    # Open with retry; the response is closed automatically on exit.
    with urlopen_retry(url) as response:  # nosec
        header_value = response.getheader(metadata_key)
    return header_value
154169
155170
@@ -160,7 +175,7 @@ def get_inventory_script(inventory):
160175 os .environ ["TOX_WORK_DIR" ], "standard-inventory-qcow2"
161176 )
162177 try :
163- with urlopen (inventory ) as url_response : # nosec
178+ with urlopen_retry (inventory ) as url_response : # nosec
164179 with open (inventory_tempfile , "wb" ) as inf :
165180 shutil .copyfileobj (url_response , inf )
166181 os .chmod (inventory_tempfile , 0o777 ) # nosec
@@ -205,9 +220,8 @@ def fetch_image(url, cache, label):
205220
206221 image_tempfile = tempfile .NamedTemporaryFile (dir = cache , delete = False )
207222 try :
208- request = urlopen (url ) # nosec
209- shutil .copyfileobj (request , image_tempfile )
210- request .close ()
223+ with urlopen_retry (url ) as request : # nosec
224+ shutil .copyfileobj (request , image_tempfile )
211225 except Exception : # pylint: disable=broad-except
212226 logging .warning (traceback .format_exc ())
213227 os .unlink (image_tempfile .name )
@@ -286,8 +300,8 @@ def centoshtml2image(url, desiredarch):
286300 logging .error ("Could not determine CentOS version from %s" , url )
287301 return ""
288302
289- page = urlopen (url ) # nosec
290- tree = BeautifulSoup (page .read (), "html.parser" )
303+ with urlopen_retry (url ) as page : # nosec
304+ tree = BeautifulSoup (page .read (), "html.parser" )
291305 imagelist = [
292306 td .a ["href" ]
293307 for td in tree .find_all ("td" , class_ = "indexcolname" )
0 commit comments