Commit c020cb6

Revert #758 "Update async dependencies" (#768)
This caused a production outage, then a second outage after the server restarted.
1 parent 5acd658 · commit c020cb6

File tree

10 files changed: +537 −704 lines

Cargo.lock (+454 −541)

Some generated files are not rendered by default.

Cargo.toml (+7 −11)

@@ -15,7 +15,7 @@ regex = "1"
 structopt = "0.3"
 crates-index-diff = "7"
 time = "0.1"
-reqwest = { version = "0.10", features = ["blocking"] }
+reqwest = "0.9"
 semver = "0.9"
 slug = "=0.1.1"
 env_logger = "0.7"
@@ -29,10 +29,15 @@ toml = "0.5"
 html5ever = "0.22"
 schemamama = "0.3"
 schemamama_postgres = "0.2"
+rusoto_s3 = "0.40"
+rusoto_core = "0.40"
+rusoto_credential = "0.40"
+futures = "0.1"
+tokio = "0.1"
 systemstat = "0.1.4"
 prometheus = { version = "0.7.0", default-features = false }
 lazy_static = "1.0.0"
-rustwide = "0.7.1"
+rustwide = "=0.7.0"
 mime_guess = "2"
 dotenv = "0.15"
 
@@ -44,15 +49,6 @@ params = "0.8"
 staticfile = { version = "0.4", features = [ "cache" ] }
 tempfile = "3.1.0"
 
-# Communicating with S3
-rusoto_s3 = "0.43"
-rusoto_core = "0.43"
-rusoto_credential = "0.43"
-
-# Async
-futures-util = "0.3"
-tokio = { version = "0.2", features = ["rt-threaded"] }
-
 [target.'cfg(not(windows))'.dependencies]
 libc = "0.2"

src/db/add_package.rs (+1 −1)

@@ -11,8 +11,8 @@ use crate::error::Result;
 use failure::err_msg;
 use log::debug;
 use postgres::Connection;
-use reqwest::blocking::Client;
 use reqwest::header::ACCEPT;
+use reqwest::Client;
 use rustc_serialize::json::{Json, ToJson};
 use semver::Version;
 use slug::slugify;
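
Note: the import swap above reflects reqwest 0.9's synchronous API; 0.10 moved it behind the `blocking` module. A minimal sketch of a 0.9-style call site, with a hypothetical helper name and an illustrative URL (neither is from this commit):

use reqwest::header::ACCEPT;
use reqwest::Client; // reqwest 0.9: requests block; no `blocking` module needed

// Hypothetical helper for illustration only.
fn fetch_crate_metadata(name: &str) -> Result<String, reqwest::Error> {
    let url = format!("https://crates.io/api/v1/crates/{}", name); // illustrative URL
    let client = Client::new();
    // In 0.9, `send()` runs on the calling thread and returns the Response directly.
    let mut response = client
        .get(url.as_str())
        .header(ACCEPT, "application/json")
        .send()?;
    response.text()
}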

src/db/delete_crate.rs (+24 −30)

@@ -1,7 +1,7 @@
-use crate::storage::s3::{s3_client, S3Backend, S3_BUCKET_NAME};
+use crate::storage::s3::{s3_client, S3_BUCKET_NAME};
 use failure::{Error, Fail};
 use postgres::Connection;
-use rusoto_s3::{DeleteObjectsRequest, ListObjectsV2Request, ObjectIdentifier, S3};
+use rusoto_s3::{DeleteObjectsRequest, ListObjectsV2Request, ObjectIdentifier, S3Client, S3};
 
 /// List of directories in docs.rs's underlying storage (either the database or S3) containing a
 /// subdirectory named after the crate. Those subdirectories will be deleted.
@@ -22,9 +22,8 @@ pub fn delete_crate(conn: &Connection, name: &str) -> Result<(), Error> {
     };
 
     delete_from_database(conn, name, crate_id)?;
-    if let Some(client) = s3_client() {
-        let mut backend = S3Backend::new(client, S3_BUCKET_NAME);
-        delete_from_s3(&mut backend, name)?;
+    if let Some(s3) = s3_client() {
+        delete_from_s3(&s3, name)?;
     }
 
     Ok(())
@@ -69,26 +68,24 @@ fn delete_from_database(conn: &Connection, name: &str, crate_id: i32) -> Result<
     Ok(())
 }
 
-fn delete_from_s3(s3: &mut S3Backend<'_>, name: &str) -> Result<(), Error> {
+fn delete_from_s3(s3: &S3Client, name: &str) -> Result<(), Error> {
     for prefix in STORAGE_PATHS_TO_DELETE {
         delete_prefix_from_s3(s3, &format!("{}/{}/", prefix, name))?;
     }
-
     Ok(())
 }
 
-fn delete_prefix_from_s3(s3: &mut S3Backend<'_>, name: &str) -> Result<(), Error> {
+fn delete_prefix_from_s3(s3: &S3Client, name: &str) -> Result<(), Error> {
     let mut continuation_token = None;
-
     loop {
-        let list =
-            s3.runtime_handle()
-                .block_on(s3.client().list_objects_v2(ListObjectsV2Request {
-                    bucket: S3_BUCKET_NAME.into(),
-                    prefix: Some(name.into()),
-                    continuation_token,
-                    ..ListObjectsV2Request::default()
-                }))?;
+        let list = s3
+            .list_objects_v2(ListObjectsV2Request {
+                bucket: S3_BUCKET_NAME.into(),
+                prefix: Some(name.into()),
+                continuation_token,
+                ..ListObjectsV2Request::default()
+            })
+            .sync()?;
 
         let to_delete = list
             .contents
@@ -100,23 +97,20 @@ fn delete_prefix_from_s3(s3: &mut S3Backend<'_>, name: &str) -> Result<(), Error
                 version_id: None,
             })
             .collect::<Vec<_>>();
-
-        let resp =
-            s3.runtime_handle()
-                .block_on(s3.client().delete_objects(DeleteObjectsRequest {
-                    bucket: S3_BUCKET_NAME.into(),
-                    delete: rusoto_s3::Delete {
-                        objects: to_delete,
-                        quiet: None,
-                    },
-                    ..DeleteObjectsRequest::default()
-                }))?;
-
+        let resp = s3
+            .delete_objects(DeleteObjectsRequest {
+                bucket: S3_BUCKET_NAME.into(),
+                delete: rusoto_s3::Delete {
+                    objects: to_delete,
+                    quiet: None,
+                },
+                ..DeleteObjectsRequest::default()
+            })
+            .sync()?;
         if let Some(errs) = resp.errors {
             for err in &errs {
                 log::error!("error deleting file from s3: {:?}", err);
             }
-
             failure::bail!("uploading to s3 failed");
         }

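Note: this file goes back to rusoto 0.40, where each request method returns a RusotoFuture whose `.sync()` blocks the calling thread, so `delete_prefix_from_s3` needs no runtime handle. A standalone sketch of the same list-and-paginate pattern; the helper name and `bucket` parameter are illustrative, not part of this commit:

use failure::Error;
use rusoto_s3::{ListObjectsV2Request, S3Client, S3};

// Hypothetical helper: list every key under `prefix`, one page at a time.
fn list_all_keys(s3: &S3Client, bucket: &str, prefix: &str) -> Result<Vec<String>, Error> {
    let mut keys = Vec::new();
    let mut continuation_token = None;
    loop {
        // rusoto 0.40 returns a RusotoFuture; `.sync()` blocks until it resolves.
        let page = s3
            .list_objects_v2(ListObjectsV2Request {
                bucket: bucket.to_owned(),
                prefix: Some(prefix.to_owned()),
                continuation_token: continuation_token.take(),
                ..ListObjectsV2Request::default()
            })
            .sync()?;

        keys.extend(
            page.contents
                .unwrap_or_default()
                .into_iter()
                .filter_map(|object| object.key),
        );

        // S3 sets `next_continuation_token` only while more pages remain.
        continuation_token = page.next_continuation_token;
        if continuation_token.is_none() {
            break;
        }
    }
    Ok(keys)
}
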
src/storage/mod.rs (+6 −6)

@@ -61,7 +61,7 @@ pub fn get_file_list<P: AsRef<Path>>(path: P) -> Result<Vec<PathBuf>, Error> {
 
 pub(crate) enum Storage<'a> {
     Database(DatabaseBackend<'a>),
-    S3(Box<S3Backend<'a>>),
+    S3(S3Backend<'a>),
 }
 
 impl<'a> Storage<'a> {
@@ -72,16 +72,16 @@ impl<'a> Storage<'a> {
             DatabaseBackend::new(conn).into()
         }
     }
-    pub(crate) fn get(&mut self, path: &str) -> Result<Blob, Error> {
+    pub(crate) fn get(&self, path: &str) -> Result<Blob, Error> {
         match self {
             Self::Database(db) => db.get(path),
             Self::S3(s3) => s3.get(path),
         }
     }
 
-    fn store_batch(&mut self, batch: Vec<Blob>, trans: &Transaction) -> Result<(), Error> {
+    fn store_batch(&mut self, batch: &[Blob], trans: &Transaction) -> Result<(), Error> {
         match self {
-            Self::Database(db) => db.store_batch(&batch, trans),
+            Self::Database(db) => db.store_batch(batch, trans),
             Self::S3(s3) => s3.store_batch(batch),
         }
     }
@@ -141,7 +141,7 @@ impl<'a> Storage<'a> {
             if batch.is_empty() {
                 break;
             }
-            self.store_batch(batch, &trans)?;
+            self.store_batch(&batch, &trans)?;
         }
 
         trans.commit()?;
@@ -180,7 +180,7 @@ impl<'a> From<DatabaseBackend<'a>> for Storage<'a> {
 
 impl<'a> From<S3Backend<'a>> for Storage<'a> {
     fn from(db: S3Backend<'a>) -> Self {
-        Self::S3(Box::new(db))
+        Self::S3(db)
     }
 }

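Note: `store_batch` now borrows `&[Blob]` instead of taking `Vec<Blob>`, so the caller keeps ownership and the same batch can be resubmitted on retry (see the retry loop in `src/storage/s3.rs` below). A tiny self-contained illustration of the difference, with `String` standing in for `Blob`:

use std::slice;

// Borrowing `&[T]` leaves ownership with the caller; taking `Vec<T>` would
// consume the batch on the first attempt.
fn store_batch(batch: &[String]) {
    for blob in batch {
        println!("storing {}", blob);
    }
}

fn main() {
    let blobs = vec!["a.txt".to_string(), "b.txt".to_string()];
    store_batch(&blobs); // first attempt
    store_batch(&blobs); // retry: `blobs` is still owned here

    // A single item can be passed without allocating a Vec, as the tests below do:
    let blob = "dir/foo.txt".to_string();
    store_batch(slice::from_ref(&blob));
}
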
src/storage/s3.rs (+28 −72)

@@ -1,17 +1,14 @@
 use super::Blob;
 use failure::Error;
-use futures_util::{
-    future::FutureExt,
-    stream::{FuturesUnordered, StreamExt},
-};
+use futures::Future;
 use log::{error, warn};
 use rusoto_core::region::Region;
 use rusoto_credential::DefaultCredentialsProvider;
 use rusoto_s3::{GetObjectRequest, PutObjectRequest, S3Client, S3};
 use std::convert::TryInto;
 use std::io::Read;
 use time::Timespec;
-use tokio::runtime::{Handle, Runtime};
+use tokio::runtime::Runtime;
 
 #[cfg(test)]
 mod test;
@@ -35,23 +32,15 @@ impl<'a> S3Backend<'a> {
         }
     }
 
-    #[cfg(test)]
-    pub(crate) fn with_runtime(client: S3Client, bucket: &'a str, runtime: Runtime) -> Self {
-        Self {
-            client,
-            bucket,
-            runtime,
-        }
-    }
-
-    pub(super) fn get(&mut self, path: &str) -> Result<Blob, Error> {
+    pub(super) fn get(&self, path: &str) -> Result<Blob, Error> {
         let res = self
-            .runtime
-            .block_on(self.client.get_object(GetObjectRequest {
+            .client
+            .get_object(GetObjectRequest {
                 bucket: self.bucket.to_string(),
                 key: path.into(),
                 ..Default::default()
-            }))?;
+            })
+            .sync()?;
 
         let mut b = res.body.unwrap().into_blocking_read();
         let mut content = Vec::with_capacity(
@@ -71,16 +60,14 @@ impl<'a> S3Backend<'a> {
         })
     }
 
-    pub(super) fn store_batch(&mut self, mut uploads: Vec<Blob>) -> Result<(), Error> {
+    pub(super) fn store_batch(&mut self, batch: &[Blob]) -> Result<(), Error> {
+        use futures::stream::FuturesUnordered;
+        use futures::stream::Stream;
         let mut attempts = 0;
 
         loop {
-            // `FuturesUnordered` is used because the order of execution doesn't
-            // matter, we just want things to execute as fast as possible
-            let futures = FuturesUnordered::new();
-
-            // Drain uploads, filling `futures` with upload requests
-            for blob in uploads.drain(..) {
+            let mut futures = FuturesUnordered::new();
+            for blob in batch {
                 futures.push(
                     self.client
                         .put_object(PutObjectRequest {
@@ -90,53 +77,27 @@ impl<'a> S3Backend<'a> {
                             content_type: Some(blob.mime.clone()),
                             ..Default::default()
                         })
-                        // Drop the value returned by `put_object` because we don't need it,
-                        // emit an error and replace the error values with the blob that failed
-                        // to upload so that we can retry failed uploads
-                        .map(|resp| match resp {
-                            Ok(..) => {
-                                // Increment the total uploaded files when a file is uploaded
-                                crate::web::metrics::UPLOADED_FILES_TOTAL.inc_by(1);
-
-                                Ok(())
-                            }
-                            Err(err) => {
-                                error!("failed to upload file to s3: {:?}", err);
-                                Err(blob)
-                            }
+                        .inspect(|_| {
+                            crate::web::metrics::UPLOADED_FILES_TOTAL.inc_by(1);
                         }),
                 );
             }
             attempts += 1;
 
-            // Collect all the failed uploads so that we can retry them
-            uploads = self.runtime.block_on(
-                futures
-                    .filter_map(|resp| async move { resp.err() })
-                    .collect(),
-            );
-
-            // If there are no further uploads we were successful and can return
-            if uploads.is_empty() {
-                break;
-
-            // If more than three attempts to upload fail, return an error
-            } else if attempts >= 3 {
-                error!("failed to upload to s3, abandoning");
-                failure::bail!("Failed to upload to s3 three times, abandoning");
+            match self.runtime.block_on(futures.map(drop).collect()) {
+                // this batch was successful, start another batch if there are still more files
+                Ok(_) => break,
+                Err(err) => {
+                    error!("failed to upload to s3: {:?}", err);
+                    // if a futures error occurs, retry the batch
+                    if attempts > 2 {
+                        panic!("failed to upload 3 times, exiting");
+                    }
+                }
             }
         }
-
         Ok(())
     }
-
-    pub fn runtime_handle(&self) -> Handle {
-        self.runtime.handle().clone()
-    }
-
-    pub fn client(&self) -> &S3Client {
-        &self.client
-    }
 }
 
 fn parse_timespec(raw: &str) -> Result<Timespec, Error> {
@@ -150,15 +111,13 @@ pub(crate) fn s3_client() -> Option<S3Client> {
     if std::env::var_os("AWS_ACCESS_KEY_ID").is_none() && std::env::var_os("FORCE_S3").is_none() {
         return None;
     }
-
     let creds = match DefaultCredentialsProvider::new() {
         Ok(creds) => creds,
        Err(err) => {
             warn!("failed to retrieve AWS credentials: {}", err);
             return None;
         }
     };
-
     Some(S3Client::new_with(
         rusoto_core::request::HttpClient::new().unwrap(),
         creds,
@@ -176,6 +135,7 @@ pub(crate) fn s3_client() -> Option<S3Client> {
 pub(crate) mod tests {
     use super::*;
     use crate::test::*;
+    use std::slice;
 
     #[test]
     fn test_parse_timespec() {
@@ -205,7 +165,7 @@ pub(crate) mod tests {
 
         // Add a test file to the database
         let s3 = env.s3();
-        s3.upload(vec![blob.clone()]).unwrap();
+        s3.upload(slice::from_ref(&blob)).unwrap();
 
         // Test that the proper file was returned
         s3.assert_blob(&blob, "dir/foo.txt");
@@ -227,10 +187,8 @@
             "b",
             "a_very_long_file_name_that_has_an.extension",
             "parent/child",
-            "h/i/
-g/h/l/y/_/n/e/s/t/e/d/_/d/i/r/e/c/t/o/r/i/e/s",
+            "h/i/g/h/l/y/_/n/e/s/t/e/d/_/d/i/r/e/c/t/o/r/i/e/s",
         ];
-
         let blobs: Vec<_> = names
             .iter()
             .map(|&path| Blob {
@@ -240,12 +198,10 @@
                 content: "Hello world!".into(),
             })
             .collect();
-
-        s3.upload(blobs.clone()).unwrap();
+        s3.upload(&blobs).unwrap();
         for blob in &blobs {
             s3.assert_blob(blob, &blob.path);
         }
-
         Ok(())
     })
 }
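
Note: the restored `store_batch` drives uploads with futures 0.1 combinators rather than async/await: `FuturesUnordered` polls all uploads concurrently, `map(drop)` discards the successful outputs, `collect()` resolves once every future completes (or fails on the first error), and a tokio 0.1 `Runtime` blocks on the result, retrying the whole batch up to three times. A standalone sketch of that pattern under `futures = "0.1"` and `tokio = "0.1"`, with `future::lazy` standing in for the real `put_object` calls:

use futures::future;
use futures::stream::{FuturesUnordered, Stream};
use tokio::runtime::Runtime;

fn main() {
    let mut runtime = Runtime::new().unwrap();
    let mut attempts = 0;

    loop {
        // One future per "upload"; FuturesUnordered polls them concurrently
        // and yields results in completion order.
        let mut futures = FuturesUnordered::new();
        for i in 0..3 {
            // Stand-in for `client.put_object(...)`: a lazy future that succeeds.
            futures.push(future::lazy(move || Ok::<_, ()>(i)));
        }
        attempts += 1;

        // `map(drop)` discards each item; `collect()` yields Ok(Vec<()>) once
        // all futures finish, or the first error otherwise.
        match runtime.block_on(futures.map(drop).collect()) {
            Ok(_) => break, // the whole batch succeeded
            Err(()) if attempts > 2 => panic!("failed to upload 3 times, exiting"),
            Err(()) => continue, // retry the whole batch
        }
    }
}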
