diff --git a/src/app.rs b/src/app.rs
index 8dcb9fef7f2..75212a95e66 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -1,7 +1,7 @@
 //! Application-wide components in a struct accessible from each request
 
 use crate::{db, util::CargoResult, Config, Env};
-use std::{env, path::PathBuf, sync::Arc, time::Duration};
+use std::{path::PathBuf, sync::Arc, time::Duration};
 
 use diesel::r2d2;
 use scheduled_thread_pool::ScheduledThreadPool;
@@ -45,25 +45,25 @@ impl App {
         );
         github.scopes.push(String::from("read:org"));
 
-        let db_pool_size = match (env::var("DB_POOL_SIZE"), config.env) {
+        let db_pool_size = match (dotenv::var("DB_POOL_SIZE"), config.env) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_POOL_SIZE"),
             (_, Env::Production) => 10,
             _ => 1,
         };
 
-        let db_min_idle = match (env::var("DB_MIN_IDLE"), config.env) {
+        let db_min_idle = match (dotenv::var("DB_MIN_IDLE"), config.env) {
             (Ok(num), _) => Some(num.parse().expect("couldn't parse DB_MIN_IDLE")),
             (_, Env::Production) => Some(5),
             _ => None,
         };
 
-        let db_helper_threads = match (env::var("DB_HELPER_THREADS"), config.env) {
+        let db_helper_threads = match (dotenv::var("DB_HELPER_THREADS"), config.env) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_HELPER_THREADS"),
             (_, Env::Production) => 3,
             _ => 1,
         };
 
-        let db_connection_timeout = match (env::var("DB_TIMEOUT"), config.env) {
+        let db_connection_timeout = match (dotenv::var("DB_TIMEOUT"), config.env) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_TIMEOUT"),
             (_, Env::Production) => 10,
             (_, Env::Test) => 1,
diff --git a/src/bin/background-worker.rs b/src/bin/background-worker.rs
index 07f4f1d8412..2dfe34dfc9e 100644
--- a/src/bin/background-worker.rs
+++ b/src/bin/background-worker.rs
@@ -12,7 +12,6 @@ use cargo_registry::git::Repository;
 
 use cargo_registry::{background, background_jobs::*, db};
 use diesel::r2d2;
-use std::env;
 use std::thread::sleep;
 use std::time::Duration;
 
@@ -25,8 +24,8 @@ fn main() {
     let db_config = r2d2::Pool::builder().max_size(2);
     let db_pool = db::diesel_pool(&config.db_url, config.env, db_config);
 
-    let username = env::var("GIT_HTTP_USER");
-    let password = env::var("GIT_HTTP_PWD");
+    let username = dotenv::var("GIT_HTTP_USER");
+    let password = dotenv::var("GIT_HTTP_PWD");
     let credentials = match (username, password) {
         (Ok(u), Ok(p)) => Some((u, p)),
         _ => None,
diff --git a/src/bin/monitor.rs b/src/bin/monitor.rs
index 9f1c25be20d..5fc93f47f5c 100644
--- a/src/bin/monitor.rs
+++ b/src/bin/monitor.rs
@@ -13,7 +13,6 @@ mod on_call;
 
 use cargo_registry::{db, util::CargoResult};
 use diesel::prelude::*;
-use std::env;
 
 fn main() -> CargoResult<()> {
     let conn = db::connect_now()?;
@@ -30,7 +29,7 @@ fn check_stalled_background_jobs(conn: &PgConnection) -> CargoResult<()> {
 
     println!("Checking for stalled background jobs");
 
-    let max_job_time = env::var("MAX_JOB_TIME")
+    let max_job_time = dotenv::var("MAX_JOB_TIME")
         .map(|s| s.parse::().unwrap())
         .unwrap_or(15);
 
diff --git a/src/bin/on_call/mod.rs b/src/bin/on_call/mod.rs
index 1228f6e73b4..e76add72258 100644
--- a/src/bin/on_call/mod.rs
+++ b/src/bin/on_call/mod.rs
@@ -1,5 +1,4 @@
 use cargo_registry::util::{internal, CargoResult};
-use std::env;
 
 use reqwest::{header, StatusCode as Status};
 
@@ -27,8 +26,8 @@ impl Event {
     /// If the variant is `Trigger`, this will page whoever is on call
     /// (potentially waking them up at 3 AM).
     pub fn send(self) -> CargoResult<()> {
-        let api_token = env::var("PAGERDUTY_API_TOKEN")?;
-        let service_key = env::var("PAGERDUTY_INTEGRATION_KEY")?;
+        let api_token = dotenv::var("PAGERDUTY_API_TOKEN")?;
+        let service_key = dotenv::var("PAGERDUTY_INTEGRATION_KEY")?;
 
         let mut response = reqwest::Client::new()
             .post("https://events.pagerduty.com/generic/2010-04-15/create_event.json")
diff --git a/src/bin/server.rs b/src/bin/server.rs
index 5349bfb7018..8d97081f657 100644
--- a/src/bin/server.rs
+++ b/src/bin/server.rs
@@ -3,7 +3,6 @@ use cargo_registry::{boot, App, Env};
 use jemalloc_ctl;
 
 use std::{
-    env,
     fs::File,
     sync::{mpsc::channel, Arc},
 };
@@ -33,16 +32,16 @@ fn main() {
     let categories_toml = include_str!("../boot/categories.toml");
     boot::categories::sync(categories_toml).unwrap();
 
-    let heroku = env::var("HEROKU").is_ok();
+    let heroku = dotenv::var("HEROKU").is_ok();
     let port = if heroku {
         8888
     } else {
-        env::var("PORT")
+        dotenv::var("PORT")
             .ok()
             .and_then(|s| s.parse().ok())
             .unwrap_or(8888)
     };
-    let threads = env::var("SERVER_THREADS")
+    let threads = dotenv::var("SERVER_THREADS")
         .map(|s| s.parse().expect("SERVER_THREADS was not a valid number"))
         .unwrap_or_else(|_| {
             if config.env == Env::Development {
@@ -52,7 +51,7 @@ fn main() {
             }
         });
 
-    let server = if env::var("USE_HYPER").is_ok() {
+    let server = if dotenv::var("USE_HYPER").is_ok() {
         println!("Booting with a hyper based server");
         Hyper(HyperService::new(app, threads as usize))
     } else {
diff --git a/src/config.rs b/src/config.rs
index bf2b436e414..31aaf7ec527 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,5 +1,5 @@
 use crate::{env, uploaders::Uploader, Env, Replica};
-use std::{env, path::PathBuf};
+use std::path::PathBuf;
 use url::Url;
 
 #[derive(Clone, Debug)]
@@ -43,12 +43,12 @@ impl Default for Config {
     fn default() -> Config {
         let checkout = PathBuf::from(env("GIT_REPO_CHECKOUT"));
         let api_protocol = String::from("https");
-        let mirror = if env::var("MIRROR").is_ok() {
+        let mirror = if dotenv::var("MIRROR").is_ok() {
             Replica::ReadOnlyMirror
         } else {
             Replica::Primary
         };
-        let heroku = env::var("HEROKU").is_ok();
+        let heroku = dotenv::var("HEROKU").is_ok();
         let cargo_env = if heroku {
             Env::Production
         } else {
@@ -62,12 +62,12 @@ impl Default for Config {
                 Uploader::S3 {
                     bucket: s3::Bucket::new(
                         env("S3_BUCKET"),
-                        env::var("S3_REGION").ok(),
+                        dotenv::var("S3_REGION").ok(),
                         env("S3_ACCESS_KEY"),
                         env("S3_SECRET_KEY"),
                         &api_protocol,
                     ),
-                    cdn: env::var("S3_CDN").ok(),
+                    cdn: dotenv::var("S3_CDN").ok(),
                     proxy: None,
                 }
             }
@@ -83,18 +83,18 @@ impl Default for Config {
                 Uploader::S3 {
                     bucket: s3::Bucket::new(
                         env("S3_BUCKET"),
-                        env::var("S3_REGION").ok(),
-                        env::var("S3_ACCESS_KEY").unwrap_or_default(),
-                        env::var("S3_SECRET_KEY").unwrap_or_default(),
+                        dotenv::var("S3_REGION").ok(),
+                        dotenv::var("S3_ACCESS_KEY").unwrap_or_default(),
+                        dotenv::var("S3_SECRET_KEY").unwrap_or_default(),
                         &api_protocol,
                     ),
-                    cdn: env::var("S3_CDN").ok(),
+                    cdn: dotenv::var("S3_CDN").ok(),
                     proxy: None,
                 }
             }
             // In Development mode, either running as a primary instance or a read-only mirror
             _ => {
-                if env::var("S3_BUCKET").is_ok() {
+                if dotenv::var("S3_BUCKET").is_ok() {
                     // If we've set the `S3_BUCKET` variable to any value, use all of the values
                     // for the related S3 environment variables and configure the app to upload to
                     // and read from S3 like production does. All values except for bucket are
@@ -103,12 +103,12 @@ impl Default for Config {
                     Uploader::S3 {
                         bucket: s3::Bucket::new(
                             env("S3_BUCKET"),
-                            env::var("S3_REGION").ok(),
-                            env::var("S3_ACCESS_KEY").unwrap_or_default(),
-                            env::var("S3_SECRET_KEY").unwrap_or_default(),
+                            dotenv::var("S3_REGION").ok(),
+                            dotenv::var("S3_ACCESS_KEY").unwrap_or_default(),
+                            dotenv::var("S3_SECRET_KEY").unwrap_or_default(),
                             &api_protocol,
                         ),
-                        cdn: env::var("S3_CDN").ok(),
+                        cdn: dotenv::var("S3_CDN").ok(),
                         proxy: None,
                     }
                 } else {
diff --git a/src/controllers/site_metadata.rs b/src/controllers/site_metadata.rs
index 16806810bb8..4011f74c3c7 100644
--- a/src/controllers/site_metadata.rs
+++ b/src/controllers/site_metadata.rs
@@ -6,7 +6,7 @@ use super::prelude::*;
 /// If `HEROKU_SLUG_COMMIT` is not set, returns `"unknown"`.
 pub fn show_deployed_sha(req: &mut dyn Request) -> CargoResult<Response> {
     let deployed_sha =
-        ::std::env::var("HEROKU_SLUG_COMMIT").unwrap_or_else(|_| String::from("unknown"));
+        dotenv::var("HEROKU_SLUG_COMMIT").unwrap_or_else(|_| String::from("unknown"));
 
     #[derive(Serialize)]
     struct R<'a> {
diff --git a/src/controllers/user/session.rs b/src/controllers/user/session.rs
index 9a89ccdf348..daa5fca50e0 100644
--- a/src/controllers/user/session.rs
+++ b/src/controllers/user/session.rs
@@ -133,13 +133,10 @@ pub fn logout(req: &mut dyn Request) -> CargoResult<Response> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use dotenv::dotenv;
-    use std::env;
 
     fn pg_connection() -> PgConnection {
-        let _ = dotenv();
         let database_url =
-            env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+            dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
         PgConnection::establish(&database_url).unwrap()
     }
 
diff --git a/src/db.rs b/src/db.rs
index b090be41a4d..7b6a8f7746f 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,5 +1,3 @@
-use std::env;
-
 use conduit::Request;
 use diesel::prelude::*;
 use diesel::r2d2::{self, ConnectionManager, CustomizeConnection};
@@ -68,7 +66,7 @@ impl Deref for DieselPooledConn<'_> {
 pub fn connect_now() -> ConnectionResult<PgConnection> {
     use diesel::Connection;
     let mut url = Url::parse(&crate::env("DATABASE_URL")).expect("Invalid database URL");
-    if env::var("HEROKU").is_ok() && !url.query_pairs().any(|(k, _)| k == "sslmode") {
+    if dotenv::var("HEROKU").is_ok() && !url.query_pairs().any(|(k, _)| k == "sslmode") {
         url.query_pairs_mut().append_pair("sslmode", "require");
     }
     PgConnection::establish(&url.to_string())
@@ -80,7 +78,7 @@ pub fn diesel_pool(
     config: r2d2::Builder<ConnectionManager<PgConnection>>,
 ) -> DieselPool {
     let mut url = Url::parse(url).expect("Invalid database URL");
-    if env::var("HEROKU").is_ok() && !url.query_pairs().any(|(k, _)| k == "sslmode") {
+    if dotenv::var("HEROKU").is_ok() && !url.query_pairs().any(|(k, _)| k == "sslmode") {
         url.query_pairs_mut().append_pair("sslmode", "require");
     }
 
diff --git a/src/email.rs b/src/email.rs
index 132560f7c1d..aef984da927 100644
--- a/src/email.rs
+++ b/src/email.rs
@@ -1,8 +1,6 @@
-use std::env;
 use std::path::Path;
 
 use crate::util::{bad_request, CargoResult};
-use dotenv::dotenv;
 use lettre::file::FileTransport;
 use lettre::smtp::authentication::{Credentials, Mechanism};
 
@@ -19,12 +17,10 @@ pub struct MailgunConfigVars {
 }
 
 pub fn init_config_vars() -> Option<MailgunConfigVars> {
-    dotenv().ok();
-
     match (
-        env::var("MAILGUN_SMTP_LOGIN"),
-        env::var("MAILGUN_SMTP_PASSWORD"),
-        env::var("MAILGUN_SMTP_SERVER"),
+        dotenv::var("MAILGUN_SMTP_LOGIN"),
+        dotenv::var("MAILGUN_SMTP_PASSWORD"),
+        dotenv::var("MAILGUN_SMTP_SERVER"),
     ) {
         (Ok(login), Ok(password), Ok(server)) => Some(MailgunConfigVars {
             smtp_login: login,
diff --git a/src/lib.rs b/src/lib.rs
index c597c25fe14..ec3da9b6c67 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -100,8 +100,7 @@ pub fn build_handler(app: Arc<App>) -> MiddlewareBuilder {
 /// Panics if the environment variable with the name passed in as an argument is not defined
 /// in the current environment.
 pub fn env(s: &str) -> String {
-    dotenv::dotenv().ok();
-    ::std::env::var(s).unwrap_or_else(|_| panic!("must have `{}` defined", s))
+    dotenv::var(s).unwrap_or_else(|_| panic!("must have `{}` defined", s))
 }
 
 sql_function!(fn lower(x: ::diesel::sql_types::Text) -> ::diesel::sql_types::Text);
diff --git a/src/models/category.rs b/src/models/category.rs
index 7db66e72c6c..28563337c48 100644
--- a/src/models/category.rs
+++ b/src/models/category.rs
@@ -184,13 +184,10 @@ impl<'a> NewCategory<'a> {
 mod tests {
     use super::*;
     use diesel::connection::SimpleConnection;
-    use dotenv::dotenv;
-    use std::env;
 
     fn pg_connection() -> PgConnection {
-        let _ = dotenv();
         let database_url =
-            env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+            dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
         let conn = PgConnection::establish(&database_url).unwrap();
         // These tests deadlock if run concurrently
         conn.batch_execute("BEGIN; LOCK categories IN ACCESS EXCLUSIVE MODE")
diff --git a/src/models/keyword.rs b/src/models/keyword.rs
index fc121fc5754..24149a50ac6 100644
--- a/src/models/keyword.rs
+++ b/src/models/keyword.rs
@@ -99,13 +99,10 @@ mod tests {
     use super::*;
     use diesel;
     use diesel::connection::SimpleConnection;
-    use dotenv::dotenv;
-    use std::env;
 
     fn pg_connection() -> PgConnection {
-        let _ = dotenv();
         let database_url =
-            env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+            dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
         let conn = PgConnection::establish(&database_url).unwrap();
         // These tests deadlock if run concurrently
         conn.batch_execute("BEGIN;").unwrap();
diff --git a/src/tests/all.rs b/src/tests/all.rs
index 9929ca7ee7a..7b184a29139 100644
--- a/src/tests/all.rs
+++ b/src/tests/all.rs
@@ -20,7 +20,6 @@ use cargo_registry::{
 };
 use std::{
     borrow::Cow,
-    env,
     sync::{
         atomic::{AtomicUsize, Ordering},
         Arc,
@@ -112,15 +111,13 @@ fn app() -> (
     Arc<App>,
     conduit_middleware::MiddlewareBuilder,
 ) {
-    dotenv::dotenv().ok();
-
     let (proxy, bomb) = record::proxy();
     let uploader = Uploader::S3 {
         bucket: s3::Bucket::new(
             String::from("alexcrichton-test"),
             None,
-            std::env::var("S3_ACCESS_KEY").unwrap_or_default(),
-            std::env::var("S3_SECRET_KEY").unwrap_or_default(),
+            dotenv::var("S3_ACCESS_KEY").unwrap_or_default(),
+            dotenv::var("S3_SECRET_KEY").unwrap_or_default(),
             // When testing we route all API traffic over HTTP so we can
             // sniff/record it, but everywhere else we use https
             "http",
@@ -140,8 +137,8 @@ fn simple_app(uploader: Uploader) -> (Arc<App>, conduit_middleware::MiddlewareBu
         session_key: "test this has to be over 32 bytes long".to_string(),
         git_repo_checkout: git::checkout(),
         index_location: Url::from_file_path(&git::bare()).unwrap(),
-        gh_client_id: env::var("GH_CLIENT_ID").unwrap_or_default(),
-        gh_client_secret: env::var("GH_CLIENT_SECRET").unwrap_or_default(),
+        gh_client_id: dotenv::var("GH_CLIENT_ID").unwrap_or_default(),
+        gh_client_secret: dotenv::var("GH_CLIENT_SECRET").unwrap_or_default(),
         db_url: env("TEST_DATABASE_URL"),
         env: Env::Test,
         max_upload_size: 3000,
@@ -160,7 +157,7 @@ fn simple_app(uploader: Uploader) -> (Arc<App>, conduit_middleware::MiddlewareBu
 
 // Return the environment variable only if it has been defined
 fn env(var: &str) -> String {
-    match env::var(var) {
+    match dotenv::var(var) {
         Ok(ref s) if s == "" => panic!("environment variable `{}` must not be empty", var),
         Ok(s) => s,
         _ => panic!(
diff --git a/src/tests/categories.rs b/src/tests/categories.rs
index bcb854e0caa..7134e2d3773 100644
--- a/src/tests/categories.rs
+++ b/src/tests/categories.rs
@@ -1,8 +1,6 @@
 use cargo_registry::schema::categories;
-use std::env;
 
 use diesel::*;
-use dotenv::dotenv;
 
 const ALGORITHMS: &str = r#"
 [algorithms]
@@ -40,9 +38,8 @@ description = "Another category ho hum"
 "#;
 
 fn pg_connection() -> PgConnection {
-    let _ = dotenv();
     let database_url =
-        env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
+        dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");
     let conn = PgConnection::establish(&database_url).unwrap();
     conn.begin_test_transaction().unwrap();
     conn
diff --git a/src/tests/record.rs b/src/tests/record.rs
index aedb813831e..80b4575772b 100644
--- a/src/tests/record.rs
+++ b/src/tests/record.rs
@@ -3,7 +3,6 @@ use cargo_registry::models::NewUser;
 use std::{
     borrow::Cow,
     collections::HashSet,
-    env,
     fs::{self, File},
     io::{self, prelude::*},
     net,
@@ -80,7 +79,7 @@ enum Record {
 
 pub fn proxy() -> (String, Bomb) {
     let me = thread::current().name().unwrap().to_string();
-    let record = env::var("RECORD").is_ok();
+    let record = dotenv::var("RECORD").is_ok();
 
     let a = t!(net::TcpListener::bind("127.0.0.1:0"));
     let ret = format!("http://{}", t!(a.local_addr()));
diff --git a/src/tests/util.rs b/src/tests/util.rs
index 8fefe90a3c7..333fc453190 100644
--- a/src/tests/util.rs
+++ b/src/tests/util.rs
@@ -48,7 +48,6 @@ pub struct TestApp(Rc<TestAppInner>);
 impl TestApp {
     /// Initialize an application with an `Uploader` that panics
     pub fn init() -> TestAppBuilder {
-        dotenv::dotenv().ok();
         let (app, middle) = crate::simple_app(Uploader::Panic);
         let inner = Rc::new(TestAppInner {
             app,
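
Note (not part of the patch, just an illustration): the explicit `dotenv::dotenv().ok()` / `dotenv()` calls can be dropped because `dotenv::var` loads the `.env` file on first use and then reads the process environment, while `std::env::var` only sees variables already set on the process. A minimal sketch of the two lookup styles, assuming the `dotenv` crate is a dependency; the helper names below are made up, and `DB_POOL_SIZE` is simply one of the variables touched by this diff:

    // Old style: `.env` has to be loaded explicitly before reading variables.
    fn pool_size_old() -> u32 {
        dotenv::dotenv().ok(); // copy `.env` entries into the process environment
        std::env::var("DB_POOL_SIZE")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(1)
    }

    // New style: `dotenv::var` initializes dotenv lazily, so call sites no
    // longer need to remember the `dotenv().ok()` boilerplate.
    fn pool_size_new() -> u32 {
        dotenv::var("DB_POOL_SIZE")
            .ok()
            .and_then(|s| s.parse().ok())
            .unwrap_or(1)
    }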