Add declarative attic cache
Signed-off-by: Nikolaos Karaolidis <nick@karaolidis.com>
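
The attic client is patched so that a cache's signing keypair can be
managed declaratively: "attic cache create" and "attic cache configure"
gain a --keypair-path option that reads a pre-existing keypair from a
file instead of always generating one on the server, and a new
"attic key generate" subcommand prints a freshly generated keypair.
Both paths go through attic's NixKeypair, so the "Private key" line
printed by the new subcommand is presumably what --keypair-path expects
to read. Example invocations (cache and key names are placeholders):

    attic key generate my-cache-key
    attic cache create my-cache --keypair-path /path/to/keypair
    attic cache configure my-cache --keypair-path /path/to/keypair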
overlays/attic-client/declarative-key-pair.patch (new file, +162)
@@ -0,0 +1,162 @@
diff --git a/client/src/cli.rs b/client/src/cli.rs
index ee86783..e96b1b6 100644
--- a/client/src/cli.rs
+++ b/client/src/cli.rs
@@ -9,6 +9,7 @@ use enum_as_inner::EnumAsInner;
 
 use crate::command::cache::{self, Cache};
 use crate::command::get_closure::{self, GetClosure};
+use crate::command::key::{self, Key};
 use crate::command::login::{self, Login};
 use crate::command::push::{self, Push};
 use crate::command::r#use::{self, Use};
@@ -30,6 +31,7 @@ pub enum Command {
     Push(Push),
     Cache(Cache),
     WatchStore(WatchStore),
+    Key(Key),
 
     #[clap(hide = true)]
     GetClosure(GetClosure),
@@ -57,6 +59,7 @@ pub async fn run() -> Result<()> {
         Command::Cache(_) => cache::run(opts).await,
         Command::WatchStore(_) => watch_store::run(opts).await,
         Command::GetClosure(_) => get_closure::run(opts).await,
+        Command::Key(_) => key::run(opts).await,
     }
 }
 
diff --git a/client/src/command/cache.rs b/client/src/command/cache.rs
index af01378..af24d8c 100644
--- a/client/src/command/cache.rs
+++ b/client/src/command/cache.rs
@@ -7,8 +7,11 @@ use crate::api::ApiClient;
 use crate::cache::CacheRef;
 use crate::cli::Opts;
 use crate::config::Config;
-use attic::api::v1::cache_config::{
-    CacheConfig, CreateCacheRequest, KeypairConfig, RetentionPeriodConfig,
+use attic::{
+    api::v1::cache_config::{
+        CacheConfig, CreateCacheRequest, KeypairConfig, RetentionPeriodConfig,
+    },
+    signing::NixKeypair,
 };
 
 /// Manage caches on an Attic server.
@@ -72,6 +75,12 @@ struct Create {
         default_value = "cache.nixos.org-1"
     )]
     upstream_cache_key_names: Vec<String>,
+
+    /// The signing keypair to use for the cache.
+    ///
+    /// If not specified, a new keypair will be generated.
+    #[clap(long)]
+    keypair_path: Option<String>,
 }
 
 /// Configure a cache.
@@ -91,6 +100,14 @@ struct Configure {
     #[clap(long)]
     regenerate_keypair: bool,
 
+    /// Set a keypair for the cache.
+    ///
+    /// The server-side signing key will be set to the
+    /// specified keypair. This is useful for setting up
+    /// a cache with a pre-existing keypair.
+    #[clap(long, conflicts_with = "regenerate_keypair")]
+    keypair_path: Option<String>,
+
     /// Make the cache public.
     ///
     /// Use `--private` to make it private.
@@ -179,9 +196,15 @@ async fn create_cache(sub: Create) -> Result<()> {
     let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;
     let api = ApiClient::from_server_config(server.clone())?;
 
+    let keypair = if let Some(keypair_path) = &sub.keypair_path {
+        let contents = std::fs::read_to_string(keypair_path)?;
+        KeypairConfig::Keypair(NixKeypair::from_str(&contents)?)
+    } else {
+        KeypairConfig::Generate
+    };
+
     let request = CreateCacheRequest {
-        // TODO: Make this configurable?
-        keypair: KeypairConfig::Generate,
+        keypair,
         is_public: sub.public,
         priority: sub.priority,
         store_dir: sub.store_dir,
@@ -230,6 +253,10 @@ async fn configure_cache(sub: Configure) -> Result<()> {
 
     if sub.regenerate_keypair {
         patch.keypair = Some(KeypairConfig::Generate);
+    } else if let Some(keypair_path) = &sub.keypair_path {
+        let contents = std::fs::read_to_string(keypair_path)?;
+        let keypair = KeypairConfig::Keypair(NixKeypair::from_str(&contents)?);
+        patch.keypair = Some(keypair);
     }
 
     patch.store_dir = sub.store_dir;
diff --git a/client/src/command/key.rs b/client/src/command/key.rs
new file mode 100644
index 0000000..807d8a7
--- /dev/null
+++ b/client/src/command/key.rs
@@ -0,0 +1,42 @@
+use anyhow::Result;
+use clap::{Parser, Subcommand};
+
+use crate::cli::Opts;
+use attic::signing::NixKeypair;
+
+/// Manage signing keys.
+#[derive(Debug, Parser)]
+pub struct Key {
+    #[clap(subcommand)]
+    command: KeyCommand,
+}
+
+#[derive(Debug, Subcommand)]
+enum KeyCommand {
+    Generate(Generate),
+}
+
+/// Generate a key.
+#[derive(Debug, Clone, Parser)]
+pub struct Generate {
+    /// Name of the key (must not contain colons).
+    name: String,
+}
+
+pub async fn run(opts: Opts) -> Result<()> {
+    let sub = opts.command.as_key().unwrap();
+    match &sub.command {
+        KeyCommand::Generate(sub) => generate_key(sub).await,
+    }
+}
+
+async fn generate_key(sub: &Generate) -> Result<()> {
+    let keypair = NixKeypair::generate(&sub.name)?;
+
+    println!("🔑 Generated keypair \"{}\"", sub.name);
+    println!();
+    println!(" Private key: {}", keypair.export_keypair());
+    println!(" Public key: {}", keypair.export_public_key());
+
+    Ok(())
+}
diff --git a/client/src/command/mod.rs b/client/src/command/mod.rs
index cca423f..26b105a 100644
--- a/client/src/command/mod.rs
+++ b/client/src/command/mod.rs
@@ -1,5 +1,6 @@
 pub mod cache;
 pub mod get_closure;
+pub mod key;
 pub mod login;
 pub mod push;
 pub mod r#use;
|
@@ -1,5 +1,11 @@
|
||||
final: prev:
|
||||
# FIXME: https://github.com/zhaofengli/attic/pull/280
|
||||
prev.attic-client.overrideAttrs (oldAttrs: {
|
||||
patches = oldAttrs.patches or [ ] ++ [ ./stdout-logging.patch ];
|
||||
patches = oldAttrs.patches or [ ] ++ [
|
||||
# fix: log non-errors to stdout
|
||||
(builtins.fetchurl {
|
||||
url = "https://github.com/zhaofengli/attic/pull/280.patch";
|
||||
sha256 = "sha256:0j6ay6d9is7053sq5njakjmlpwk24db296rma694jggpl19ibxjv";
|
||||
})
|
||||
./declarative-key-pair.patch
|
||||
];
|
||||
})
|
||||
|
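
For context, the end goal is that a cache can be brought up declaratively,
e.g. from a NixOS configuration that ships the keypair as a secret and runs
the patched client once at boot. A minimal sketch (hypothetical service,
cache name, and secret path; assumes the client is already logged in to the
target server):

    { pkgs, ... }:
    {
      systemd.services.attic-cache-setup = {
        wantedBy = [ "multi-user.target" ];
        serviceConfig.Type = "oneshot";
        # Create the cache from a pre-provisioned keypair file, e.g. the
        # "Private key" line printed by "attic key generate".
        script = ''
          ${pkgs.attic-client}/bin/attic cache create main \
            --keypair-path /run/secrets/attic-keypair
        '';
      };
    }
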
@@ -1,321 +0,0 @@
diff --git a/client/src/command/cache.rs b/client/src/command/cache.rs
index af01378..0602b3b 100644
--- a/client/src/command/cache.rs
+++ b/client/src/command/cache.rs
@@ -189,7 +189,7 @@ async fn create_cache(sub: Create) -> Result<()> {
     };
 
     api.create_cache(cache, request).await?;
-    eprintln!(
+    println!(
         "✨ Created cache \"{}\" on \"{}\"",
         cache.as_str(),
         server_name.as_str()
@@ -239,7 +239,7 @@ async fn configure_cache(sub: Configure) -> Result<()> {
     let api = ApiClient::from_server_config(server.clone())?;
     api.configure_cache(cache, &patch).await?;
 
-    eprintln!(
+    println!(
         "✅ Configured \"{}\" on \"{}\"",
         cache.as_str(),
         server_name.as_str()
@@ -254,12 +254,12 @@ async fn destroy_cache(sub: Destroy) -> Result<()> {
     let (server_name, server, cache) = config.resolve_cache(&sub.cache)?;
 
     if !sub.no_confirm {
-        eprintln!("When you destory a cache:");
-        eprintln!();
-        eprintln!("1. Everyone will lose access.");
-        eprintln!("2. The underlying data won't be deleted immediately.");
-        eprintln!("3. You may not be able to create a cache of the same name.");
-        eprintln!();
+        println!("When you destory a cache:");
+        println!();
+        println!("1. Everyone will lose access.");
+        println!("2. The underlying data won't be deleted immediately.");
+        println!("3. You may not be able to create a cache of the same name.");
+        println!();
 
         let answer: String = Input::new()
             .with_prompt(format!(
@@ -278,7 +278,7 @@ async fn destroy_cache(sub: Destroy) -> Result<()> {
     let api = ApiClient::from_server_config(server.clone())?;
     api.destroy_cache(cache).await?;
 
-    eprintln!("🗑️ The cache was destroyed.");
+    println!("🗑️ The cache was destroyed.");
 
     Ok(())
 }
@@ -291,40 +291,40 @@ async fn show_cache_config(sub: Info) -> Result<()> {
     let cache_config = api.get_cache_config(cache).await?;
 
     if let Some(is_public) = cache_config.is_public {
-        eprintln!(" Public: {}", is_public);
+        println!(" Public: {}", is_public);
     }
 
     if let Some(public_key) = cache_config.public_key {
-        eprintln!(" Public Key: {}", public_key);
+        println!(" Public Key: {}", public_key);
     }
 
     if let Some(substituter_endpoint) = cache_config.substituter_endpoint {
-        eprintln!("Binary Cache Endpoint: {}", substituter_endpoint);
+        println!("Binary Cache Endpoint: {}", substituter_endpoint);
     }
 
     if let Some(api_endpoint) = cache_config.api_endpoint {
-        eprintln!(" API Endpoint: {}", api_endpoint);
+        println!(" API Endpoint: {}", api_endpoint);
     }
 
     if let Some(store_dir) = cache_config.store_dir {
-        eprintln!(" Store Directory: {}", store_dir);
+        println!(" Store Directory: {}", store_dir);
     }
 
     if let Some(priority) = cache_config.priority {
-        eprintln!(" Priority: {}", priority);
+        println!(" Priority: {}", priority);
     }
 
     if let Some(upstream_cache_key_names) = cache_config.upstream_cache_key_names {
-        eprintln!(" Upstream Cache Keys: {:?}", upstream_cache_key_names);
+        println!(" Upstream Cache Keys: {:?}", upstream_cache_key_names);
     }
 
     if let Some(retention_period) = cache_config.retention_period {
         match retention_period {
             RetentionPeriodConfig::Period(period) => {
-                eprintln!(" Retention Period: {:?}", period);
+                println!(" Retention Period: {:?}", period);
             }
             RetentionPeriodConfig::Global => {
-                eprintln!(" Retention Period: Global Default");
+                println!(" Retention Period: Global Default");
             }
         }
     }
diff --git a/client/src/command/login.rs b/client/src/command/login.rs
index 9abcea7..6cadd59 100644
--- a/client/src/command/login.rs
+++ b/client/src/command/login.rs
@@ -28,7 +28,7 @@ pub async fn run(opts: Opts) -> Result<()> {
     let mut config_m = config.as_mut();
 
     if let Some(server) = config_m.servers.get_mut(&sub.name) {
-        eprintln!("✍️ Overwriting server \"{}\"", sub.name.as_str());
+        println!("✍️ Overwriting server \"{}\"", sub.name.as_str());
 
         server.endpoint = sub.endpoint.to_owned();
 
@@ -38,7 +38,7 @@ pub async fn run(opts: Opts) -> Result<()> {
             });
         }
     } else {
-        eprintln!("✍️ Configuring server \"{}\"", sub.name.as_str());
+        println!("✍️ Configuring server \"{}\"", sub.name.as_str());
 
         config_m.servers.insert(
             sub.name.to_owned(),
diff --git a/client/src/command/push.rs b/client/src/command/push.rs
index b2bb661..5d39549 100644
--- a/client/src/command/push.rs
+++ b/client/src/command/push.rs
@@ -91,7 +91,7 @@ impl PushContext {
 
             return Ok(());
         } else {
-            eprintln!("⚙️ Pushing {num_missing_paths} paths to \"{cache}\" on \"{server}\" ({num_already_cached} already cached, {num_upstream} in upstream)...",
+            println!("⚙️ Pushing {num_missing_paths} paths to \"{cache}\" on \"{server}\" ({num_already_cached} already cached, {num_upstream} in upstream)...",
                 cache = self.cache_name.as_str(),
                 server = self.server_name.as_str(),
                 num_missing_paths = plan.store_path_map.len(),
diff --git a/client/src/command/use.rs b/client/src/command/use.rs
index 37d8cd6..d87f65e 100644
--- a/client/src/command/use.rs
+++ b/client/src/command/use.rs
@@ -34,15 +34,15 @@ pub async fn run(opts: Opts) -> Result<()> {
     let public_key = cache_config.public_key
         .ok_or_else(|| anyhow!("The server did not tell us which public key it uses. Is signing managed by the client?"))?;
 
-    eprintln!(
+    println!(
         "Configuring Nix to use \"{cache}\" on \"{server_name}\":",
         cache = cache.as_str(),
         server_name = server_name.as_str(),
     );
 
     // Modify nix.conf
-    eprintln!("+ Substituter: {}", substituter);
-    eprintln!("+ Trusted Public Key: {}", public_key);
+    println!("+ Substituter: {}", substituter);
+    println!("+ Trusted Public Key: {}", public_key);
 
     let mut nix_config = NixConfig::load().await?;
     nix_config.add_substituter(&substituter);
@@ -50,7 +50,7 @@ pub async fn run(opts: Opts) -> Result<()> {
 
     // Modify netrc
     if let Some(token) = server.token()? {
-        eprintln!("+ Access Token");
+        println!("+ Access Token");
 
         let mut nix_netrc = NixNetrc::load().await?;
         let host = Url::parse(&substituter)?
diff --git a/client/src/command/watch_store.rs b/client/src/command/watch_store.rs
index 24eaf7a..aec0c33 100644
--- a/client/src/command/watch_store.rs
+++ b/client/src/command/watch_store.rs
@@ -91,7 +91,7 @@ pub async fn run(opts: Opts) -> Result<()> {
 
     watcher.watch(&store_dir, RecursiveMode::NonRecursive)?;
 
-    eprintln!(
+    println!(
         "👀 Pushing new store paths to \"{cache}\" on \"{server}\"",
         cache = cache.as_str(),
         server = server_name.as_str(),
diff --git a/client/src/push.rs b/client/src/push.rs
index 309bd4b..2fea414 100644
--- a/client/src/push.rs
+++ b/client/src/push.rs
@@ -595,7 +595,7 @@ pub async fn upload_path(
     };
 
     mp.suspend(|| {
-        eprintln!(
+        println!(
            "✅ {} ({})",
            path.as_os_str().to_string_lossy(),
            info_string
diff --git a/server/src/database/migration/m20230112_000004_migrate_nar_remote_files_to_chunks.rs b/server/src/database/migration/m20230112_000004_migrate_nar_remote_files_to_chunks.rs
index 42d70a6..6bbe585 100644
--- a/server/src/database/migration/m20230112_000004_migrate_nar_remote_files_to_chunks.rs
+++ b/server/src/database/migration/m20230112_000004_migrate_nar_remote_files_to_chunks.rs
@@ -24,7 +24,7 @@ impl MigrationTrait for Migration {
         // When this migration is run, we assume that there are no
         // preexisting chunks.
 
-        eprintln!("* Migrating NARs to chunks...");
+        println!("* Migrating NARs to chunks...");
 
         // Add a temporary column into `chunk` to store the related `nar_id`.
         manager
diff --git a/server/src/database/migration/m20230112_000005_drop_old_nar_columns.rs b/server/src/database/migration/m20230112_000005_drop_old_nar_columns.rs
index 9d29b66..7436b4a 100644
--- a/server/src/database/migration/m20230112_000005_drop_old_nar_columns.rs
+++ b/server/src/database/migration/m20230112_000005_drop_old_nar_columns.rs
@@ -16,7 +16,7 @@ impl MigrationName for Migration {
 #[async_trait::async_trait]
 impl MigrationTrait for Migration {
     async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
-        eprintln!("* Migrating NAR schema...");
+        println!("* Migrating NAR schema...");
 
         if manager.get_database_backend() == DatabaseBackend::Sqlite {
             // Just copy all data to a new table
diff --git a/server/src/lib.rs b/server/src/lib.rs
index 0314e69..89644e1 100644
--- a/server/src/lib.rs
+++ b/server/src/lib.rs
@@ -217,7 +217,7 @@ async fn fallback(_: Uri) -> ServerResult<()> {
 
 /// Runs the API server.
 pub async fn run_api_server(cli_listen: Option<SocketAddr>, config: Config) -> Result<()> {
-    eprintln!("Starting API server...");
+    println!("Starting API server...");
 
     let state = StateInner::new(config).await;
 
@@ -239,7 +239,7 @@ pub async fn run_api_server(cli_listen: Option<SocketAddr>, config: Config) -> R
         .layer(TraceLayer::new_for_http())
         .layer(CatchPanicLayer::new());
 
-    eprintln!("Listening on {:?}...", listen);
+    println!("Listening on {:?}...", listen);
 
     let listener = TcpListener::bind(&listen).await?;
 
@@ -256,7 +256,7 @@ pub async fn run_api_server(cli_listen: Option<SocketAddr>, config: Config) -> R
 
 /// Runs database migrations.
 pub async fn run_migrations(config: Config) -> Result<()> {
-    eprintln!("Running migrations...");
+    println!("Running migrations...");
 
     let state = StateInner::new(config).await;
     let db = state.database().await?;
diff --git a/server/src/main.rs b/server/src/main.rs
index c5f08df..3a37c23 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -121,14 +121,14 @@ fn init_logging(tokio_console: bool) {
         .init();
 
     if tokio_console {
-        eprintln!("Note: tokio-console is enabled");
+        println!("Note: tokio-console is enabled");
     }
 }
 
 fn dump_version() {
     #[cfg(debug_assertions)]
-    eprintln!("Attic Server {} (debug)", env!("CARGO_PKG_VERSION"));
+    println!("Attic Server {} (debug)", env!("CARGO_PKG_VERSION"));
 
     #[cfg(not(debug_assertions))]
-    eprintln!("Attic Server {} (release)", env!("CARGO_PKG_VERSION"));
+    println!("Attic Server {} (release)", env!("CARGO_PKG_VERSION"));
 }
diff --git a/server/src/oobe.rs b/server/src/oobe.rs
index d3d912d..98ef88c 100644
--- a/server/src/oobe.rs
+++ b/server/src/oobe.rs
@@ -77,25 +77,25 @@ pub async fn run_oobe() -> Result<()> {
         token.encode(&SignatureType::RS256(key), &None, &None)?
     };
 
-    eprintln!();
-    eprintln!("-----------------");
-    eprintln!("Welcome to Attic!");
-    eprintln!();
-    eprintln!("A simple setup using SQLite and local storage has been configured for you in:");
-    eprintln!();
-    eprintln!(" {}", config_path.to_str().unwrap());
-    eprintln!();
-    eprintln!("Run the following command to log into this server:");
-    eprintln!();
-    eprintln!(" attic login local http://localhost:8080 {root_token}");
-    eprintln!();
-    eprintln!("Documentations and guides:");
-    eprintln!();
-    eprintln!(" https://docs.attic.rs");
-    eprintln!();
-    eprintln!("Enjoy!");
-    eprintln!("-----------------");
-    eprintln!();
+    println!();
+    println!("-----------------");
+    println!("Welcome to Attic!");
+    println!();
+    println!("A simple setup using SQLite and local storage has been configured for you in:");
+    println!();
+    println!(" {}", config_path.to_str().unwrap());
+    println!();
+    println!("Run the following command to log into this server:");
+    println!();
+    println!(" attic login local http://localhost:8080 {root_token}");
+    println!();
+    println!("Documentations and guides:");
+    println!();
+    println!(" https://docs.attic.rs");
+    println!();
+    println!("Enjoy!");
+    println!("-----------------");
+    println!();
 
     Ok(())
 }
@@ -1,10 +1,15 @@
 final: prev:
 prev.tea.overrideAttrs (oldAttrs: {
   patches = oldAttrs.patches or [ ] ++ [
+    # feat: add user auth via env
     (builtins.fetchurl {
       url = "https://gitea.com/gitea/tea/pulls/639.patch";
       sha256 = "sha256:0c5gpi6aajd3h0wp7lrvj5qk9wsqhgbap7ijvl0x117v0g8mgzvs";
     })
-    ./instance-ssh-host-env.patch
+    # fix: evaluate env login in repo context
+    (builtins.fetchurl {
+      url = "https://gitea.com/gitea/tea/pulls/809.patch";
+      sha256 = "sha256:1f9cyizwmza6kg0r3q5d8h7vvph4wnh7kh3wvi5aqnbw100j7igg";
+    })
   ];
 })
@@ -1,174 +0,0 @@
diff --git a/modules/config/login.go b/modules/config/login.go
index 3b77fb9..94de9cd 100644
--- a/modules/config/login.go
+++ b/modules/config/login.go
@@ -13,6 +13,7 @@ import (
 	"net/http/cookiejar"
 	"net/url"
 	"os"
+	"strconv"
 	"strings"
 	"time"
 
@@ -200,6 +201,63 @@ func UpdateLogin(login *Login) error {
 	return saveConfig()
 }
 
+// CreateLoginFromEnvVars returns a login based on environment variables, or nil if no login can be created
+func CreateLoginFromEnvVars() (*Login, error) {
+	var token string
+
+	giteaToken := os.Getenv("GITEA_TOKEN")
+	githubToken := os.Getenv("GH_TOKEN")
+	giteaInstanceURL := os.Getenv("GITEA_INSTANCE_URL")
+	instanceInsecure := os.Getenv("GITEA_INSTANCE_INSECURE")
+	giteaInstanceSSHHost := os.Getenv("GITEA_INSTANCE_SSH_HOST")
+	insecure := false
+	if len(instanceInsecure) > 0 {
+		insecure, _ = strconv.ParseBool(instanceInsecure)
+	}
+
+	// if no tokens are set, or no instance url for gitea fail fast
+	if len(giteaInstanceURL) == 0 || (len(giteaToken) == 0 && len(githubToken) == 0) {
+		return nil, nil
+	}
+
+	token = giteaToken
+	if len(giteaToken) == 0 {
+		token = githubToken
+	}
+
+	login := &Login{
+		Name: "GITEA_LOGIN_VIA_ENV",
+		URL: giteaInstanceURL,
+		Token: token,
+		SSHHost: giteaInstanceSSHHost,
+		Insecure: insecure,
+		SSHKey: "",
+		SSHCertPrincipal: "",
+		SSHKeyFingerprint: "",
+		SSHAgent: false,
+		VersionCheck: true,
+		Created: time.Now().Unix(),
+	}
+
+	client := login.Client()
+	u, _, err := client.GetMyUserInfo()
+	if err != nil {
+		return nil, fmt.Errorf("failed to validate token: %s", err)
+	}
+
+	login.User = u.UserName
+
+	if login.SSHHost == "" {
+		parsedURL, err := url.Parse(giteaInstanceURL)
+		if err != nil {
+			return nil, err
+		}
+		login.SSHHost = parsedURL.Host
+	}
+
+	return login, nil
+}
+
 // Client returns a client to operate Gitea API. You may provide additional modifiers
 // for the client like gitea.SetBasicAuth() for customization
 func (l *Login) Client(options ...gitea.ClientOption) *gitea.Client {
diff --git a/modules/context/context.go b/modules/context/context.go
index aec5592..636eeec 100644
--- a/modules/context/context.go
+++ b/modules/context/context.go
@@ -9,9 +9,7 @@ import (
 	"log"
 	"os"
 	"path"
-	"strconv"
 	"strings"
-	"time"
 
 	"code.gitea.io/tea/modules/config"
 	"code.gitea.io/tea/modules/git"
@@ -108,16 +106,6 @@ func InitCommand(cmd *cli.Command) *TeaContext {
 		c.RepoSlug = repoFlag
 	}
 
-	// override config user with env variable
-	envLogin := GetLoginByEnvVar()
-	if envLogin != nil {
-		_, err := utils.ValidateAuthenticationMethod(envLogin.URL, envLogin.Token, "", "", false, "", "")
-		if err != nil {
-			log.Fatal(err.Error())
-		}
-		c.Login = envLogin
-	}
-
 	// override login from flag, or use default login if repo based detection failed
 	if len(loginFlag) != 0 {
 		c.Login = config.GetLoginByName(loginFlag)
@@ -196,10 +184,25 @@ func contextFromLocalRepo(repoPath, remoteValue string) (*git.TeaRepo, *config.L
 		return repo, nil, "", fmt.Errorf("Remote '%s' not found in this Git repository", remoteValue)
 	}
 
+	envLogin, err := config.CreateLoginFromEnvVars()
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
 	logins, err := config.GetLogins()
 	if err != nil {
 		return repo, nil, "", err
 	}
+
+	if envLogin != nil {
+		_, err := utils.ValidateAuthenticationMethod(envLogin.URL, envLogin.Token, "", "", false, "", "")
+		if err != nil {
+			log.Fatal(err.Error())
+		}
+
+		logins = append([]config.Login{*envLogin}, logins...)
+	}
+
 	for _, l := range logins {
 		sshHost := l.GetSSHHost()
 		for _, u := range remoteConfig.URLs {
@@ -223,40 +226,3 @@ func contextFromLocalRepo(repoPath, remoteValue string) (*git.TeaRepo, *config.L
 
 	return repo, nil, "", errNotAGiteaRepo
 }
-
-// GetLoginByEnvVar returns a login based on environment variables, or nil if no login can be created
-func GetLoginByEnvVar() *config.Login {
-	var token string
-
-	giteaToken := os.Getenv("GITEA_TOKEN")
-	githubToken := os.Getenv("GH_TOKEN")
-	giteaInstanceURL := os.Getenv("GITEA_INSTANCE_URL")
-	instanceInsecure := os.Getenv("GITEA_INSTANCE_INSECURE")
-	insecure := false
-	if len(instanceInsecure) > 0 {
-		insecure, _ = strconv.ParseBool(instanceInsecure)
-	}
-
-	// if no tokens are set, or no instance url for gitea fail fast
-	if len(giteaInstanceURL) == 0 || (len(giteaToken) == 0 && len(githubToken) == 0) {
-		return nil
-	}
-
-	token = giteaToken
-	if len(giteaToken) == 0 {
-		token = githubToken
-	}
-
-	return &config.Login{
-		Name: "GITEA_LOGIN_VIA_ENV",
-		URL: giteaInstanceURL,
-		Token: token,
-		Insecure: insecure,
-		SSHKey: "",
-		SSHCertPrincipal: "",
-		SSHKeyFingerprint: "",
-		SSHAgent: false,
-		Created: time.Now().Unix(),
-		VersionCheck: false,
-	}
-}