Compare commits

..

10 Commits

Author SHA1 Message Date
73511c258b deb: fix injection of .deb packages with apt-get
All checks were successful
CI / build (push) Successful in 9m5s
CI / snap (push) Successful in 3m56s
2026-04-04 12:41:52 +02:00
d83174c980 deb: allow injecting .deb files as packages
Some checks failed
CI / build (push) Failing after 1m19s
CI / snap (push) Has been skipped
2026-04-04 12:35:14 +02:00
2b27b7b06e deb: add quirks to allow packages to declare specific package directories
All checks were successful
CI / build (push) Successful in 8m41s
CI / snap (push) Successful in 3m55s
2026-03-25 17:40:55 +01:00
32e15b1106 deb: ignore linux-riscv test
Some checks failed
CI / build (push) Successful in 8m16s
CI / snap (push) Failing after 7s
2026-03-19 11:27:02 +01:00
7640952bdc unshare: mount proc differently depending on root privileges 2026-03-19 11:26:10 +01:00
2b6207981a deb: make tests parallel
Some checks failed
CI / build (push) Failing after 12m10s
CI / snap (push) Has been skipped
2026-03-19 00:24:48 +01:00
daaf33cd6b deb: fix race condition for test
Some checks failed
CI / build (push) Failing after 18m26s
CI / snap (push) Has been skipped
Fix race condition around current context,
related to find_package_directory.
2026-03-18 23:34:33 +01:00
4a73e6e1d6 deb: fix race condition for tests
Some checks failed
CI / build (push) Failing after 20m7s
CI / snap (push) Has been skipped
2026-03-18 17:35:38 +01:00
d06e091121 apt/keyring: download 3 keyrings for sid
Some checks failed
CI / build (push) Failing after 21m24s
CI / snap (push) Has been skipped
2026-03-18 15:23:57 +01:00
5ec675c20b pull: fix edge cases
Some checks failed
CI / build (push) Failing after 13m53s
CI / snap (push) Has been skipped
- Ubuntu does not have 'Launchpad/Code' repo edge case
- Vcs-Git field has a git command, not only a URL edge case - Vcs-Git field has a git command, not only a URL edge case
2026-03-17 17:22:25 +01:00
14 changed files with 605 additions and 179 deletions

View File

@@ -12,3 +12,8 @@ quirks:
# - another-dependency # - another-dependency
# parameters: # parameters:
# key: value # key: value
linux-riscv:
deb:
package_directory:
- linux-main

View File

@@ -16,49 +16,70 @@ struct LaunchpadPpaResponse {
signing_key_fingerprint: String, signing_key_fingerprint: String,
} }
/// Download a keyring to the application cache directory and return the path /// Download keyrings to a shared keyring directory and return the directory path
/// ///
/// This function downloads the keyring to a user-writable cache directory /// This function downloads keyrings to a user-writable cache directory
/// instead of the system apt keyring directory, allowing non-root usage. /// instead of the system apt keyring directory, allowing non-root usage.
/// The returned path can be passed to mmdebstrap via --keyring. /// The returned directory path can be passed to mmdebstrap via --keyring=.
/// ///
/// For Debian keyrings (which are ASCII-armored .asc files), the key is /// For Debian keyrings (which are ASCII-armored .asc files), the keys are
/// converted to binary GPG format using gpg --dearmor. /// converted to binary GPG format using gpg --dearmor.
/// ///
/// For 'sid' and 'experimental', this downloads keyrings from the 3 latest
/// releases since sid needs keys from all recent releases.
///
/// # Arguments /// # Arguments
/// * `ctx` - Optional context to use /// * `ctx` - Optional context to use
/// * `series` - The distribution series (e.g., "noble", "sid") /// * `series` - The distribution series (e.g., "noble", "sid")
/// ///
/// # Returns /// # Returns
/// The path to the downloaded keyring file (in binary GPG format) /// The path to the keyring directory containing all downloaded keyring files
pub async fn download_cache_keyring( pub async fn download_cache_keyrings(
ctx: Option<Arc<context::Context>>, ctx: Option<Arc<context::Context>>,
series: &str, series: &str,
) -> Result<PathBuf, Box<dyn Error>> { ) -> Result<PathBuf, Box<dyn Error>> {
let ctx = ctx.unwrap_or_else(context::current); let ctx = ctx.unwrap_or_else(context::current);
// Obtain keyring URL from distro_info // Obtain keyring URLs from distro_info
let keyring_url = distro_info::get_keyring_url(series).await?; let keyring_urls = distro_info::get_keyring_urls(series).await?;
log::debug!("Downloading keyring from: {}", keyring_url); log::debug!("Downloading keyrings from: {:?}", keyring_urls);
// Get the application cache directory // Use system temp directory for keyrings since it's accessible from unshare mode
let proj_dirs = directories::ProjectDirs::from("com", "pkh", "pkh") // The home directory may not be accessible from mmdebstrap's unshare namespace
.ok_or("Could not determine project directories")?; let temp_dir = std::env::temp_dir();
let cache_dir = proj_dirs.cache_dir(); let keyring_dir = temp_dir.join("pkh-keyrings");
// Create cache directory if it doesn't exist // Create keyring directory if it doesn't exist
if !ctx.exists(cache_dir)? { if !ctx.exists(&keyring_dir)? {
ctx.command("mkdir").arg("-p").arg(cache_dir).status()?; ctx.command("mkdir").arg("-p").arg(&keyring_dir).status()?;
} }
// Make keyring directory world-accessible so mmdebstrap in unshare mode can access it
ctx.command("chmod")
.arg("a+rwx")
.arg(&keyring_dir)
.status()?;
for keyring_url in keyring_urls {
// Extract the original filename from the keyring URL // Extract the original filename from the keyring URL
let filename = keyring_url let filename = keyring_url
.split('/') .split('/')
.next_back() .next_back()
.unwrap_or("pkh-{}.gpg") .unwrap_or("pkh-{}.gpg")
.replace("{}", series); .replace("{}", series);
let download_path = cache_dir.join(&filename); let download_path = keyring_dir.join(&filename);
// Determine the binary keyring path
let binary_path = if filename.ends_with(".asc") {
// ASCII-armored key: convert to .gpg
let binary_filename = filename.strip_suffix(".asc").unwrap_or(&filename);
keyring_dir.join(format!("{}.gpg", binary_filename))
} else {
download_path.clone()
};
// Skip download if the binary keyring already exists
if !ctx.exists(&binary_path)? {
// Download the keyring using curl // Download the keyring using curl
let mut curl_cmd = ctx.command("curl"); let mut curl_cmd = ctx.command("curl");
curl_cmd curl_cmd
@@ -75,11 +96,7 @@ pub async fn download_cache_keyring(
} }
// If the downloaded file is an ASCII-armored key (.asc), convert it to binary GPG format // If the downloaded file is an ASCII-armored key (.asc), convert it to binary GPG format
// mmdebstrap's --keyring option expects binary GPG keyrings if filename.ends_with(".asc") {
let keyring_path = if filename.ends_with(".asc") {
let binary_filename = filename.strip_suffix(".asc").unwrap_or(&filename);
let binary_path = cache_dir.join(format!("{}.gpg", binary_filename));
log::debug!("Converting ASCII-armored key to binary GPG format"); log::debug!("Converting ASCII-armored key to binary GPG format");
let mut gpg_cmd = ctx.command("gpg"); let mut gpg_cmd = ctx.command("gpg");
gpg_cmd gpg_cmd
@@ -97,18 +114,33 @@ pub async fn download_cache_keyring(
// Remove the original .asc file // Remove the original .asc file
let _ = ctx.command("rm").arg("-f").arg(&download_path).status(); let _ = ctx.command("rm").arg("-f").arg(&download_path).status();
}
binary_path // Make the keyring file world-readable so mmdebstrap in unshare mode can access it
} else { ctx.command("chmod").arg("a+r").arg(&binary_path).status()?;
download_path
};
log::info!( log::info!(
"Successfully downloaded keyring for {} to {}", "Successfully downloaded keyring for {} to {}",
series, series,
keyring_path.display() binary_path.display()
); );
Ok(keyring_path) } else {
log::debug!(
"Keyring already exists at {}, skipping download",
binary_path.display()
);
// Ensure existing keyring is world-readable
ctx.command("chmod").arg("a+r").arg(&binary_path).status()?;
}
}
log::info!(
"Keyrings for {} available in {}",
series,
keyring_dir.display()
);
Ok(keyring_dir)
} }
/// Download and import a PPA key using Launchpad API /// Download and import a PPA key using Launchpad API

View File

@@ -207,10 +207,21 @@ impl UnshareDriver {
cmd.arg("-w").arg(dir); cmd.arg("-w").arg(dir);
} }
cmd.arg("--").arg("bash").arg("-c").arg(format!( // Build the bash command: set up /dev/pts and run the program
"mount -t proc proc /proc; mkdir /dev/pts; mount -t devpts devpts /dev/pts; touch /dev/ptmx; mount --bind /dev/pts/ptmx /dev/ptmx; {} {}", // /proc should already be bind-mounted from the host before entering the namespace
let program_args = args
.iter()
.map(|a| format!("\"{a}\""))
.collect::<Vec<_>>()
.join(" ");
cmd.arg("--")
.arg("bash")
.arg("-c")
.arg(format!(
"mkdir -p /dev/pts; mount -t devpts devpts /dev/pts 2>/dev/null || true; touch /dev/ptmx; mount --bind /dev/pts/ptmx /dev/ptmx 2>/dev/null || true; {} {}",
program, program,
args.iter().map(|a| format!("\"{a}\"")).collect::<Vec<_>>().join(" ") program_args
)); ));
cmd cmd

View File

@@ -1,15 +1,16 @@
use crate::context; use crate::context::Context;
use std::collections::HashMap; use std::collections::HashMap;
use std::error::Error; use std::error::Error;
use std::sync::Arc;
/// Set environment variables for cross-compilation /// Set environment variables for cross-compilation
pub fn setup_environment( pub fn setup_environment(
env: &mut HashMap<String, String>, env: &mut HashMap<String, String>,
arch: &str, arch: &str,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
let dpkg_architecture = String::from_utf8( let dpkg_architecture = String::from_utf8(
context::current() ctx.command("dpkg-architecture")
.command("dpkg-architecture")
.arg("-a") .arg("-a")
.arg(arch) .arg(arch)
.output()? .output()?
@@ -34,8 +35,11 @@ pub fn setup_environment(
/// Ensure that repositories for target architecture are available /// Ensure that repositories for target architecture are available
/// This also handles the 'ports.ubuntu.com' vs 'archive.ubuntu.com' on Ubuntu /// This also handles the 'ports.ubuntu.com' vs 'archive.ubuntu.com' on Ubuntu
pub fn ensure_repositories(arch: &str, series: &str) -> Result<(), Box<dyn Error>> { pub fn ensure_repositories(
let ctx = context::current(); arch: &str,
series: &str,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
let local_arch = crate::get_current_arch(); let local_arch = crate::get_current_arch();
// Add target ('host') architecture // Add target ('host') architecture

View File

@@ -1,9 +1,9 @@
use crate::context; use crate::context::{self, Context, ContextConfig};
use crate::context::{Context, ContextConfig};
use directories::ProjectDirs; use directories::ProjectDirs;
use std::error::Error; use std::error::Error;
use std::fs; use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
use tar::Archive; use tar::Archive;
use xz2::read::XzDecoder; use xz2::read::XzDecoder;
@@ -13,20 +13,26 @@ pub struct EphemeralContextGuard {
previous_context: String, previous_context: String,
chroot_path: PathBuf, chroot_path: PathBuf,
build_succeeded: bool, build_succeeded: bool,
base_ctx: Arc<Context>,
} }
impl EphemeralContextGuard { impl EphemeralContextGuard {
/// Create a new ephemeral unshare context for the specified series /// Create a new ephemeral unshare context with an explicit base context
/// ///
/// # Arguments /// # Arguments
/// * `series` - The distribution series (e.g., "noble", "sid") /// * `series` - The distribution series (e.g., "noble", "sid")
/// * `arch` - Optional target architecture. If provided and different from host, /// * `arch` - Optional target architecture. If provided and different from host,
/// downloads a chroot for that architecture (uses qemu_binfmt transparently) /// downloads a chroot for that architecture (uses qemu_binfmt transparently)
pub async fn new(series: &str, arch: Option<&str>) -> Result<Self, Box<dyn Error>> { /// * `base_ctx` - The base context to use for creating the chroot
pub async fn new_with_context(
series: &str,
arch: Option<&str>,
base_ctx: Arc<Context>,
) -> Result<Self, Box<dyn Error>> {
let current_context_name = context::manager().current_name(); let current_context_name = context::manager().current_name();
// Create a temporary directory for the chroot // Create a temporary directory for the chroot
let chroot_path_str = context::current().create_temp_dir()?; let chroot_path_str = base_ctx.create_temp_dir()?;
let chroot_path = PathBuf::from(chroot_path_str); let chroot_path = PathBuf::from(chroot_path_str);
log::debug!( log::debug!(
@@ -37,7 +43,7 @@ impl EphemeralContextGuard {
); );
// Download and extract the chroot tarball // Download and extract the chroot tarball
Self::download_and_extract_chroot(series, arch, &chroot_path).await?; Self::download_and_extract_chroot(series, arch, &chroot_path, base_ctx.clone()).await?;
// Switch to an ephemeral context to build the package in the chroot // Switch to an ephemeral context to build the package in the chroot
context::manager().set_current_ephemeral(Context::new(ContextConfig::Unshare { context::manager().set_current_ephemeral(Context::new(ContextConfig::Unshare {
@@ -49,6 +55,7 @@ impl EphemeralContextGuard {
previous_context: current_context_name, previous_context: current_context_name,
chroot_path, chroot_path,
build_succeeded: false, build_succeeded: false,
base_ctx,
}) })
} }
@@ -56,7 +63,10 @@ impl EphemeralContextGuard {
series: &str, series: &str,
arch: Option<&str>, arch: Option<&str>,
chroot_path: &PathBuf, chroot_path: &PathBuf,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
// Clone ctx for use in create_device_nodes after download_chroot_tarball consumes it
let ctx_for_devices = ctx.clone();
// Get project directories for caching // Get project directories for caching
let proj_dirs = ProjectDirs::from("com", "pkh", "pkh") let proj_dirs = ProjectDirs::from("com", "pkh", "pkh")
.ok_or("Could not determine project directories")?; .ok_or("Could not determine project directories")?;
@@ -74,7 +84,6 @@ impl EphemeralContextGuard {
// Check for existing lockfile, and wait for a timeout if it exists // Check for existing lockfile, and wait for a timeout if it exists
// After timeout, warn the user // After timeout, warn the user
let lockfile_path = tarball_path.with_extension("lock"); let lockfile_path = tarball_path.with_extension("lock");
let ctx = context::current();
// Check if lockfile exists and wait for it to be removed // Check if lockfile exists and wait for it to be removed
let mut wait_time = 0; let mut wait_time = 0;
@@ -110,7 +119,7 @@ impl EphemeralContextGuard {
series, series,
arch arch
); );
Self::download_chroot_tarball(series, arch, &tarball_path).await?; Self::download_chroot_tarball(series, arch, &tarball_path, ctx).await?;
} else { } else {
log::debug!( log::debug!(
"Using cached chroot tarball for {} (arch: {:?})", "Using cached chroot tarball for {} (arch: {:?})",
@@ -125,7 +134,12 @@ impl EphemeralContextGuard {
// Create device nodes in the chroot // Create device nodes in the chroot
log::debug!("Creating device nodes in chroot..."); log::debug!("Creating device nodes in chroot...");
Self::create_device_nodes(chroot_path)?; Self::create_device_nodes(chroot_path, ctx_for_devices.clone())?;
// Bind mount /proc from host into chroot (before entering unshare namespace)
// This allows /proc to work in containers where mounting inside unshare fails
log::debug!("Bind-mounting /proc into chroot...");
Self::bind_mount_proc(chroot_path, ctx_for_devices)?;
Ok(()) Ok(())
} }
@@ -134,18 +148,17 @@ impl EphemeralContextGuard {
series: &str, series: &str,
arch: Option<&str>, arch: Option<&str>,
tarball_path: &Path, tarball_path: &Path,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
// Create a lock file to make sure that no one tries to use the file while it's not fully downloaded // Create a lock file to make sure that no one tries to use the file while it's not fully downloaded
let lockfile_path = tarball_path.with_extension("lock"); let lockfile_path = tarball_path.with_extension("lock");
ctx.command("touch") ctx.command("touch")
.arg(lockfile_path.to_string_lossy().to_string()) .arg(lockfile_path.to_string_lossy().to_string())
.status()?; .status()?;
// Download the keyring to the cache directory // Download the keyring(s)
let keyring_path = let keyring_dir =
crate::apt::keyring::download_cache_keyring(Some(ctx.clone()), series).await?; crate::apt::keyring::download_cache_keyrings(Some(ctx.clone()), series).await?;
// Use mmdebstrap to download the tarball to the cache directory // Use mmdebstrap to download the tarball to the cache directory
let mut cmd = ctx.command("mmdebstrap"); let mut cmd = ctx.command("mmdebstrap");
@@ -153,7 +166,13 @@ impl EphemeralContextGuard {
.arg("--mode=unshare") .arg("--mode=unshare")
.arg("--include=mount,curl,ca-certificates") .arg("--include=mount,curl,ca-certificates")
.arg("--format=tar") .arg("--format=tar")
.arg(format!("--keyring={}", keyring_path.display())); .arg(format!("--keyring={}", keyring_dir.display()))
// Setup hook to copy keyrings into the chroot so apt inside can use them
.arg("--setup-hook=mkdir -p \"$1/etc/apt/trusted.gpg.d\"")
.arg(format!(
"--setup-hook=cp {}/*.gpg \"$1/etc/apt/trusted.gpg.d/\"",
keyring_dir.display()
));
// Add architecture if specified // Add architecture if specified
if let Some(a) = arch { if let Some(a) = arch {
@@ -212,8 +231,10 @@ impl EphemeralContextGuard {
Ok(()) Ok(())
} }
fn create_device_nodes(chroot_path: &Path) -> Result<(), Box<dyn Error>> { fn create_device_nodes(
let ctx = context::current(); chroot_path: &Path,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> {
let dev_null_path = chroot_path.join("dev/null"); let dev_null_path = chroot_path.join("dev/null");
let dev_zero_path = chroot_path.join("dev/zero"); let dev_zero_path = chroot_path.join("dev/zero");
@@ -270,6 +291,43 @@ impl EphemeralContextGuard {
Ok(()) Ok(())
} }
/// Bind mount /proc from host into the chroot
/// This is done before entering the unshare namespace, so it works in containers
fn bind_mount_proc(
chroot_path: &Path,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> {
let proc_path = chroot_path.join("proc");
// Ensure /proc directory exists in chroot
fs::create_dir_all(&proc_path)?;
// Check if we're running as root
let is_root = crate::utils::root::is_root()?;
// Bind mount host's /proc into chroot (with sudo if not root)
let mut cmd = ctx.command(if is_root { "mount" } else { "sudo" });
if !is_root {
cmd.arg("mount");
}
let status = cmd
.arg("--bind")
.arg("/proc")
.arg(proc_path.to_string_lossy().to_string())
.status()?;
if !status.success() {
log::warn!(
"Could not bind-mount /proc into chroot at {}. Some packages may not install correctly.",
proc_path.display()
);
} else {
log::debug!("Bind-mounted /proc into chroot at {}", proc_path.display());
}
Ok(())
}
/// Mark the build as successful, which will trigger chroot cleanup on drop /// Mark the build as successful, which will trigger chroot cleanup on drop
pub fn mark_build_successful(&mut self) { pub fn mark_build_successful(&mut self) {
self.build_succeeded = true; self.build_succeeded = true;
@@ -294,14 +352,26 @@ impl Drop for EphemeralContextGuard {
// Check if we're running as root to avoid unnecessary sudo // Check if we're running as root to avoid unnecessary sudo
let is_root = crate::utils::root::is_root().unwrap_or(false); let is_root = crate::utils::root::is_root().unwrap_or(false);
// Unmount /proc from chroot before removing (ignore errors)
let proc_path = self.chroot_path.join("proc");
let _ = if is_root {
self.base_ctx.command("umount").arg(&proc_path).status()
} else {
self.base_ctx
.command("sudo")
.arg("umount")
.arg(&proc_path)
.status()
};
let result = if is_root { let result = if is_root {
context::current() self.base_ctx
.command("rm") .command("rm")
.arg("-rf") .arg("-rf")
.arg(&self.chroot_path) .arg(&self.chroot_path)
.status() .status()
} else { } else {
context::current() self.base_ctx
.command("sudo") .command("sudo")
.arg("rm") .arg("rm")
.arg("-rf") .arg("-rf")

View File

@@ -1,11 +1,12 @@
/// Local binary package building /// Local binary package building
/// Directly calling 'debian/rules' in current context /// Directly calling 'debian/rules' in current context
use crate::context; use crate::context::Context;
use crate::deb::find_dsc_file; use crate::deb::find_dsc_file;
use log::warn; use log::warn;
use std::collections::HashMap; use std::collections::HashMap;
use std::error::Error; use std::error::Error;
use std::path::Path; use std::path::Path;
use std::sync::Arc;
use crate::apt; use crate::apt;
use crate::deb::cross; use crate::deb::cross;
@@ -20,14 +21,13 @@ pub async fn build(
cross: bool, cross: bool,
ppa: Option<&[&str]>, ppa: Option<&[&str]>,
inject_packages: Option<&[&str]>, inject_packages: Option<&[&str]>,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
// Environment // Environment
let mut env = HashMap::<String, String>::new(); let mut env = HashMap::<String, String>::new();
env.insert("LANG".to_string(), "C".to_string()); env.insert("LANG".to_string(), "C".to_string());
env.insert("DEBIAN_FRONTEND".to_string(), "noninteractive".to_string()); env.insert("DEBIAN_FRONTEND".to_string(), "noninteractive".to_string());
let ctx = context::current();
// Parallel building: find local number of cores, and use that // Parallel building: find local number of cores, and use that
let num_cores = ctx let num_cores = ctx
.command("nproc") .command("nproc")
@@ -52,11 +52,11 @@ pub async fn build(
if cross { if cross {
log::debug!("Setting up environment for local cross build..."); log::debug!("Setting up environment for local cross build...");
cross::setup_environment(&mut env, arch)?; cross::setup_environment(&mut env, arch, ctx.clone())?;
cross::ensure_repositories(arch, series)?; cross::ensure_repositories(arch, series, ctx.clone())?;
} }
let mut sources = apt::sources::load(None)?; let mut sources = apt::sources::load(Some(ctx.clone()))?;
let mut modified = false; let mut modified = false;
let mut added_ppas: Vec<(&str, &str)> = Vec::new(); let mut added_ppas: Vec<(&str, &str)> = Vec::new();
@@ -117,11 +117,12 @@ pub async fn build(
} }
if modified { if modified {
apt::sources::save_legacy(None, sources, "/etc/apt/sources.list")?; apt::sources::save_legacy(Some(ctx.clone()), sources, "/etc/apt/sources.list")?;
// Download and import PPA keys for all added PPAs // Download and import PPA keys for all added PPAs
for (user, ppa_name) in added_ppas { for (user, ppa_name) in added_ppas {
if let Err(e) = crate::apt::keyring::download_trust_ppa_key(None, user, ppa_name).await if let Err(e) =
crate::apt::keyring::download_trust_ppa_key(Some(ctx.clone()), user, ppa_name).await
{ {
warn!( warn!(
"Failed to download PPA key for {}/{}: {}", "Failed to download PPA key for {}/{}: {}",
@@ -169,24 +170,15 @@ pub async fn build(
} }
// Find the actual package directory // Find the actual package directory
let package_dir = crate::deb::find_package_directory(Path::new(build_root), package, version)?; let package_dir =
crate::deb::find_package_directory(Path::new(build_root), package, version, &ctx)?;
let package_dir_str = package_dir let package_dir_str = package_dir
.to_str() .to_str()
.ok_or("Invalid package directory path")?; .ok_or("Invalid package directory path")?;
// Install injected packages if specified // Install injected packages if specified
if let Some(packages) = inject_packages { if let Some(packages) = inject_packages {
log::info!("Installing injected packages: {:?}", packages); install_injected_packages(packages, &env, ctx.clone())?;
let mut cmd = ctx.command("apt-get");
cmd.envs(env.clone())
.arg("-y")
.arg("--allow-downgrades")
.arg("install")
.args(packages);
let status = cmd.status()?;
if !status.success() {
return Err(format!("Could not install injected packages: {:?}", packages).into());
}
} }
// Install arch-specific build dependencies // Install arch-specific build dependencies
@@ -204,7 +196,7 @@ pub async fn build(
// If build-dep fails, we try to explain the failure using dose-debcheck // If build-dep fails, we try to explain the failure using dose-debcheck
if !status.success() { if !status.success() {
dose3_explain_dependencies(package, version, arch, build_root, cross)?; dose3_explain_dependencies(package, version, arch, build_root, cross, ctx.clone())?;
return Err("Could not install build-dependencies for the build".into()); return Err("Could not install build-dependencies for the build".into());
} }
@@ -221,7 +213,7 @@ pub async fn build(
// If build-dep fails, we try to explain the failure using dose-debcheck // If build-dep fails, we try to explain the failure using dose-debcheck
if !status.success() { if !status.success() {
dose3_explain_dependencies(package, version, arch, build_root, cross)?; dose3_explain_dependencies(package, version, arch, build_root, cross, ctx.clone())?;
return Err("Could not install build-dependencies for the build".into()); return Err("Could not install build-dependencies for the build".into());
} }
@@ -254,15 +246,67 @@ pub async fn build(
Ok(()) Ok(())
} }
fn install_injected_packages(
packages: &[&str],
env: &HashMap<String, String>,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
log::info!("Installing injected packages: {:?}", packages);
// Separate .deb files from package names
let mut deb_files: Vec<String> = Vec::new();
let mut package_names: Vec<&str> = Vec::new();
for pkg in packages {
// Check if it's a .deb file path (ends with .deb and exists as a file)
let pkg_path = Path::new(pkg);
if pkg.ends_with(".deb") && pkg_path.exists() {
// Copy the .deb file into the build context
let dest_root = ctx.create_temp_dir()?;
let chroot_path = ctx.ensure_available(pkg_path, &dest_root)?;
log::debug!(
"Copied .deb file '{}' to chroot path '{}'",
pkg,
chroot_path.display()
);
deb_files.push(chroot_path.to_string_lossy().to_string());
} else {
package_names.push(pkg);
}
}
// Install .deb files
if !deb_files.is_empty() || !package_names.is_empty() {
log::info!("Installing .deb files: {:?}", deb_files);
let mut cmd = ctx.command("apt-get");
cmd.envs(env.clone())
.arg("-y")
.arg("--allow-downgrades")
.arg("install");
// Add the .deb file paths with ./ prefix for apt to recognize them as local files
for deb_path in &deb_files {
cmd.arg(format!("./{}", deb_path.trim_start_matches('/')));
}
if !package_names.is_empty() {
cmd.args(&package_names);
}
let status = cmd.status()?;
if !status.success() {
return Err(format!("Could not install injected packages: {:?}", deb_files).into());
}
}
Ok(())
}
fn dose3_explain_dependencies( fn dose3_explain_dependencies(
package: &str, package: &str,
version: &str, version: &str,
arch: &str, arch: &str,
build_root: &str, build_root: &str,
cross: bool, cross: bool,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
// Construct the list of Packages files // Construct the list of Packages files
let mut bg_args = Vec::new(); let mut bg_args = Vec::new();
let mut cmd = ctx.command("apt-get"); let mut cmd = ctx.command("apt-get");
@@ -284,7 +328,7 @@ fn dose3_explain_dependencies(
// Transform the dsc file into a 'Source' stanza (replacing 'Source' with 'Package') // Transform the dsc file into a 'Source' stanza (replacing 'Source' with 'Package')
// TODO: Remove potential GPG headers/signature // TODO: Remove potential GPG headers/signature
let dsc_path = find_dsc_file(build_root, package, version)?; let dsc_path = find_dsc_file(build_root, package, version, &ctx)?;
let mut dsc_content = ctx.read_file(&dsc_path)?; let mut dsc_content = ctx.read_file(&dsc_path)?;
dsc_content = dsc_content.replace("Source", "Package"); dsc_content = dsc_content.replace("Source", "Package");
ctx.write_file( ctx.write_file(

View File

@@ -3,9 +3,10 @@ mod ephemeral;
mod local; mod local;
mod sbuild; mod sbuild;
use crate::context; use crate::context::{self, Context};
use std::error::Error; use std::error::Error;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
/// Build mode for the binary build /// Build mode for the binary build
#[derive(PartialEq)] #[derive(PartialEq)]
@@ -17,6 +18,7 @@ pub enum BuildMode {
} }
/// Build package in 'cwd' to a .deb /// Build package in 'cwd' to a .deb
#[allow(clippy::too_many_arguments)]
pub async fn build_binary_package( pub async fn build_binary_package(
arch: Option<&str>, arch: Option<&str>,
series: Option<&str>, series: Option<&str>,
@@ -25,6 +27,7 @@ pub async fn build_binary_package(
mode: Option<BuildMode>, mode: Option<BuildMode>,
ppa: Option<&[&str]>, ppa: Option<&[&str]>,
inject_packages: Option<&[&str]>, inject_packages: Option<&[&str]>,
ctx: Option<Arc<Context>>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
let cwd = cwd.unwrap_or_else(|| Path::new(".")); let cwd = cwd.unwrap_or_else(|| Path::new("."));
@@ -57,19 +60,35 @@ pub async fn build_binary_package(
None None
}; };
// Use provided context or get current
let base_ctx = ctx.unwrap_or_else(context::current);
let mut guard = if mode == BuildMode::Local { let mut guard = if mode == BuildMode::Local {
Some(ephemeral::EphemeralContextGuard::new(series, chroot_arch).await?) Some(
ephemeral::EphemeralContextGuard::new_with_context(
series,
chroot_arch,
base_ctx.clone(),
)
.await?,
)
} else { } else {
None None
}; };
// Get the build context - either the ephemeral context or the base context
let build_ctx = if mode == BuildMode::Local {
context::current()
} else {
base_ctx.clone()
};
// Prepare build directory // Prepare build directory
let ctx = context::current(); let build_root = build_ctx.create_temp_dir()?;
let build_root = ctx.create_temp_dir()?;
// Ensure availability of all needed files for the build // Ensure availability of all needed files for the build
let parent_dir = cwd.parent().ok_or("Cannot find parent directory")?; let parent_dir = cwd.parent().ok_or("Cannot find parent directory")?;
ctx.ensure_available(parent_dir, &build_root)?; build_ctx.ensure_available(parent_dir, &build_root)?;
let parent_dir_name = parent_dir let parent_dir_name = parent_dir
.file_name() .file_name()
.ok_or("Cannot find parent directory name")?; .ok_or("Cannot find parent directory name")?;
@@ -87,19 +106,28 @@ pub async fn build_binary_package(
cross, cross,
ppa, ppa,
inject_packages, inject_packages,
build_ctx.clone(),
) )
.await? .await?
} }
BuildMode::Sbuild => sbuild::build(&package, &version, arch, series, &build_root, cross)?, BuildMode::Sbuild => sbuild::build(
&package,
&version,
arch,
series,
&build_root,
cross,
build_ctx.clone(),
)?,
}; };
// Retrieve produced .deb files // Retrieve produced .deb files
let remote_files = ctx.list_files(Path::new(&build_root))?; let remote_files = build_ctx.list_files(Path::new(&build_root))?;
for remote_file in remote_files { for remote_file in remote_files {
if remote_file.extension().is_some_and(|ext| ext == "deb") { if remote_file.extension().is_some_and(|ext| ext == "deb") {
let file_name = remote_file.file_name().ok_or("Invalid remote filename")?; let file_name = remote_file.file_name().ok_or("Invalid remote filename")?;
let local_dest = parent_dir.join(file_name); let local_dest = parent_dir.join(file_name);
ctx.retrieve_path(&remote_file, &local_dest)?; build_ctx.retrieve_path(&remote_file, &local_dest)?;
} }
} }
@@ -114,12 +142,25 @@ pub async fn build_binary_package(
/// Find the current package directory by trying both patterns: /// Find the current package directory by trying both patterns:
/// - package/package /// - package/package
/// - package/package-origversion /// - package/package-origversion
/// - custom directories from quirks configuration
pub(crate) fn find_package_directory( pub(crate) fn find_package_directory(
parent_dir: &Path, parent_dir: &Path,
package: &str, package: &str,
version: &str, version: &str,
ctx: &context::Context,
) -> Result<PathBuf, Box<dyn Error>> { ) -> Result<PathBuf, Box<dyn Error>> {
let ctx = context::current(); // Check quirks first for custom package directories
let custom_dirs = crate::quirks::get_package_directories(package);
for custom_dir in custom_dirs {
let package_dir = parent_dir.join(&custom_dir);
if ctx.exists(&package_dir)? && ctx.exists(&package_dir.join("debian"))? {
log::debug!(
"Found package directory via quirks: {}",
package_dir.display()
);
return Ok(package_dir);
}
}
// Try package/package pattern first // Try package/package pattern first
let package_dir = parent_dir.join(package).join(package); let package_dir = parent_dir.join(package).join(package);
@@ -196,14 +237,14 @@ fn find_dsc_file(
build_root: &str, build_root: &str,
package: &str, package: &str,
version: &str, version: &str,
ctx: &Arc<Context>,
) -> Result<PathBuf, Box<dyn Error>> { ) -> Result<PathBuf, Box<dyn Error>> {
// Strip epoch from version (e.g., "1:2.3.4-5" -> "2.3.4-5") // Strip epoch from version (e.g., "1:2.3.4-5" -> "2.3.4-5")
let version_without_epoch = version.split_once(':').map(|(_, v)| v).unwrap_or(version); let version_without_epoch = version.split_once(':').map(|(_, v)| v).unwrap_or(version);
let dsc_name = format!("{}_{}.dsc", package, version_without_epoch); let dsc_name = format!("{}_{}.dsc", package, version_without_epoch);
let dsc_path = PathBuf::from(build_root).join(&dsc_name); let dsc_path = PathBuf::from(build_root).join(&dsc_name);
// Check if the .dsc file exists in current context // Check if the .dsc file exists in context
let ctx = context::current();
if !ctx.exists(&dsc_path)? { if !ctx.exists(&dsc_path)? {
return Err(format!("Could not find .dsc file at {}", dsc_path.display()).into()); return Err(format!("Could not find .dsc file at {}", dsc_path.display()).into());
} }
@@ -212,7 +253,9 @@ fn find_dsc_file(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use serial_test::serial; use super::*;
use std::sync::Arc;
async fn test_build_end_to_end( async fn test_build_end_to_end(
package: &str, package: &str,
series: &str, series: &str,
@@ -242,13 +285,26 @@ mod tests {
.expect("Cannot pull package"); .expect("Cannot pull package");
log::info!("Successfully pulled package {}", package); log::info!("Successfully pulled package {}", package);
// Create a fresh local context for this test
let ctx = Arc::new(Context::new(crate::context::ContextConfig::Local));
// Change directory to the package directory // Change directory to the package directory
let cwd = crate::deb::find_package_directory(cwd, package, &package_info.stanza.version) let cwd =
crate::deb::find_package_directory(cwd, package, &package_info.stanza.version, &ctx)
.expect("Cannot find package directory"); .expect("Cannot find package directory");
log::debug!("Package directory: {}", cwd.display()); log::debug!("Package directory: {}", cwd.display());
log::info!("Starting binary package build..."); log::info!("Starting binary package build...");
crate::deb::build_binary_package(arch, Some(series), Some(&cwd), cross, None, None, None) crate::deb::build_binary_package(
arch,
Some(series),
Some(&cwd),
cross,
None,
None,
None,
Some(ctx),
)
.await .await
.expect("Cannot build binary package (deb)"); .expect("Cannot build binary package (deb)");
log::info!("Successfully built binary package"); log::info!("Successfully built binary package");
@@ -273,16 +329,10 @@ mod tests {
); );
} }
// Tests below will be marked 'serial' // Tests no longer need to be 'serial' since each test uses its own
// As builds are using ephemeral contexts, tests running on the same // explicit context instead of shared global state.
// process could use the ephemeral context of another thread and
// interfere with each other.
// FIXME: This is not ideal. In the future, we might want to
// either explicitely pass context (instead of shared state) or
// fork for building?
#[tokio::test] #[tokio::test]
#[test_log::test] #[test_log::test]
#[serial]
async fn test_deb_hello_ubuntu_end_to_end() { async fn test_deb_hello_ubuntu_end_to_end() {
test_build_end_to_end("hello", "noble", None, None, false).await; test_build_end_to_end("hello", "noble", None, None, false).await;
} }
@@ -291,7 +341,6 @@ mod tests {
#[tokio::test] #[tokio::test]
#[test_log::test] #[test_log::test]
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
#[serial]
async fn test_deb_hello_ubuntu_cross_end_to_end() { async fn test_deb_hello_ubuntu_cross_end_to_end() {
test_build_end_to_end("hello", "noble", None, Some("riscv64"), true).await; test_build_end_to_end("hello", "noble", None, Some("riscv64"), true).await;
} }
@@ -301,7 +350,6 @@ mod tests {
/// for example. /// for example.
#[tokio::test] #[tokio::test]
#[test_log::test] #[test_log::test]
#[serial]
async fn test_deb_hello_debian_sid_end_to_end() { async fn test_deb_hello_debian_sid_end_to_end() {
test_build_end_to_end("hello", "sid", None, None, false).await; test_build_end_to_end("hello", "sid", None, None, false).await;
} }
@@ -310,6 +358,10 @@ mod tests {
/// It is important to ensure that pkh can cross-compile linux-riscv, as /// It is important to ensure that pkh can cross-compile linux-riscv, as
/// for risc-v hardware is still rare and cross-compilation is necessary /// for risc-v hardware is still rare and cross-compilation is necessary
/// to debug and test /// to debug and test
/// NOTE: Ideally, we want to run this in CI, but it takes more than 1h
/// to fully build the linux-riscv package on an amd64 builder, which is too
/// much time
#[ignore]
#[tokio::test] #[tokio::test]
#[test_log::test] #[test_log::test]
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
@@ -328,7 +380,6 @@ mod tests {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
#[tokio::test] #[tokio::test]
#[test_log::test] #[test_log::test]
#[serial]
async fn test_deb_gcc_debian_end_to_end() { async fn test_deb_gcc_debian_end_to_end() {
test_build_end_to_end("gcc-15", "sid", None, None, false).await; test_build_end_to_end("gcc-15", "sid", None, None, false).await;
} }

View File

@@ -1,8 +1,9 @@
/// Sbuild binary package building /// Sbuild binary package building
/// Call 'sbuild' with the dsc file to build the package with unshare /// Call 'sbuild' with the dsc file to build the package with unshare
use crate::context; use crate::context::Context;
use std::error::Error; use std::error::Error;
use std::path::Path; use std::path::Path;
use std::sync::Arc;
pub fn build( pub fn build(
package: &str, package: &str,
@@ -11,11 +12,11 @@ pub fn build(
series: &str, series: &str,
build_root: &str, build_root: &str,
cross: bool, cross: bool,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> { ) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
// Find the actual package directory // Find the actual package directory
let package_dir = crate::deb::find_package_directory(Path::new(build_root), package, version)?; let package_dir =
crate::deb::find_package_directory(Path::new(build_root), package, version, &ctx)?;
let package_dir_str = package_dir let package_dir_str = package_dir
.to_str() .to_str()
.ok_or("Invalid package directory path")?; .ok_or("Invalid package directory path")?;

View File

@@ -134,6 +134,18 @@ pub async fn get_ordered_series_name(dist: &str) -> Result<Vec<String>, Box<dyn
/// Get the latest released series for a dist (excluding future releases and special cases like sid) /// Get the latest released series for a dist (excluding future releases and special cases like sid)
pub async fn get_latest_released_series(dist: &str) -> Result<String, Box<dyn Error>> { pub async fn get_latest_released_series(dist: &str) -> Result<String, Box<dyn Error>> {
let latest = get_n_latest_released_series(dist, 1).await?;
latest
.first()
.cloned()
.ok_or("No released series found".into())
}
/// Get the N latest released series for a dist (excluding future releases and special cases like sid)
pub async fn get_n_latest_released_series(
dist: &str,
n: usize,
) -> Result<Vec<String>, Box<dyn Error>> {
let series_info_list = get_ordered_series(dist).await?; let series_info_list = get_ordered_series(dist).await?;
let today = chrono::Local::now().date_naive(); let today = chrono::Local::now().date_naive();
@@ -153,11 +165,11 @@ pub async fn get_latest_released_series(dist: &str) -> Result<String, Box<dyn Er
// Sort by release date descending (newest first) // Sort by release date descending (newest first)
released_series.sort_by(|a, b| b.release.cmp(&a.release)); released_series.sort_by(|a, b| b.release.cmp(&a.release));
if let Some(latest) = released_series.first() { Ok(released_series
Ok(latest.series.clone()) .iter()
} else { .take(n)
Err("No released series found".into()) .map(|s| s.series.clone())
} .collect())
} }
/// Obtain the distribution (eg. debian, ubuntu) from a distribution series (eg. noble, bookworm) /// Obtain the distribution (eg. debian, ubuntu) from a distribution series (eg. noble, bookworm)
@@ -202,8 +214,11 @@ pub fn get_base_url(dist: &str) -> String {
DATA.dist.get(dist).unwrap().base_url.clone() DATA.dist.get(dist).unwrap().base_url.clone()
} }
/// Obtain the URL for the archive keyring of a distribution series /// Obtain the URLs for the archive keyrings of a distribution series
pub async fn get_keyring_url(series: &str) -> Result<String, Box<dyn Error>> { ///
/// For 'sid' and 'experimental', returns keyrings from the 3 latest releases
/// since sid needs keys from all recent releases.
pub async fn get_keyring_urls(series: &str) -> Result<Vec<String>, Box<dyn Error>> {
let dist = get_dist_from_series(series).await?; let dist = get_dist_from_series(series).await?;
let dist_data = DATA let dist_data = DATA
.dist .dist
@@ -212,24 +227,36 @@ pub async fn get_keyring_url(series: &str) -> Result<String, Box<dyn Error>> {
// For Debian, we need the series number to form the keyring URL // For Debian, we need the series number to form the keyring URL
if dist == "debian" { if dist == "debian" {
// Special case for 'sid' - use the latest released version // Special case for 'sid' - use keyrings from the 3 latest released versions
if series == "sid" || series == "experimental" { if series == "sid" || series == "experimental" {
let latest_released = get_latest_released_series("debian").await?; let latest_released = get_n_latest_released_series("debian", 3).await?;
let series_num = get_debian_series_number(&latest_released).await?.unwrap(); let mut urls = Vec::new();
// Replace {series_num} placeholder with the latest released series number for released_series in latest_released {
Ok(dist_data if let Some(series_num) = get_debian_series_number(&released_series).await? {
urls.push(
dist_data
.archive_keyring .archive_keyring
.replace("{series_num}", &series_num)) .replace("{series_num}", &series_num),
);
}
}
if urls.is_empty() {
Err("No keyring URLs found for sid/experimental".into())
} else {
Ok(urls)
}
} else { } else {
let series_num = get_debian_series_number(series).await?.unwrap(); let series_num = get_debian_series_number(series).await?.unwrap();
// Replace {series_num} placeholder with the actual series number // Replace {series_num} placeholder with the actual series number
Ok(dist_data Ok(vec![
dist_data
.archive_keyring .archive_keyring
.replace("{series_num}", &series_num)) .replace("{series_num}", &series_num),
])
} }
} else { } else {
// For other distributions like Ubuntu, use the keyring directly // For other distributions like Ubuntu, use the keyring directly
Ok(dist_data.archive_keyring.clone()) Ok(vec![dist_data.archive_keyring.clone()])
} }
} }
@@ -347,14 +374,47 @@ mod tests {
} }
#[tokio::test] #[tokio::test]
async fn test_get_keyring_url_sid() { async fn test_get_keyring_urls_sid() {
// Test that 'sid' uses the latest released version for keyring URL // Test that 'sid' returns keyrings from the 3 latest released versions
let sid_keyring = get_keyring_url("sid").await.unwrap(); let sid_keyrings = get_keyring_urls("sid").await.unwrap();
let latest_released = get_latest_released_series("debian").await.unwrap();
let latest_keyring = get_keyring_url(&latest_released).await.unwrap();
// The keyring URL for 'sid' should be the same as the latest released version // Should have keyring URLs for sid
assert_eq!(sid_keyring, latest_keyring); assert!(!sid_keyrings.is_empty());
assert!(sid_keyrings.len() <= 3);
// Each URL should be a valid Debian keyring URL
for url in &sid_keyrings {
assert!(
url.contains("ftp-master.debian.org/keys"),
"URL '{}' does not contain expected pattern",
url
);
}
}
#[tokio::test]
async fn test_get_keyring_url_regular_series() {
    // A regular released series (e.g. bookworm) yields a single keyring URL
    // pointing at the Debian ftp-master keys archive.
    let urls = get_keyring_urls("bookworm").await.unwrap();
    let bookworm_keyring = &urls[0];
    let pattern = "ftp-master.debian.org/keys";
    assert!(
        bookworm_keyring.contains(pattern),
        "URL '{}' does not contain expected pattern",
        bookworm_keyring
    );
}
#[tokio::test]
async fn test_get_n_latest_released_series() {
// Test getting 3 latest released series
let latest_3 = get_n_latest_released_series("debian", 3).await.unwrap();
// Should have at most 3 series
assert!(!latest_3.is_empty());
assert!(latest_3.len() <= 3);
// Should not contain 'sid' or 'experimental'
assert!(!latest_3.contains(&"sid".to_string()));
assert!(!latest_3.contains(&"experimental".to_string()));
} }
#[tokio::test] #[tokio::test]

View File

@@ -197,6 +197,7 @@ fn main() {
mode, mode,
ppa, ppa,
inject_packages, inject_packages,
None,
) )
.await .await
}) { }) {

View File

@@ -153,6 +153,14 @@ impl Iterator for DebianSources {
} }
} }
// Parse Vcs-Git field: it may contain just a URL, or URL followed by -b <branch>
// e.g., "https://salsa.debian.org/science-team/paraview.git -b debian/latest"
let vcs_git = fields.get("Vcs-Git").map(|vcs| {
// Split on whitespace and take the first part (the URL)
// The URL should not contain spaces, so this is safe
vcs.split_whitespace().next().unwrap_or(vcs).to_string()
});
Some(PackageStanza { Some(PackageStanza {
package: fields.get("Package").unwrap().to_string(), package: fields.get("Package").unwrap().to_string(),
version: fields.get("Version").unwrap().to_string(), version: fields.get("Version").unwrap().to_string(),
@@ -161,7 +169,7 @@ impl Iterator for DebianSources {
.get("Format") .get("Format")
.cloned() .cloned()
.unwrap_or_else(|| "1.0".to_string()), .unwrap_or_else(|| "1.0".to_string()),
vcs_git: fields.get("Vcs-Git").cloned(), vcs_git,
vcs_browser: fields.get("Vcs-Browser").cloned(), vcs_browser: fields.get("Vcs-Browser").cloned(),
files, files,
}) })

View File

@@ -504,14 +504,17 @@ pub async fn pull(
// Depending on target series, we pick target branch; if latest series is specified, // Depending on target series, we pick target branch; if latest series is specified,
// we target the development branch, i.e. the default branch // we target the development branch, i.e. the default branch
// Only use Ubuntu-specific branch naming if the VCS is from Launchpad
let is_launchpad_vcs = url.contains("launchpad.net");
let branch_name = if crate::distro_info::get_ordered_series_name(package_info.dist.as_str()) let branch_name = if crate::distro_info::get_ordered_series_name(package_info.dist.as_str())
.await?[0] .await?[0]
!= *series != *series
{ {
if package_info.dist == "ubuntu" { if package_info.dist == "ubuntu" && is_launchpad_vcs {
Some(format!("{}/{}", package_info.dist, series)) Some(format!("{}/{}", package_info.dist, series))
} else { } else {
// Debian does not have reliable branch naming... // Debian does not have reliable branch naming...
// Also, Ubuntu packages with salsa VCS don't have Ubuntu-specific branches
// For now, we skip that part and clone default // For now, we skip that part and clone default
// TODO: Inspect remote branches and tags for matches // TODO: Inspect remote branches and tags for matches
None None
@@ -615,18 +618,27 @@ mod tests {
let head = repo.head().unwrap(); let head = repo.head().unwrap();
let name = head.name().unwrap(); let name = head.name().unwrap();
// Check if the VCS is from Launchpad - only Launchpad has Ubuntu-specific branches
let is_launchpad_vcs = info
.preferred_vcs
.as_ref()
.map(|url| url.contains("launchpad.net"))
.unwrap_or(false);
if let Some(s) = series { if let Some(s) = series {
// The local branch should be named dist/series // The local branch should be named dist/series
// We skip debian for now as it does not have a reliable naming scheme // We skip debian for now as it does not have a reliable naming scheme
if info.dist == "ubuntu" { // Also skip Ubuntu packages with non-Launchpad VCS (e.g., salsa.debian.org)
if info.dist == "ubuntu" && is_launchpad_vcs {
assert_eq!(name, format!("refs/heads/{0}/{s}", info.dist)); assert_eq!(name, format!("refs/heads/{0}/{s}", info.dist));
} }
} else { } else {
// The local branch should be named ubuntu/devel for Ubuntu // The local branch should be named ubuntu/devel for Ubuntu
// Debian unfortunately does not have a reliable naming scheme // Debian unfortunately does not have a reliable naming scheme
// Also skip Ubuntu packages with non-Launchpad VCS
// Given that there was no series specified, and this is a test, // Given that there was no series specified, and this is a test,
// we require to have a distribution specified // we require to have a distribution specified
if dist.unwrap() == "ubuntu" { if dist.unwrap() == "ubuntu" && is_launchpad_vcs {
assert_eq!(name, "refs/heads/ubuntu/devel"); assert_eq!(name, "refs/heads/ubuntu/devel");
} }
} }
@@ -690,4 +702,15 @@ mod tests {
async fn test_pull_hello_ubuntu_latest_end_to_end() { async fn test_pull_hello_ubuntu_latest_end_to_end() {
test_pull_package_end_to_end("hello", None, Some("ubuntu"), None).await; test_pull_package_end_to_end("hello", None, Some("ubuntu"), None).await;
} }
/// Test for paraview - a package that has no Ubuntu Launchpad code,
/// only a Debian salsa repository, even in Ubuntu.
/// Furthermore, paraview has a Vcs-Git value of:
/// Vcs-Git: https://salsa.debian.org/science-team/paraview.git -b debian/latest
/// Given that this field is not only a URL but also specifies a branch, it
/// needs special handling, which this test exercises.
#[tokio::test]
async fn test_pull_paraview_ubuntu_end_to_end() {
    test_pull_package_end_to_end("paraview", Some("noble"), None, None).await;
}
} }

82
src/put.rs Normal file
View File

@@ -0,0 +1,82 @@
use std::path::Path;
use std::process::Command;
use crate::ProgressCallback;
use std::fs;
use pkh::package_info::parse_control_file;
/// Execute the `put` subcommand to upload a source package to a PPA or archive
///
/// Reads the package name from `debian/control` in the working directory,
/// locates the single `.dsc` file there, and uploads it with `dput`.
///
/// # Arguments
/// - series: Target distribution series (e.g. "focal") — currently unused here
/// - dist: Target distribution (e.g. "ubuntu") — currently unused here
/// - version: Package version override — currently unused here
/// - ppa: Target PPA in "user/ppa-name" format
/// - archive: Set to true for official archive uploads
/// - cwd: Current working directory containing source package (defaults to ".")
/// - progress: Progress callback for UI updates
///
/// # Errors
/// Fails when `debian/control` cannot be read, when zero or multiple `.dsc`
/// files are present, when neither `--ppa` nor `--archive` is selected, or
/// when `dput` cannot be spawned or exits with a non-zero status.
pub async fn put(
    series: Option<&str>,
    dist: Option<&str>,
    version: Option<&str>,
    ppa: Option<&str>,
    archive: bool,
    cwd: Option<&Path>,
    progress: ProgressCallback<'_>,
) -> Result<(), Box<dyn std::error::Error>> {
    // NOTE(review): series/dist/version are accepted but never used below —
    // confirm whether they should influence the dput target or the changelog.
    let _ = (series, dist, version);

    let current_dir = cwd.unwrap_or_else(|| Path::new("."));

    // The package name comes from debian/control; failing to read it also
    // doubles as a check that we are inside a source package directory.
    let control_path = current_dir.join("debian/control");
    let control_content = fs::read_to_string(&control_path).map_err(|e| {
        format!(
            "Failed to read debian/control: {}. Are you in a source package directory?",
            e
        )
    })?;
    let package_info = parse_control_file(&control_content)?;
    let package = package_info
        .source
        .ok_or("Could not determine package name from debian/control")?;

    if let Some(cb) = progress {
        cb(&package, "Uploading package...", 0, 1);
    }

    // Collect .dsc files in the working directory; exactly one must exist so
    // the upload target is unambiguous.
    let dsc_files: Vec<_> = current_dir
        .read_dir()?
        .filter_map(|entry| {
            let path = entry.ok()?.path();
            if path.extension()? == "dsc" { Some(path) } else { None }
        })
        .collect();
    if dsc_files.len() > 1 {
        return Err("Multiple .dsc files found - please make sure only one exists".into());
    }
    let dsc_file = dsc_files
        .first()
        .ok_or("No .dsc file found in current directory")?;

    // Resolve the dput target once instead of duplicating the invocation.
    let target = if archive {
        println!("Uploading {} to official archive", dsc_file.display());
        "ubuntu".to_string()
    } else if let Some(ppa) = ppa {
        println!("Uploading {} to PPA: {}", dsc_file.display(), ppa);
        format!("ppa:{}", ppa)
    } else {
        return Err(
            "Must specify either --ppa for PPA upload or --archive for official archive".into(),
        );
    };

    // Bug fix: the exit status of dput was previously ignored, so a failed
    // upload was silently reported as success. Propagate non-zero exits.
    let status = Command::new("dput").arg(&target).arg(dsc_file).status()?;
    if !status.success() {
        return Err(format!("dput {} failed with status {}", target, status).into());
    }

    if let Some(cb) = progress {
        cb(&package, "Upload complete", 1, 1);
    }

    Ok(())
}

View File

@@ -17,6 +17,12 @@ pub struct OperationQuirks {
/// Additional parameters for the operation /// Additional parameters for the operation
#[serde(default)] #[serde(default)]
pub parameters: HashMap<String, serde_yaml::Value>, pub parameters: HashMap<String, serde_yaml::Value>,
/// Custom package directories to try when looking for the package source
/// This is useful for packages that don't follow the standard naming conventions
/// like linux packages that use directories like "linux-main" or other custom names
#[serde(default)]
pub package_directory: Vec<String>,
} }
/// Quirks for a specific package /// Quirks for a specific package
@@ -75,3 +81,31 @@ pub fn get_deb_extra_dependencies(package: &str) -> Vec<String> {
Vec::new() Vec::new()
} }
/// Get package directories from quirks configuration
///
/// This function returns the list of custom package directories to try
/// when looking for the package source directory.
///
/// # Arguments
/// * `package` - The package name
///
/// # Returns
/// * `Vec<String>` - List of package directories to try, or empty vector if none
pub fn get_package_directories(package: &str) -> Vec<String> {
if let Some(quirks) = get_package_quirks(&QUIRKS_DATA, package) {
// Check deb quirks first, then pull quirks
if let Some(deb_quirks) = &quirks.deb
&& !deb_quirks.package_directory.is_empty()
{
return deb_quirks.package_directory.clone();
}
if let Some(pull_quirks) = &quirks.pull
&& !pull_quirks.package_directory.is_empty()
{
return pull_quirks.package_directory.clone();
}
}
Vec::new()
}