Compare commits

..

10 Commits

Author SHA1 Message Date
73511c258b deb: fix injection of .deb packages with apt-get
All checks were successful
CI / build (push) Successful in 9m5s
CI / snap (push) Successful in 3m56s
2026-04-04 12:41:52 +02:00
d83174c980 deb: allow injecting .deb files as packages
Some checks failed
CI / build (push) Failing after 1m19s
CI / snap (push) Has been skipped
2026-04-04 12:35:14 +02:00
2b27b7b06e deb: add quirks to allow packages to declare specific package directories
All checks were successful
CI / build (push) Successful in 8m41s
CI / snap (push) Successful in 3m55s
2026-03-25 17:40:55 +01:00
32e15b1106 deb: ignore linux-riscv test
Some checks failed
CI / build (push) Successful in 8m16s
CI / snap (push) Failing after 7s
2026-03-19 11:27:02 +01:00
7640952bdc unshare: mount proc differently depending on root privileges 2026-03-19 11:26:10 +01:00
2b6207981a deb: make tests parallel
Some checks failed
CI / build (push) Failing after 12m10s
CI / snap (push) Has been skipped
2026-03-19 00:24:48 +01:00
daaf33cd6b deb: fix race condition for test
Some checks failed
CI / build (push) Failing after 18m26s
CI / snap (push) Has been skipped
Fix race condition around current context,
related to find_package_directory.
2026-03-18 23:34:33 +01:00
4a73e6e1d6 deb: fix race condition for tests
Some checks failed
CI / build (push) Failing after 20m7s
CI / snap (push) Has been skipped
2026-03-18 17:35:38 +01:00
d06e091121 apt/keyring: download 3 keyrings for sid
Some checks failed
CI / build (push) Failing after 21m24s
CI / snap (push) Has been skipped
2026-03-18 15:23:57 +01:00
5ec675c20b pull: fix edge cases
Some checks failed
CI / build (push) Failing after 13m53s
CI / snap (push) Has been skipped
- Ubuntu does not have 'Launchpad/Code' repo edge case
- Vcs-Git field has a git command, not only an URL edge case
2026-03-17 17:22:25 +01:00
14 changed files with 605 additions and 179 deletions

View File

@@ -12,3 +12,8 @@ quirks:
# - another-dependency
# parameters:
# key: value
linux-riscv:
deb:
package_directory:
- linux-main

View File

@@ -16,99 +16,131 @@ struct LaunchpadPpaResponse {
signing_key_fingerprint: String,
}
/// Download a keyring to the application cache directory and return the path
/// Download keyrings to a shared keyring directory and return the directory path
///
/// This function downloads the keyring to a user-writable cache directory
/// This function downloads keyrings to a user-writable cache directory
/// instead of the system apt keyring directory, allowing non-root usage.
/// The returned path can be passed to mmdebstrap via --keyring.
/// The returned directory path can be passed to mmdebstrap via --keyring=.
///
/// For Debian keyrings (which are ASCII-armored .asc files), the key is
/// For Debian keyrings (which are ASCII-armored .asc files), the keys are
/// converted to binary GPG format using gpg --dearmor.
///
/// For 'sid' and 'experimental', this downloads keyrings from the 3 latest
/// releases since sid needs keys from all recent releases.
///
/// # Arguments
/// * `ctx` - Optional context to use
/// * `series` - The distribution series (e.g., "noble", "sid")
///
/// # Returns
/// The path to the downloaded keyring file (in binary GPG format)
pub async fn download_cache_keyring(
/// The path to the keyring directory containing all downloaded keyring files
pub async fn download_cache_keyrings(
ctx: Option<Arc<context::Context>>,
series: &str,
) -> Result<PathBuf, Box<dyn Error>> {
let ctx = ctx.unwrap_or_else(context::current);
// Obtain keyring URL from distro_info
let keyring_url = distro_info::get_keyring_url(series).await?;
log::debug!("Downloading keyring from: {}", keyring_url);
// Obtain keyring URLs from distro_info
let keyring_urls = distro_info::get_keyring_urls(series).await?;
log::debug!("Downloading keyrings from: {:?}", keyring_urls);
// Get the application cache directory
let proj_dirs = directories::ProjectDirs::from("com", "pkh", "pkh")
.ok_or("Could not determine project directories")?;
let cache_dir = proj_dirs.cache_dir();
// Use system temp directory for keyrings since it's accessible from unshare mode
// The home directory may not be accessible from mmdebstrap's unshare namespace
let temp_dir = std::env::temp_dir();
let keyring_dir = temp_dir.join("pkh-keyrings");
// Create cache directory if it doesn't exist
if !ctx.exists(cache_dir)? {
ctx.command("mkdir").arg("-p").arg(cache_dir).status()?;
// Create keyring directory if it doesn't exist
if !ctx.exists(&keyring_dir)? {
ctx.command("mkdir").arg("-p").arg(&keyring_dir).status()?;
}
// Extract the original filename from the keyring URL
let filename = keyring_url
.split('/')
.next_back()
.unwrap_or("pkh-{}.gpg")
.replace("{}", series);
let download_path = cache_dir.join(&filename);
// Make keyring directory world-accessible so mmdebstrap in unshare mode can access it
ctx.command("chmod")
.arg("a+rwx")
.arg(&keyring_dir)
.status()?;
// Download the keyring using curl
let mut curl_cmd = ctx.command("curl");
curl_cmd
.arg("-s")
.arg("-f")
.arg("-L")
.arg(&keyring_url)
.arg("--output")
.arg(&download_path);
for keyring_url in keyring_urls {
// Extract the original filename from the keyring URL
let filename = keyring_url
.split('/')
.next_back()
.unwrap_or("pkh-{}.gpg")
.replace("{}", series);
let download_path = keyring_dir.join(&filename);
let status = curl_cmd.status()?;
if !status.success() {
return Err(format!("Failed to download keyring from {}", keyring_url).into());
}
// Determine the binary keyring path
let binary_path = if filename.ends_with(".asc") {
// ASCII-armored key: convert to .gpg
let binary_filename = filename.strip_suffix(".asc").unwrap_or(&filename);
keyring_dir.join(format!("{}.gpg", binary_filename))
} else {
download_path.clone()
};
// If the downloaded file is an ASCII-armored key (.asc), convert it to binary GPG format
// mmdebstrap's --keyring option expects binary GPG keyrings
let keyring_path = if filename.ends_with(".asc") {
let binary_filename = filename.strip_suffix(".asc").unwrap_or(&filename);
let binary_path = cache_dir.join(format!("{}.gpg", binary_filename));
// Skip download if the binary keyring already exists
if !ctx.exists(&binary_path)? {
// Download the keyring using curl
let mut curl_cmd = ctx.command("curl");
curl_cmd
.arg("-s")
.arg("-f")
.arg("-L")
.arg(&keyring_url)
.arg("--output")
.arg(&download_path);
log::debug!("Converting ASCII-armored key to binary GPG format");
let mut gpg_cmd = ctx.command("gpg");
gpg_cmd
.arg("--dearmor")
.arg("--output")
.arg(&binary_path)
.arg(&download_path);
let status = curl_cmd.status()?;
if !status.success() {
return Err(format!("Failed to download keyring from {}", keyring_url).into());
}
let status = gpg_cmd.status()?;
if !status.success() {
return Err("Failed to convert keyring to binary format"
.to_string()
.into());
// If the downloaded file is an ASCII-armored key (.asc), convert it to binary GPG format
if filename.ends_with(".asc") {
log::debug!("Converting ASCII-armored key to binary GPG format");
let mut gpg_cmd = ctx.command("gpg");
gpg_cmd
.arg("--dearmor")
.arg("--output")
.arg(&binary_path)
.arg(&download_path);
let status = gpg_cmd.status()?;
if !status.success() {
return Err("Failed to convert keyring to binary format"
.to_string()
.into());
}
// Remove the original .asc file
let _ = ctx.command("rm").arg("-f").arg(&download_path).status();
}
// Make the keyring file world-readable so mmdebstrap in unshare mode can access it
ctx.command("chmod").arg("a+r").arg(&binary_path).status()?;
log::info!(
"Successfully downloaded keyring for {} to {}",
series,
binary_path.display()
);
} else {
log::debug!(
"Keyring already exists at {}, skipping download",
binary_path.display()
);
// Ensure existing keyring is world-readable
ctx.command("chmod").arg("a+r").arg(&binary_path).status()?;
}
// Remove the original .asc file
let _ = ctx.command("rm").arg("-f").arg(&download_path).status();
binary_path
} else {
download_path
};
}
log::info!(
"Successfully downloaded keyring for {} to {}",
"Keyrings for {} available in {}",
series,
keyring_path.display()
keyring_dir.display()
);
Ok(keyring_path)
Ok(keyring_dir)
}
/// Download and import a PPA key using Launchpad API

View File

@@ -207,11 +207,22 @@ impl UnshareDriver {
cmd.arg("-w").arg(dir);
}
cmd.arg("--").arg("bash").arg("-c").arg(format!(
"mount -t proc proc /proc; mkdir /dev/pts; mount -t devpts devpts /dev/pts; touch /dev/ptmx; mount --bind /dev/pts/ptmx /dev/ptmx; {} {}",
program,
args.iter().map(|a| format!("\"{a}\"")).collect::<Vec<_>>().join(" ")
));
// Build the bash command: set up /dev/pts and run the program
// /proc should already be bind-mounted from the host before entering the namespace
let program_args = args
.iter()
.map(|a| format!("\"{a}\""))
.collect::<Vec<_>>()
.join(" ");
cmd.arg("--")
.arg("bash")
.arg("-c")
.arg(format!(
"mkdir -p /dev/pts; mount -t devpts devpts /dev/pts 2>/dev/null || true; touch /dev/ptmx; mount --bind /dev/pts/ptmx /dev/ptmx 2>/dev/null || true; {} {}",
program,
program_args
));
cmd
}

View File

@@ -1,15 +1,16 @@
use crate::context;
use crate::context::Context;
use std::collections::HashMap;
use std::error::Error;
use std::sync::Arc;
/// Set environment variables for cross-compilation
pub fn setup_environment(
env: &mut HashMap<String, String>,
arch: &str,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
let dpkg_architecture = String::from_utf8(
context::current()
.command("dpkg-architecture")
ctx.command("dpkg-architecture")
.arg("-a")
.arg(arch)
.output()?
@@ -34,8 +35,11 @@ pub fn setup_environment(
/// Ensure that repositories for target architecture are available
/// This also handles the 'ports.ubuntu.com' vs 'archive.ubuntu.com' on Ubuntu
pub fn ensure_repositories(arch: &str, series: &str) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
pub fn ensure_repositories(
arch: &str,
series: &str,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
let local_arch = crate::get_current_arch();
// Add target ('host') architecture

View File

@@ -1,9 +1,9 @@
use crate::context;
use crate::context::{Context, ContextConfig};
use crate::context::{self, Context, ContextConfig};
use directories::ProjectDirs;
use std::error::Error;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tar::Archive;
use xz2::read::XzDecoder;
@@ -13,20 +13,26 @@ pub struct EphemeralContextGuard {
previous_context: String,
chroot_path: PathBuf,
build_succeeded: bool,
base_ctx: Arc<Context>,
}
impl EphemeralContextGuard {
/// Create a new ephemeral unshare context for the specified series
/// Create a new ephemeral unshare context with an explicit base context
///
/// # Arguments
/// * `series` - The distribution series (e.g., "noble", "sid")
/// * `arch` - Optional target architecture. If provided and different from host,
/// downloads a chroot for that architecture (uses qemu_binfmt transparently)
pub async fn new(series: &str, arch: Option<&str>) -> Result<Self, Box<dyn Error>> {
/// * `base_ctx` - The base context to use for creating the chroot
pub async fn new_with_context(
series: &str,
arch: Option<&str>,
base_ctx: Arc<Context>,
) -> Result<Self, Box<dyn Error>> {
let current_context_name = context::manager().current_name();
// Create a temporary directory for the chroot
let chroot_path_str = context::current().create_temp_dir()?;
let chroot_path_str = base_ctx.create_temp_dir()?;
let chroot_path = PathBuf::from(chroot_path_str);
log::debug!(
@@ -37,7 +43,7 @@ impl EphemeralContextGuard {
);
// Download and extract the chroot tarball
Self::download_and_extract_chroot(series, arch, &chroot_path).await?;
Self::download_and_extract_chroot(series, arch, &chroot_path, base_ctx.clone()).await?;
// Switch to an ephemeral context to build the package in the chroot
context::manager().set_current_ephemeral(Context::new(ContextConfig::Unshare {
@@ -49,6 +55,7 @@ impl EphemeralContextGuard {
previous_context: current_context_name,
chroot_path,
build_succeeded: false,
base_ctx,
})
}
@@ -56,7 +63,10 @@ impl EphemeralContextGuard {
series: &str,
arch: Option<&str>,
chroot_path: &PathBuf,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> {
// Clone ctx for use in create_device_nodes after download_chroot_tarball consumes it
let ctx_for_devices = ctx.clone();
// Get project directories for caching
let proj_dirs = ProjectDirs::from("com", "pkh", "pkh")
.ok_or("Could not determine project directories")?;
@@ -74,7 +84,6 @@ impl EphemeralContextGuard {
// Check for existing lockfile, and wait for a timeout if it exists
// After timeout, warn the user
let lockfile_path = tarball_path.with_extension("lock");
let ctx = context::current();
// Check if lockfile exists and wait for it to be removed
let mut wait_time = 0;
@@ -110,7 +119,7 @@ impl EphemeralContextGuard {
series,
arch
);
Self::download_chroot_tarball(series, arch, &tarball_path).await?;
Self::download_chroot_tarball(series, arch, &tarball_path, ctx).await?;
} else {
log::debug!(
"Using cached chroot tarball for {} (arch: {:?})",
@@ -125,7 +134,12 @@ impl EphemeralContextGuard {
// Create device nodes in the chroot
log::debug!("Creating device nodes in chroot...");
Self::create_device_nodes(chroot_path)?;
Self::create_device_nodes(chroot_path, ctx_for_devices.clone())?;
// Bind mount /proc from host into chroot (before entering unshare namespace)
// This allows /proc to work in containers where mounting inside unshare fails
log::debug!("Bind-mounting /proc into chroot...");
Self::bind_mount_proc(chroot_path, ctx_for_devices)?;
Ok(())
}
@@ -134,18 +148,17 @@ impl EphemeralContextGuard {
series: &str,
arch: Option<&str>,
tarball_path: &Path,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
// Create a lock file to make sure that noone tries to use the file while it's not fully downloaded
let lockfile_path = tarball_path.with_extension("lock");
ctx.command("touch")
.arg(lockfile_path.to_string_lossy().to_string())
.status()?;
// Download the keyring to the cache directory
let keyring_path =
crate::apt::keyring::download_cache_keyring(Some(ctx.clone()), series).await?;
// Download the keyring(s)
let keyring_dir =
crate::apt::keyring::download_cache_keyrings(Some(ctx.clone()), series).await?;
// Use mmdebstrap to download the tarball to the cache directory
let mut cmd = ctx.command("mmdebstrap");
@@ -153,7 +166,13 @@ impl EphemeralContextGuard {
.arg("--mode=unshare")
.arg("--include=mount,curl,ca-certificates")
.arg("--format=tar")
.arg(format!("--keyring={}", keyring_path.display()));
.arg(format!("--keyring={}", keyring_dir.display()))
// Setup hook to copy keyrings into the chroot so apt inside can use them
.arg("--setup-hook=mkdir -p \"$1/etc/apt/trusted.gpg.d\"")
.arg(format!(
"--setup-hook=cp {}/*.gpg \"$1/etc/apt/trusted.gpg.d/\"",
keyring_dir.display()
));
// Add architecture if specified
if let Some(a) = arch {
@@ -212,8 +231,10 @@ impl EphemeralContextGuard {
Ok(())
}
fn create_device_nodes(chroot_path: &Path) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
fn create_device_nodes(
chroot_path: &Path,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> {
let dev_null_path = chroot_path.join("dev/null");
let dev_zero_path = chroot_path.join("dev/zero");
@@ -270,6 +291,43 @@ impl EphemeralContextGuard {
Ok(())
}
/// Bind mount /proc from host into the chroot
/// This is done before entering the unshare namespace, so it works in containers
fn bind_mount_proc(
chroot_path: &Path,
ctx: Arc<context::Context>,
) -> Result<(), Box<dyn Error>> {
let proc_path = chroot_path.join("proc");
// Ensure /proc directory exists in chroot
fs::create_dir_all(&proc_path)?;
// Check if we're running as root
let is_root = crate::utils::root::is_root()?;
// Bind mount host's /proc into chroot (with sudo if not root)
let mut cmd = ctx.command(if is_root { "mount" } else { "sudo" });
if !is_root {
cmd.arg("mount");
}
let status = cmd
.arg("--bind")
.arg("/proc")
.arg(proc_path.to_string_lossy().to_string())
.status()?;
if !status.success() {
log::warn!(
"Could not bind-mount /proc into chroot at {}. Some packages may not install correctly.",
proc_path.display()
);
} else {
log::debug!("Bind-mounted /proc into chroot at {}", proc_path.display());
}
Ok(())
}
/// Mark the build as successful, which will trigger chroot cleanup on drop
pub fn mark_build_successful(&mut self) {
self.build_succeeded = true;
@@ -294,14 +352,26 @@ impl Drop for EphemeralContextGuard {
// Check if we're running as root to avoid unnecessary sudo
let is_root = crate::utils::root::is_root().unwrap_or(false);
// Unmount /proc from chroot before removing (ignore errors)
let proc_path = self.chroot_path.join("proc");
let _ = if is_root {
self.base_ctx.command("umount").arg(&proc_path).status()
} else {
self.base_ctx
.command("sudo")
.arg("umount")
.arg(&proc_path)
.status()
};
let result = if is_root {
context::current()
self.base_ctx
.command("rm")
.arg("-rf")
.arg(&self.chroot_path)
.status()
} else {
context::current()
self.base_ctx
.command("sudo")
.arg("rm")
.arg("-rf")

View File

@@ -1,11 +1,12 @@
/// Local binary package building
/// Directly calling 'debian/rules' in current context
use crate::context;
use crate::context::Context;
use crate::deb::find_dsc_file;
use log::warn;
use std::collections::HashMap;
use std::error::Error;
use std::path::Path;
use std::sync::Arc;
use crate::apt;
use crate::deb::cross;
@@ -20,14 +21,13 @@ pub async fn build(
cross: bool,
ppa: Option<&[&str]>,
inject_packages: Option<&[&str]>,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
// Environment
let mut env = HashMap::<String, String>::new();
env.insert("LANG".to_string(), "C".to_string());
env.insert("DEBIAN_FRONTEND".to_string(), "noninteractive".to_string());
let ctx = context::current();
// Parallel building: find local number of cores, and use that
let num_cores = ctx
.command("nproc")
@@ -52,11 +52,11 @@ pub async fn build(
if cross {
log::debug!("Setting up environment for local cross build...");
cross::setup_environment(&mut env, arch)?;
cross::ensure_repositories(arch, series)?;
cross::setup_environment(&mut env, arch, ctx.clone())?;
cross::ensure_repositories(arch, series, ctx.clone())?;
}
let mut sources = apt::sources::load(None)?;
let mut sources = apt::sources::load(Some(ctx.clone()))?;
let mut modified = false;
let mut added_ppas: Vec<(&str, &str)> = Vec::new();
@@ -117,11 +117,12 @@ pub async fn build(
}
if modified {
apt::sources::save_legacy(None, sources, "/etc/apt/sources.list")?;
apt::sources::save_legacy(Some(ctx.clone()), sources, "/etc/apt/sources.list")?;
// Download and import PPA keys for all added PPAs
for (user, ppa_name) in added_ppas {
if let Err(e) = crate::apt::keyring::download_trust_ppa_key(None, user, ppa_name).await
if let Err(e) =
crate::apt::keyring::download_trust_ppa_key(Some(ctx.clone()), user, ppa_name).await
{
warn!(
"Failed to download PPA key for {}/{}: {}",
@@ -169,24 +170,15 @@ pub async fn build(
}
// Find the actual package directory
let package_dir = crate::deb::find_package_directory(Path::new(build_root), package, version)?;
let package_dir =
crate::deb::find_package_directory(Path::new(build_root), package, version, &ctx)?;
let package_dir_str = package_dir
.to_str()
.ok_or("Invalid package directory path")?;
// Install injected packages if specified
if let Some(packages) = inject_packages {
log::info!("Installing injected packages: {:?}", packages);
let mut cmd = ctx.command("apt-get");
cmd.envs(env.clone())
.arg("-y")
.arg("--allow-downgrades")
.arg("install")
.args(packages);
let status = cmd.status()?;
if !status.success() {
return Err(format!("Could not install injected packages: {:?}", packages).into());
}
install_injected_packages(packages, &env, ctx.clone())?;
}
// Install arch-specific build dependencies
@@ -204,7 +196,7 @@ pub async fn build(
// If build-dep fails, we try to explain the failure using dose-debcheck
if !status.success() {
dose3_explain_dependencies(package, version, arch, build_root, cross)?;
dose3_explain_dependencies(package, version, arch, build_root, cross, ctx.clone())?;
return Err("Could not install build-dependencies for the build".into());
}
@@ -221,7 +213,7 @@ pub async fn build(
// If build-dep fails, we try to explain the failure using dose-debcheck
if !status.success() {
dose3_explain_dependencies(package, version, arch, build_root, cross)?;
dose3_explain_dependencies(package, version, arch, build_root, cross, ctx.clone())?;
return Err("Could not install build-dependencies for the build".into());
}
@@ -254,15 +246,67 @@ pub async fn build(
Ok(())
}
fn install_injected_packages(
packages: &[&str],
env: &HashMap<String, String>,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
log::info!("Installing injected packages: {:?}", packages);
// Separate .deb files from package names
let mut deb_files: Vec<String> = Vec::new();
let mut package_names: Vec<&str> = Vec::new();
for pkg in packages {
// Check if it's a .deb file path (ends with .deb and exists as a file)
let pkg_path = Path::new(pkg);
if pkg.ends_with(".deb") && pkg_path.exists() {
// Copy the .deb file into the build context
let dest_root = ctx.create_temp_dir()?;
let chroot_path = ctx.ensure_available(pkg_path, &dest_root)?;
log::debug!(
"Copied .deb file '{}' to chroot path '{}'",
pkg,
chroot_path.display()
);
deb_files.push(chroot_path.to_string_lossy().to_string());
} else {
package_names.push(pkg);
}
}
// Install .deb files
if !deb_files.is_empty() || !package_names.is_empty() {
log::info!("Installing .deb files: {:?}", deb_files);
let mut cmd = ctx.command("apt-get");
cmd.envs(env.clone())
.arg("-y")
.arg("--allow-downgrades")
.arg("install");
// Add the .deb file paths with ./ prefix for apt to recognize them as local files
for deb_path in &deb_files {
cmd.arg(format!("./{}", deb_path.trim_start_matches('/')));
}
if !package_names.is_empty() {
cmd.args(&package_names);
}
let status = cmd.status()?;
if !status.success() {
return Err(format!("Could not install injected packages: {:?}", deb_files).into());
}
}
Ok(())
}
fn dose3_explain_dependencies(
package: &str,
version: &str,
arch: &str,
build_root: &str,
cross: bool,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
// Construct the list of Packages files
let mut bg_args = Vec::new();
let mut cmd = ctx.command("apt-get");
@@ -284,7 +328,7 @@ fn dose3_explain_dependencies(
// Transform the dsc file into a 'Source' stanza (replacing 'Source' with 'Package')
// TODO: Remove potential GPG headers/signature
let dsc_path = find_dsc_file(build_root, package, version)?;
let dsc_path = find_dsc_file(build_root, package, version, &ctx)?;
let mut dsc_content = ctx.read_file(&dsc_path)?;
dsc_content = dsc_content.replace("Source", "Package");
ctx.write_file(

View File

@@ -3,9 +3,10 @@ mod ephemeral;
mod local;
mod sbuild;
use crate::context;
use crate::context::{self, Context};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::sync::Arc;
/// Build mode for the binary build
#[derive(PartialEq)]
@@ -17,6 +18,7 @@ pub enum BuildMode {
}
/// Build package in 'cwd' to a .deb
#[allow(clippy::too_many_arguments)]
pub async fn build_binary_package(
arch: Option<&str>,
series: Option<&str>,
@@ -25,6 +27,7 @@ pub async fn build_binary_package(
mode: Option<BuildMode>,
ppa: Option<&[&str]>,
inject_packages: Option<&[&str]>,
ctx: Option<Arc<Context>>,
) -> Result<(), Box<dyn Error>> {
let cwd = cwd.unwrap_or_else(|| Path::new("."));
@@ -57,19 +60,35 @@ pub async fn build_binary_package(
None
};
// Use provided context or get current
let base_ctx = ctx.unwrap_or_else(context::current);
let mut guard = if mode == BuildMode::Local {
Some(ephemeral::EphemeralContextGuard::new(series, chroot_arch).await?)
Some(
ephemeral::EphemeralContextGuard::new_with_context(
series,
chroot_arch,
base_ctx.clone(),
)
.await?,
)
} else {
None
};
// Get the build context - either the ephemeral context or the base context
let build_ctx = if mode == BuildMode::Local {
context::current()
} else {
base_ctx.clone()
};
// Prepare build directory
let ctx = context::current();
let build_root = ctx.create_temp_dir()?;
let build_root = build_ctx.create_temp_dir()?;
// Ensure availability of all needed files for the build
let parent_dir = cwd.parent().ok_or("Cannot find parent directory")?;
ctx.ensure_available(parent_dir, &build_root)?;
build_ctx.ensure_available(parent_dir, &build_root)?;
let parent_dir_name = parent_dir
.file_name()
.ok_or("Cannot find parent directory name")?;
@@ -87,19 +106,28 @@ pub async fn build_binary_package(
cross,
ppa,
inject_packages,
build_ctx.clone(),
)
.await?
}
BuildMode::Sbuild => sbuild::build(&package, &version, arch, series, &build_root, cross)?,
BuildMode::Sbuild => sbuild::build(
&package,
&version,
arch,
series,
&build_root,
cross,
build_ctx.clone(),
)?,
};
// Retrieve produced .deb files
let remote_files = ctx.list_files(Path::new(&build_root))?;
let remote_files = build_ctx.list_files(Path::new(&build_root))?;
for remote_file in remote_files {
if remote_file.extension().is_some_and(|ext| ext == "deb") {
let file_name = remote_file.file_name().ok_or("Invalid remote filename")?;
let local_dest = parent_dir.join(file_name);
ctx.retrieve_path(&remote_file, &local_dest)?;
build_ctx.retrieve_path(&remote_file, &local_dest)?;
}
}
@@ -114,12 +142,25 @@ pub async fn build_binary_package(
/// Find the current package directory by trying both patterns:
/// - package/package
/// - package/package-origversion
/// - custom directories from quirks configuration
pub(crate) fn find_package_directory(
parent_dir: &Path,
package: &str,
version: &str,
ctx: &context::Context,
) -> Result<PathBuf, Box<dyn Error>> {
let ctx = context::current();
// Check quirks first for custom package directories
let custom_dirs = crate::quirks::get_package_directories(package);
for custom_dir in custom_dirs {
let package_dir = parent_dir.join(&custom_dir);
if ctx.exists(&package_dir)? && ctx.exists(&package_dir.join("debian"))? {
log::debug!(
"Found package directory via quirks: {}",
package_dir.display()
);
return Ok(package_dir);
}
}
// Try package/package pattern first
let package_dir = parent_dir.join(package).join(package);
@@ -196,14 +237,14 @@ fn find_dsc_file(
build_root: &str,
package: &str,
version: &str,
ctx: &Arc<Context>,
) -> Result<PathBuf, Box<dyn Error>> {
// Strip epoch from version (e.g., "1:2.3.4-5" -> "2.3.4-5")
let version_without_epoch = version.split_once(':').map(|(_, v)| v).unwrap_or(version);
let dsc_name = format!("{}_{}.dsc", package, version_without_epoch);
let dsc_path = PathBuf::from(build_root).join(&dsc_name);
// Check if the .dsc file exists in current context
let ctx = context::current();
// Check if the .dsc file exists in context
if !ctx.exists(&dsc_path)? {
return Err(format!("Could not find .dsc file at {}", dsc_path.display()).into());
}
@@ -212,7 +253,9 @@ fn find_dsc_file(
#[cfg(test)]
mod tests {
use serial_test::serial;
use super::*;
use std::sync::Arc;
async fn test_build_end_to_end(
package: &str,
series: &str,
@@ -242,15 +285,28 @@ mod tests {
.expect("Cannot pull package");
log::info!("Successfully pulled package {}", package);
// Create a fresh local context for this test
let ctx = Arc::new(Context::new(crate::context::ContextConfig::Local));
// Change directory to the package directory
let cwd = crate::deb::find_package_directory(cwd, package, &package_info.stanza.version)
.expect("Cannot find package directory");
let cwd =
crate::deb::find_package_directory(cwd, package, &package_info.stanza.version, &ctx)
.expect("Cannot find package directory");
log::debug!("Package directory: {}", cwd.display());
log::info!("Starting binary package build...");
crate::deb::build_binary_package(arch, Some(series), Some(&cwd), cross, None, None, None)
.await
.expect("Cannot build binary package (deb)");
crate::deb::build_binary_package(
arch,
Some(series),
Some(&cwd),
cross,
None,
None,
None,
Some(ctx),
)
.await
.expect("Cannot build binary package (deb)");
log::info!("Successfully built binary package");
// Check that the .deb files are present
@@ -273,16 +329,10 @@ mod tests {
);
}
// Tests below will be marked 'serial'
// As builds are using ephemeral contexts, tests running on the same
// process could use the ephemeral context of another thread and
// interfere with each other.
// FIXME: This is not ideal. In the future, we might want to
// either explicitely pass context (instead of shared state) or
// fork for building?
// Tests no longer need to be 'serial' since each test uses its own
// explicit context instead of shared global state.
#[tokio::test]
#[test_log::test]
#[serial]
async fn test_deb_hello_ubuntu_end_to_end() {
test_build_end_to_end("hello", "noble", None, None, false).await;
}
@@ -291,7 +341,6 @@ mod tests {
#[tokio::test]
#[test_log::test]
#[cfg(target_arch = "x86_64")]
#[serial]
async fn test_deb_hello_ubuntu_cross_end_to_end() {
test_build_end_to_end("hello", "noble", None, Some("riscv64"), true).await;
}
@@ -301,7 +350,6 @@ mod tests {
/// for example.
#[tokio::test]
#[test_log::test]
#[serial]
async fn test_deb_hello_debian_sid_end_to_end() {
test_build_end_to_end("hello", "sid", None, None, false).await;
}
@@ -310,6 +358,10 @@ mod tests {
/// It is important to ensure that pkh can cross-compile linux-riscv, as
/// for risc-v hardware is still rare and cross-compilation is necessary
/// to debug and test
/// NOTE: Ideally, we want to run this in CI, but it takes more than 1h
/// to fully build the linux-riscv package on an amd64 builder, which is too
/// much time
#[ignore]
#[tokio::test]
#[test_log::test]
#[cfg(target_arch = "x86_64")]
@@ -328,7 +380,6 @@ mod tests {
#[cfg(target_arch = "x86_64")]
#[tokio::test]
#[test_log::test]
#[serial]
async fn test_deb_gcc_debian_end_to_end() {
test_build_end_to_end("gcc-15", "sid", None, None, false).await;
}

View File

@@ -1,8 +1,9 @@
/// Sbuild binary package building
/// Call 'sbuild' with the dsc file to build the package with unshare
use crate::context;
use crate::context::Context;
use std::error::Error;
use std::path::Path;
use std::sync::Arc;
pub fn build(
package: &str,
@@ -11,11 +12,11 @@ pub fn build(
series: &str,
build_root: &str,
cross: bool,
ctx: Arc<Context>,
) -> Result<(), Box<dyn Error>> {
let ctx = context::current();
// Find the actual package directory
let package_dir = crate::deb::find_package_directory(Path::new(build_root), package, version)?;
let package_dir =
crate::deb::find_package_directory(Path::new(build_root), package, version, &ctx)?;
let package_dir_str = package_dir
.to_str()
.ok_or("Invalid package directory path")?;

View File

@@ -134,6 +134,18 @@ pub async fn get_ordered_series_name(dist: &str) -> Result<Vec<String>, Box<dyn
/// Get the latest released series for a dist (excluding future releases and special cases like sid)
pub async fn get_latest_released_series(dist: &str) -> Result<String, Box<dyn Error>> {
let latest = get_n_latest_released_series(dist, 1).await?;
latest
.first()
.cloned()
.ok_or("No released series found".into())
}
/// Get the N latest released series for a dist (excluding future releases and special cases like sid)
pub async fn get_n_latest_released_series(
dist: &str,
n: usize,
) -> Result<Vec<String>, Box<dyn Error>> {
let series_info_list = get_ordered_series(dist).await?;
let today = chrono::Local::now().date_naive();
@@ -153,11 +165,11 @@ pub async fn get_latest_released_series(dist: &str) -> Result<String, Box<dyn Er
// Sort by release date descending (newest first)
released_series.sort_by(|a, b| b.release.cmp(&a.release));
if let Some(latest) = released_series.first() {
Ok(latest.series.clone())
} else {
Err("No released series found".into())
}
Ok(released_series
.iter()
.take(n)
.map(|s| s.series.clone())
.collect())
}
/// Obtain the distribution (eg. debian, ubuntu) from a distribution series (eg. noble, bookworm)
@@ -202,8 +214,11 @@ pub fn get_base_url(dist: &str) -> String {
DATA.dist.get(dist).unwrap().base_url.clone()
}
/// Obtain the URL for the archive keyring of a distribution series
pub async fn get_keyring_url(series: &str) -> Result<String, Box<dyn Error>> {
/// Obtain the URLs for the archive keyrings of a distribution series
///
/// For 'sid' and 'experimental', returns keyrings from the 3 latest releases
/// since sid needs keys from all recent releases.
pub async fn get_keyring_urls(series: &str) -> Result<Vec<String>, Box<dyn Error>> {
let dist = get_dist_from_series(series).await?;
let dist_data = DATA
.dist
@@ -212,24 +227,36 @@ pub async fn get_keyring_url(series: &str) -> Result<String, Box<dyn Error>> {
// For Debian, we need the series number to form the keyring URL
if dist == "debian" {
// Special case for 'sid' - use the latest released version
// Special case for 'sid' - use keyrings from the 3 latest released versions
if series == "sid" || series == "experimental" {
let latest_released = get_latest_released_series("debian").await?;
let series_num = get_debian_series_number(&latest_released).await?.unwrap();
// Replace {series_num} placeholder with the latest released series number
Ok(dist_data
.archive_keyring
.replace("{series_num}", &series_num))
let latest_released = get_n_latest_released_series("debian", 3).await?;
let mut urls = Vec::new();
for released_series in latest_released {
if let Some(series_num) = get_debian_series_number(&released_series).await? {
urls.push(
dist_data
.archive_keyring
.replace("{series_num}", &series_num),
);
}
}
if urls.is_empty() {
Err("No keyring URLs found for sid/experimental".into())
} else {
Ok(urls)
}
} else {
let series_num = get_debian_series_number(series).await?.unwrap();
// Replace {series_num} placeholder with the actual series number
Ok(dist_data
.archive_keyring
.replace("{series_num}", &series_num))
Ok(vec![
dist_data
.archive_keyring
.replace("{series_num}", &series_num),
])
}
} else {
// For other distributions like Ubuntu, use the keyring directly
Ok(dist_data.archive_keyring.clone())
Ok(vec![dist_data.archive_keyring.clone()])
}
}
@@ -347,14 +374,47 @@ mod tests {
}
#[tokio::test]
async fn test_get_keyring_url_sid() {
// Test that 'sid' uses the latest released version for keyring URL
let sid_keyring = get_keyring_url("sid").await.unwrap();
let latest_released = get_latest_released_series("debian").await.unwrap();
let latest_keyring = get_keyring_url(&latest_released).await.unwrap();
async fn test_get_keyring_urls_sid() {
// Test that 'sid' returns keyrings from the 3 latest released versions
let sid_keyrings = get_keyring_urls("sid").await.unwrap();
// The keyring URL for 'sid' should be the same as the latest released version
assert_eq!(sid_keyring, latest_keyring);
// Should have keyring URLs for sid
assert!(!sid_keyrings.is_empty());
assert!(sid_keyrings.len() <= 3);
// Each URL should be a valid Debian keyring URL
for url in &sid_keyrings {
assert!(
url.contains("ftp-master.debian.org/keys"),
"URL '{}' does not contain expected pattern",
url
);
}
}
#[tokio::test]
async fn test_get_keyring_url_regular_series() {
    // A regular released series such as bookworm should yield a single,
    // well-formed Debian keyring URL as the first (and only) entry.
    let urls = get_keyring_urls("bookworm").await.unwrap();
    let first_url = &urls[0];
    assert!(
        first_url.contains("ftp-master.debian.org/keys"),
        "URL '{}' does not contain expected pattern",
        first_url
    );
}
#[tokio::test]
async fn test_get_n_latest_released_series() {
    // Requesting the 3 latest released Debian series must yield between
    // 1 and 3 entries, none of which are the unreleased pseudo-series.
    let series = get_n_latest_released_series("debian", 3).await.unwrap();
    assert!((1..=3).contains(&series.len()));
    for excluded in ["sid", "experimental"] {
        assert!(!series.iter().any(|s| s == excluded));
    }
}
#[tokio::test]

View File

@@ -197,6 +197,7 @@ fn main() {
mode,
ppa,
inject_packages,
None,
)
.await
}) {

View File

@@ -153,6 +153,14 @@ impl Iterator for DebianSources {
}
}
// Parse Vcs-Git field: it may contain just a URL, or URL followed by -b <branch>
// e.g., "https://salsa.debian.org/science-team/paraview.git -b debian/latest"
let vcs_git = fields.get("Vcs-Git").map(|vcs| {
// Split on whitespace and take the first part (the URL)
// The URL should not contain spaces, so this is safe
vcs.split_whitespace().next().unwrap_or(vcs).to_string()
});
Some(PackageStanza {
package: fields.get("Package").unwrap().to_string(),
version: fields.get("Version").unwrap().to_string(),
@@ -161,7 +169,7 @@ impl Iterator for DebianSources {
.get("Format")
.cloned()
.unwrap_or_else(|| "1.0".to_string()),
vcs_git: fields.get("Vcs-Git").cloned(),
vcs_git,
vcs_browser: fields.get("Vcs-Browser").cloned(),
files,
})

View File

@@ -504,14 +504,17 @@ pub async fn pull(
// Depending on target series, we pick target branch; if latest series is specified,
// we target the development branch, i.e. the default branch
// Only use Ubuntu-specific branch naming if the VCS is from Launchpad
let is_launchpad_vcs = url.contains("launchpad.net");
let branch_name = if crate::distro_info::get_ordered_series_name(package_info.dist.as_str())
.await?[0]
!= *series
{
if package_info.dist == "ubuntu" {
if package_info.dist == "ubuntu" && is_launchpad_vcs {
Some(format!("{}/{}", package_info.dist, series))
} else {
// Debian does not have reliable branch naming...
// Also, Ubuntu packages with salsa VCS don't have Ubuntu-specific branches
// For now, we skip that part and clone default
// TODO: Inspect remote branches and tags for matches
None
@@ -615,18 +618,27 @@ mod tests {
let head = repo.head().unwrap();
let name = head.name().unwrap();
// Check if the VCS is from Launchpad - only Launchpad has Ubuntu-specific branches
let is_launchpad_vcs = info
.preferred_vcs
.as_ref()
.map(|url| url.contains("launchpad.net"))
.unwrap_or(false);
if let Some(s) = series {
// The local branch should be named dist/series
// We skip debian for now as it does not have a reliable naming scheme
if info.dist == "ubuntu" {
// Also skip Ubuntu packages with non-Launchpad VCS (e.g., salsa.debian.org)
if info.dist == "ubuntu" && is_launchpad_vcs {
assert_eq!(name, format!("refs/heads/{0}/{s}", info.dist));
}
} else {
// The local branch should be named ubuntu/devel for Ubuntu
// Debian unfortunately does not have a reliable naming scheme
// Also skip Ubuntu packages with non-Launchpad VCS
// Given that there was no series specified, and this is a test,
// we require to have a distribution specified
if dist.unwrap() == "ubuntu" {
if dist.unwrap() == "ubuntu" && is_launchpad_vcs {
assert_eq!(name, "refs/heads/ubuntu/devel");
}
}
@@ -690,4 +702,15 @@ mod tests {
async fn test_pull_hello_ubuntu_latest_end_to_end() {
test_pull_package_end_to_end("hello", None, Some("ubuntu"), None).await;
}
/// Test for paraview - a package that has no Ubuntu Launchpad code,
/// only a Debian salsa repository, even on Ubuntu.
///
/// Furthermore, paraview has a Vcs-Git value of:
/// `Vcs-Git: https://salsa.debian.org/science-team/paraview.git -b debian/latest`
/// Since the field contains not only a URL but also a `-b <branch>` option,
/// it needs special handling (URL extraction), which this test exercises
/// end to end against the "noble" series.
#[tokio::test]
async fn test_pull_paraview_ubuntu_end_to_end() {
    test_pull_package_end_to_end("paraview", Some("noble"), None, None).await;
}
}

82
src/put.rs Normal file
View File

@@ -0,0 +1,82 @@
use std::path::Path;
use std::process::Command;
use crate::ProgressCallback;
use std::fs;
use pkh::package_info::parse_control_file;
/// Execute the `put` subcommand to upload package to PPA or archive
///
/// Reads the package name from `debian/control`, locates exactly one `.dsc`
/// file in the working directory, and uploads it with `dput`.
///
/// # Arguments
/// - series: Target distribution series (e.g. "focal")
/// - dist: Target distribution (e.g. "ubuntu")
/// - version: Package version override
/// - ppa: Target PPA in "user/ppa-name" format
/// - archive: Set to true for official archive uploads
/// - cwd: Current working directory containing source package
/// - progress: Progress callback for UI updates
///
/// # Errors
/// Fails when `debian/control` is unreadable, no (or more than one) `.dsc`
/// file is present, neither `ppa` nor `archive` is given, or `dput` cannot
/// be spawned or exits with a non-zero status.
pub async fn put(
    series: Option<&str>,
    dist: Option<&str>,
    version: Option<&str>,
    ppa: Option<&str>,
    archive: bool,
    cwd: Option<&Path>,
    progress: ProgressCallback<'_>,
) -> Result<(), Box<dyn std::error::Error>> {
    // NOTE(review): series, dist and version are currently unused here; they are
    // kept for interface stability — confirm whether they should select the
    // dput target or changes file in a follow-up.
    let _ = (series, dist, version);

    let current_dir = cwd.unwrap_or_else(|| Path::new("."));

    // Determine the source package name from debian/control.
    let control_path = current_dir.join("debian/control");
    let control_content = fs::read_to_string(&control_path).map_err(|e| {
        format!("Failed to read debian/control: {}. Are you in a source package directory?", e)
    })?;
    let package_info = parse_control_file(&control_content)?;
    let package = package_info.source.ok_or("Could not determine package name from debian/control")?;

    if let Some(cb) = progress {
        cb(&package, "Uploading package...", 0, 1);
    }

    // Locate the .dsc files in the current directory; exactly one is required.
    let dsc_files: Vec<_> = current_dir
        .read_dir()?
        .filter_map(|entry| {
            let path = entry.ok()?.path();
            (path.extension()? == "dsc").then_some(path)
        })
        .collect();
    // Reject ambiguity before touching any file.
    if dsc_files.len() > 1 {
        return Err("Multiple .dsc files found - please make sure only one exists".into());
    }
    let dsc_file = dsc_files.first().ok_or("No .dsc file found in current directory")?;

    // Resolve the dput target: official archive takes precedence over a PPA.
    let target = if archive {
        println!("Uploading {} to official archive", dsc_file.display());
        "ubuntu".to_string()
    } else if let Some(ppa) = ppa {
        println!("Uploading {} to PPA: {}", dsc_file.display(), ppa);
        format!("ppa:{}", ppa)
    } else {
        return Err("Must specify either --ppa for PPA upload or --archive for official archive".into());
    };

    // Bug fix: `.status()?` only propagates spawn failures; a non-zero dput
    // exit status was previously ignored and the upload reported as success.
    let status = Command::new("dput").arg(&target).arg(dsc_file).status()?;
    if !status.success() {
        return Err(format!("dput exited with status {}", status).into());
    }

    if let Some(cb) = progress {
        cb(&package, "Upload complete", 1, 1);
    }
    Ok(())
}

View File

@@ -17,6 +17,12 @@ pub struct OperationQuirks {
/// Additional parameters for the operation
#[serde(default)]
pub parameters: HashMap<String, serde_yaml::Value>,
/// Custom package directories to try when looking for the package source
/// This is useful for packages that don't follow the standard naming conventions
/// like linux packages that use directories like "linux-main" or other custom names
#[serde(default)]
pub package_directory: Vec<String>,
}
/// Quirks for a specific package
@@ -75,3 +81,31 @@ pub fn get_deb_extra_dependencies(package: &str) -> Vec<String> {
Vec::new()
}
/// Get package directories from quirks configuration
///
/// This function returns the list of custom package directories to try
/// when looking for the package source directory.
///
/// # Arguments
/// * `package` - The package name
///
/// # Returns
/// * `Vec<String>` - List of package directories to try, or empty vector if none
pub fn get_package_directories(package: &str) -> Vec<String> {
if let Some(quirks) = get_package_quirks(&QUIRKS_DATA, package) {
// Check deb quirks first, then pull quirks
if let Some(deb_quirks) = &quirks.deb
&& !deb_quirks.package_directory.is_empty()
{
return deb_quirks.package_directory.clone();
}
if let Some(pull_quirks) = &quirks.pull
&& !pull_quirks.package_directory.is_empty()
{
return pull_quirks.package_directory.clone();
}
}
Vec::new()
}