#!/bin/echo "This file is sourced, not run" # Lots of reusable functions. This file is sourced, not run. # Output path to cross compiler. proc cc_path { local i if test ! -z $MY_CROSS_PATH { setvar CC_PREFIX = "$MY_CC_PREFIX" test -z $CC_PREFIX && echo "MY_CROSS_PATH without MY_CC_PREFIX" >&2 && dienow echo -n "$MY_CROSS_PATH:" return } # Output cross it if exists, else simple. If neither exists, output simple. for i in "$BUILD"/{,simple-}cross-compiler-"$1/bin" { test -e "$i/$1-cc" && break } echo -n "$i:" } proc base_architecture { setvar ARCH = "$1" source "$CONFIG_DIR/$1" } proc load_target { # Get target platform from first command line argument. setvar ARCH_NAME = "$1" setvar CONFIG_DIR = ""$SOURCES/targets"" # Read the relevant config file. if test -f "$CONFIG_DIR/$1" { base_architecture $ARCH_NAME setvar CONFIG_DIR = '' } elif test -f "$CONFIG_DIR/$1/settings" { source "$CONFIG_DIR/$1/settings" test -z $ARCH && dienow "No base_architecture" } else { echo "Supported architectures: " ls $CONFIG_DIR exit 1 } # Which platform are we building for? export WORK="${BUILD}/temp-$ARCH_NAME" # Say "unknown" in two different ways so it doesn't assume we're NOT # cross compiling when the host and target are the same processor. (If host # and target match, the binutils/gcc/make builds won't use the cross compiler # during root-filesystem.sh, and the host compiler links binaries against the # wrong libc.) export_if_blank CROSS_HOST=$(uname -m)-walrus-linux export_if_blank CROSS_TARGET=${ARCH}-unknown-linux # Setup directories and add the cross compiler to the start of the path. setvar STAGE_DIR = ""$BUILD/${STAGE_NAME}-${ARCH_NAME}"" if test -z $KEEP_STAGEDIR { blank_tempdir $STAGE_DIR } else { mkdir -p $STAGE_DIR || dienow } NO_CLEANUP=${NO_CLEANUP/temp//} blank_tempdir $WORK export PATH="$(cc_path "$ARCH")$PATH" test ! -z $HOST_ARCH && test $HOST_ARCH != $ARCH && setvar PATH = ""$(cc_path "$HOST_ARCH")$PATH"" export_if_blank CC_PREFIX="${ARCH}-" setvar DO_CROSS = ""CROSS_COMPILE=$CC_PREFIX"" return 0 } # Note that this sources the file, rather than calling it as a separate # process. That way it can set environment variables if it wants to. proc build_section { # Don't build anything statically in host-tools, glibc is broken. # See http://people.redhat.com/drepper/no_static_linking.html for # insane rant from the glibc maintainer about why he doesn't care. is_in_list $1 $BUILD_STATIC && test ! -z $ARCH && setvar STATIC_FLAGS = ""--static"" setvar OLDCPUS = "$CPUS" setvar OLDNOCLEAN = "$NO_CLEANUP" is_in_list $1 $DEBUG_PACKAGE && setvar CPUS = '1' && setvar NO_CLEANUP = '1' if test -e "$SOURCES/sections/$1".build { setupfor $1 source "$SOURCES/sections/$1".build cleanup } else { announce $1 source "$SOURCES"/sections/"$1".sh } setvar CPUS = "$OLDCPUS" setvar NO_CLEANUP = "$OLDNOCLEAN" } # Find appropriate miniconfig file proc getconfig { for i in {$ARCH_NAME,$ARCH}/miniconfig-$1 { test -f "$CONFIG_DIR/$i" && cat "$CONFIG_DIR/$i" && return } # Output baseconfig, then append $1_CONFIG (converting $1 to uppercase) cat "$SOURCES/baseconfig-$1" eval "echo \"\${$(echo $1 | tr a-z A-Z)_CONFIG}\"" } # Find all files in $STAGE_DIR newer than $CURSRC. proc recent_binary_files { setvar PREVIOUS = '' shell {cd $STAGE_DIR || dienow find . -depth -newer "$CURSRC/BUILD-TIMESTAMP" \ | sed -e 's/^.//' -e 's/^.//' -e '/^$/d' } | while read i { setvar TEMP = "${PREVIOUS##"$i"/}" if test $[${#PREVIOUS}-${#TEMP}] -ne $[${#i}+1] { # Because the expanded $i might have \ chars in it, that's why. 
# Find all files in $STAGE_DIR newer than $CURSRC.

proc recent_binary_files {
  setvar PREVIOUS = ''
  shell {
    cd $STAGE_DIR || dienow
    find . -depth -newer "$CURSRC/BUILD-TIMESTAMP" \
      | sed -e 's/^.//' -e 's/^.//' -e '/^$/d'
  } | while read i {
    setvar TEMP = "${PREVIOUS##"$i"/}"

    if test $[${#PREVIOUS}-${#TEMP}] -ne $[${#i}+1] {
      # Emit the name and the NUL terminator as two separate echoes, because
      # the expanded $i might have \ chars in it and -e would mangle them.
      echo -n $i
      echo -ne '\0'
    }
    setvar PREVIOUS = "$i"
  }
}

# Delete a working copy of source code once the build's done.

proc cleanup {
  # If package build exited with an error, do not continue.
  test $? -ne 0 && dienow

  if test ! -z $BINARY_PACKAGE_TARBALLS {
    setvar TARNAME = "$PACKAGE-$STAGE_NAME-${ARCH_NAME}.tar.gz"
    test ! -z "$(recent_binary_files)" && echo -n "Creating $TARNAME" && do {
      recent_binary_files | xargs -0 tar -czvf \
        "$BUILD/${TARNAME}" -C $STAGE_DIR || dienow
    } | dotprogress
  }

  if test ! -z $NO_CLEANUP {
    echo "skip cleanup $PACKAGE $[join(ARGV)]"
    return
  }

  # Loop deleting directories

  cd $WORK || dienow
  for i in $WORKDIR_LIST {
    echo "cleanup $i"
    rm -rf $i || dienow
  }
  setvar WORKDIR_LIST = ''
}

# Create a working directory under TMPDIR, deleting existing contents (if any),
# and tracking created directories so cleanup can delete them automatically.

proc blank_workdir {
  setvar WORKDIR_LIST = "$1 $WORKDIR_LIST"
  NO_CLEANUP= blank_tempdir "$WORK/$1"
  cd "$WORK/$1" || dienow
}

# Extract package $1

proc setupfor {
  export WRAPPY_LOGPATH="$BUILD/logs/cmdlines.${ARCH_NAME}.${STAGE_NAME}.setupfor"

  # Make sure the source is already extracted and up-to-date.
  extract_package $1 || exit 1
  setvar SNAPFROM = "$(package_cache "$1")"

  # Delete old working copy (even in the NO_CLEANUP case) then make a new
  # tree of links to the package cache.

  echo "Snapshot '$PACKAGE'..."

  # Try hardlink, then symlink, then normal (noclobber) copy
  for LINKTYPE in l s n {
    if test -z $REUSE_CURSRC {
      blank_workdir $PACKAGE
      setvar CURSRC = "$(pwd)"
    }

    cp -${LINKTYPE}fR "$SNAPFROM/"* $CURSRC && break
  }

  cd $CURSRC || dienow
  export WRAPPY_LOGPATH="$BUILD/logs/cmdlines.${ARCH_NAME}.${STAGE_NAME}.$1"

  # Ugly bug workaround: timestamp granularity in a lot of filesystems is only
  # 1 second, so find -newer misses things installed in the same second, so we
  # make sure it's a new second before we start actually doing anything.

  if test ! -z $BINARY_PACKAGE_TARBALLS {
    touch "$CURSRC/BUILD-TIMESTAMP" || dienow
    setvar TIME = "$(date +%s)"
    while true {
      test $TIME != $(date +%s) && break
      sleep .1
    }
  }
}

# Given a filename.tar.ext, return the version number.

proc getversion {
  echo $1 | sed -e 's/.*-\(\([0-9\.]\)*\([_-]rc\)*\(-pre\)*\([0-9][a-zA-Z]\)*\)*\(\.tar\..z2*\)$/'"$2"'\1/'
}

# Figure out what version of a package we last built

proc get_download_version {
  getversion $(sed -n 's@URL=.*/\(.[^ ]*\).*@\1@p' "$TOP/download.sh" | grep ${1}-)
}

# Identify git, mercurial, or subversion revision, or release version number.

proc identify_release {
  setvar DIR = "$SRCDIR/$1"
  if test -d $DIR {
    # Run each check via a command substitution that does its own cd, so a hit
    # can return from this function directly.  (Wrapping the checks in a
    # subshell block would swallow the return and also print the release
    # message below.)
    setvar ID = "$(cd $DIR && git log -1 --format=%H 2>/dev/null)"
    test ! -z $ID && echo git $ID && return

    setvar ID = "$(cd $DIR && hg identify -n 2>/dev/null)"
    test ! -z $ID && echo hg $ID && return

    setvar ID = "$(cd $DIR && svn info 2>/dev/null | sed -n "s/^Revision: //p")"
    test ! -z $ID && echo svn $ID && return
  }

  echo release version $(get_download_version $1)
}
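# Worked example (illustrative: the tarball name below is made up, not a
# version this file pins):
#
#   getversion busybox-1.2.3.tar.bz2        # prints "1.2.3"
#   getversion busybox-1.2.3.tar.bz2 'v'    # prints "v1.2.3" ($2 is a prefix)
#
# get_download_version relies on the first form to recover the version
# recorded in the URL= lines of download.sh.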
# Create a README identifying package versions in current build.

proc do_manifest {
  # Grab build script version number

  test -z $SCRIPT_VERS && setvar SCRIPT_VERS = "mercurial rev $(cd "$TOP"; hg identify -n 2>/dev/null)"

  cat <<< """
Built on $(date +%F) from:

  Build script:
    Aboriginal Linux (http://landley.net/aboriginal) $SCRIPT_VERS

  Base packages:
    uClibc (http://uclibc.org) $(identify_release uClibc)
    BusyBox (http://busybox.net) $(identify_release busybox)
    Linux (http://kernel.org/pub/linux/kernel) $(identify_release linux)
    toybox (http://landley.net/toybox) $(identify_release toybox)

  Toolchain packages:
    Binutils (http://www.gnu.org/software/binutils/) $(identify_release binutils)
    GCC (http://gcc.gnu.org) $(identify_release gcc-core)
    gmake (http://www.gnu.org/software/make) $(identify_release make)
    bash (ftp://ftp.gnu.org/gnu/bash) $(identify_release bash)

  Optional packages:
    distcc (http://distcc.samba.org) $(identify_release distcc)
    uClibc++ (http://cxx.uclibc.org) $(identify_release uClibc++)
"""
}

# When building with a base architecture, symlink to the base arch name.

proc link_arch_name {
  test $ARCH == $ARCH_NAME && return 0

  rm -rf "$BUILD/$2" && ln -s $1 "$BUILD/$2" || dienow
}

# Check if this target has a base architecture that's already been built.
# If so, link to it and exit now.

proc check_for_base_arch {
  # If we're building something with a base architecture, symlink to actual
  # target.

  if test $ARCH != $ARCH_NAME {
    link_arch_name $STAGE_NAME-{"$ARCH","$ARCH_NAME"}
    test -e $STAGE_NAME-"$ARCH".tar.gz && link_arch_name $STAGE_NAME-{"$ARCH","$ARCH_NAME"}.tar.gz

    if test -e "$BUILD/$STAGE_NAME-$ARCH" {
      announce "Using existing ${STAGE_NAME}-$ARCH"
      return 1
    } else {
      mkdir -p "$BUILD/$STAGE_NAME-$ARCH" || dienow
    }
  }
}

proc create_stage_tarball {
  # Remove the temporary directory, if empty

  rmdir $WORK 2>/dev/null

  # Handle linking to base architecture if we just built a derivative target.

  cd $BUILD || dienow
  link_arch_name $STAGE_NAME-{$ARCH,$ARCH_NAME}

  if test -z $NO_STAGE_TARBALLS {
    echo -n "creating $STAGE_NAME-${ARCH}.tar.gz"
    do {
      tar czvf "$STAGE_NAME-${ARCH}".tar.gz "$STAGE_NAME-${ARCH}" || dienow
    } | dotprogress

    link_arch_name $STAGE_NAME-{$ARCH,$ARCH_NAME}.tar.gz
  }
}

# Create colon-separated path for $HOSTTOOLS and all fallback directories.
# (Fallback directories are to support ccache and distcc on the host.)

proc hosttools_path {
  local X

  echo -n $HOSTTOOLS
  setvar X = '1'
  while [ -e "$HOSTTOOLS/fallback-$X" ] {
    echo -n ":$HOSTTOOLS/fallback-$X"
    setvar X = $[$X+1]
  }
}
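# Worked example (illustrative: the directory names are assumptions).  With
# HOSTTOOLS=/home/me/build/host and fallback-1 plus fallback-2 present,
#
#   PATH="$(hosttools_path)"
#
# sets PATH to
# /home/me/build/host:/home/me/build/host/fallback-1:/home/me/build/host/fallback-2
# so the sanitized host tools are found before the ccache/distcc fallbacks.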
# Archive directory $1 to file $2 (plus extension), type SYSIMAGE_TYPE

proc image_filesystem {
  echo "make $SYSIMAGE_TYPE $2"

  # Embed an initramfs cpio

  if test $SYSIMAGE_TYPE == "cpio" || test $SYSIMAGE_TYPE == "rootfs" {
    # Borrow gen_init_cpio.c out of package cache copy of Linux source
    extract_package linux &&
      $CC "$(package_cache $PACKAGE)/usr/gen_init_cpio.c" -o "$WORK"/my_gen_init_cpio || dienow

    "$WORK"/my_gen_init_cpio <(
      "$SOURCES"/toys/gen_initramfs_list.sh "$1" || dienow
      [ ! -e "$1"/init ] && echo "slink /init /sbin/init.sh 755 0 0"
      [ ! -d "$1"/dev ] && echo "dir /dev 755 0 0"
      echo "nod /dev/console 660 0 0 c 5 1"
    ) | gzip -9 > "$2.cpio.gz" || dienow

    echo Initramfs generated.
  } elif test $SYSIMAGE_TYPE == "ext2" || test $SYSIMAGE_TYPE == "ext3" {
    # Generate an ext2 filesystem image from the $1 directory, with a
    # temporary file defining the /dev nodes for the new filesystem.

    test -z $SYSIMAGE_HDA_MEGS && setvar SYSIMAGE_HDA_MEGS = '64'

    # Produce a filesystem with the currently used space plus 20% for
    # filesystem overhead, which should always be big enough.

    setvar BLOCKS = $[1024*(($(du -m -s "$1" | awk '{print $1}')*12)/10)]
    test $BLOCKS -lt 4096 && setvar BLOCKS = '4096'
    setvar FILE = "$2.$SYSIMAGE_TYPE"

    echo "/dev d 755 0 0 - - - - -" > "$WORK/devs" &&
      echo "/dev/console c 640 0 0 5 1 0 0 -" >> "$WORK/devs" &&
      genext2fs -z -D "$WORK/devs" -d $1 -b $BLOCKS -i 1024 $FILE &&
      rm "$WORK/devs" || dienow

    # Extend image size to HDA_MEGS if necessary, keeping it sparse.  (Feeding
    # a larger -b size to genext2fs is insanely slow, and not particularly
    # sparse.)

    if test ! -z $SYSIMAGE_HDA_MEGS && test $(( $(stat -c %s "$FILE") / (1024*1024) )) -lt $SYSIMAGE_HDA_MEGS {
      echo "resizing image to $SYSIMAGE_HDA_MEGS"
      resize2fs $FILE ${SYSIMAGE_HDA_MEGS}M || dienow
    }

    # Zero the mount count and check interval, and add a journal for ext3.
    tune2fs -c 0 -i 0 $([ "$SYSIMAGE_TYPE" == "ext3" ] && echo -j) $FILE || dienow

    echo $SYSIMAGE_TYPE generated
  } elif test $SYSIMAGE_TYPE == "squashfs" {
    mksquashfs $1 "$2.sqf" -noappend -all-root ${FORK:+-no-progress} || dienow
  } else {
    echo "Unknown image type $SYSIMAGE_TYPE" >&2
    dienow
  }
}
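# Worked example (illustrative: the sizes and paths are assumptions, not
# defaults from this file).  For a root filesystem directory that du reports
# as 100 megabytes, the ext2/ext3 branch above computes
#
#   BLOCKS = 1024*((100*12)/10) = 122880 one-kilobyte blocks
#
# (the used space plus roughly 20% slack), so
#
#   SYSIMAGE_TYPE=ext2 image_filesystem "$BUILD/root-filesystem-$ARCH" "$WORK/image"
#
# would produce a sparse "$WORK/image.ext2", grown to SYSIMAGE_HDA_MEGS
# (default 64) megabytes if the generated image comes out smaller than that.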