#!/usr/bin/env bash
#
# Build Alpine Linux packages: baseline, OSH as /bin/sh, OSH as /bin/bash
# See regtest/aports.md
#
# Usage:
#   regtest/aports-run.sh <function name>
#
# Common usage:
#
#   export APORTS_EPOCH=2025-08-04-foo            # optional override
#   $0 build-many-shards-overlayfs shard{0..16}   # build all 17 shards in 2 configs
#
# Also useful:
#
#   $0 fetch-packages $pkg_filter $a_repo  # alpine repo is 'main' or 'community'
#
#   $0 fetch-packages 100,300p  # packages 100-300
#   $0 fetch-packages '.*'      # all packages
#
# Look for results in _tmp/aports-build/
#
# Build many packages:
#
#   $0 build-many-packages-overlayfs shard9 osh-as-sh community
#   $0 build-many-packages-overlayfs shardA osh-as-sh   # main is the default $a_repo
#
# Build a single package:
#
#   $0 build-package-overlayfs osh-as-sh userspace-rcu
#   $0 build-package-overlayfs osh-as-sh xterm community  # community repo
#
# Drop into a shell:
#   INTERACTIVE=1 $0 build-package-overlayfs osh-as-sh userspace-rcu
#
# PKG_FILTER
#   shard[0-9]+      - shard3 is packages 301 to 400
#   [0-9]+           - 42 means build the first 42 packages
#   [0-9]+,[0-9]+p   - 100,300p means packages 100 to 300 (sed syntax)
#   ALL              - all packages
#   .*               - egrep pattern matching all packages
#   curl             - egrep pattern matching 'curl'
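#   disagree-<epoch>  - exact package names listed in _tmp/disagree-<epoch>.txt (see copy-disagree)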
#
# Preview packages:
#
#   $0 package-dirs shard9 community

: ${LIB_OSH=stdlib/osh}
source $LIB_OSH/bash-strict.sh
source $LIB_OSH/task-five.sh

source regtest/aports-common.sh

#
# Config
#

show-config() {
  enter-rootfs sh -c '
  ls -l /bin/sh /bin/ash /bin/bash
  '
}

save-default-config() {
  enter-rootfs sh -c '
  set -x
  dest=/bin/bash.ORIG
  cp /bin/bash $dest
  '
  show-config
}


set-baseline() {
  # ensure we have the default config
  enter-rootfs sh -c '
  set -x
  ln -s -f /bin/busybox /bin/sh
  ln -s -f /bin/busybox /bin/ash
  cp /bin/bash.ORIG /bin/bash
  '
  show-config
}

set-osh-as-X() {
  local x=$1

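  # Note: in sh -c '<script>' dummy0 "$x", the word after the script ("dummy0")
  # becomes $0 inside the script, and "$x" becomes $1.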
  enter-rootfs sh -c '
  x=$1
  set -x
  if ! test -f /usr/local/bin/oils-for-unix; then
    echo "Build Oils first"
    exit
  fi
  ln -s -f /usr/local/bin/oils-for-unix /bin/$x
  ' dummy0 "$x"
  show-config
}

set-osh-as-sh() {
  set-osh-as-X sh
}

set-osh-as-ash() {
  set-osh-as-X ash
}

set-osh-as-bash() {
  set-osh-as-X bash
}

#
# Run
#

package-dirs() {
  # lz gives 5 packages: some fail at baseline
  # lzip: a single fast package
  # mpfr4: OSH bug, and big log
  # yash: make sure it doesn't hang
  local package_filter=${1:-'lz|mpfr|yash'}
  local a_repo=${2:-main}  # or 'community'

  local -a prefix

  if [[ $package_filter = 'ALL' ]]; then
    prefix=( cat )

  # 100 means the first 100 packages
  elif [[ $package_filter =~ ^[0-9]+$ ]]; then
    prefix=( head -n $package_filter )

  # 100,300p means lines 100 to 300
  elif [[ $package_filter =~ ^[0-9]+,[0-9]+p$ ]]; then
    prefix=( sed -n $package_filter )

  elif [[ $package_filter =~ ^shard([0-9]+)$ ]]; then
    # shards of 100 packages

    local shard_num=${BASH_REMATCH[1]}
    #echo shard=$shard_num

    local range
    # shard 0 is packages 1 to 100
    # shard 9 is 901 to 1000
    # shard 10 is 1001 to 1100
    case $shard_num in
      # sed doesn't like 000,099
      0) range='1,100p' ;;
      *) range="${shard_num}01,$(( shard_num + 1 ))00p" ;;
    esac
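    # e.g. shard3 -> sed -n '301,400p', shard10 -> sed -n '1001,1100p'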

    prefix=( sed -n "$range" )

  # shardA, shardB For testing the combined report
  elif [[ $package_filter =~ ^shard([A-Z]+)$ ]]; then
    local shard_name=${BASH_REMATCH[1]}
    case $a_repo in
      main)
        case $shard_name in
          A) package_filter='^gzip' ;;  # failure
          B) package_filter='^xz' ;;  # failure
          C) package_filter='^lz' ;;  # 3 packages
          D) package_filter='^jq$' ;;  # produces autotools test-suite.log
          E) package_filter='^py3-p' ;;  # many packages in parallel
          P) package_filter='^xz$|^shorewall' ;;  # patches
          *) package_filter='^perl-http-daemon' ;;  # test out perl
        esac
        ;;
      community)
        case $shard_name in
          A) package_filter='^py3-zulip' ;;  # one Python package
          B) package_filter='^xterm' ;;  # one C package
          C) package_filter='^shfmt' ;;  # one Go package
          D) package_filter='^shellspec' ;;  # OSH disagreement because of 'var'
          *) package_filter='^shell' ;;  # a bunch of packages
        esac
        ;;
      *)
        die "Invalid a_repo $a_repo"
        ;;
    esac

    prefix=( egrep "$package_filter" )

  elif [[ $package_filter =~ ^disagree-(.*)+$ ]]; then
    local filename=${BASH_REMATCH[1]}
    # A file of EXACT package names, not patterns
    # See copy-disagree
    local package_file="_tmp/$package_filter.txt"
    comm -1 -2 <(sort $package_file) <(sort _tmp/apk-${a_repo}-manifest.txt)
    return

  else
    prefix=( egrep "$package_filter" )

  fi

  "${prefix[@]}" _tmp/apk-${a_repo}-manifest.txt
}

copy-disagree() {
  ### Determine what to run

  local epoch=${1:-2025-09-18-bash}
  cp -v \
    _tmp/aports-report/$epoch/disagree-packages.txt \
    _tmp/disagree-$epoch.txt
}
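
# The copied file can then be used as a PKG_FILTER, e.g. 'disagree-2025-09-18-bash'
# (handled by the disagree-* branch of package-dirs above).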

do-packages() {
  ### Download sources - abuild puts them in /var/cache/distfiles
  local action=${1:-fetch}
  local package_filter=${2:-}
  local a_repo=${3:-main}
  # flags to pass to the inner shell
  local sh_flags=${4:-'-e -u'}  # pass just '-u' to disable -e

  # 6 seconds for 10 packages
  # There are ~1600 packages
  # So if there are 20 shards, each shard could have 10?

  local -a package_dirs
  package_dirs=( $(package-dirs "$package_filter" "$a_repo") )

  echo "${package_dirs[@]}"
  #return

  time enter-rootfs-user sh $sh_flags -c '

  action=$1
  a_repo=$2
  shift 2
  for dir in "$@"; do
    time abuild -r -C aports/$a_repo/$dir "$action"
  done
  ' dummy0 "$action" "$a_repo" "${package_dirs[@]}"
}

fetch-packages() {
  local package_filter=${1:-}
  local a_repo=${2:-main}

  # pass '-u' only (non-empty), so the inner shell does NOT run with -e
  do-packages fetch "$package_filter" "$a_repo" '-u'
}
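
# Example: fetch sources for the first 10 packages in the 'main' repo
#   $0 fetch-packages 10 main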

banner() {
  echo
  echo "=== $@"
  echo
}

build-package-overlayfs() {
  local config=${1:-baseline}
  local pkg=${2:-lua5.4}
  local a_repo=${3:-main}

  # baseline stack:
  #   _chroot/aports-build
  #   _chroot/package-layers/baseline/gzip    # upper dir / layer dir
  #
  # osh-as-sh stack:
  #   _chroot/aports-build
  #   _chroot/osh-as-sh.overlay/layer         # this has the symlink
  #   _chroot/package-layers/osh-as-sh/gzip   # upper dir / layer dir
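  #
  # Illustrative mount options for config=osh-as-sh, pkg=gzip, default slot 99:
  #   lowerdir=_chroot/osh-as-sh.overlay/layer:_chroot/aports-build,upperdir=_chroot/package-layers/osh-as-sh/gzip,workdir=_chroot/package-slot99.overlay/work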

  # allow concurrency
  local xargs_slot="${XARGS_SLOT:-99}"
  local ov_base_dir=_chroot/package-slot${xargs_slot}.overlay

  local merged=$ov_base_dir/merged
  local work=$ov_base_dir/work

  local layer_dir=_chroot/package-layers/$config/$pkg
  mkdir -p $merged $work $layer_dir

  local overlay_opts
  case $config in
    baseline)
      overlay_opts="lowerdir=$CHROOT_DIR,upperdir=$layer_dir,workdir=$work"
      ;;
    osh-as-sh)
      local osh_as_sh=_chroot/osh-as-sh.overlay/layer
      overlay_opts="lowerdir=$osh_as_sh:$CHROOT_DIR,upperdir=$layer_dir,workdir=$work"
      ;;
    *)
      die "Invalid config $config"
      ;;
  esac

  sudo mount \
    -t overlay \
    aports-package \
    -o "$overlay_opts" \
    $merged

  local -a prefix
  if test -n "${XARGS_SLOT:-}"; then
    local x=$XARGS_SLOT

    # run slot 0 on cores 0 and 1
    # run slot 9 on cores 18 and 19
    local cores="$(( x*2 )),$(( x*2 + 1 ))"

    # oversubscribe
    # run slot 0 on cores 0 and 1
    # run slot 19 on cores 19 and 0
    #local cores="$(( x )),$(( (x + 1) % NUM_CORES ))"
    prefix=( taskset -c "$cores" )
  fi

  "${prefix[@]}" $merged/enter-chroot -u udu sh -c '
  cd oils

  # show the effect of the overlay
  #ls -l /bin/sh

  regtest/aports-guest.sh build-one-package "$@"
  ' dummy0 "$pkg" "$a_repo" "$xargs_slot"

  if test -n "${INTERACTIVE:-}"; then
    echo "Starting interactive shell in overlayfs environment for package $a_repo/$pkg"
    echo "Rebuild: abuild -f -r -C ~/aports/$a_repo/$pkg"
    echo " Help: abuild -h"
    # If the last command in the child shell exited non-zero, then Ctrl-D / exit
    # will report that error code to the parent.  If we don't ignore that error,
    # we will exit early and leave the package overlay mounted.
    set +o errexit
    $merged/enter-chroot -u udu
    set -o errexit
  fi

  unmount-loop $merged
}

# old serial version
OLD-build-many-packages-overlayfs() {
  local package_filter=${1:-}
  local config=${2:-baseline}
  local a_repo=${3:-main}

  local -a package_dirs
  package_dirs=( $(package-dirs "$package_filter" "$a_repo") )

  banner "Building ${#package_dirs[@]} packages (filter=$package_filter a_repo=$a_repo)"

  for pkg in "${package_dirs[@]}"; do
    build-package-overlayfs "$config" "$pkg" "$a_repo"
  done
}

build-pkg() {
  ### trivial wrapper around build-package-overlayfs - change arg order for xargs
  local config=${1:-baseline}
  local a_repo=${2:-main}
  local pkg=${3:-lua5.4}

  build-package-overlayfs "$config" "$pkg" "$a_repo"
}

NUM_CORES=$(( $(nproc) ))

# 2 cores per package build
NUM_PAR=$(( NUM_CORES / 2 ))

# over-subscribe - allow 20 processes to see 2 cores each
# Note: this causes more timeouts.  TODO: get rid of shards to get rid of
# stragglers, and then raise the timeout to 20 minutes or more.
# NUM_PAR=$(( NUM_CORES ))

# TODO: we ran into the env.sh race condition in the enter-chroot script
# generated by alpine-chroot-install
build-many-packages-overlayfs() {
  local package_filter=${1:-}
  local config=${2:-baseline}
  local a_repo=${3:-main}
  local parallel=${4:-T}

  banner "Building packages (filter=$package_filter a_repo=$a_repo)"

  local -a flags
  if test -n "$parallel"; then
    log "(with $NUM_PAR jobs in parallel)"
    flags=( -P $NUM_PAR )
  else
    log '(serially)'
    flags=()
  fi

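  # xargs sets XARGS_SLOT for each job via --process-slot-var;
  # build-package-overlayfs uses it to pick a per-job overlay dir and a pair of
  # cores for taskset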
  package-dirs "$package_filter" $a_repo |
    xargs "${flags[@]}" -n 1 --process-slot-var=XARGS_SLOT -- \
      $0 build-pkg $config $a_repo
}


clean-host-and-guest() {
  # host dir _tmp/aports-build
  rm -r -f -v $BASE_DIR
}

clean-guest() {
  # clean guest chroot
  sudo rm -r -f -v $CHROOT_HOME_DIR/oils/_tmp
}

readonly -a CONFIGS=( baseline osh-as-sh )

APORTS_EPOCH="${APORTS_EPOCH:-}"
# default epoch
if test -z "$APORTS_EPOCH"; then
  APORTS_EPOCH=$(date '+%Y-%m-%d')
fi

_build-many-configs-overlayfs() {
  local package_filter=${1:-}
  local epoch=${2:-$APORTS_EPOCH}
  local a_repo=${3:-main}

  if test -z "$package_filter"; then
    die "Package filter is required (e.g. shard3, ALL)"
  fi

  clean-guest

  # See note about /etc/sudoers.d at top of file

  local dest_dir="$BASE_DIR/$epoch/$package_filter"  # e.g. shard10

  for config in "${CONFIGS[@]}"; do
    banner "$epoch: Using config $config"

    build-many-packages-overlayfs "$package_filter" "$config" "$a_repo"
  done
}

remove-shard-files() {
  local shard_dir=${1:-_chroot/shardC}

  log "Removing big files in shard $shard_dir"

  # For all packages, for baseline and osh-as-sh, clean up the aports source dir.
  # For linux, clang, etc. it becomes MANY GIGABYTES
  #
  # 2025-09-12: ignore errors from rm; I think there was a race condition -
  # processes could still be running and creating files
  #
  # rm: cannot remove '_chroot/shard6/baseline/llvm19/home/udu/aports/main/llvm19/src/llvm-project-19.1.7.src/build/lib': Directory not empty
  # real 1041m46.464s

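  # glob levels: $shard_dir/<config>/<package>/home/udu/aports/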
  sudo rm -r -f $shard_dir/*/*/home/udu/aports/ || true
}

build-many-shards-overlayfs() {
  sudo -k

  local a_repo=${A_REPO:-main}  # env var like $APORTS_EPOCH

  # Clean up old runs
  sudo rm -r -f _chroot/shard* _chroot/disagree*

  banner "$APORTS_EPOCH $a_repo: building shards: $*"

  time for shard_name in "$@"; do
    _build-many-configs-overlayfs "$shard_name" "$APORTS_EPOCH" "$a_repo"

    # Move layer files to _chroot/shard10/{baseline,osh}/...
    mv -v --no-target-directory _chroot/package-layers _chroot/$shard_name

    # Make it rsync-able in _tmp/aports-build ($BASE_DIR)
    make-shard-tree $shard_name $a_repo

    # Remove big files
    remove-shard-files _chroot/$shard_name

    # TODO: we should publish and clean up after every PACKAGE, rather than
    # each shard
  done
}

build-and-stat() {
  # Measure resource utilization
  local proc_dir="$BASE_DIR/$APORTS_EPOCH/proc-log"
  mkdir -v -p $proc_dir
  regtest/proc_log.py --out-dir $proc_dir --sleep-secs 5 &
  local proc_log_pid=$!

  sleep 0.05  # prevent overlapping sudo prompt

  build-many-shards-overlayfs "$@"

  kill -s TERM $proc_log_pid
  wc -l $proc_dir/*.txt
}

make-shard-tree() {
  ### Put outputs in rsync-able format, for a SINGLE shard

  # The directory structure is like this:
  #
  # _tmp/aports-build/
  #   2025-09-10-overlayfs/
  #     shard0/
  #       baseline/
  #         apk.txt
  #         tasks.tsv
  #         log/
  #           gzip.log.txt
  #           xz.log.txt
  #         test-suite/   # autotools dir
  #           gzip/
  #             test-suite.log.txt
  #       osh-as-sh/
  #         apk.txt
  #         tasks.tsv
  #         log/
  #           gzip.log.txt
  #           xz.log.txt
  #         test-suite/
  #           gzip/
  #             test-suite.log.txt
  #     shard1/
  #       ...
  #     shard16/
  #       ...

  local shard_name=$1
  local a_repo=${2:-main}
  local epoch=${3:-$APORTS_EPOCH}

  local shard_dir=_chroot/$shard_name

  for config in baseline osh-as-sh; do
    local dest_dir=$BASE_DIR/$epoch/$shard_name/$config
    mkdir -p $dest_dir
    #ls -l $shard_dir/$config

    time python3 devtools/tsv_concat.py \
      $shard_dir/$config/*/home/udu/oils/_tmp/aports-guest/*.task.tsv > $dest_dir/tasks.tsv

    # Allowed to fail if zero .apk are built
    time md5sum $shard_dir/$config/*/home/udu/packages/$a_repo/x86_64/*.apk > $dest_dir/apk.txt \
      || true

    abridge-logs $shard_dir/$config $dest_dir

  done
}

abridge-logs() {
  local config_src_dir=${1:-_chroot/shardD/osh-as-sh}
  local dest_dir=${2:-$BASE_DIR/shardD/osh-as-sh}

  local log_dest_dir=$dest_dir/log
  local test_suite_dest_dir=$dest_dir/test-suite
  mkdir -p $log_dest_dir $test_suite_dest_dir

  local threshold=$(( 500 * 1000 ))  # 500 KB

  # this assumes the build process doesn't create *.log.txt
  # test-suite.log is the name used by the autotools test runner - we want to save those too
  # ignore permission errors with || true
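  # find -printf: %s is the size in bytes, %P is the path relative to $config_src_dir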
  { find $config_src_dir -name '*.log.txt' -a -printf '%s\t%P\n' || true; } |
  while read -r size path; do
    local src=$config_src_dir/$path
    # Remove everything up to and including the last slash (longest prefix match) -
    # like $(basename $path), but in bash, for speed
    local filename=${path##*/}
    local dest=$log_dest_dir/$filename

    if test "$size" -lt "$threshold"; then
      cp -v $src $dest
    else
      # Bug fix: abridging to 1000 lines isn't sufficient.  We got some logs
      # that were hundreds of MB, with fewer than 1000 lines!
      { echo "*** This log is abridged to its last 500 KB:"
        echo
        tail --bytes 500000 $src
      } > $dest
    fi
  done

  { find $config_src_dir -name 'test-suite.log' -a -printf '%P\n' || true; } |
  while read -r path; do
    local src=$config_src_dir/$path

    # Keep the text before the first slash (longest suffix match) - the package name
    local package_name=${path%%/*}
    local dest=$test_suite_dest_dir/$package_name/test-suite.log.txt

    mkdir -p "$(dirname $dest)"
    cp -v --no-target-directory $src $dest
  done

  # 500K threshold: 76 MB
  du --si -s $log_dest_dir
}

demo-build() {
  local pkg=${1:-gzip}  # in shardA, uses many cores
  local do_pin=${2:-}

  local -a prefix
  if test -n "$do_pin"; then
    echo "*** Pinning to CPU 0 ***"
    prefix=( taskset -c 0 )
  fi

  "${prefix[@]}" $CHROOT_DIR/enter-chroot -u udu sh -c '
  pkg=$1

  echo "nproc = $(nproc)"

  cd oils
  set -x

  # Note the user / real ratio!  How many cores did we use?
  time regtest/aports-guest.sh build-one-package $pkg
  ' dummy0 $pkg
}

test-taskset() {
  local pkg=${1:-gzip}  # in shardA, uses many cores

  demo-build $pkg ''
  demo-build $pkg T
}

test-proc-log() {
  local out_dir=_tmp/proc-log
  mkdir -p $out_dir

  regtest/proc_log.py --out-dir $out_dir &
  local pid=$!
  sleep 3.1  # should get 3 entries
  kill $pid
  wc -l $out_dir/*.txt
}

task-five "$@"
|