#!/usr/bin/env python2
"""
builtin_process.py - Builtins that deal with processes or modify process state.

This is sort of the opposite of builtin_pure.py.
"""
from __future__ import print_function

import resource
from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
                      RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
from signal import SIGCONT

from _devbuild.gen import arg_types
from _devbuild.gen.syntax_asdl import loc
from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
                                        wait_status_e)
from core import dev
from core import error
from core.error import e_usage, e_die_status
from core import process  # W1_EXITED, etc.
from core import pyos
from core import pyutil
from core import vm
from frontend import flag_util
from frontend import match
from frontend import typed_args
from mycpp import mops
from mycpp import mylib
from mycpp.mylib import log, tagswitch, print_stderr

import posix_ as posix

from typing import TYPE_CHECKING, List, Tuple, Optional, cast
if TYPE_CHECKING:
    from core.process import Waiter, ExternalProgram, FdState
    from core import executor
    from core import state
    from display import ui

_ = log


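# Illustrative interactive usage for the Jobs builtin below (a sketch, not
# from this file): 'jobs' lists jobs in the default format, 'jobs -l' uses
# the long format, and 'jobs -p' prints only PIDs.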
class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        if arg.l:
            style = process.STYLE_LONG
        elif arg.p:
            style = process.STYLE_PID_ONLY
        else:
            style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0


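# Illustrative usage for the Fg builtin below (a sketch): 'fg' resumes the
# current job in the foreground, and 'fg %1' resumes job 1.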
class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.GetJobWithSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # TODO: Print job ID rather than the PID
        print_stderr('fg: PID %d Continued' % pgid)
        # Put the job's process group back into the foreground.  GiveTerminal()
        # must be called before sending SIGCONT, or else the process might
        # immediately get suspended again if it tries to read/write on the
        # terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        job.SetForeground()
        # needed for Wait() loop to work
        job.state = job_state_e.Running
        posix.killpg(pgid, SIGCONT)

        status = -1

        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS?  Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status


class Bg(vm._Builtin):
    """Put a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        # How does this differ from 'fg'?  It doesn't wait, and it sets the
        # controlling terminal?

        raise error.Usage("isn't implemented", loc.Missing)


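# Illustrative YSH usage for the Fork and ForkWait builtins below (a sketch):
# 'fork { sleep 1 }' runs the block as a background job, while
# 'forkwait { cd /tmp; ls }' runs it in a subshell and waits for it.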
class Fork(vm._Builtin):

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(cmd_frag)


class ForkWait(vm._Builtin):

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)
        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(cmd_frag)


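# Illustrative usage for the Exec builtin below (a sketch): 'exec > out.txt'
# makes the redirect permanent in the current shell, while 'exec grep foo'
# replaces the shell process with grep.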
class Exec(vm._Builtin):

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell.  NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        if 0:
            log('E %r', environ)
            log('ZZ %r', environ.get('ZZ'))
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[1])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and the C++ compiler happy
        raise AssertionError('unreachable')


class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
        Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status.  If ID is not
    given, waits for all currently active child processes, and the return
    status is zero.  If ID is a job specification, waits for all processes
    in that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """
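    # Illustrative usage (a sketch, consistent with the docstring above):
    #   sleep 5 &
    #   wait          # wait for all background jobs; status 0
    #   wait $!       # wait for the last background PID
    #   wait %1       # wait for job 1's pipeline
    #   wait -n       # wait for the next job to finish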

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if arg.n:
            # Loop until there is one fewer process running, there's nothing
            # to wait for, or there's a signal
            n = self.job_list.NumRunning()
            if n == 0:
                status = 127
            else:
                target = n - 1
                status = 0
                while self.job_list.NumRunning() > target:
                    result, w1_arg = self.waiter.WaitForOne()
                    if result == process.W1_EXITED:
                        # Clean up the process that exited
                        pid = w1_arg
                        pr = self.job_list.PopChildProcess(pid)
                        self.job_list.CleanupWhenProcessExits(pid)

                        if pr is None:
                            print_stderr(
                                "oils: PID %d exited, but oils didn't start it"
                                % pid)
                        else:
                            status = pr.status

                    elif result == process.W1_NO_CHILDREN:
                        status = 127
                        break

                    elif result == process.W1_CALL_INTR:  # signal
                        status = 128 + w1_arg
                        break

            return status

        if len(job_ids) == 0:
            #log('*** wait')

            # Note: NumRunning() makes sure we ignore stopped processes, which
            # cause WaitForOne() to return

            status = 0
            while self.job_list.NumRunning() != 0:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    self.job_list.PopChildProcess(pid)
                    self.job_list.CleanupWhenProcessExits(pid)

                if result == process.W1_NO_CHILDREN:
                    break  # status is 0

                if result == process.W1_CALL_INTR:
                    status = 128 + w1_arg
                    break

            return status

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the
        # exit code of the last one to FINISH.
        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.GetJobWithSpec(job_id)

            if job is None:
                #log('JOB %s', job_id)
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                # TODO:
                # - what happens if you pass the pipeline leader PID?
                # - what happens if you pass a non-leader PID?

                job = self.job_list.JobFromPid(pid)
                #log('WAIT JOB %r', job)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS?  Is this right?
                    status = wait_st.codes[-1]

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        return status


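# Illustrative usage for the Umask builtin below (a sketch): 'umask' prints
# the current mask in octal, e.g. 0022, and 'umask 077' sets it so new files
# get no group or other permissions.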
class Umask(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        argv = cmd_val.argv[1:]
        if len(argv) == 0:
            # umask() has a dumb API: you can't get it without modifying it
            # first!
            # NOTE: dash disables interrupts around the two umask() calls, but
            # that shouldn't be a concern for us.  Signal handlers won't call
            # umask().
            mask = posix.umask(0)
            posix.umask(mask)  #
            print('0%03o' % mask)  # octal format
            return 0

        if len(argv) == 1:
            a = argv[0]
            try:
                new_mask = int(a, 8)
            except ValueError:
                # NOTE: This also happens when we have '8' or '9' in the input.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)


def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    if mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        return 'unlimited'
    else:
        i = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(i)
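# Example of the conversion above (a sketch): RLIMIT_FSIZE is displayed in
# 512-byte blocks, so a raw limit of 1048576 bytes prints as
# _LimitString(lim, 512) == '2048'.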


class Ulimit(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table
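    # Example reading of the table above (a sketch): 'ulimit -s 8192' sets
    # RLIMIT_STACK to 8192 * 1024 bytes, while 'ulimit -n 1024' sets
    # RLIMIT_NOFILE to 1024 descriptors, since its factor is 1.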

    def _FindFactor(self, what):
        # type: (int) -> int
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        if s is None:
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            factor = self._FindFactor(what)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
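            # A sketch of how the check below detects it (assuming 64-bit
            # limits): if big_int * factor wraps, dividing the product by
            # 'fac' no longer reproduces big_int, so the value is rejected.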
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed.  This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: the Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0


# vim: sw=4