OILS / builtin / process_osh.py View on Github | oils.pub

843 lines, 523 significant
1#!/usr/bin/env python2
2"""
3builtin_process.py - Builtins that deal with processes or modify process state.
4
5This is sort of the opposite of builtin_pure.py.
6"""
7from __future__ import print_function
8
9import resource
10from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
11 RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
12from signal import SIGCONT
13
14from _devbuild.gen import arg_types
15from _devbuild.gen.syntax_asdl import loc, loc_t, CompoundWord
16from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
17 wait_status_e)
18from core import dev
19from core import error
20from core.error import e_usage, e_die_status
21from core import process # W1_EXITED, etc.
22from core import pyos
23from core import pyutil
24from core import vm
25from frontend import flag_util
26from frontend import match
27from frontend import signal_def
28from frontend import typed_args
29from frontend import args
30from mycpp import mops
31from mycpp import mylib
32from mycpp.mylib import log, tagswitch, print_stderr
33
34import posix_ as posix
35
36from typing import TYPE_CHECKING, List, Tuple, Optional, cast
37if TYPE_CHECKING:
38 from core.process import Waiter, ExternalProgram, FdState
39 from core import executor
40 from core import state
41 from display import ui
42
43_ = log
44
45
def PrintSignals():
    # type: () -> None
    """Print each known signal as 'NUM NAME', one per line."""
    # NOTE(review): xrange() excludes MaxSigNumber() itself -- confirm that
    # the upper bound is exclusive by design.
    for num in xrange(signal_def.MaxSigNumber()):
        name = signal_def.GetName(num)
        if name is not None:
            print('%2d %s' % (num, name))
54
55
class Jobs(vm._Builtin):
    """The 'jobs' builtin: display the shell's job table."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # osh doesn't support JOBSPEC arg
        arg_r.Done()

        # -l selects the long format, -p prints PIDs only
        display_style = (process.STYLE_LONG if arg.l else
                         process.STYLE_PID_ONLY if arg.p else
                         process.STYLE_DEFAULT)
        self.job_list.DisplayJobs(display_style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0
85
86
class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        # Shell options are taken from the waiter (used for interactive())
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        """Resume a job in the foreground and wait for it.

        Returns the job's exit status, or 1 if no job was found.
        """

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal() must
        # be called before sending SIGCONT or else the process might immediately get
        # suspended again if it tries to read/write on the terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Send signal

        if self.exec_opts.interactive():
            # e.g. "[%1] PID 1234 Continued"
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1  # overwritten below; AssertionError if not

        wait_st = job.JobWait(self.waiter)
        # mycpp idiom: tagswitch with UP_ alias and explicit downcasts
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    # Remove from the job table only on actual exit (not stop)
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS?  Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                # Shell convention: killed by signal N -> status 128 + N
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status
153
154
class Bg(vm._Builtin):
    """Put a job in the background (currently a stub)."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Compared with 'fg': this wouldn't wait for the job, and wouldn't
        # give it the controlling terminal.
        raise error.Usage("isn't implemented", loc.Missing)
169
170
class Fork(vm._Builtin):
    """YSH 'fork' builtin: run a block as a background job."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork', cmd_val, accept_typed_args=True)

        # Only a typed block arg is accepted; reject any positional word.
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(frag)
189
190
class ForkWait(vm._Builtin):
    """YSH 'forkwait' builtin: run a block in a subshell and wait for it."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait', cmd_val, accept_typed_args=True)

        # Only a typed block arg is accepted; reject any positional word.
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(frag)
208
209
class Exec(vm._Builtin):
    """The 'exec' builtin: replace the shell process, or make redirects permanent."""

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('exec_', cmd_val)
        arg = arg_types.exec_(attrs.attrs)

        # Apply redirects in this shell. # NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        if 0:  # debug toggle
            log('E %r', environ)
            log('ZZ %r', environ.get('ZZ'))
        i = arg_r.i  # index of the command word, after any parsed flags
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            # Fix: blame the command word at index i, not argv[1], which may
            # be a flag like -a when 'exec -a name cmd' is used.
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[i])

        # shift off 'exec', and remove typed args because they don't apply
        c2_argv = cmd_val.argv[i:]
        if arg.a is not None:
            # -a NAME overrides argv[0] seen by the new program
            c2_argv = [arg.a] + cmd_val.argv[i + 1:]

        c2 = cmd_value.Argv(c2_argv, cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
259
260
class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
        Wait for job completion and return exit status.

        Waits for each process identified by an ID, which may be a process ID or a
        job specification, and reports its termination status.  If ID is not
        given, waits for all currently active child processes, and the return
        status is zero.  If ID is a job specification, waits for all processes
        in that job's pipeline.

        If the -n option is supplied, waits for the next job to terminate and
        returns its exit status.

        Exit Status:
        Returns the status of the last ID; fails if ID is invalid or an invalid
        option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int
        """Wait for the jobs/PIDs listed explicitly on the command line.

        Returns 127 if any ID can't be resolved; otherwise the status of the
        LAST id on the command line (not the last one to finish).
        """

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the
        # exit code of last one to FINISH.

        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                #log('JOB %s', job_id)
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)
                #log('WAIT JOB %r', job)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            # mycpp idiom: tagswitch with UP_ alias and explicit downcasts
            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS?  Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int
        """Implement 'wait -n': wait for the next job to terminate."""

        # Loop until there is one fewer process running, there's nothing to wait
        # for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        # A child we didn't start (e.g. inherited)
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or wait --all

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                if pr is None:
                    # Fix: PopChildProcess() may return None for a PID we
                    # didn't start; previously pr.status below would crash.
                    # Mirrors the guard in _WaitNext().
                    if self.exec_opts.verbose_warn():
                        print_stderr(
                            "oils wait: PID %d exited, but oils didn't start it"
                            % pid)
                else:
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            '(wait) PID %d exited with status %d' %
                            (pid, pr.status), cmd_val.arg_locs[0])

                    if pr.status != 0 and arg.all:  # YSH extension: respect failure
                        if arg.verbose:
                            self.errfmt.PrintMessage(
                                'wait --all: will fail with status 1')
                        status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
458
459
class Umask(vm._Builtin):
    """The 'umask' builtin: print or set the file-creation mask."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        positional = cmd_val.argv[1:]
        n_args = len(positional)

        if n_args == 0:
            # umask() has a dumb API: you can't get it without modifying it first!
            # NOTE: dash disables interrupts around the two umask() calls, but that
            # shouldn't be a concern for us. Signal handlers won't call umask().
            current = posix.umask(0)
            posix.umask(current)  # restore it
            print('0%03o' % current)  # octal format
            return 0

        if n_args == 1:
            try:
                new_mask = int(positional[0], 8)
            except ValueError:
                # NOTE: This also happens when we have '8' or '9' in the input.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1
            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
495
496
def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    """Render an rlimit value for display, scaled down by 'factor'."""
    if not mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        scaled = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(scaled)
    return 'unlimited'
504
505
class Ulimit(vm._Builtin):
    """The 'ulimit' builtin: show or set resource limits."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        # Lazily-built table of supported resources; see _Table()
        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]
        """Return the (flag, RLIMIT_*, display factor, description) table."""

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        """Look up the display factor for an RLIMIT_* constant."""
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        # Determine which single resource was selected by flags
        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        # Fix: compute the factor up front.  It was previously only assigned
        # in the numeric branch below, so 'ulimit -c unlimited' hitting the
        # setrlimit error path referenced an unbound 'factor'.
        factor = self._FindFactor(what)

        if s is None:
            # No value given: print the current limit
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed. This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0
703
704
def _SigNameToNumber(name):
    # type: (str) -> int
    """Look up a signal number by name, accepting 'term', 'TERM', or 'SIGTERM'."""
    upper = name.upper()
    prefix = "SIG"
    if upper.startswith(prefix):
        upper = upper[len(prefix):]
    return signal_def.GetNumber(upper)
711
712
class Kill(vm._Builtin):
    """Send a signal to a process"""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def _ParseWhat(self, what, blame_loc):
        # type: (str, loc_t) -> int
        """Resolve a target: '%jobspec' -> process group ID, otherwise a PID."""
        if what.startswith("%"):
            job = self.job_list.JobFromSpec(what)
            if job is None:
                e_usage("got invalid job ID %r" % what, blame_loc)
            return job.ProcessGroupId()
        else:
            try:
                pid = int(what)
            except ValueError:
                e_usage("got invalid process ID %r" % what, blame_loc)
            return pid

    def _SendSignal(self, arg_r, sig_num):
        # type: (args.Reader, int) -> int
        """Send sig_num to every remaining arg (PID or jobspec)."""
        if arg_r.AtEnd():
            e_usage("expects at least one process/job ID", loc.Missing)

        while not arg_r.AtEnd():
            arg_str, arg_loc = arg_r.Peek2()
            pid = self._ParseWhat(arg_str, arg_loc)

            posix.kill(pid, sig_num)
            arg_r.Next()
        return 0

    def _ParseSignal(self, sig_str, blame_loc):
        # type: (str, loc_t) -> int
        """
        Sigspec can one of these forms:
          15, TERM, SIGTERM (case insensitive)
        Raises error if sigspec is in invalid format
        """
        if sig_str.isdigit():
            # We don't validate the signal number; we rely on kill() returning
            # EINVAL instead. This is useful for sending unportable signals.
            sig_num = int(sig_str)
        else:
            sig_num = _SigNameToNumber(sig_str)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("got invalid signal name %r" % sig_str, blame_loc)
        return sig_num

    def _TranslateSignal(self, arg, arg_loc):
        # type: (str, loc_t) -> str
        """
        Convert a signal name to a number and vice versa.
        Can also be passed an exit code, which will be converted
        to the name of the signal used to terminate the process.
        """
        if arg.isdigit():
            # int() can't raise ValueError on a digit string in Python, but
            # the translated C++ rejects overflowing integers here
            try:
                sig_num = int(arg)
            except ValueError:
                raise error.Usage("got overflowing integer: %s" % arg,
                                  arg_loc)
            if sig_num == 0:
                return "EXIT"  # special case, this is not really a signal

            if sig_num > 128:
                sig_num -= 128  # convert exit codes to signal numbers

            sig_name = signal_def.GetName(sig_num)
            if sig_name is None:
                e_usage("can't translate number %r to a name" % arg, arg_loc)
            return sig_name[3:]  # strip the SIG prefix
        else:
            sig_num = _SigNameToNumber(arg)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("can't translate name %r to a number" % arg, arg_loc)
            return str(sig_num)

    def _TranslateSignals(self, arg_r):
        # type: (args.Reader) -> None
        """Print the translation of each remaining arg, one per line."""
        while not arg_r.AtEnd():
            arg, arg_loc = arg_r.Peek2()
            print(self._TranslateSignal(arg, arg_loc))
            arg_r.Next()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        arg_r = args.Reader(cmd_val.argv, locs=cmd_val.arg_locs)
        arg_r.Next()  # skip command name

        # Check for a signal argument like -15 -TERM -SIGTERM
        # (handled BEFORE normal flag parsing, since these look like flags;
        # a single-letter non-digit spec like -l falls through to the parser)
        first, first_loc = arg_r.Peek2()
        if first is not None and first.startswith('-'):
            sig_spec = first[1:]
            if sig_spec.isdigit() or len(sig_spec) > 1:
                sig_num = self._ParseSignal(sig_spec, first_loc)
                arg_r.Next()  # Skip signal argument
                return self._SendSignal(arg_r, sig_num)

        # Note: we're making another args.Reader here
        attrs, arg_r = flag_util.ParseCmdVal('kill',
                                             cmd_val,
                                             accept_typed_args=False)
        arg = arg_types.kill(attrs.attrs)

        if arg.l or arg.L:
            # If no arg, print all signals
            if arg_r.AtEnd():
                PrintSignals()
                return 0

            # Otherwise translate each arg
            self._TranslateSignals(arg_r)
            return 0

        # -n and -s are synonyms.
        # TODO: it would be nice if the flag parser could expose the location
        # of 'foo' in -s foo
        sig_num = 15  # SIGTERM, the default signal to send
        blame_loc = cmd_val.arg_locs[0]
        if arg.n is not None:
            sig_num = self._ParseSignal(arg.n, blame_loc)
        if arg.s is not None:
            sig_num = self._ParseSignal(arg.s, blame_loc)

        return self._SendSignal(arg_r, sig_num)
841
842
843# vim: sw=4