# OILS / builtin / process_osh.py -- View on GitHub | oils.pub
#
# 845 lines, 525 significant
1#!/usr/bin/env python2
2"""
3builtin_process.py - Builtins that deal with processes or modify process state.
4
5This is sort of the opposite of builtin_pure.py.
6"""
7from __future__ import print_function
8
9import resource
10from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
11 RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
12from signal import SIGCONT
13
14from _devbuild.gen import arg_types
15from _devbuild.gen.syntax_asdl import loc, loc_t, CompoundWord
16from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
17 wait_status_e)
18from core import dev
19from core import error
20from core.error import e_usage, e_die_status
21from core import process # W1_EXITED, etc.
22from core import pyos
23from core import pyutil
24from core import vm
25from frontend import flag_util
26from frontend import match
27from frontend import signal_def
28from frontend import typed_args
29from frontend import args
30from mycpp import mops
31from mycpp import mylib
32from mycpp.mylib import log, tagswitch, print_stderr
33
34import posix_ as posix
35
36from typing import TYPE_CHECKING, List, Tuple, Optional, cast
37if TYPE_CHECKING:
38 from core.process import Waiter, ExternalProgram, FdState
39 from core import executor
40 from core import state
41 from display import ui
42
43_ = log
44
45
def PrintSignals():
    # type: () -> None
    """Print every known signal as 'NUM NAME', one per line."""
    limit = signal_def.MaxSigNumber()
    for num in xrange(limit):
        name = signal_def.GetName(num)
        if name is not None:  # skip unassigned signal numbers
            print('%2d %s' % (num, name))
54
55
class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # osh doesn't support the JOBSPEC arg
        arg_r.Done()

        # -l and -p select alternate display styles
        if arg.l:
            display_style = process.STYLE_LONG
        elif arg.p:
            display_style = process.STYLE_PID_ONLY
        else:
            display_style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(display_style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0
85
86
class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # empty spec means the "current" job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal() must
        # be called before sending SIGCONT or else the process might immediately get
        # suspended again if it tries to read/write on the terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Send signal

        if self.exec_opts.interactive():
            # e.g. '[%1] PID 123 Continued'
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1

        # Block until the job finishes (or stops / is cancelled again).
        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    # Only clean up bookkeeping when it actually exited
                    # (not when it stopped again)
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS?  Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                # Shell convention: signal cancellation -> 128 + signal number
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status
153
154
class Bg(vm._Builtin):
    """Put a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Compared with 'fg': presumably this wouldn't wait, and it sets the
        # controlling terminal?  Not implemented yet.
        raise error.Usage("isn't implemented", loc.Missing)
169
170
class Fork(vm._Builtin):
    """Run a block as a background job (takes a typed block arg)."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        # No positional args are accepted; only the block
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(cmd_frag)
189
190
class ForkWait(vm._Builtin):
    """Run a block in a subshell (takes a typed block arg)."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)

        # No positional args are accepted; only the block
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(cmd_frag)
208
209
class Exec(vm._Builtin):
    """exec builtin.

    With no args: make the redirects applied to this command permanent in the
    current shell.  With args: replace the shell process with the named
    external program (never returns on success; dies with status 127 if the
    program isn't found).
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('exec_', cmd_val)
        arg = arg_types.exec_(attrs.attrs)

        # Apply redirects in this shell.  NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()

        # Index of the command word, after any flags like -a were consumed
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            # Fix: blame the command word itself (index i), not arg_locs[1],
            # which may point at a flag like -a in 'exec -a name cmd'
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[i])

        # shift off 'exec', and remove typed args because they don't apply
        if arg.a is not None:
            # -a NAME: the program sees NAME as its argv[0]
            c2_argv = [arg.a]
            c2_argv.extend(cmd_val.argv[i + 1:])
        else:
            c2_argv = cmd_val.argv[i:]

        c2 = cmd_value.Argv(c2_argv, cmd_val.arg_locs[i:], cmd_val.is_last_cmd,
                            cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
261
262
class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
    Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status.  If ID is not
    given, waits for all currently active child processes, and the return
    status is zero.  If ID is a job specification, waits for all processes
    in that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Trace the builtin invocation, then dispatch to _Run()
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int
        """Wait for the explicit list of job specs / PIDs on the command line.

        Returns the status of the LAST ID on the command line (not the last
        one to finish), or 127 if any ID can't be resolved.
        """

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the
        # exit code of last one to FINISH.

        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            # '' or '%...' is a job spec; anything else is tried as a PID below
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                #log('JOB %s', job_id)
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)
                #log('WAIT JOB %r', job)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        # Remove bookkeeping only for processes that exited
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS?  Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    # Shell convention: cancelled by signal -> 128 + signal
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int
        """Implement 'wait -n': wait for the next job to terminate.

        Returns that job's status, 127 if there's nothing to wait for, or
        128 + signal if interrupted.
        """

        # Loop until there is one fewer process running, there's nothing to wait
        # for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        # The kernel reaped a process this shell didn't start
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or wait --all

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                # NOTE(review): unlike _WaitNext(), this path doesn't guard
                # against pr being None (a PID this shell didn't start) before
                # reading pr.status below -- confirm whether that can happen
                # here
                if arg.verbose:
                    self.errfmt.PrintMessage(
                        '(wait) PID %d exited with status %d' %
                        (pid, pr.status), cmd_val.arg_locs[0])

                if pr.status != 0 and arg.all:  # YSH extension: respect failure
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            'wait --all: will fail with status 1')
                    status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
460
461
class Umask(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        """Print the current umask in octal, or set it from an octal arg."""

        argv = cmd_val.argv[1:]
        n_args = len(argv)

        if n_args == 0:
            # umask() has a dumb API: you can't read it without modifying it,
            # so set it back immediately.  (dash disables interrupts around
            # the pair of calls, but our signal handlers don't call umask(),
            # so that shouldn't be a concern for us.)
            mask = posix.umask(0)
            posix.umask(mask)
            print('0%03o' % mask)  # octal format
            return 0

        if n_args == 1:
            try:
                new_mask = int(argv[0], 8)
            except ValueError:
                # NOTE: This also happens when we have '8' or '9' in the input.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
497
498
def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    """Format an rlimit value for display, scaled down by 'factor'."""
    if mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        return 'unlimited'
    scaled = mops.Div(lim, mops.IntWiden(factor))
    return mops.ToStr(scaled)
506
507
class Ulimit(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        # Lazily-built table of (flag, RLIMIT_* constant, factor, description)
        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]
        """Return the resource table, building it on first use."""

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        """Return the display/scaling factor for an RLIMIT_* constant."""
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        # Every 'what' we pass comes from the table, so this is unreachable
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        """Show or set resource limits (ulimit builtin)."""
        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        # Determine which single resource is selected by flags
        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        # No value argument: print the current soft (or hard, with -H) limit
        if s is None:
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            factor = self._FindFactor(what)

            # The user gives the limit in units of 'factor' (e.g. 512-byte
            # blocks for -f), so scale it up for setrlimit()
            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed.  This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # NOTE(review): if s == 'unlimited', 'factor' was never
                # assigned on this path, so the lines below would raise
                # NameError in Python -- confirm whether setrlimit can fail
                # for 'unlimited'

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0
705
706
def _SigNameToNumber(name):
    # type: (str) -> int
    """Look up a signal number by name, ignoring case and a 'SIG' prefix."""
    upper = name.upper()
    if upper.startswith("SIG"):
        upper = upper[3:]
    return signal_def.GetNumber(upper)
713
714
class Kill(vm._Builtin):
    """Send a signal to a process"""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def _ParseWhat(self, what, blame_loc):
        # type: (str, loc_t) -> int
        """Resolve a %jobspec to its process group ID, or parse a PID.

        Raises error.Usage (via e_usage) on an invalid spec.
        """
        if what.startswith("%"):
            job = self.job_list.JobFromSpec(what)
            if job is None:
                e_usage("got invalid job ID %r" % what, blame_loc)
            return job.ProcessGroupId()
        else:
            try:
                pid = int(what)
            except ValueError:
                e_usage("got invalid process ID %r" % what, blame_loc)
            return pid

    def _SendSignal(self, arg_r, sig_num):
        # type: (args.Reader, int) -> int
        """Send sig_num to every remaining process/job argument in arg_r."""
        if arg_r.AtEnd():
            e_usage("expects at least one process/job ID", loc.Missing)

        while not arg_r.AtEnd():
            arg_str, arg_loc = arg_r.Peek2()
            pid = self._ParseWhat(arg_str, arg_loc)

            # kill() validates the signal number; see _ParseSignal()
            posix.kill(pid, sig_num)
            arg_r.Next()
        return 0

    def _ParseSignal(self, sig_str, blame_loc):
        # type: (str, loc_t) -> int
        """
        Sigspec can one of these forms:
          15, TERM, SIGTERM (case insensitive)
        Raises error if sigspec is in invalid format
        """
        if sig_str.isdigit():
            # We don't validate the signal number; we rely on kill() returning
            # EINVAL instead. This is useful for sending unportable signals.
            sig_num = int(sig_str)
        else:
            sig_num = _SigNameToNumber(sig_str)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("got invalid signal name %r" % sig_str, blame_loc)
        return sig_num

    def _TranslateSignal(self, arg, arg_loc):
        # type: (str, loc_t) -> str
        """
        Convert a signal name to a number and vice versa.
        Can also be passed an exit code, which will be converted
        to the name of the signal used to terminate the process.
        """
        if arg.isdigit():
            try:
                sig_num = int(arg)
            except ValueError:
                # int() can't fail after isdigit() in Python; presumably this
                # guards integer overflow in the C++ translation
                raise error.Usage("got overflowing integer: %s" % arg,
                                  arg_loc)
            if sig_num == 0:
                return "EXIT"  # special case, this is not really a signal

            if sig_num > 128:
                sig_num -= 128  # convert exit codes to signal numbers

            sig_name = signal_def.GetName(sig_num)
            if sig_name is None:
                e_usage("can't translate number %r to a name" % arg, arg_loc)
            return sig_name[3:]  # strip the SIG prefix
        else:
            sig_num = _SigNameToNumber(arg)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("can't translate name %r to a number" % arg, arg_loc)
            return str(sig_num)

    def _TranslateSignals(self, arg_r):
        # type: (args.Reader) -> None
        """Print the translation of each remaining arg, one per line."""
        while not arg_r.AtEnd():
            arg, arg_loc = arg_r.Peek2()
            print(self._TranslateSignal(arg, arg_loc))
            arg_r.Next()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        arg_r = args.Reader(cmd_val.argv, locs=cmd_val.arg_locs)
        arg_r.Next()  # skip command name

        # Check for a signal argument like -15 -TERM -SIGTERM
        first, first_loc = arg_r.Peek2()
        if first is not None and first.startswith('-'):
            sig_spec = first[1:]
            # Single letters like -l fall through to normal flag parsing below
            if sig_spec.isdigit() or len(sig_spec) > 1:
                sig_num = self._ParseSignal(sig_spec, first_loc)
                arg_r.Next()  # Skip signal argument
                return self._SendSignal(arg_r, sig_num)

        # Note: we're making another args.Reader here
        attrs, arg_r = flag_util.ParseCmdVal('kill',
                                             cmd_val,
                                             accept_typed_args=False)
        arg = arg_types.kill(attrs.attrs)

        if arg.l or arg.L:
            # If no arg, print all signals
            if arg_r.AtEnd():
                PrintSignals()
                return 0

            # Otherwise translate each arg
            self._TranslateSignals(arg_r)
            return 0

        # -n and -s are synonyms.
        # TODO: it would be nice if the flag parser could expose the location
        # of 'foo' in -s foo
        sig_num = 15  # SIGTERM, the default signal to send
        blame_loc = cmd_val.arg_locs[0]
        if arg.n is not None:
            sig_num = self._ParseSignal(arg.n, blame_loc)
        if arg.s is not None:
            sig_num = self._ParseSignal(arg.s, blame_loc)

        return self._SendSignal(arg_r, sig_num)
843
844
845# vim: sw=4