OILS / builtin / process_osh.py

806 lines, 502 significant
#!/usr/bin/env python2
"""
process_osh.py - Builtins that deal with processes or modify process state.

This is sort of the opposite of pure_osh.py.
"""
from __future__ import print_function

import resource
from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
                      RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
from signal import SIGCONT

from _devbuild.gen import arg_types
from _devbuild.gen.syntax_asdl import loc, loc_t, CompoundWord
from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
                                        wait_status_e)
from core import dev
from core import error
from core.error import e_usage, e_die_status
from core import process  # W1_EXITED, etc.
from core import pyos
from core import pyutil
from core import vm
from frontend import flag_util
from frontend import match
from frontend import signal_def
from frontend import typed_args
from frontend import args
from mycpp import mops
from mycpp import mylib
from mycpp.mylib import log, tagswitch, print_stderr

import posix_ as posix

from typing import TYPE_CHECKING, List, Tuple, Optional, cast
if TYPE_CHECKING:
    from core.process import Waiter, ExternalProgram, FdState
    from core import executor
    from core import state
    from display import ui

_ = log


def PrintSignals():
    # type: () -> None
    # Iterate over signals and print them
    for sig_num in xrange(signal_def.MaxSigNumber()):
        sig_name = signal_def.GetName(sig_num)
        if sig_name is None:
            continue
        print('%2d %s' % (sig_num, sig_name))
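    # Illustrative output, assuming signal_def.GetName() returns SIG-prefixed
    # names (as the sig_name[3:] slice in Kill._TranslateSignal suggests):
    #    1 SIGHUP
    #    2 SIGINT
    #   15 SIGTERM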


class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # osh doesn't support JOBSPEC arg
        arg_r.Done()

        if arg.l:
            style = process.STYLE_LONG
        elif arg.p:
            style = process.STYLE_PID_ONLY
        else:
            style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0


class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal() must
        # be called before sending SIGCONT or else the process might immediately get
        # suspended again if it tries to read/write on the terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Send signal

        if self.exec_opts.interactive():
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1

        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS? Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status


class Bg(vm._Builtin):
    """Put a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        # How does this differ from 'fg'? It doesn't wait and it sets controlling
        # terminal?

        raise error.Usage("isn't implemented", loc.Missing)


class Fork(vm._Builtin):
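    # YSH usage (illustrative, hedged): 'fork' takes a required block and runs
    # it as a background job, e.g.
    #
    #   fork { sleep 1 }      # roughly equivalent to: sleep 1 &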

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(cmd_frag)


class ForkWait(vm._Builtin):
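    # YSH usage (illustrative, hedged): 'forkwait' runs a required block in a
    # subshell and waits for it, e.g.
    #
    #   forkwait { cd /tmp; echo $PWD }   # roughly: ( cd /tmp; echo $PWD )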

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)
        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd_frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(cmd_frag)


class Exec(vm._Builtin):
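    # Illustrative usage (based on Run below, hedged):
    #   exec ls -l            # replace the shell with 'ls -l'; never returns
    #   exec -a NAME cmd ...  # override the argv[0] that 'cmd' sees
    #   exec 3< file          # no argv: redirects applied earlier are made
    #                         # permanent via fd_state.MakePermanent()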

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('exec_', cmd_val)
        arg = arg_types.exec_(attrs.attrs)

        # Apply redirects in this shell.  # NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        if 0:
            log('E %r', environ)
            log('E %r', environ)
            log('ZZ %r', environ.get('ZZ'))
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[1])

        # shift off 'exec', and remove typed args because they don't apply
        c2_argv = cmd_val.argv[i:]
        if arg.a is not None:
            c2_argv[0] = arg.a

        c2 = cmd_value.Argv(c2_argv, cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')


class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
    Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status. If ID is not
    given, waits for all currently active child processes, and the return
    status is zero. If ID is a job specification, waits for all processes
    in that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int

        # Get list of jobs. Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the
        # exit code of last one to FINISH.

        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                #log('JOB %s', job_id)
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)
                #log('WAIT JOB %r', job)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS? Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int

        # Loop until there is one fewer process running, there's nothing to wait
        # for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or wait --all

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                if arg.verbose:
                    self.errfmt.PrintMessage(
                        '(wait) PID %d exited with status %d' %
                        (pid, pr.status), cmd_val.arg_locs[0])

                if pr.status != 0 and arg.all:  # YSH extension: respect failure
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            'wait --all: will fail with status 1')
                    status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status


def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    if mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        return 'unlimited'
    else:
        i = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(i)
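    # Example (illustrative): RLIMIT_FSIZE uses factor 512 in the table below,
    # so a soft limit of 1048576 bytes is displayed as 2048 (1048576 / 512);
    # RLIM_INFINITY is displayed as 'unlimited'.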


class Ulimit(vm._Builtin):
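    # Illustrative usage (resource flags come from the table in _Table):
    #   ulimit -a        # show all limits
    #   ulimit -n        # print the soft RLIMIT_NOFILE value
    #   ulimit -f 100    # set the file size limit to 100 * 512 bytes
    #   ulimit -S -t 60  # set only the soft CPU limit, in seconds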

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        if s is None:
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            factor = self._FindFactor(what)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed. This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0


def _SigNameToNumber(name):
    # type: (str) -> int
    name = name.upper()
    if name.startswith("SIG"):
        name = name[3:]
    return signal_def.GetNumber(name)
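    # e.g. 'term', 'TERM', and 'SIGTERM' are all normalized to 'TERM' before
    # the lookup (illustrative; the name -> number mapping lives in signal_def).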


class Kill(vm._Builtin):
    """Send a signal to a process"""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def _ParseWhat(self, what, blame_loc):
        # type: (str, loc_t) -> int
        if what.startswith("%"):
            job = self.job_list.JobFromSpec(what)
            if job is None:
                e_usage("got invalid job ID %r" % what, blame_loc)
            return job.ProcessGroupId()
        else:
            try:
                pid = int(what)
            except ValueError:
                e_usage("got invalid process ID %r" % what, blame_loc)
            return pid

    def _SendSignal(self, arg_r, sig_num):
        # type: (args.Reader, int) -> int
        if arg_r.AtEnd():
            e_usage("expects at least one process/job ID", loc.Missing)

        while not arg_r.AtEnd():
            arg_str, arg_loc = arg_r.Peek2()
            pid = self._ParseWhat(arg_str, arg_loc)

            posix.kill(pid, sig_num)
            arg_r.Next()
        return 0

    def _ParseSignal(self, sig_str, blame_loc):
        # type: (str, loc_t) -> int
        """
        Sigspec can be one of these forms:
            15, TERM, SIGTERM (case insensitive)
        Raises an error if the sigspec is in an invalid format.
        """
        if sig_str.isdigit():
            # We don't validate the signal number; we rely on kill() returning
            # EINVAL instead. This is useful for sending unportable signals.
            sig_num = int(sig_str)
        else:
            sig_num = _SigNameToNumber(sig_str)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("got invalid signal name %r" % sig_str, blame_loc)
        return sig_num

    def _TranslateSignal(self, arg, arg_loc):
        # type: (str, loc_t) -> str
        """
        Convert a signal name to a number and vice versa.
        Can also be passed an exit code, which will be converted
        to the name of the signal used to terminate the process.
        """
        if arg.isdigit():
            try:
                sig_num = int(arg)
            except ValueError:
                raise error.Usage("got overflowing integer: %s" % arg,
                                  arg_loc)
            if sig_num == 0:
                return "EXIT"  # special case, this is not really a signal

            if sig_num > 128:
                sig_num -= 128  # convert exit codes to signal numbers

            sig_name = signal_def.GetName(sig_num)
            if sig_name is None:
                e_usage("can't translate number %r to a name" % arg, arg_loc)
            return sig_name[3:]  # strip the SIG prefix
        else:
            sig_num = _SigNameToNumber(arg)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("can't translate name %r to a number" % arg, arg_loc)
            return str(sig_num)

    def _TranslateSignals(self, arg_r):
        # type: (args.Reader) -> None
        while not arg_r.AtEnd():
            arg, arg_loc = arg_r.Peek2()
            print(self._TranslateSignal(arg, arg_loc))
            arg_r.Next()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        arg_r = args.Reader(cmd_val.argv, locs=cmd_val.arg_locs)
        arg_r.Next()  # skip command name

        # Check for a signal argument like -15 -TERM -SIGTERM
        first, first_loc = arg_r.Peek2()
        if first is not None and first.startswith('-'):
            sig_spec = first[1:]
            if sig_spec.isdigit() or len(sig_spec) > 1:
                sig_num = self._ParseSignal(sig_spec, first_loc)
                arg_r.Next()  # Skip signal argument
                return self._SendSignal(arg_r, sig_num)

        # Note: we're making another args.Reader here
        attrs, arg_r = flag_util.ParseCmdVal('kill',
                                             cmd_val,
                                             accept_typed_args=False)
        arg = arg_types.kill(attrs.attrs)

        if arg.l or arg.L:
            # If no arg, print all signals
            if arg_r.AtEnd():
                PrintSignals()
                return 0

            # Otherwise translate each arg
            self._TranslateSignals(arg_r)
            return 0

        # -n and -s are synonyms.
        # TODO: it would be nice if the flag parser could expose the location
        # of 'foo' in -s foo
        sig_num = 15  # SIGTERM, the default signal to send
        blame_loc = cmd_val.arg_locs[0]
        if arg.n is not None:
            sig_num = self._ParseSignal(arg.n, blame_loc)
        if arg.s is not None:
            sig_num = self._ParseSignal(arg.s, blame_loc)

        return self._SendSignal(arg_r, sig_num)


# vim: sw=4