OILS / builtin / process_osh.py View on Github | oils.pub

816 lines, 507 significant
1#!/usr/bin/env python2
2"""
process_osh.py - Builtins that deal with processes or modify process state.
4
5This is sort of the opposite of builtin_pure.py.
6"""
7from __future__ import print_function
8
9import resource
10from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
11 RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
12from signal import SIGCONT
13
14from _devbuild.gen import arg_types
15from _devbuild.gen.syntax_asdl import loc, loc_t, CompoundWord
16from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
17 wait_status_e)
18from core import dev
19from core import error
20from core.error import e_usage, e_die_status
21from core import process # W1_EXITED, etc.
22from core import pyos
23from core import pyutil
24from core import vm
25from frontend import flag_util
26from frontend import match
27from frontend import signal_def
28from frontend import typed_args
29from frontend import args
30from mycpp import mops
31from mycpp import mylib
32from mycpp.mylib import log, tagswitch, print_stderr
33
34import posix_ as posix
35
36from typing import TYPE_CHECKING, List, Tuple, Optional, cast
37if TYPE_CHECKING:
38 from core.process import Waiter, ExternalProgram, FdState
39 from core import executor
40 from core import state
41 from display import ui
42
43_ = log
44
45
def PrintSignals():
    # type: () -> None
    """Print one 'number name' line for every signal known to signal_def."""
    limit = signal_def.MaxSigNumber()
    for num in xrange(limit):
        name = signal_def.GetName(num)
        if name is not None:  # skip unassigned signal numbers
            print('%2d %s' % (num, name))
54
55
class Jobs(vm._Builtin):
    """The 'jobs' builtin: display the shell's job table."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # -l takes precedence over -p when both are given (original order).
        if arg.l:
            display_style = process.STYLE_LONG
        elif arg.p:
            display_style = process.STYLE_PID_ONLY
        else:
            display_style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(display_style)

        if arg.debug:  # hidden flag for inspecting internal job state
            self.job_list.DebugPrint()

        return 0
82
83
class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        # Cached from the waiter so Run() can check interactive()
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Resume a job, give it the terminal, and wait for it.
        # Returns the job's exit status, or 1 if no job matches.

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal() must
        # be called before sending SIGCONT or else the process might immediately get
        # suspended again if it tries to read/write on the terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # resume the whole process group

        if self.exec_opts.interactive():
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg',
        # so we update our bookkeeping optimistically here.
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1

        # Polymorphic call: 'job' may be a Process or a Pipeline.
        wait_st = job.JobWait(self.waiter)
        # UP_/cast pattern required by the mycpp translator for tagswitch.
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                # Only remove the job from the table if it actually exited;
                # it may have been stopped again instead.
                if wait_st.state == job_state_e.Exited:
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS? Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                # Shell convention: cancelled by signal N -> status 128 + N
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status
150
151
class Bg(vm._Builtin):
    """Put a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Compared with 'fg': this would resume the job without waiting for
        # it, and without giving it the controlling terminal.
        raise error.Usage("isn't implemented", loc.Missing)
166
167
class Fork(vm._Builtin):
    """YSH 'fork { ... }': run a block as a background job."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        # 'fork' takes no positional args, only a block.
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(frag)
186
187
class ForkWait(vm._Builtin):
    """YSH 'forkwait { ... }': run a block in a subshell and wait for it."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)

        # 'forkwait' takes no positional args, only a block.
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(frag)
205
206
class Exec(vm._Builtin):
    """The 'exec' builtin.

    With no arguments, makes this invocation's redirects permanent in the
    current shell.  With arguments, replaces the shell process with the
    given external command (never returns on success).
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell. # NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        i = arg_r.i  # index of the first word after 'exec' (and any flags)
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            # Bug fix: blame the command word at index i, not arg_locs[1] --
            # they differ when the flag parser consumed words, e.g.
            # 'exec -- cmd'.
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[i])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
251
252
class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
    Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status. If ID is not
    given, waits for all currently active child processes, and the return
    status is zero. If ID is a job specification, waits for all processes
    in that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Wrap in a tracer context so 'wait' shows up in xtrace output.
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int
        """Wait for the explicit jobs/PIDs given on the command line.

        Returns the exit code of the last one on the COMMAND LINE, not the
        exit code of the last one to FINISH.
        """
        # First resolve every ID, so an invalid ID fails with 127 before we
        # wait on anything.
        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            # UP_/cast pattern required by mycpp for tagswitch
            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    # Only clean up the job table if it actually exited; it
                    # may have been stopped instead.
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS? Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int
        """Implement 'wait -n': wait for the next job to terminate."""

        # Loop until there is one fewer process running, there's nothing to wait
        # for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127  # nothing to wait for
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        # A child we didn't start (e.g. inherited); can't
                        # report its status.
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or wait --all

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                # Bug fix: PopChildProcess() can return None (a child oils
                # didn't start); the old code dereferenced pr.status
                # unconditionally and crashed.  Mirror _WaitNext()'s guard.
                if pr is None:
                    if self.exec_opts.verbose_warn():
                        print_stderr(
                            "oils wait: PID %d exited, but oils didn't start it"
                            % pid)
                else:
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            '(wait) PID %d exited with status %d' %
                            (pid, pr.status), cmd_val.arg_locs[0])

                    if pr.status != 0 and arg.all:  # YSH extension: respect failure
                        if arg.verbose:
                            self.errfmt.PrintMessage(
                                'wait --all: will fail with status 1')
                        status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
450
451
class Umask(vm._Builtin):
    """The 'umask' builtin: print or set the file creation mask."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        argv = cmd_val.argv[1:]
        num_args = len(argv)

        if num_args == 0:
            # umask() has a dumb API: you can't read it without modifying it,
            # so set it to 0 and immediately restore the old value.
            # NOTE: dash disables interrupts around the two umask() calls, but
            # that shouldn't be a concern for us.  Signal handlers won't call
            # umask().
            current = posix.umask(0)
            posix.umask(current)
            print('0%03o' % current)  # octal format
            return 0

        if num_args == 1:
            mask_str = argv[0]
            try:
                new_mask = int(mask_str, 8)
            except ValueError:
                # Also raised when the input contains '8' or '9'.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
487
488
def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    """Render an rlimit value for display, scaled down by 'factor'."""
    if mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        return 'unlimited'
    return mops.ToStr(mops.Div(lim, mops.IntWiden(factor)))
496
497
class Ulimit(vm._Builtin):
    """The 'ulimit' builtin: get or set process resource limits."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        # Lazily-built resource table; see _Table().
        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]
        """Return the (flag, RLIMIT_X, factor, description) table, built once."""

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        """Return the display/scale factor for an RLIMIT_* constant."""
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        # Determine which single resource the flags select.
        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        if s is None:
            # No argument: print the current soft (or, with -H, hard) limit.
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource.
        # Bug fix: compute 'factor' before the 'unlimited' branch.  It was
        # previously only assigned in the numeric branch, so the error-report
        # path below raised NameError for 'ulimit unlimited' failures.
        factor = self._FindFactor(what)

        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed. This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0
695
696
def _SigNameToNumber(name):
    # type: (str) -> int
    """Look up a signal number; 'term', 'TERM', and 'SIGTERM' all work."""
    upper = name.upper()
    if upper.startswith("SIG"):
        upper = upper[3:]  # accept the optional SIG prefix
    return signal_def.GetNumber(upper)
703
704
class Kill(vm._Builtin):
    """Send a signal to a process"""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def _ParseWhat(self, what, blame_loc):
        # type: (str, loc_t) -> int
        # Resolve a '%jobspec' to its process group ID, or parse a plain PID.
        # e_usage() raises, so both returns are reached only on success.
        if what.startswith("%"):
            job = self.job_list.JobFromSpec(what)
            if job is None:
                e_usage("got invalid job ID %r" % what, blame_loc)
            return job.ProcessGroupId()
        else:
            try:
                pid = int(what)
            except ValueError:
                e_usage("got invalid process ID %r" % what, blame_loc)
            return pid

    def _SendSignal(self, arg_r, sig_num):
        # type: (args.Reader, int) -> int
        # Send sig_num to every remaining PID/jobspec argument.
        if arg_r.AtEnd():
            e_usage("expects at least one process/job ID", loc.Missing)

        while not arg_r.AtEnd():
            arg_str, arg_loc = arg_r.Peek2()
            pid = self._ParseWhat(arg_str, arg_loc)

            # NOTE(review): a kill() failure (e.g. EPERM, ESRCH) appears to
            # propagate as an exception here rather than setting a nonzero
            # status -- confirm intended behavior.
            posix.kill(pid, sig_num)
            arg_r.Next()
        return 0

    def _ParseSignal(self, sig_str, blame_loc):
        # type: (str, loc_t) -> int
        """
        Sigspec can one of these forms:
        15, TERM, SIGTERM (case insensitive)
        Raises error if sigspec is in invalid format
        """
        if sig_str.isdigit():
            # We don't validate the signal number; we rely on kill() returning
            # EINVAL instead. This is useful for sending unportable signals.
            sig_num = int(sig_str)
        else:
            sig_num = _SigNameToNumber(sig_str)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("got invalid signal name %r" % sig_str, blame_loc)
        return sig_num

    def _TranslateSignals(self, arg_r):
        # type: (args.Reader) -> int
        # For 'kill -l ARG...': translate each number to a name and vice versa.
        while not arg_r.AtEnd():
            arg, arg_loc = arg_r.Peek2()
            if arg.isdigit():
                sig_name = signal_def.GetName(int(arg))
                if sig_name is None:
                    e_usage("can't translate number %r to a name" % arg, arg_loc)
                # assumes GetName() returns a 'SIG'-prefixed name; strip it
                # -- TODO confirm against signal_def
                print(sig_name[3:])
            else:
                sig_num = _SigNameToNumber(arg)
                if sig_num == signal_def.NO_SIGNAL:
                    e_usage("can't translate name %r to a number" % arg, arg_loc)
                print(str(sig_num))

            arg_r.Next()
        return 0

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # First pass: hand-rolled scan for the traditional leading signal
        # argument, which the flag parser can't handle.
        arg_r = args.Reader(cmd_val.argv, locs=cmd_val.arg_locs)
        arg_r.Next()  # skip command name

        # Check for a signal argument like -15 -TERM -SIGTERM
        first, first_loc = arg_r.Peek2()
        if first is not None and first.startswith('-'):
            sig_spec = first[1:]
            # Single non-digit chars like '-l' fall through to flag parsing.
            if sig_spec.isdigit() or len(sig_spec) > 1:
                sig_num = self._ParseSignal(sig_spec, first_loc)
                arg_r.Next()  # Skip signal argument
                return self._SendSignal(arg_r, sig_num)

        # Note: we're making another args.Reader here
        attrs, arg_r = flag_util.ParseCmdVal('kill',
                                             cmd_val,
                                             accept_typed_args=False)
        arg = arg_types.kill(attrs.attrs)

        if arg.l or arg.L:
            # If no arg, print all signals
            if arg_r.AtEnd():
                PrintSignals()
                return 0

            # Otherwise translate each arg
            return self._TranslateSignals(arg_r)

        # -n and -s are synonyms.
        # TODO: it would be nice if the flag parser could expose the location
        # of 'foo' in -s foo
        sig_num = 15  # SIGTERM, the default signal to send
        blame_loc = cmd_val.arg_locs[0]
        if arg.n is not None:
            sig_num = self._ParseSignal(arg.n, blame_loc)
        if arg.s is not None:
            sig_num = self._ParseSignal(arg.s, blame_loc)

        return self._SendSignal(arg_r, sig_num)
814
815
816# vim: sw=4