OILS / builtin / process_osh.py View on Github | oils.pub

779 lines, 486 significant
1#!/usr/bin/env python2
2"""
3process_osh.py - Builtins that deal with processes or modify process state.
4
5This is sort of the opposite of pure_osh.py.
6"""
7from __future__ import print_function
8
9import resource
10from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
11 RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
12from signal import SIGCONT
13
14from _devbuild.gen import arg_types
15from _devbuild.gen.syntax_asdl import loc, loc_t, CompoundWord
16from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
17 wait_status_e)
18from core import dev
19from core import error
20from core.error import e_usage, e_die_status
21from core import process # W1_EXITED, etc.
22from core import pyos
23from core import pyutil
24from core import vm
25from frontend import flag_util
26from frontend import match
27from frontend import signal_def
28from frontend import typed_args
29from frontend import args
30from mycpp import mops
31from mycpp import mylib
32from mycpp.mylib import log, tagswitch, print_stderr
33
34import posix_ as posix
35
36from typing import TYPE_CHECKING, List, Tuple, Optional, cast
37if TYPE_CHECKING:
38 from core.process import Waiter, ExternalProgram, FdState
39 from core import executor
40 from core import state
41 from display import ui
42
43_ = log
44
45
def PrintSignals():
    # type: () -> None
    """Print a table of signal numbers and their names, one per line."""
    max_sig = signal_def.MaxSigNumber()
    for num in xrange(max_sig):
        name = signal_def.GetName(num)
        if name is None:
            continue  # gaps in the signal numbering have no name
        print('%2d %s' % (num, name))
54
55
class Jobs(vm._Builtin):
    """Implement the 'jobs' builtin: list the shell's jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # Pick the display style from the flags; -l takes priority over -p.
        if arg.l:
            display_style = process.STYLE_LONG
        elif arg.p:
            display_style = process.STYLE_PID_ONLY
        else:
            display_style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(display_style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0
82
83
class Fg(vm._Builtin):
    """Put a job in the foreground.

    Implements the 'fg' builtin: resolves a job spec, hands the terminal to
    the job's process group, sends SIGCONT, and waits for the job.
    """

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal()
        # must be called before sending SIGCONT or else the process might
        # immediately get suspended again if it tries to read/write on the
        # terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Send signal

        if self.exec_opts.interactive():
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for
        # 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1  # should be overwritten by every tagswitch branch below

        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    # Only clean up bookkeeping when the job truly exited
                    # (not when it was merely stopped again)
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS? Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                # Shell convention: 128 + signal number
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status
150
151
class Bg(vm._Builtin):
    """Stub for the 'bg' builtin: resume a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # Compared with 'fg', this would resume the job WITHOUT waiting for
        # it, and without giving it the controlling terminal.  Not done yet.
        raise error.Usage("isn't implemented", loc.Missing)
166
167
class Fork(vm._Builtin):
    """YSH 'fork' builtin: run a block as a background job."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork', cmd_val,
                                         accept_typed_args=True)

        # 'fork' takes no positional args -- only the required block
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(frag)
186
187
class ForkWait(vm._Builtin):
    """YSH 'forkwait' builtin: run a block in a subshell and wait for it."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait', cmd_val,
                                         accept_typed_args=True)

        # 'forkwait' takes no positional args -- only the required block
        extra, extra_loc = arg_r.Peek2()
        if extra is not None:
            e_usage('got unexpected argument %r' % extra, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(frag)
205
206
class Exec(vm._Builtin):
    """The 'exec' builtin.

    With no arguments, makes the current redirects permanent.  With
    arguments, replaces the shell process with the given external command.
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell. NOTE: Redirects were processed
        # earlier, so we only have to make them permanent.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        if 0:  # debugging (removed a duplicated log line)
            log('E %r', environ)
            log('ZZ %r', environ.get('ZZ'))

        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            # Fix: blame the location of the command word itself
            # (arg_locs[i]), not arg_locs[1], which may point at a flag
            # like '--' when i != 1.
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[i])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
251
252
class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
    Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status. If ID is not
    given, waits for all currently active child processes, and the return
    status is zero. If ID is a job specification, waits for all processes
    in that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int
        """Wait for the explicit list of jobspec/PID arguments.

        Returns the exit code of the last one on the COMMAND LINE, not the
        exit code of the last one to FINISH.  Returns 127 if any ID doesn't
        name a known job; raises error.Usage for malformed IDs.
        """
        # First resolve every argument to a Job, failing early if any is bad.
        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS? Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int
        """Implement 'wait -n': wait for the next single job to terminate."""

        # Loop until there is one fewer process running, there's nothing to
        # wait for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or wait --all

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                if pr is None:
                    # Fix: PopChildProcess() can return None for a PID we
                    # didn't start (_WaitNext() already handles this case);
                    # previously we fell through and crashed on pr.status.
                    if self.exec_opts.verbose_warn():
                        print_stderr(
                            "oils wait: PID %d exited, but oils didn't start it"
                            % pid)
                    continue

                if arg.verbose:
                    self.errfmt.PrintMessage(
                        '(wait) PID %d exited with status %d' %
                        (pid, pr.status), cmd_val.arg_locs[0])

                if pr.status != 0 and arg.all:  # YSH extension: respect failure
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            'wait --all: will fail with status 1')
                    status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
450
451
def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    """Render an rlimit value for display, scaled down by 'factor'.

    RLIM_INFINITY renders as the string 'unlimited'.
    """
    if not mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        scaled = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(scaled)
    return 'unlimited'
459
460
class Ulimit(vm._Builtin):
    """The 'ulimit' builtin: query or set process resource limits.

    Wraps getrlimit()/setrlimit() via pyos.GetRLimit()/SetRLimit().
    """

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        # Lazily-built table; see _Table()
        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]
        """Return the (flag, RLIMIT_X, factor, description) table.

        Built lazily on first use.  POSIX 2018:
        https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        """
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        """Return the display/scale factor for an RLIMIT_* constant."""
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        # Which resource was selected, and how many resource flags were given
        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            # renamed loop vars to avoid shadowing 'what' above
            for flag, res, res_factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(res)

                soft2 = _LimitString(soft, res_factor)
                hard2 = _LimitString(hard, res_factor)
                print(fmt % (flag, soft2, hard2, str(res_factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        if s is None:
            # No argument: print the current (soft, or with -H hard) limit
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Fix: compute the factor up front.  It was previously assigned only
        # on the numeric branch below, so the error-reporting path after a
        # failed setrlimit() of 'unlimited' referenced an unbound variable.
        factor = self._FindFactor(what)

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed. This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0
658
659
def _SigNameToNumber(name):
    # type: (str) -> int
    """Map a name like 'term', 'TERM', or 'SIGTERM' to a signal number.

    The lookup is case-insensitive and an optional 'SIG' prefix is stripped.
    """
    upper = name.upper()
    if upper.startswith("SIG"):
        upper = upper[3:]
    return signal_def.GetNumber(upper)
666
667
class Kill(vm._Builtin):
    """Send a signal to a process.

    Handles 'kill [-SIGSPEC] id ...', 'kill -l/-L [args]', and
    'kill -s/-n SIGSPEC id ...'.
    """

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def _ParseWhat(self, what, blame_loc):
        # type: (str, loc_t) -> int
        # Resolve a target argument: '%jobspec' -> the job's process group ID,
        # otherwise a decimal PID.  Raises a usage error on invalid input.
        if what.startswith("%"):
            job = self.job_list.JobFromSpec(what)
            if job is None:
                e_usage("got invalid job ID %r" % what, blame_loc)
            return job.ProcessGroupId()
        else:
            try:
                pid = int(what)
            except ValueError:
                e_usage("got invalid process ID %r" % what, blame_loc)
            return pid

    def _SendSignal(self, arg_r, sig_num):
        # type: (args.Reader, int) -> int
        # Send sig_num to every remaining process/job argument in arg_r.
        if arg_r.AtEnd():
            e_usage("expects at least one process/job ID", loc.Missing)

        while not arg_r.AtEnd():
            arg_str, arg_loc = arg_r.Peek2()
            pid = self._ParseWhat(arg_str, arg_loc)

            posix.kill(pid, sig_num)
            arg_r.Next()
        return 0

    def _ParseSignal(self, sig_str, blame_loc):
        # type: (str, loc_t) -> int
        """
        Sigspec can be one of these forms:
          15, TERM, SIGTERM (case insensitive)
        Raises error if sigspec is in invalid format
        """
        if sig_str.isdigit():
            # We don't validate the signal number; we rely on kill() returning
            # EINVAL instead. This is useful for sending unportable signals.
            sig_num = int(sig_str)
        else:
            sig_num = _SigNameToNumber(sig_str)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("got invalid signal name %r" % sig_str, blame_loc)
        return sig_num

    def _TranslateSignals(self, arg_r):
        # type: (args.Reader) -> int
        # For 'kill -l/-L args': translate each numeric arg to a name and
        # each name to a number, printing one result per line.
        while not arg_r.AtEnd():
            arg, arg_loc = arg_r.Peek2()
            if arg.isdigit():
                sig_name = signal_def.GetName(int(arg))
                if sig_name is None:
                    e_usage("can't translate number %r to a name" % arg, arg_loc)
                # NOTE(review): [3:] assumes GetName() returns a name with a
                # 'SIG' prefix -- confirm against frontend/signal_def
                print(sig_name[3:])
            else:
                sig_num = _SigNameToNumber(arg)
                if sig_num == signal_def.NO_SIGNAL:
                    e_usage("can't translate name %r to a number" % arg, arg_loc)
                print(str(sig_num))

            arg_r.Next()
        return 0

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        arg_r = args.Reader(cmd_val.argv, locs=cmd_val.arg_locs)
        arg_r.Next()  # skip command name

        # Check for a signal argument like -15 -TERM -SIGTERM
        first, first_loc = arg_r.Peek2()
        if first is not None and first.startswith('-'):
            sig_spec = first[1:]
            # Single non-digit chars like '-l' fall through to flag parsing
            if sig_spec.isdigit() or len(sig_spec) > 1:
                sig_num = self._ParseSignal(sig_spec, first_loc)
                arg_r.Next()  # Skip signal argument
                return self._SendSignal(arg_r, sig_num)

        # Note: we're making another args.Reader here
        attrs, arg_r = flag_util.ParseCmdVal('kill',
                                             cmd_val,
                                             accept_typed_args=False)
        arg = arg_types.kill(attrs.attrs)

        if arg.l or arg.L:
            # If no arg, print all signals
            if arg_r.AtEnd():
                PrintSignals()
                return 0

            # Otherwise translate each arg
            return self._TranslateSignals(arg_r)

        # -n and -s are synonyms.
        # TODO: it would be nice if the flag parser could expose the location
        # of 'foo' in -s foo
        sig_num = 15  # SIGTERM, the default signal to send
        blame_loc = cmd_val.arg_locs[0]
        if arg.n is not None:
            sig_num = self._ParseSignal(arg.n, blame_loc)
        if arg.s is not None:
            sig_num = self._ParseSignal(arg.s, blame_loc)

        return self._SendSignal(arg_r, sig_num)
777
778
779# vim: sw=4