OILS / builtin / process_osh.py View on Github | oils.pub

819 lines, 508 significant
1#!/usr/bin/env python2
2"""
3builtin_process.py - Builtins that deal with processes or modify process state.
4
5This is sort of the opposite of builtin_pure.py.
6"""
7from __future__ import print_function
8
9import resource
10from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
11 RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
12from signal import SIGCONT
13
14from _devbuild.gen import arg_types
15from _devbuild.gen.syntax_asdl import loc, loc_t, CompoundWord
16from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
17 wait_status_e)
18from core import dev
19from core import error
20from core.error import e_usage, e_die_status
21from core import process # W1_EXITED, etc.
22from core import pyos
23from core import pyutil
24from core import vm
25from frontend import flag_util
26from frontend import match
27from frontend import signal_def
28from frontend import typed_args
29from frontend import args
30from mycpp import mops
31from mycpp import mylib
32from mycpp.mylib import log, tagswitch, print_stderr
33
34import posix_ as posix
35
36from typing import TYPE_CHECKING, List, Tuple, Optional, cast
37if TYPE_CHECKING:
38 from core.process import Waiter, ExternalProgram, FdState
39 from core import executor
40 from core import state
41 from display import ui
42
43_ = log
44
45
def PrintSignals():
    # type: () -> None
    """Print each signal as 'NUMBER NAME', one per line (used by 'kill -l')."""
    # Iterate over signals and print them
    # NOTE(review): xrange() excludes MaxSigNumber() itself.  If that function
    # returns the highest valid signal number (rather than max+1), the last
    # signal is never printed -- TODO confirm against signal_def.
    for sig_num in xrange(signal_def.MaxSigNumber()):
        sig_name = signal_def.GetName(sig_num)
        if sig_name is None:
            # Numbers with no name (gaps in the platform's signal table)
            continue
        print('%2d %s' % (sig_num, sig_name))
54
55
class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # osh doesn't support JOBSPEC arg
        arg_r.Done()

        # Pick the output style from the flags; -l takes precedence over -p
        # because it's tested first.
        if arg.l:
            display_style = process.STYLE_LONG
        elif arg.p:
            display_style = process.STYLE_PID_ONLY
        else:
            display_style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(display_style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0
85
86
class Fg(vm._Builtin):
    """Put a job in the foreground ('fg' builtin)."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # Job spec for current job is the default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.JobFromSpec(job_spec)
        # note: the 'wait' builtin falls back to JobFromPid()
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # Put the job's process group back into the foreground. GiveTerminal() must
        # be called before sending SIGCONT or else the process might immediately get
        # suspended again if it tries to read/write on the terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        posix.killpg(pgid, SIGCONT)  # Resume the (possibly stopped) job

        if self.exec_opts.interactive():
            print_stderr('[%%%d] PID %d Continued' % (job.job_id, pgid))

        # We are not using waitpid(WCONTINUE) and WIFCONTINUED() in
        # WaitForOne() -- it's an extension to POSIX that isn't necessary for 'fg'
        job.SetForeground()
        job.state = job_state_e.Running

        status = -1  # overwritten by one of the tagswitch cases below

        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                if wait_st.state == job_state_e.Exited:
                    # Only remove bookkeeping when the job actually exited
                    # (it may have merely stopped again)
                    self.job_list.PopChildProcess(job.PidForWait())
                    self.job_list.CleanupWhenJobExits(job)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS? Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                # Interrupted by a signal: shell convention 128 + signal number
                status = 128 + wait_st.sig_num

            else:
                raise AssertionError()

        return status
153
154
class Bg(vm._Builtin):
    """Put a job in the background ('bg' builtin) -- currently a stub."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        # How would this differ from 'fg'?  It wouldn't wait for the job, and
        # it wouldn't give it the controlling terminal.  Unimplemented for now.
        raise error.Usage("isn't implemented", loc.Missing)
169
170
class Fork(vm._Builtin):
    """fork { ... } -- run a block as a background job."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        # No positional args are allowed; only the typed block.
        extra_arg, extra_loc = arg_r.Peek2()
        if extra_arg is not None:
            e_usage('got unexpected argument %r' % extra_arg, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunBackgroundJob(frag)
189
190
class ForkWait(vm._Builtin):
    """forkwait { ... } -- run a block in a subshell and wait for it."""

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)

        # No positional args are allowed; only the typed block.
        extra_arg, extra_loc = arg_r.Peek2()
        if extra_arg is not None:
            e_usage('got unexpected argument %r' % extra_arg, extra_loc)

        frag = typed_args.RequiredBlockAsFrag(cmd_val)
        return self.shell_ex.RunSubshell(frag)
208
209
class Exec(vm._Builtin):
    """exec [command ...] -- replace the shell, or make redirects permanent.

    With no arguments, makes this command's redirects permanent in the
    current shell.  With arguments, replaces the shell process with the
    given external command; on success this never returns.
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            ext_prog,  # type: ExternalProgram
            fd_state,  # type: FdState
            search_path,  # type: executor.SearchPath
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell.  NOTE: Redirects were processed
        # earlier; here we only make them permanent.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetEnv()
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            # Blame the command word itself (index i, not a hard-coded 1)
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[i])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, cmd_val.self_obj, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')
254
255
class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
    Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status. If ID is not
    given, waits for all currently active child processes, and the return
    status is zero. If ID is a job specification, waits for all processes
    in that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """

    def __init__(
            self,
            waiter,  # type: Waiter
            job_list,  # type: process.JobList
            mem,  # type: state.Mem
            tracer,  # type: dev.Tracer
            errfmt,  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt
        self.exec_opts = waiter.exec_opts

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _WaitForJobs(self, job_ids, arg_locs):
        # type: (List[str], List[CompoundWord]) -> int
        """Wait for the explicit list of jobs/PIDs on the command line.

        Returns the exit code of the last one on the COMMAND LINE, not the
        exit code of the last one to FINISH.
        """
        # Resolve every ID first, so invalid IDs fail before we wait.
        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.JobFromSpec(job_id)

            if job is None:
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.JobFromPid(pid)

            if job is None:
                self.errfmt.Print_("Job %s wasn't found" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            # polymorphic call: Process, Pipeline
            wait_st = job.JobWait(self.waiter)

            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    if wait_st.state == job_state_e.Exited:
                        self.job_list.PopChildProcess(job.PidForWait())
                        self.job_list.CleanupWhenJobExits(job)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS? Is this right?
                    status = wait_st.codes[-1]

                    # It would be logical to set PIPESTATUS here, but it's NOT
                    # what other shells do
                    #
                    # I think PIPESTATUS is legacy, and we can design better
                    # YSH semantics
                    #self.mem.SetPipeStatus(wait_st.codes)

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        # Return the last status
        return status

    def _WaitNext(self):
        # type: () -> int
        """Handle 'wait -n': wait until ONE more job terminates."""

        # Loop until there is one fewer process running, there's nothing to wait
        # for, or there's a signal
        n = self.job_list.NumRunning()
        if n == 0:
            status = 127
        else:
            target = n - 1
            status = 0
            while self.job_list.NumRunning() > target:
                result, w1_arg = self.waiter.WaitForOne()
                if result == process.W1_EXITED:
                    pid = w1_arg
                    pr = self.job_list.PopChildProcess(pid)
                    # TODO: background pipelines don't clean up properly,
                    # because only the last PID is registered in
                    # job_list.pid_to_job
                    self.job_list.CleanupWhenProcessExits(pid)

                    if pr is None:
                        if self.exec_opts.verbose_warn():
                            print_stderr(
                                "oils wait: PID %d exited, but oils didn't start it"
                                % pid)
                    else:
                        status = pr.status

                elif result == process.W1_NO_CHILDREN:
                    status = 127
                    break

                elif result == process.W1_CALL_INTR:  # signal
                    status = 128 + w1_arg
                    break

        return status

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if len(job_ids):
            # Note: -n and --all ignored in this case, like bash
            return self._WaitForJobs(job_ids, arg_locs)

        if arg.n:
            return self._WaitNext()

        # 'wait' or 'wait --all': wait for ALL children

        status = 0

        # Note: NumRunning() makes sure we ignore stopped processes, which
        # cause WaitForOne() to return
        while self.job_list.NumRunning() != 0:
            result, w1_arg = self.waiter.WaitForOne()
            if result == process.W1_EXITED:
                pid = w1_arg
                pr = self.job_list.PopChildProcess(pid)
                # TODO: background pipelines don't clean up properly, because
                # only the last PID is registered in job_list.pid_to_job
                self.job_list.CleanupWhenProcessExits(pid)

                if pr is None:
                    # Fix: PopChildProcess() can return None (a PID this shell
                    # didn't start); previously pr.status below raised
                    # AttributeError.  Warn like _WaitNext() does instead.
                    if self.exec_opts.verbose_warn():
                        print_stderr(
                            "oils wait: PID %d exited, but oils didn't start it"
                            % pid)
                else:
                    if arg.verbose:
                        self.errfmt.PrintMessage(
                            '(wait) PID %d exited with status %d' %
                            (pid, pr.status), cmd_val.arg_locs[0])

                    if pr.status != 0 and arg.all:  # YSH extension: respect failure
                        if arg.verbose:
                            self.errfmt.PrintMessage(
                                'wait --all: will fail with status 1')
                        status = 1  # set status, but keep waiting

            if result == process.W1_NO_CHILDREN:
                break  # status is 0

            if result == process.W1_CALL_INTR:
                status = 128 + w1_arg
                break

        return status
453
454
class Umask(vm._Builtin):
    """umask builtin: print or set the file-creation mask."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        argv = cmd_val.argv[1:]
        num_args = len(argv)

        if num_args == 0:
            # umask() has a dumb API: reading it requires modifying it.  Set
            # it to 0, then immediately restore the saved value.  (dash
            # disables interrupts around the two calls, but our signal
            # handlers don't call umask(), so we don't bother.)
            mask = posix.umask(0)
            posix.umask(mask)
            print('0%03o' % mask)  # octal format
            return 0

        if num_args == 1:
            try:
                new_mask = int(argv[0], 8)
            except ValueError:
                # Also reached when the input contains '8' or '9'.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
490
491
def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    """Format an rlimit value for display, scaled down by 'factor'."""
    if not mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        scaled = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(scaled)
    return 'unlimited'
499
500
class Ulimit(vm._Builtin):
    """ulimit builtin: print or set process resource limits."""

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        # Lazily-built resource table; see _Table()
        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]
        """Return the resource table, building it on first use.

        POSIX 2018:
        https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        """
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table

    def _FindFactor(self, what):
        # type: (int) -> int
        """Return the display scale factor for an RLIMIT_* constant."""
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        # Determine which single resource the flags select
        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all resources
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            # note: the loop variable shadows 'what' above, which is harmless
            # because we return immediately after the loop
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        # Fix: compute the factor unconditionally.  It was previously assigned
        # only in the non-'unlimited' branch below, so the error-reporting
        # path after SetRLimit referenced an unbound 'factor' when setting
        # 'unlimited' failed (e.g. raising the hard limit without privilege).
        factor = self._FindFactor(what)

        if s is None:
            # No argument: print the current limit
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            if match.LooksLikeInteger(s):
                ok, big_int = mops.FromStr2(s)
                if not ok:
                    raise error.Usage('Integer too big: %s' % s, s_loc)
            else:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is passed. This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0
698
699
def _SigNameToNumber(name):
    # type: (str) -> int
    """Translate a name like 'term', 'TERM', or 'SIGTERM' to a signal number.

    Callers compare the result against signal_def.NO_SIGNAL to detect
    unrecognized names.
    """
    upper_name = name.upper()
    if upper_name.startswith("SIG"):
        upper_name = upper_name[3:]  # drop the SIG prefix before lookup
    return signal_def.GetNumber(upper_name)
706
707
class Kill(vm._Builtin):
    """Send a signal to a process"""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def _ParseWhat(self, what, blame_loc):
        # type: (str, loc_t) -> int
        """Resolve a target: '%jobspec' -> its process group ID, else a PID."""
        if what.startswith("%"):
            job = self.job_list.JobFromSpec(what)
            if job is None:
                e_usage("got invalid job ID %r" % what, blame_loc)
            return job.ProcessGroupId()
        else:
            try:
                pid = int(what)
            except ValueError:
                e_usage("got invalid process ID %r" % what, blame_loc)
            return pid

    def _SendSignal(self, arg_r, sig_num):
        # type: (args.Reader, int) -> int
        """Send sig_num to every remaining arg (each a PID or %jobspec)."""
        if arg_r.AtEnd():
            e_usage("expects at least one process/job ID", loc.Missing)

        while not arg_r.AtEnd():
            arg_str, arg_loc = arg_r.Peek2()
            pid = self._ParseWhat(arg_str, arg_loc)

            posix.kill(pid, sig_num)
            arg_r.Next()
        return 0

    def _ParseSignal(self, sig_str, blame_loc):
        # type: (str, loc_t) -> int
        """
        Sigspec can one of these forms:
          15, TERM, SIGTERM (case insensitive)
        Raises error if sigspec is in invalid format
        """
        if sig_str.isdigit():
            # We don't validate the signal number; we rely on kill() returning
            # EINVAL instead. This is useful for sending unportable signals.
            sig_num = int(sig_str)
        else:
            sig_num = _SigNameToNumber(sig_str)
            if sig_num == signal_def.NO_SIGNAL:
                e_usage("got invalid signal name %r" % sig_str, blame_loc)
        return sig_num

    def _TranslateSignals(self, arg_r):
        # type: (args.Reader) -> int
        """For 'kill -l/-L NAME|NUM ...': translate number <-> name per arg."""
        while not arg_r.AtEnd():
            arg, arg_loc = arg_r.Peek2()
            if arg.isdigit():
                sig_name = signal_def.GetName(int(arg))
                if sig_name is None:
                    e_usage("can't translate number %r to a name" % arg, arg_loc)
                # drops the first 3 chars -- presumably the 'SIG' prefix,
                # consistent with _SigNameToNumber
                print(sig_name[3:])
            else:
                sig_num = _SigNameToNumber(arg)
                if sig_num == signal_def.NO_SIGNAL:
                    e_usage("can't translate name %r to a number" % arg, arg_loc)
                print(str(sig_num))

            arg_r.Next()
        return 0

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        arg_r = args.Reader(cmd_val.argv, locs=cmd_val.arg_locs)
        arg_r.Next()  # skip command name

        # Check for a signal argument like -15 -TERM -SIGTERM
        first, first_loc = arg_r.Peek2()
        if first is not None and first.startswith('-'):
            sig_spec = first[1:]
            # Single non-digit letters (e.g. -l, -s) fall through to the
            # regular flag parser below
            if sig_spec.isdigit() or len(sig_spec) > 1:
                sig_num = self._ParseSignal(sig_spec, first_loc)
                arg_r.Next()  # Skip signal argument
                return self._SendSignal(arg_r, sig_num)

        # Note: we're making another args.Reader here
        attrs, arg_r = flag_util.ParseCmdVal('kill',
                                             cmd_val,
                                             accept_typed_args=False)
        arg = arg_types.kill(attrs.attrs)

        if arg.l or arg.L:
            # If no arg, print all signals
            if arg_r.AtEnd():
                PrintSignals()
                return 0

            # Otherwise translate each arg
            return self._TranslateSignals(arg_r)

        # -n and -s are synonyms.
        # TODO: it would be nice if the flag parser could expose the location
        # of 'foo' in -s foo
        sig_num = 15  # SIGTERM, the default signal to send
        blame_loc = cmd_val.arg_locs[0]
        if arg.n is not None:
            sig_num = self._ParseSignal(arg.n, blame_loc)
        if arg.s is not None:
            sig_num = self._ParseSignal(arg.s, blame_loc)

        return self._SendSignal(arg_r, sig_num)
817
818
819# vim: sw=4