OILS / core / process_test.py View on Github | oils.pub

719 lines, 402 significant
1#!/usr/bin/env python2
2
3import os
4import unittest
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.runtime_asdl import (RedirValue, redirect_arg, cmd_value,
8 trace)
9from _devbuild.gen.syntax_asdl import loc, redir_loc
10from asdl import runtime
11from builtin import read_osh
12from builtin import process_osh
13from builtin import trap_osh
14from core import dev
15from core import process # module under test
16from core import pyos
17from core import sh_init
18from core import state
19from core import test_lib
20from core import util
21from display import ui
22from frontend import flag_def # side effect: flags are defined, for wait builtin
23from mycpp import iolib
24from mycpp import mylib
25from mycpp.mylib import log
26from osh import cmd_parse_test
27
28import posix_ as posix
29
30_ = flag_def
31
32Process = process.Process
33ExternalThunk = process.ExternalThunk
34assertParsePipeline = cmd_parse_test.assertParsePipeline
35
36
def Banner(msg):
    """Print a 60-dash separator line followed by msg, to label test output."""
    separator = '-' * 60
    print(separator)
    print(msg)
40
41
42class _FakeJobControl(object):
43
44 def __init__(self, enabled):
45 self.enabled = enabled
46
47 def Enabled(self):
48 return self.enabled
49
50
51class _FakeCommandEvaluator(object):
52
53 def RunPendingTraps(self):
54 pass
55
56
def _SetupTest(self):
    # Build the interpreter plumbing shared by every TestCase below and hang
    # it on the TestCase instance ('self' is a TestCase, called from setUp()):
    # arena, memory/options, job control + job list, trap state, tracer,
    # waiter, fd state, external-program launcher, and a command evaluator.
    # NOTE(review): construction order matters — e.g. errfmt is created
    # before fd_state, which receives it.
    self.arena = test_lib.MakeArena('process_test.py')

    self.mem = test_lib.MakeMem(self.arena)
    # parse_opts is unused here; exec_opts/mutable_opts feed the tracer below.
    parse_opts, exec_opts, mutable_opts = state.MakeOpts(self.mem, {}, None)
    self.mem.exec_opts = exec_opts
    self.exec_opts = exec_opts

    #state.InitMem(mem, {}, '0.1')
    sh_init.InitDefaultVars(self.mem, [])

    self.job_control = process.JobControl()
    self.job_list = process.JobList()

    signal_safe = iolib.InitSignalSafe()
    self.trap_state = trap_osh.TrapState(signal_safe)

    # No fd_state yet at this point; MultiTracer tolerates None here.
    fd_state = None
    self.multi_trace = dev.MultiTracer(posix.getpid(), '', '', '', fd_state)
    self.tracer = dev.Tracer(None, exec_opts, mutable_opts, self.mem,
                             mylib.Stderr(), self.multi_trace)
    self.waiter = process.Waiter(self.job_list, exec_opts, self.trap_state,
                                 self.tracer)
    self.errfmt = ui.ErrorFormatter()
    self.fd_state = process.FdState(self.errfmt, self.job_control,
                                    self.job_list, None, self.tracer, None,
                                    exec_opts)
    self.ext_prog = process.ExternalProgram('', self.fd_state, self.errfmt,
                                            util.NullDebugFile())
    self.cmd_ev = test_lib.InitCommandEvaluator(arena=self.arena,
                                                ext_prog=self.ext_prog)
88
89
def _SetupWait(self):
    # Attach the 'wait' builtin to the TestCase.  Requires _SetupTest() to
    # have run first (uses self.waiter, self.job_list, self.mem, etc.).
    self.wait_builtin = process_osh.Wait(self.waiter, self.job_list, self.mem,
                                         self.tracer, self.errfmt)
93
94
def _MakeThunk(argv, ext_prog, path_dirs=('/bin', '/usr/bin')):
    """Build an ExternalThunk that will exec argv via ext_prog.

    argv[0] is resolved against path_dirs (generalized from the previously
    hard-coded /bin:/usr/bin search, with the same default); if it isn't
    found, the bare name is used so tests can exercise the exec failure case.
    """
    arg_vec = cmd_value.Argv(argv, [loc.Missing] * len(argv), False, None,
                             None)
    argv0_path = None
    for path_entry in path_dirs:
        full_path = os.path.join(path_entry, argv[0])
        if os.path.exists(full_path):
            argv0_path = full_path
            break
    if not argv0_path:
        argv0_path = argv[0]  # fallback that tests failure case
    return ExternalThunk(ext_prog, argv0_path, arg_vec, {})
107
108
def _CommandNode(code_str, arena):
    """Parse one logical line of shell code and return its command node."""
    parser = test_lib.InitCommandParser(code_str, arena=arena)
    return parser.ParseLogicalLine()
112
113
class _Common(unittest.TestCase):
    """Shared helpers for building Process and Pipeline objects under test.

    Assumes _SetupTest() has populated self.ext_prog, self.job_control,
    self.job_list, self.tracer, self.cmd_ev, etc. on the TestCase.
    """

    def _ExtProc(self, argv):
        """Wrap argv in an ExternalThunk and return a Process running it."""
        ext_thunk = _MakeThunk(argv, self.ext_prog)
        return Process(ext_thunk, self.job_control, self.job_list,
                       self.tracer)

    def _MakeForegroundPipeline(self, argv_list, last_str=''):
        """Build a foreground pipeline of external commands.

        Foreground pipelines get self.last_thunk via pi.AddLast(); background
        pipelines don't.  last_str, the final stage run by the shell itself,
        is required.
        """
        assert len(last_str), last_str  # required

        pipeline = process.Pipeline(False, self.job_control, self.job_list,
                                    self.tracer)
        for argv in argv_list:
            pipeline.Add(self._ExtProc(argv))
        last_node = _CommandNode(last_str, self.arena)
        pipeline.AddLast((self.cmd_ev, last_node))
        return pipeline

    def _MakeProcess(self, node):
        """Return a Process that runs a parsed command node as a subprogram."""
        sub_thunk = process.SubProgramThunk(self.cmd_ev, node, self.trap_state,
                                            self.multi_trace, True,
                                            self.exec_opts.errtrace())
        return process.Process(sub_thunk, self.job_control, self.job_list,
                               self.tracer)

    def _MakeBackgroundPipeline(self, code_str):
        """Parse code_str as a pipeline and build (but not start) it."""
        pipeline_node = assertParsePipeline(self, code_str)

        pipeline = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                    self.job_control, self.job_list,
                                    self.tracer)
        for child in pipeline_node.children:
            child_proc = self._MakeProcess(child)
            child_proc.Init_ParentPipeline(pipeline)
            pipeline.Add(child_proc)
        return pipeline
154
155
class ProcessTest(_Common):
    """Tests that fork real external processes and pipelines and wait on them."""

    def setUp(self):
        _SetupTest(self)

    def testStdinRedirect(self):
        # FdState.Push()/Pop() must give unbuffered stdin: after Pop() closes
        # the redirect, re-Pushing it re-reads from the start of the file.
        PATH = '_tmp/one-two.txt'
        # Write two lines
        with open(PATH, 'w') as f:
            f.write('one\ntwo\n')

        # Should get the first line twice, because Pop() closes it!

        r = RedirValue(Id.Redir_Less, runtime.NO_SPID, redir_loc.Fd(0),
                       redirect_arg.Path(PATH))

        cmd_ev = _FakeCommandEvaluator()

        err_out = []
        self.fd_state.Push([r], err_out)
        line1, _ = read_osh._ReadPortion(0, pyos.NEWLINE_CH, -1, False, cmd_ev)
        self.fd_state.Pop(err_out)

        self.fd_state.Push([r], err_out)
        line2, _ = read_osh._ReadPortion(0, pyos.NEWLINE_CH, -1, False, cmd_ev)
        self.fd_state.Pop(err_out)

        # sys.stdin.readline() would erroneously return 'two' because of buffering.
        self.assertEqual('one', line1)
        self.assertEqual('one', line2)

    def testProcess(self):
        # Runs 'date' (expect status 0) and a nonexistent command (failure path).
        # 3 fds.  Does Python open it?  Shell seems to have it too.  Maybe it
        # inherits from the shell.
        print('FDS BEFORE', os.listdir('/dev/fd'))

        Banner('date')
        argv = ['date']
        p = self._ExtProc(argv)
        why = trace.External(argv)
        status = p.RunProcess(self.waiter, why)
        log('date returned %d', status)
        self.assertEqual(0, status)

        Banner('does-not-exist')
        p = self._ExtProc(['does-not-exist'])
        print(p.RunProcess(self.waiter, why))

        # 12 file descriptors open!
        print('FDS AFTER', os.listdir('/dev/fd'))

    def testPipeline(self):
        # ls | cut -d . -f 2 | sort | uniq -c, with the last part run in-shell.
        print('BEFORE', os.listdir('/dev/fd'))

        p = self._MakeForegroundPipeline(
            [['ls'], ['cut', '-d', '.', '-f', '2'], ['sort']],
            last_str='uniq -c')

        p.StartPipeline(self.waiter)
        pipe_status = p.RunLastPart(self.waiter, self.fd_state)
        log('pipe_status: %s', pipe_status)

        print('AFTER', os.listdir('/dev/fd'))

    def testPipeline2(self):
        Banner('ls | cut -d . -f 1 | head')
        p = self._MakeForegroundPipeline(
            [['ls'], ['cut', '-d', '.', '-f', '1']], last_str='head')

        p.StartPipeline(self.waiter)
        print(p.RunLastPart(self.waiter, self.fd_state))

    def testPipeline3(self):
        # Builds the pipeline by hand with SubProgramThunk parts instead of
        # _MakeForegroundPipeline, simulating a subshell for each command.
        # Simulating subshell for each command
        node1 = _CommandNode('ls', self.arena)
        node2 = _CommandNode('head', self.arena)
        node3 = _CommandNode('sort --reverse', self.arena)

        thunk1 = process.SubProgramThunk(self.cmd_ev, node1, self.trap_state,
                                         self.multi_trace, True, False)
        thunk2 = process.SubProgramThunk(self.cmd_ev, node2, self.trap_state,
                                         self.multi_trace, True, False)
        thunk3 = process.SubProgramThunk(self.cmd_ev, node3, self.trap_state,
                                         self.multi_trace, True, False)

        p = process.Pipeline(False, self.job_control, self.job_list,
                             self.tracer)
        p.Add(Process(thunk1, self.job_control, self.job_list, self.tracer))
        p.Add(Process(thunk2, self.job_control, self.job_list, self.tracer))
        p.Add(Process(thunk3, self.job_control, self.job_list, self.tracer))

        last_thunk = (self.cmd_ev, _CommandNode('cat', self.arena))
        p.AddLast(last_thunk)

        p.StartPipeline(self.waiter)
        print(p.RunLastPart(self.waiter, self.fd_state))

        # TODO: Combine pipelines for other things:

        # echo foo 1>&2 | tee stdout.txt
        #
        # foo=$(ls | head)
        #
        # foo=$(<<EOF ls | head)
        # stdin
        # EOF
        #
        # ls | head &

        # Or technically we could fork the whole interpreter for foo|bar|baz and
        # capture stdout of that interpreter.

    def _MakePipeline2(self, jc):
        # Build '/bin/echo testpipeline | cat' with an injectable job control
        # (jc), so testPipelinePgidField can use _FakeJobControl.
        pi = process.Pipeline(False, jc, self.job_list, self.tracer)

        node1 = _CommandNode('/bin/echo testpipeline', self.arena)
        node2 = _CommandNode('cat', self.arena)

        thunk1 = process.SubProgramThunk(self.cmd_ev, node1, self.trap_state,
                                         self.multi_trace, True, False)
        thunk2 = process.SubProgramThunk(self.cmd_ev, node2, self.trap_state,
                                         self.multi_trace, True, False)

        pi.Add(Process(thunk1, jc, self.job_list, self.tracer))
        pi.Add(Process(thunk2, jc, self.job_list, self.tracer))

        return pi

    def testPipelinePgidField(self):
        # With job control disabled the pipeline never gets a pgid; with it
        # enabled, the first child becomes the process group leader.
        jc = _FakeJobControl(False)

        pi = self._MakePipeline2(jc)
        self.assertEqual(process.INVALID_PGID, pi.ProcessGroupId())

        pi.StartPipeline(self.waiter)
        # No pgid
        self.assertEqual(process.INVALID_PGID, pi.ProcessGroupId())

        jc = _FakeJobControl(True)

        pi = self._MakePipeline2(jc)
        self.assertEqual(process.INVALID_PGID, pi.ProcessGroupId())

        pi.StartPipeline(self.waiter)
        # first process is the process group leader
        self.assertEqual(pi.pids[0], pi.ProcessGroupId())

    def testOpen(self):
        # Disabled because mycpp translation can't handle it.  We do this at a
        # higher layer.
        return

        # This function used to raise BOTH OSError and IOError because Python 2 is
        # inconsistent.
        # We follow Python 3 in preferring OSError.
        # https://stackoverflow.com/questions/29347790/difference-between-ioerror-and-oserror
        self.assertRaises(OSError, self.fd_state.Open, '_nonexistent_')
        self.assertRaises(OSError, self.fd_state.Open, 'metrics/')
314
315
class JobListTest(_Common):
    """
    Test invariant that the 'wait' builtin removes the (pid -> status)
    mappings (NOT the Waiter)

    There are 4 styles of invoking it:

        wait               # for all
        wait -n            # for next
        wait $pid1 $pid2   # for specific jobs -- problem: are pipelines included?
        wait %j1 %j2       # job specs -- jobs are either pipelines or processes

    Bonus:

        jobs -l can show exit status
    """

    def setUp(self):
        _SetupTest(self)
        _SetupWait(self)

    def _RunBackgroundJob(self, argv):
        # Fork argv as a background job and register it; returns (pid, job_id).
        p = self._ExtProc(argv)

        # Similar to Executor::RunBackgroundJob()
        p.SetBackground()
        pid = p.StartProcess(trace.Fork)

        #self.mem.last_bg_pid = pid  # for $!

        job_id = self.job_list.RegisterJob(p)  # show in 'jobs' list
        return pid, job_id

    def _StartProcesses(self, n):
        # Start n background jobs.  Job i sleeps 0.0(10-i) seconds and exits
        # with status 10-i, so the LAST job started finishes FIRST -- the
        # tests below rely on this ordering of statuses (e.g. 8 before 9).
        pids = []
        job_ids = []

        assert n < 10, n
        for i in xrange(1, n + 1):
            j = 10 - i  # count down
            argv = ['sh', '-c', 'sleep 0.0%d; echo i=%d; exit %d' % (j, j, j)]
            pid, job_id = self._RunBackgroundJob(argv)
            pids.append(pid)
            job_ids.append(job_id)

        log('pids %s', pids)
        log('job_ids %s', job_ids)

        return pids, job_ids

    def assertJobListLength(self, length):
        # All three job-list data structures must stay in sync.
        self.assertEqual(length, len(self.job_list.child_procs))
        self.assertEqual(length, len(self.job_list.jobs))
        self.assertEqual(length, len(self.job_list.pid_to_job))

    def testWaitAll(self):
        """ wait """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 2 processes with &
        pids, job_ids = self._StartProcesses(2)

        # Now we have 2 jobs
        self.assertJobListLength(2)

        # Invoke the 'wait' builtin

        cmd_val = test_lib.MakeBuiltinArgv(['wait'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(0, status)

        # Jobs list is now empty
        self.assertJobListLength(0)

    def testWaitNext(self):
        """ wait -n """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 2 processes with &
        pids, job_ids = self._StartProcesses(2)

        # Now we have 2 jobs
        self.assertJobListLength(2)

        ### 'wait -n'
        # Job 2 (exit 8, sleep 0.08s) finishes before job 1 (exit 9, 0.09s).
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        # Jobs list now has 1 fewer job
        self.assertJobListLength(1)

        ### 'wait -n' again
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        # Now zero
        self.assertJobListLength(0)

        ### 'wait -n' again -- nothing left to wait for, like shell's 127
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(127, status)

        # Still zero
        self.assertJobListLength(0)

    def testWaitPid(self):
        """ wait $pid2 """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 3 processes with &
        pids, job_ids = self._StartProcesses(3)

        # Now we have 3 jobs
        self.assertJobListLength(3)

        # wait $pid2 -- job 2 exits with 10-2 = 8
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[1])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        # Jobs list now has 1 fewer job
        self.assertJobListLength(2)

        # wait $pid3 -- exits with 7
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[2])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(7, status)

        self.assertJobListLength(1)

        # wait $pid1 -- exits with 9
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[0])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        self.assertJobListLength(0)

    def testWaitJob(self):
        """ wait %j2 """
        # Same as testWaitPid, but addressing jobs by %jobspec instead of pid.

        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 3 processes with &
        pids, job_ids = self._StartProcesses(3)

        # Now we have 3 jobs
        self.assertJobListLength(3)

        # wait %j2
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[1])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        self.assertJobListLength(2)

        # wait %j3
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[2])])

        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(7, status)

        self.assertJobListLength(1)

        # wait %j1
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[0])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        self.assertJobListLength(0)

    def testForegroundProcessCleansUpChildProcessDict(self):
        # A foreground RunProcess() should leave no entries behind.
        self.assertJobListLength(0)

        argv = ['sleep', '0.01']
        p = self._ExtProc(argv)
        why = trace.External(argv)
        p.RunProcess(self.waiter, why)

        self.assertJobListLength(0)

    def testGrandchildOutlivesChild(self):
        """ The new parent is the init process """

        # Jobs list starts out empty
        self.assertEqual(0, len(self.job_list.child_procs))

        # the sleep process should outlive the sh process
        argv = ['sh', '-c', 'sleep 0.1 & exit 99']
        pid, job_id = self._RunBackgroundJob(argv)

        # wait -n sees only our direct child (exit 99), not the orphaned sleep
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        log('status = %d', status)
        self.assertEqual(99, status)

        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        log('status = %d', status)
        self.assertEqual(127, status)

    # More tests:
    #
    # wait $pipeline_pid - with pipeline leader, and other PID
    # wait %pipeline_job
    # wait -n on pipeline?  Does it return PIPESTATUS?
    # wait with pipeline - should be OK
    #
    # Stopped jobs: does it print something interactively?
531
532
class PipelineJobListTest(_Common):
    """
    Like the JobListTest above, but starts pipelines instead of individual
    processes.

    NOTE(review): several tests below disable their trailing assertions with
    an early 'return' -- presumably pipeline job-list cleanup isn't finished
    yet (see the TODO in the last test); confirm before re-enabling.
    """

    def setUp(self):
        _SetupTest(self)
        _SetupWait(self)

    def _RunBackgroundPipeline(self, code_str):
        # Build, start, and register a background pipeline; returns (pi, job_id).
        # Like Executor::RunBackgroundJob()
        pi = self._MakeBackgroundPipeline(code_str)
        pi.StartPipeline(self.waiter)
        pi.SetBackground()
        #self.mem.last_bg_pid = pid  # for $!
        job_id = self.job_list.RegisterJob(pi)  # show in 'jobs' list
        return pi, job_id

    def _StartPipelines(self, n):
        # Start n background pipelines of 3 processes each; pipeline i's last
        # part exits with status 10-i (same count-down as JobListTest).
        pipelines = []
        job_ids = []

        assert n < 10, n
        for i in xrange(1, n + 1):
            j = 10 - i  # count down
            code_str = 'sleep 0.0%d | cat | (exit %d)' % (j, j)
            #code_str = 'sleep 0.0%d | exit %d | cat' % (j, j)
            pi, job_id = self._RunBackgroundPipeline(code_str)
            pipelines.append(pi)
            job_ids.append(job_id)

        log('pipelines %s', pipelines)
        log('job_ids %s', job_ids)

        return pipelines, job_ids

    def assertJobListLength(self, length):
        # 3 processes per pipeline in this test
        self.assertEqual(length * 3, len(self.job_list.child_procs))
        self.assertEqual(length, len(self.job_list.jobs))
        self.assertEqual(length, len(self.job_list.pid_to_job))

    def testWaitAll(self):
        """ wait """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 2 processes with &
        pids, job_ids = self._StartPipelines(2)

        # Now we have 2 jobs
        self.assertJobListLength(2)

        # Invoke the 'wait' builtin

        cmd_val = test_lib.MakeBuiltinArgv(['wait'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(0, status)

        # NOTE(review): cleanup assertion below is deliberately disabled.
        return
        # Jobs list is now empty
        self.assertJobListLength(0)

    def testWaitNext(self):
        """ wait -n """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 2 pipelines with &
        pids, job_ids = self._StartPipelines(2)

        # Now we have 2 jobs
        self.assertJobListLength(2)

        ### 'wait -n'
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        # NOTE(review): status/cleanup assertions below deliberately disabled.
        return
        self.assertEqual(8, status)

        # Jobs list now has 1 fewer job
        self.assertJobListLength(1)

        ### 'wait -n' again
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        # Now zero
        self.assertJobListLength(0)

        ### 'wait -n' again
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(127, status)

        # Still zero
        self.assertJobListLength(0)

    def testWaitPid(self):
        """ wait $pid2 """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 3 processes with &
        pids, job_ids = self._StartPipelines(3)

        # Now we have 3 jobs
        self.assertJobListLength(3)

        # wait $pid2
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[1])])
        # NOTE(review): rest of this test deliberately disabled (early return).
        return
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        # Jobs list now has 1 fewer job
        self.assertJobListLength(2)

        # wait $pid3
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[2])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(7, status)

        self.assertJobListLength(1)

        # wait $pid1
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[0])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        self.assertJobListLength(0)

    def testWaitJob(self):
        """ wait %j2 """

        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 3 processes with &
        pids, job_ids = self._StartPipelines(3)

        # Now we have 3 jobs
        self.assertJobListLength(3)

        # wait %j2
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[1])])
        # NOTE(review): rest of this test deliberately disabled (early return).
        return
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        self.assertJobListLength(2)

        # wait %j3
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[2])])

        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(7, status)

        self.assertJobListLength(1)

        # wait %j1
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[0])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        self.assertJobListLength(0)

    def testForegroundPipelineCleansUpChildProcessDict(self):
        self.assertJobListLength(0)

        # TODO
        return

        argv = ['sleep', '0.01']
        p = self._ExtProc(argv)
        why = trace.External(argv)
        p.RunProcess(self.waiter, why)

        self.assertJobListLength(0)
714
715
# Entry point: run all the test cases above when executed directly.
if __name__ == '__main__':
    unittest.main()

# vim: sw=4