OILS / core / process_test.py View on Github | oils.pub

543 lines, 306 significant
1#!/usr/bin/env python2
2"""process_test.py: Tests for process.py."""
3
4import os
5import unittest
6
7from _devbuild.gen.id_kind_asdl import Id
8from _devbuild.gen.runtime_asdl import (RedirValue, redirect_arg, cmd_value,
9 trace)
10from _devbuild.gen.syntax_asdl import loc, redir_loc
11from asdl import runtime
12from builtin import read_osh
13from builtin import process_osh
14from builtin import trap_osh
15from core import dev
16from core import process # module under test
17from core import pyos
18from core import sh_init
19from core import state
20from core import test_lib
21from core import util
22from display import ui
23from frontend import flag_def # side effect: flags are defined, for wait builtin
24from mycpp import iolib
25from mycpp import mylib
26from mycpp.mylib import log
27
28import posix_ as posix
29
30_ = flag_def
31
# Convenience aliases for the classes under test (core/process.py).
Process = process.Process
ExternalThunk = process.ExternalThunk
34
35
def Banner(msg):
    # type: (str) -> None
    """Print a 60-dash separator line followed by msg, to visually group test output."""
    separator = '-' * 60
    print('%s\n%s' % (separator, msg))
39
40
def _CommandNode(code_str, arena):
    """Parse one logical line of shell source into a command AST node."""
    return test_lib.InitCommandParser(code_str, arena=arena).ParseLogicalLine()
44
45
class FakeJobControl(object):
    """Test double for process.JobControl that returns a canned Enabled() answer."""

    def __init__(self, enabled):
        # type: (bool) -> None
        self._enabled = enabled

    def Enabled(self):
        # type: () -> bool
        """Return the flag given at construction time."""
        return self._enabled
53
54
def _SetupTest(self):
    """Shared setUp() body: build the interpreter plumbing the test cases need.

    Populates, on the TestCase instance: arena, mem, job_control, job_list,
    trap_state, multi_trace, tracer, waiter, errfmt, fd_state, and ext_prog.
    Called as _SetupTest(self) from each TestCase.setUp() below.
    """
    self.arena = test_lib.MakeArena('process_test.py')

    self.mem = test_lib.MakeMem(self.arena)
    # Fix: parse_opts was an unused local; discard it with a _ prefix.
    _parse_opts, exec_opts, mutable_opts = state.MakeOpts(self.mem, {}, None)
    self.mem.exec_opts = exec_opts

    #state.InitMem(mem, {}, '0.1')
    sh_init.InitDefaultVars(self.mem, [])

    self.job_control = process.JobControl()
    self.job_list = process.JobList()

    signal_safe = iolib.InitSignalSafe()
    self.trap_state = trap_osh.TrapState(signal_safe)

    # No FD state for the multi-tracer in tests.
    fd_state = None
    self.multi_trace = dev.MultiTracer(posix.getpid(), '', '', '', fd_state)
    self.tracer = dev.Tracer(None, exec_opts, mutable_opts, self.mem,
                             mylib.Stderr(), self.multi_trace)
    self.waiter = process.Waiter(self.job_list, exec_opts, self.trap_state,
                                 self.tracer)
    self.errfmt = ui.ErrorFormatter()
    self.fd_state = process.FdState(self.errfmt, self.job_control,
                                    self.job_list, None, self.tracer, None,
                                    exec_opts)
    self.ext_prog = process.ExternalProgram('', self.fd_state, self.errfmt,
                                            util.NullDebugFile())
83
84
def _MakeThunk(argv, ext_prog):
    """Build an ExternalThunk for argv, resolving argv[0] in /bin then /usr/bin.

    If the executable is not found, fall back to the bare name so tests can
    exercise the failure path.
    """
    arg_vec = cmd_value.Argv(argv, [loc.Missing] * len(argv), False, None,
                             None)
    name = argv[0]
    candidates = [os.path.join(d, name) for d in ('/bin', '/usr/bin')]
    found = [c for c in candidates if os.path.exists(c)]
    argv0_path = found[0] if found else name  # fallback that tests failure case
    return ExternalThunk(ext_prog, argv0_path, arg_vec, {})
97
98
class ProcessTest(unittest.TestCase):
    """Tests for process.Process, process.Pipeline, and FdState redirects."""

    def setUp(self):
        # type: () -> None
        _SetupTest(self)

    def _ExtProc(self, argv):
        """Make a Process that runs an external command (resolved via _MakeThunk)."""
        thunk = _MakeThunk(argv, self.ext_prog)
        return Process(thunk, self.job_control, self.job_list, self.tracer)

    def testStdinRedirect(self):
        """Pushing the same '< file' redirect twice re-reads from the start."""
        PATH = '_tmp/one-two.txt'
        # Write two lines
        with open(PATH, 'w') as f:
            f.write('one\ntwo\n')

        # Should get the first line twice, because Pop() closes it!

        r = RedirValue(Id.Redir_Less, runtime.NO_SPID, redir_loc.Fd(0),
                       redirect_arg.Path(PATH))

        # Minimal stand-in for the real CommandEvaluator; _ReadPortion() only
        # needs RunPendingTraps().
        class CommandEvaluator(object):

            def RunPendingTraps(self):
                pass

        cmd_ev = CommandEvaluator()

        err_out = []
        self.fd_state.Push([r], err_out)
        line1, _ = read_osh._ReadPortion(pyos.NEWLINE_CH, -1, cmd_ev)
        self.fd_state.Pop(err_out)

        self.fd_state.Push([r], err_out)
        line2, _ = read_osh._ReadPortion(pyos.NEWLINE_CH, -1, cmd_ev)
        self.fd_state.Pop(err_out)

        # sys.stdin.readline() would erroneously return 'two' because of buffering.
        self.assertEqual('one', line1)
        self.assertEqual('one', line2)

    def testProcess(self):
        """Run an external command to completion; also exercise the not-found case."""
        # 3 fds. Does Python open it? Shell seems to have it too. Maybe it
        # inherits from the shell.
        print('FDS BEFORE', os.listdir('/dev/fd'))

        Banner('date')
        argv = ['date']
        p = self._ExtProc(argv)
        why = trace.External(argv)
        status = p.RunProcess(self.waiter, why)
        log('date returned %d', status)
        self.assertEqual(0, status)

        Banner('does-not-exist')
        p = self._ExtProc(['does-not-exist'])
        print(p.RunProcess(self.waiter, why))

        # 12 file descriptors open!
        print('FDS AFTER', os.listdir('/dev/fd'))

    def testPipeline(self):
        """ls | cut | sort | uniq -c, with the last part run in this interpreter."""
        node = _CommandNode('uniq -c', self.arena)
        cmd_ev = test_lib.InitCommandEvaluator(arena=self.arena,
                                               ext_prog=self.ext_prog)
        print('BEFORE', os.listdir('/dev/fd'))

        p = process.Pipeline(False, self.job_control, self.job_list,
                             self.tracer)
        p.Add(self._ExtProc(['ls']))
        p.Add(self._ExtProc(['cut', '-d', '.', '-f', '2']))
        p.Add(self._ExtProc(['sort']))

        # The last part runs in the current process, not a fork.
        p.AddLast((cmd_ev, node))

        p.StartPipeline(self.waiter)
        pipe_status = p.RunLastPart(self.waiter, self.fd_state)
        log('pipe_status: %s', pipe_status)

        print('AFTER', os.listdir('/dev/fd'))

    def testPipeline2(self):
        """Pipelines of external procs, then of SubProgramThunk (subshell) procs."""
        cmd_ev = test_lib.InitCommandEvaluator(arena=self.arena,
                                               ext_prog=self.ext_prog)

        Banner('ls | cut -d . -f 1 | head')
        p = process.Pipeline(False, self.job_control, self.job_list,
                             self.tracer)
        p.Add(self._ExtProc(['ls']))
        p.Add(self._ExtProc(['cut', '-d', '.', '-f', '1']))

        node = _CommandNode('head', self.arena)
        p.AddLast((cmd_ev, node))

        p.StartPipeline(self.waiter)
        print(p.RunLastPart(self.waiter, self.fd_state))

        # Simulating subshell for each command
        node1 = _CommandNode('ls', self.arena)
        node2 = _CommandNode('head', self.arena)
        node3 = _CommandNode('sort --reverse', self.arena)

        thunk1 = process.SubProgramThunk(cmd_ev, node1, self.trap_state,
                                         self.multi_trace, True, False)
        thunk2 = process.SubProgramThunk(cmd_ev, node2, self.trap_state,
                                         self.multi_trace, True, False)
        thunk3 = process.SubProgramThunk(cmd_ev, node3, self.trap_state,
                                         self.multi_trace, True, False)

        p = process.Pipeline(False, self.job_control, self.job_list,
                             self.tracer)
        p.Add(Process(thunk1, self.job_control, self.job_list, self.tracer))
        p.Add(Process(thunk2, self.job_control, self.job_list, self.tracer))
        p.Add(Process(thunk3, self.job_control, self.job_list, self.tracer))

        last_thunk = (cmd_ev, _CommandNode('cat', self.arena))
        p.AddLast(last_thunk)

        p.StartPipeline(self.waiter)
        print(p.RunLastPart(self.waiter, self.fd_state))

        # TODO: Combine pipelines for other things:

        # echo foo 1>&2 | tee stdout.txt
        #
        # foo=$(ls | head)
        #
        # foo=$(<<EOF ls | head)
        # stdin
        # EOF
        #
        # ls | head &

        # Or technically we could fork the whole interpreter for foo|bar|baz and
        # capture stdout of that interpreter.

    def makeTestPipeline(self, jc):
        """Build (but don't start) an 'echo testpipeline | cat' pipeline using job control jc."""
        cmd_ev = test_lib.InitCommandEvaluator(arena=self.arena,
                                               ext_prog=self.ext_prog)

        pi = process.Pipeline(False, jc, self.job_list, self.tracer)

        node1 = _CommandNode('/bin/echo testpipeline', self.arena)
        node2 = _CommandNode('cat', self.arena)

        thunk1 = process.SubProgramThunk(cmd_ev, node1, self.trap_state,
                                         self.multi_trace, True, False)
        thunk2 = process.SubProgramThunk(cmd_ev, node2, self.trap_state,
                                         self.multi_trace, True, False)

        pi.Add(Process(thunk1, jc, self.job_list, self.tracer))
        pi.Add(Process(thunk2, jc, self.job_list, self.tracer))

        return pi

    def testPipelinePgidField(self):
        """ProcessGroupId() is INVALID_PGID without job control, leader pid with it."""
        jc = FakeJobControl(False)

        pi = self.makeTestPipeline(jc)
        self.assertEqual(process.INVALID_PGID, pi.ProcessGroupId())

        pi.StartPipeline(self.waiter)
        # No pgid
        self.assertEqual(process.INVALID_PGID, pi.ProcessGroupId())

        jc = FakeJobControl(True)

        pi = self.makeTestPipeline(jc)
        self.assertEqual(process.INVALID_PGID, pi.ProcessGroupId())

        pi.StartPipeline(self.waiter)
        # first process is the process group leader
        self.assertEqual(pi.pids[0], pi.ProcessGroupId())

    def testOpen(self):
        # Disabled because mycpp translation can't handle it. We do this at a
        # higher layer.
        return

        # This function used to raise BOTH OSError and IOError because Python 2 is
        # inconsistent.
        # We follow Python 3 in preferring OSError.
        # https://stackoverflow.com/questions/29347790/difference-between-ioerror-and-oserror
        self.assertRaises(OSError, self.fd_state.Open, '_nonexistent_')
        self.assertRaises(OSError, self.fd_state.Open, 'metrics/')
283
284
class JobListTest(unittest.TestCase):
    """
    Test invariant that the 'wait' builtin removes the (pid -> status)
    mappings (NOT the Waiter)

    There are 4 styles of invoking it:

    wait             # for all
    wait -n          # for next
    wait $pid1 $pid2 # for specific jobs -- problem: are pipelines included?
    wait %j1 %j2     # job specs -- jobs are either pipelines or processes

    Bonus:

    jobs -l can show exit status
    """

    def setUp(self):
        # type: () -> None
        _SetupTest(self)

        # The 'wait' builtin under test.
        self.wait_builtin = process_osh.Wait(self.waiter, self.job_list,
                                             self.mem, self.tracer,
                                             self.errfmt)

    def _ExtProc(self, argv):
        """Make a Process that runs an external command (resolved via _MakeThunk)."""
        thunk = _MakeThunk(argv, self.ext_prog)
        return Process(thunk, self.job_control, self.job_list, self.tracer)

    def _RunBackgroundJob(self, argv):
        """Start argv in the background, like 'argv &'.  Returns (pid, job_id)."""
        p = self._ExtProc(argv)

        # Similar to Executor::StartBackgroundJob()
        p.SetBackground()
        pid = p.StartProcess(trace.Fork)

        #self.mem.last_bg_pid = pid  # for $!

        job_id = self.job_list.RegisterJob(p)  # show in 'jobs' list
        return pid, job_id

    def _StartProcesses(self, n):
        """Fork n background jobs of the form: sh -c 'sleep 0.0J; echo i=J; exit J'.

        J counts down from 9, so earlier jobs sleep longer and exit with
        larger statuses (first job exits 9, second 8, ...).  Returns the
        parallel lists (pids, job_ids).
        """
        pids = []
        job_ids = []

        assert n < 10, n
        for i in xrange(1, n + 1):
            j = 10 - i  # count down
            argv = ['sh', '-c', 'sleep 0.0%d; echo i=%d; exit %d' % (j, j, j)]
            pid, job_id = self._RunBackgroundJob(argv)
            pids.append(pid)
            job_ids.append(job_id)

        log('pids %s', pids)
        log('job_ids %s', job_ids)

        return pids, job_ids

    def assertJobListLength(self, length):
        """Assert all three JobList data structures agree on the number of jobs."""
        self.assertEqual(length, len(self.job_list.child_procs))
        self.assertEqual(length, len(self.job_list.jobs))
        self.assertEqual(length, len(self.job_list.pid_to_job))

    def testWaitAll(self):
        """ wait """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 2 processes with &
        pids, job_ids = self._StartProcesses(2)

        # Now we have 2 jobs
        self.assertJobListLength(2)

        # Invoke the 'wait' builtin

        cmd_val = test_lib.MakeBuiltinArgv(['wait'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(0, status)

        # Jobs list is now empty
        self.assertJobListLength(0)

    def testWaitNext(self):
        """ wait -n """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 2 processes with &
        pids, job_ids = self._StartProcesses(2)

        # Now we have 2 jobs
        self.assertJobListLength(2)

        ### 'wait -n' -- the second job sleeps less, so it finishes first
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        # Jobs list now has 1 fewer job
        self.assertJobListLength(1)

        ### 'wait -n' again
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        # Now zero
        self.assertJobListLength(0)

        ### 'wait -n' again -- nothing left to wait for
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(127, status)

        # Still zero
        self.assertJobListLength(0)

    def testWaitPid(self):
        """ wait $pid2 """
        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 3 processes with &
        pids, job_ids = self._StartProcesses(3)

        # Now we have 3 jobs
        self.assertJobListLength(3)

        # wait $pid2
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[1])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        # Jobs list now has 1 fewer job
        self.assertJobListLength(2)

        # wait $pid3
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[2])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(7, status)

        self.assertJobListLength(1)

        # wait $pid1
        cmd_val = test_lib.MakeBuiltinArgv(['wait', str(pids[0])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        self.assertJobListLength(0)

    def testWaitJob(self):
        """ wait %j2 """

        # Jobs list starts out empty
        self.assertJobListLength(0)

        # Fork 3 processes with &
        pids, job_ids = self._StartProcesses(3)

        # Now we have 3 jobs
        self.assertJobListLength(3)

        # wait %j2
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[1])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(8, status)

        self.assertJobListLength(2)

        # wait %j3
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[2])])

        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(7, status)

        self.assertJobListLength(1)

        # wait %j1
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '%' + str(job_ids[0])])
        status = self.wait_builtin.Run(cmd_val)
        self.assertEqual(9, status)

        self.assertJobListLength(0)

    def testForegroundProcessCleansUpChildProcessDict(self):
        """Running a foreground process leaves no entries behind in the job list."""
        self.assertJobListLength(0)

        argv = ['sleep', '0.01']
        p = self._ExtProc(argv)
        why = trace.External(argv)
        p.RunProcess(self.waiter, why)

        self.assertJobListLength(0)

    def testGrandchildOutlivesChild(self):
        """ The new parent is the init process """

        # Jobs list starts out empty
        self.assertEqual(0, len(self.job_list.child_procs))

        # the sleep process should outlive the sh process
        argv = ['sh', '-c', 'sleep 0.1 & exit 99']
        pid, job_id = self._RunBackgroundJob(argv)

        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        log('status = %d', status)
        self.assertEqual(99, status)

        # The orphaned grandchild is not our child, so there is nothing left.
        cmd_val = test_lib.MakeBuiltinArgv(['wait', '-n'])
        status = self.wait_builtin.Run(cmd_val)
        log('status = %d', status)
        self.assertEqual(127, status)

    # More tests:
    #
    # wait $pipeline_pid - with pipeline leader, and other PID
    # wait %pipeline_job
    # wait -n on pipeline?  Does it return PIPESTATUS?
    # wait with pipeline - should be OK
    #
    # Stopped jobs: does it print something interactively?
507
508
class PipelineJobListTest(unittest.TestCase):
    """
    Like the above, but starts pipelines instead of individual processes.
    """

    def setUp(self):
        # type: () -> None
        _SetupTest(self)

        # The 'wait' builtin under test.
        self.wait_builtin = process_osh.Wait(self.waiter, self.job_list,
                                             self.mem, self.tracer,
                                             self.errfmt)

    def _ExtProc(self, argv):
        """Make a Process that runs an external command (resolved via _MakeThunk)."""
        thunk = _MakeThunk(argv, self.ext_prog)
        return Process(thunk, self.job_control, self.job_list, self.tracer)

    def _RunBackgroundJob(self, argv):
        """Start argv in the background, like 'argv &'.  Returns (pid, job_id)."""
        p = self._ExtProc(argv)

        # Similar to Executor::StartBackgroundJob()
        p.SetBackground()
        pid = p.StartProcess(trace.Fork)

        #self.mem.last_bg_pid = pid  # for $!

        job_id = self.job_list.RegisterJob(p)  # show in 'jobs' list
        return pid, job_id

    # TODO: Add all the same tests
538
539
# Run all test cases when this file is executed directly.
if __name__ == '__main__':
    unittest.main()

# vim: sw=4