File: tripleo_ansible/roles/tripleo_systemd_wrapper/templates/service_sync.j2

Patch set 5, line 44:

    # synchronize with the FIFO pipe (blocks and waits for a job)
    read LINE <"${jobs_file}"

    NETNS=$(echo $LINE | awk '{ print $1 }')
    CONTAINER_NAME="{{ tripleo_systemd_wrapper_service_name }}-${NETNS}"

Brent Eagles (2020-04-27 23:14):
Is there a reason not to do this in a loop?

Something related to think about: if we have to do a wrapper -> path notification -> oneshot round trip for every neutron sidecar, it might take a very long time for controllers with a lot of routers, subnets, and so on to come back up into a functional state after a restart. Besides a controller reboot, a failover with HA routers might also result in a lot of sidecars getting started at once. It would be better to avoid any excessive performance regressions.

Bogdan Dobrelya (2020-04-28 07:13, reply):
The loop has been moved to the callers' (wrappers') side instead. This oneshot fires off on any path trigger, and the likely reason for it being triggered is a job waiting to be picked up from the pipe. So the triggering itself provides the loop that calls the sync; the sync does not run a loop. If you think such strictly serialized processing of commands would be too slow, let's think about multiple pipes and passing the pipe name to the sync as well.

Bogdan Dobrelya (2020-04-28 07:19):
Note that the sync service is a singleton process; there is no way to have several running, either in the original design, in the proposed changes for the simple service, or in this one. So its performance will still be limited by the time it takes to process a single container. I don't think this can be changed without reworking the thing completely and allowing multiple sync services firing off containers from individual job files...

Brent Eagles (2020-04-29 13:57, reply):
Yeah, we are serialized there. The only thing we can win is a reduction in context switches to process a set workload. What we have here is analogous to a bunch of fixed-function worker threads on an assembly line with a single workqueue. Ideally we'd get some scale/perf testing on this stuff. We might find that it doesn't practically impact anything; on the other hand, it might end up being completely unusable.
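A minimal sketch of the loop variant Brent is asking about, assuming the sync keeps the FIFO open read-write so re-reading does not block on open(2) when no wrapper is currently writing; the container-launch step is elided:

    # hypothetical: drain every queued job in one activation instead of
    # handling a single job per path-unit trigger
    exec {jobs_fd}<>"${jobs_file}"
    while read -t 1 LINE <&"${jobs_fd}"; do
        NETNS=$(echo "$LINE" | awk '{ print $1 }')
        CONTAINER_NAME="{{ tripleo_systemd_wrapper_service_name }}-${NETNS}"
        # ... launch or refresh the sidecar container for $NETNS here ...
    done

Either way the work stays serialized in the singleton sync, as discussed above; the loop only saves the per-job path-unit round trip.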
Patch set 6, lines 40-41:

    # In case service_wrapper script already locked the commands, we just wait for a 10 sec.
    flock -w 10 "$lock_fd" || exit 1

Nate Johnston (2020-04-29 16:34):
I don't think there is any reason to wait for the lock. Having a lock on the service_wrapper side makes sense to ensure there is no write contention, but since the sync script is a singleton, it is fine for it to just read from the FIFO; the 'read' will wait for an end of line before proceeding. If we have a hundred haproxy sidecars that need to start, blocking on the lock will really exacerbate things. Also, ten seconds is a really long time to wait here if we are going to do a lot of spawning.

Bogdan Dobrelya (2020-04-30 07:28, reply):
Thank you, well spotted!
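A minimal sketch of the simplification Nate suggests, assuming the only synchronization the sync script needs is the blocking read on the FIFO itself (the lock setup lines are simply dropped):

    # hypothetical simplified sync entry: no flock; the read blocks until a
    # wrapper writes a complete line into the FIFO
    jobs_file="{{ tripleo_systemd_wrapper_service_dir }}/{{ tripleo_systemd_wrapper_service_name }}/processes"
    read LINE <"${jobs_file}"
    NETNS=$(echo "$LINE" | awk '{ print $1 }')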
Patch set 10, lines 6-10:

    # fetch a job ASAP using a read lock
    exec {lock_fd}>/var/lock/{{ tripleo_systemd_wrapper_service_name }}-processes.lock || exit 1
    trap "flock -u $lock_fd" INT TERM EXIT
    # In case another wrapper has already exclusively locked it, we just wait for no more than a 10 sec.
    flock -s -w 10 "$lock_fd" || exit 1

Alex Schultz (2020-04-30 18:57):
This likely won't fix the issue, because I believe the real problem is that the sync scripts exit when they cannot take the lock. If we end up with multiple syncs running at the same time and they take too long, the subsequent syncs may exit without ever being rerun, so we end up with pending data that won't be picked up until another writer kicks off a script at some point.

We probably need a real service running to monitor the queue and launch things as necessary. At this point we should just revert and investigate a different solution.

Bogdan Dobrelya (2020-05-04 07:00, reply):
Alex, please keep in mind that the sync script has used locks since day one. So normally there is no situation in which "multiple syncs" are running, unless locking fails at its very goal due to subtle corner cases.

Bogdan Dobrelya (2020-05-04 07:03, reply):
Do you have confirmation that the 10-second lock timeout is the root cause of the issue at hand?

To my understanding, the root cause is the loop in the sync service combined with the path unit merging update events, which ends up with pending jobs being "merged" and truncated off the jobs file.
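A rough sketch of the direction Alex suggests, assuming a long-running (non-oneshot) service owns the queue, so nothing depends on a lock timeout or on path-unit activations being coalesced; the launch step is elided:

    # hypothetical persistent queue monitor instead of a path-triggered oneshot
    jobs_file="{{ tripleo_systemd_wrapper_service_dir }}/{{ tripleo_systemd_wrapper_service_name }}/processes"
    exec {jobs_fd}<>"${jobs_file}"   # hold the FIFO open so writers never block on open
    while read LINE <&"${jobs_fd}"; do
        NETNS=$(echo "$LINE" | awk '{ print $1 }')
        # ... launch the sidecar container for $NETNS here ...
    done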
File: tripleo_ansible/roles/tripleo_systemd_wrapper/templates/service_wrapper.j2

Patch set 5, line 37:

    # Submit the wrapped job via a non blocking post into the pipe
    echo "$NETNS $ARGS" | dd oflag=nonblock of="$pipe" status=none
    echo "submitted the wrapped job: $NETNS $ARGS"

Brent Eagles (2020-04-27 23:14):
Neat! Is there a condition where the pipe might become full if there isn't a reader or we aren't reading fast enough? Is there a possibility of a partial write if the pipe is full? If so, maybe the proper thing to do is find out whether neutron does some useful error handling and return an error.

Bogdan Dobrelya (2020-04-28 07:22, reply):
Good question. To my naïve understanding, there are no limits on a FIFO pipe's "buffering" capacity or maximum size, since those are just files?..

Bogdan Dobrelya (2020-04-28 07:27):
So according to http://man7.org/linux/man-pages/man7/pipe.7.html the capacity is at least 65k. I'm not sure whether that would be sufficient, so handling corner cases may indeed be wanted. On the bright side, I believe "an application should be designed so that a reading process consumes data as soon as it is available" fits perfectly here! :)
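A minimal sketch of the error handling Brent suggests, assuming the wrapper can simply report failure when the non-blocking submit does not make it into the pipe (dd exits non-zero both when no reader has the FIFO open and when the write fails because the pipe is full):

    # hypothetical: surface a failed non-blocking submit instead of ignoring it
    if ! echo "$NETNS $ARGS" | dd oflag=nonblock of="$pipe" status=none; then
        echo "failed to submit the wrapped job (pipe full or no reader): $NETNS $ARGS" >&2
        exit 1
    fi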
Patch set 5, line 52:

    read -t 1 j <&"$pipe_fd"  # read a job wit timeout of a 1s
    [ -z "$j" ] && break  # read nothing - no jobs left globally, exit this wrapper
    # resubmit the read job back and notify the path unit to never miss it
    echo "$j" | dd oflag=nonblock of="$pipe" status=none
    echo "resubmitted a job pending for sync: $job"
    flock -u "$lock_fd"
    date > "$trigger"

Brent Eagles (2020-04-27 23:14):
So IIUC, the purpose here is to check whether something is there and, if so, try replaying it back into the pipe to trigger the systemd.path notification?

Bogdan Dobrelya (2020-04-29 13:48, reply):
Yes, this busy loop ensures no events will be missed if the path unit merges notifications.
Patch set 7, line 49:

    while true; do
        flock -w 10 "$lock_fd" || exit 1
        j=''  # discard the previously read results, if any
        read -t 1 j <&"$pipe_fd"  # read a job wit timeout of a 1s
        [ -z "$j" ] && break  # read nothing - no jobs left globally, exit this wrapper
        # resubmit the read job back and notify the path unit to never miss it
        echo "$j" | dd oflag=nonblock of="$pipe" status=none

Bogdan Dobrelya (2020-04-30 07:43):
Technically this could be a non-blocking read as well. The resulting shortest possible wrapper execution time would then drop below one second; as written, one second is the minimum execution time.

Patch set 7, line 56:

    flock -u "$lock_fd"
    date > "$trigger"
    # random window of 1-3s for other wrappers and sync service to access the pipe
    echo ${RANDOM} | xargs -I{} expr {} % 2 + 1 | xargs sleep

Bogdan Dobrelya (2020-04-30 07:43):
During this window, any other instantiated wrapper may "capture the flag", allowing this one to exit.

Patch set 10, line 65:

    flock -w 5 "$lock_fd"
    rc=$?
    if [ $rc -ne 0 ]; then  # abandon the fetched job to be picked up later
        echo "$j" >> "${pipe}.abandoned"
        echo "abandoned job for another try: $j"
        break
    fi

Bogdan Dobrelya (2020-04-30 09:24):
Appending to this shared file without locking it doesn't look perfectly safe for concurrent wrappers; I hope this is fine because it is likely an extremely rare path?..
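A small sketch of how that abandoned-job append could be serialized, assuming a dedicated lock file is acceptable (the ".abandoned.lock" name is made up here):

    # hypothetical: take a short exclusive lock so concurrent wrappers cannot
    # interleave partial lines while appending abandoned jobs
    (
        flock -w 5 201 || exit 1
        echo "$j" >> "${pipe}.abandoned"
    ) 201>"${pipe}.abandoned.lock"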
