messageboard-2023-01-04-1540.py
01234567890123456789012345678901234567890123456789012345678901234567890123456789









14271428142914301431143214331434143514361437143814391440144114421443144414451446  1447144814491450145114521453145414551456145714581459  146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489








5878587958805881588258835884588558865887588858895890589158925893589458955896589758985899  59005901590259035904590559065907590859095910591159125913591459155916591759185919








707370747075707670777078707970807081708270837084708570867087708870897090709170927093     709470957096709770987099710071017102710371047105710671077108710971107111      7112711371147115 71167117711871197120 71217122712371247125712671277128712971307131713271337134713571367137713871397140








72067207720872097210721172127213721472157216721772187219722072217222722372247225  7226 7227722872297230723172327233723472357236723772387239724072417242724372447245724672477248











                            <----SKIPPED LINES---->




    date_segmentation: boolean indicating whether the date string yyyy-mm-dd
      should be prepended to the file name in full_path based on the current
      date, so that pickled files are segmented by date.
    timestamp: if date_segmentation is True, this is used rather than system
      time to generate the file name.
    verify: boolean indicating if we should verify that the pickled file object
      count increments by one, rewriting entire pickle file if it doesn't. Note
      that since this requires reading the entire pickle file and unpickling,
      it should only be done for small files / objects.

  Returns:
    Name of file to which the data was pickled if successful; None if failed.
  """
  global cached_object_count
  if not timestamp:
    timestamp = time.time()
  date_suffix = EpochDisplayTime(timestamp, '%Y-%m-%d-')
  if date_segmentation:
    full_path = PrependFileName(full_path, date_suffix)



  if full_path not in cached_object_count:
    cached_object_count[full_path] = len(
        UnpickleObjectFromFile(full_path, False))
  if not os.path.exists(full_path):  # Another method may delete the file
    cached_object_count[full_path] = 0

  try:
    with open(full_path, 'ab') as f:
      f.write(pickle.dumps(data))

  except IOError:
    Log('Unable to append pickle ' + full_path)
    return None



  if verify:
    # file object count should now be one more; if it isn't, the file is
    # corrupted, and rather than continue writing to a corrupted pickle file,
    # we should fix it so we don't lose too much data
    pickled_data = UnpickleObjectFromFile(full_path, False)
    cached_count = cached_object_count[full_path]
    if len(pickled_data) == cached_count + 1:
      cached_object_count[full_path] = cached_count + 1
    else:
      tmp_file_name = full_path + '.tmp'
      try:
        with open(tmp_file_name, 'ab') as f:
          for d in pickled_data:  # rewrite the old data that was retained
            f.write(pickle.dumps(d))
          f.write(pickle.dumps(data))  # new data
      except IOError:
        Log('Unable to append pickle %s in verify step; left tmp file as-is' %
            tmp_file_name)
        return None
      shutil.move(tmp_file_name, full_path)
      cached_object_count[full_path] = len(pickled_data) + 1
      Log('Re-pickled %s: after writing %s, expected len %d to increment, '
          'but it did not; after repickling (and adding the new '
          'data), new length = %d' % (
              full_path, data, cached_count, cached_object_count[full_path]))

  return full_path






                            <----SKIPPED LINES---->




    the global SHUTDOWN_SIGNAL with the same message sent to the log so that
    it can also be displayed on the html dashboard.
  """
  rpi_restart = False
  global SHUTDOWN_SIGNAL

  running_minutes = (time.time() - startup_time) / SECONDS_IN_MINUTE
  running_days = running_minutes / MINUTES_IN_DAY
  process_restart_days = configuration.get('process_restart_days', 1)
  rpi_restart_days = configuration.get('rpi_restart_days', 1)

  mostly_quiet = not message_queue
  planes_in_radio = json_desc_dict.get('radio_range_flights')
  # script /home/pi/splitflap/backup.sh creates temp file in this
  # directory; after it is copied to the NAS, it is deleted
  backup_in_progress = os.listdir('/media/backup')
  all_quiet = mostly_quiet and not planes_in_radio and not backup_in_progress
  early_morn = 6 > int(EpochDisplayTime(time.time(), '%-H')) >= 5

  # network test conditions
  minimum_uptime_minutes = 30 # 30 minutes
  number_of_intervals = 3 # at least three consecutive network failures



  # ----------------------------------------------------------------------------
  # PROCESS RESTART SCENARIOS: restart process, but do not restart RPi
  # ----------------------------------------------------------------------------
  process_restart = False
  if 'end_process' in configuration:
    process_restart = True
    msg = 'Process end requested via web form'
    RemoveSetting(configuration, 'end_process')
  elif all_quiet and early_morn and running_days >= process_restart_days:
    process_restart = True
    msg = ('All-quiet process restart triggered after %d days; '
           'actual runtime: %.2f days' % (process_restart_days, running_days))
  elif mostly_quiet and early_morn and running_days >= process_restart_days + 1:
    process_restart = True
    msg = ('Mostly-quiet process restart triggered after %d days; '
           'actual runtime: %.2f days' %
           (process_restart_days + 1, running_days))

  if process_restart:




                            <----SKIPPED LINES---->




  else:
    msg = pin[2]
  if RASPBERRY_PI:
    RPi.GPIO.output(pin[0], value)
    if value:
      pin_setting = 'HIGH'
      relay_light_value = 'OFF'
    else:
      pin_setting = 'LOW'
      relay_light_value = 'ON'
    msg += '; RPi GPIO pin %d set to %s; relay light #%d should now be %s' % (
        pin[0], pin_setting, pin[3], relay_light_value)

  if pin_values[pin[0]] != value:
    if VERBOSE:
      Log(msg)  # log
    pin_values[pin[0]] = value  # update cache
    UpdateDashboard(value, subsystem=pin, failure_message=failure_message)


def UpdateDashboard(value, subsystem=0, failure_message='', iteration=None):
  """Writes to disk a tuple with status details about a particular system.

  The independent monitoring.py module allows us to see in one place the
  status of all the subsystems and of the overall system; it does that
  monitoring based on these tuples of data.

  Args:
    value: Boolean indicating whether a failure has occurred (True) or
      system is nominal (False).
    subsystem: A tuple describing the system; though that description may
      have multiple attributes, the 0th element is the numeric identifier
      of that system.  monitoring.py depends on other attributes of that
      tuple being present as well.  Since the overall system does not have
      a tuple defined for it, it gets a default identifier of 0.
    failure_message: an (optional) message describing why the system /
      subsystem is being disabled or failing.
    iteration: integer indicating how many times the main loop has been
      completed.
  """
  # Reduce a full subsystem tuple to its numeric id; the falsy default 0
  # denotes the overall system.
  subsystem_id = subsystem[0] if subsystem else subsystem
  status_record = (
      time.time(),
      subsystem_id,
      value,
      (VERSION_MESSAGEBOARD, VERSION_ARDUINO),
      failure_message,
      INSTANCE_START_TIME,
      iteration,
      gpiozero.CPUTemperature().temperature)
  PickleObjectToFile(status_record, PICKLE_SYSTEM_DASHBOARD, True)



def RemoveFile(file):
  """Removes a file, returning a boolean indicating if it had existed.

  Args:
    file: path of the file to remove.

  Returns:
    True if the file existed and was removed; False if it did not exist
    or could not be removed due to insufficient permissions.
  """
  # EAFP: attempt the removal directly rather than checking
  # os.path.exists() first; the check-then-remove sequence had a TOCTOU
  # race (another process/thread could delete the file in between,
  # raising an unhandled FileNotFoundError).
  try:
    os.remove(file)
  except FileNotFoundError:
    return False
  except PermissionError:
    return False
  return True


def ConfirmNewFlight(flight, flights):
  """Replaces last-seen flight with new flight if identifiers overlap.

  Flights are identified by the radio over time by a tuple of identifiers:
  flight_number and squawk.  Due to unknown communication issues, one or the




                            <----SKIPPED LINES---->





  args = (PICKLE_FLIGHTS, not SIMULATION, max_days)
  saved_flights = UnpickleObjectFromFile(*args)[:-1]
  files_to_overwrite = UnpickleObjectFromFile(*args, filenames=True)

  for file in files_to_overwrite:
    os.remove(file)
  for f in saved_flights:
    # we would like to use verify=True, but that's too slow without further
    # optimizing the verification step for a loop of data
    PickleObjectToFile(
        f, PICKLE_FLIGHTS, True, timestamp=f['now'], verify=False)

  return False


def HeartbeatRestart():
  """Logs system down / system up pair of heartbeats as system starts.

  Returns:
    The current epoch time, or 0 when running in simulation mode.
  """
  if SIMULATION:
    return 0
  # Record that the process was down an instant ago ...
  UpdateDashboard(True)
  # ... and immediately record that it is running again now.
  UpdateDashboard(False)
  return time.time()


def Heartbeat(last_heartbeat_time=None, iteration=None):
  """Logs a system up heartbeat if one is due.

  Args:
    last_heartbeat_time: epoch of the most recent heartbeat, or None if
      none has been sent yet.
    iteration: integer count of completed main-loop passes, forwarded to
      the dashboard record.

  Returns:
    Epoch of the most recent heartbeat (possibly the one just sent).
  """
  if SIMULATION:
    return last_heartbeat_time
  current_time = time.time()
  heartbeat_due = (
      not last_heartbeat_time or
      current_time - last_heartbeat_time > HEARTBEAT_SECONDS)
  if heartbeat_due:
    # value=False is the all-clear signal to the dashboard.
    UpdateDashboard(False, iteration=iteration)
    last_heartbeat_time = current_time
  return last_heartbeat_time


def VersionControl():
  """Copies current instances of messageboard.py and arduino.py into repository.

  To aid debugging, we want to keep past versions of the code easily
  accessible, and linked to the errors that have been logged. This function
  copies the python code into a version control directory after adding in a
  date / time stamp to the file name.




                            <----SKIPPED LINES---->





01234567890123456789012345678901234567890123456789012345678901234567890123456789









1427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451  145214531454145514561457145814591460146114621463146414651466146714681469  14701471147214731474147514761477147814791480148114821483148414851486148714881489








58785879588058815882588358845885588658875888588958905891589258935894589558965897 58985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920








707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154








7220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244 72457246724772487249725072517252725372547255725672577258725972607261726272637264











                            <----SKIPPED LINES---->




    date_segmentation: boolean indicating whether the date string yyyy-mm-dd
      should be prepended to the file name in full_path based on the current
      date, so that pickled files are segmented by date.
    timestamp: if date_segmentation is True, this is used rather than system
      time to generate the file name.
    verify: boolean indicating if we should verify that the pickled file object
      count increments by one, rewriting entire pickle file if it doesn't. Note
      that since this requires reading the entire pickle file and unpickling,
      it should only be done for small files / objects.

  Returns:
    Name of file to which the data was pickled if successful; None if failed.
  """
  global cached_object_count
  if not timestamp:
    timestamp = time.time()
  date_suffix = EpochDisplayTime(timestamp, '%Y-%m-%d-')
  if date_segmentation:
    full_path = PrependFileName(full_path, date_suffix)

  if not os.path.exists(full_path):  # Another method may delete the file
    cached_object_count[full_path] = 0
  if full_path not in cached_object_count:
    cached_object_count[full_path] = len(
        UnpickleObjectFromFile(full_path, False))



  try:
    with open(full_path, 'ab') as f:
      f.write(pickle.dumps(data))

  except IOError:
    Log('Unable to append pickle ' + full_path)
    return None

  cached_object_count[full_path] += 1

  if verify:
    # file object count should now be one more; if it isn't, the file is
    # corrupted, and rather than continue writing to a corrupted pickle file,
    # we should fix it so we don't lose too much data
    pickled_data = UnpickleObjectFromFile(full_path, False)
    cached_count = cached_object_count[full_path]
    if len(pickled_data) != cached_count:


      tmp_file_name = full_path + '.tmp'
      try:
        with open(tmp_file_name, 'ab') as f:
          for d in pickled_data:  # rewrite the old data that was retained
            f.write(pickle.dumps(d))
          f.write(pickle.dumps(data))  # new data
      except IOError:
        Log('Unable to append pickle %s in verify step; left tmp file as-is' %
            tmp_file_name)
        return None
      shutil.move(tmp_file_name, full_path)
      cached_object_count[full_path] = len(pickled_data) + 1
      Log('Re-pickled %s: after writing %s, expected len %d to increment, '
          'but it did not; after repickling (and adding the new '
          'data), new length = %d' % (
              full_path, data, cached_count, cached_object_count[full_path]))

  return full_path






                            <----SKIPPED LINES---->




    the global SHUTDOWN_SIGNAL with the same message sent to the log so that
    it can also be displayed on the html dashboard.
  """
  rpi_restart = False
  global SHUTDOWN_SIGNAL

  running_minutes = (time.time() - startup_time) / SECONDS_IN_MINUTE
  running_days = running_minutes / MINUTES_IN_DAY
  process_restart_days = configuration.get('process_restart_days', 1)
  rpi_restart_days = configuration.get('rpi_restart_days', 1)

  mostly_quiet = not message_queue
  planes_in_radio = json_desc_dict.get('radio_range_flights')
  # script /home/pi/splitflap/backup.sh creates temp file in this
  # directory; after it is copied to the NAS, it is deleted
  backup_in_progress = os.listdir('/media/backup')
  all_quiet = mostly_quiet and not planes_in_radio and not backup_in_progress
  early_morn = 6 > int(EpochDisplayTime(time.time(), '%-H')) >= 5

  # network test conditions

  number_of_intervals = 5 # at least three consecutive network failures
  minutes_per_interval = 10
  minimum_uptime_minutes = number_of_intervals * minutes_per_interval

  # ----------------------------------------------------------------------------
  # PROCESS RESTART SCENARIOS: restart process, but do not restart RPi
  # ----------------------------------------------------------------------------
  process_restart = False
  if 'end_process' in configuration:
    process_restart = True
    msg = 'Process end requested via web form'
    RemoveSetting(configuration, 'end_process')
  elif all_quiet and early_morn and running_days >= process_restart_days:
    process_restart = True
    msg = ('All-quiet process restart triggered after %d days; '
           'actual runtime: %.2f days' % (process_restart_days, running_days))
  elif mostly_quiet and early_morn and running_days >= process_restart_days + 1:
    process_restart = True
    msg = ('Mostly-quiet process restart triggered after %d days; '
           'actual runtime: %.2f days' %
           (process_restart_days + 1, running_days))

  if process_restart:




                            <----SKIPPED LINES---->




  else:
    msg = pin[2]
  if RASPBERRY_PI:
    RPi.GPIO.output(pin[0], value)
    if value:
      pin_setting = 'HIGH'
      relay_light_value = 'OFF'
    else:
      pin_setting = 'LOW'
      relay_light_value = 'ON'
    msg += '; RPi GPIO pin %d set to %s; relay light #%d should now be %s' % (
        pin[0], pin_setting, pin[3], relay_light_value)

  if pin_values[pin[0]] != value:
    if VERBOSE:
      Log(msg)  # log
    pin_values[pin[0]] = value  # update cache
    UpdateDashboard(value, subsystem=pin, failure_message=failure_message)


def UpdateDashboard(
    value,
    subsystem=0,
    failure_message='',
    iteration=None,
    verify=False):
  """Writes to disk a tuple with status details about a particular system.

  The independent monitoring.py module allows us to see in one place the
  status of all the subsystems and of the overall system; it does that
  monitoring based on these tuples of data.

  Args:
    value: Boolean indicating whether a failure has occurred (True) or
      system is nominal (False).
    subsystem: A tuple describing the system; though that description may
      have multiple attributes, the 0th element is the numeric identifier
      of that system.  monitoring.py depends on other attributes of that
      tuple being present as well.  Since the overall system does not have
      a tuple defined for it, it gets a default identifier of 0.
    failure_message: an (optional) message describing why the system /
      subsystem is being disabled or failing.
    iteration: integer indicating how many times the main loop has been
      completed.
    verify: boolean indicating whether we should verify that after
      writing, the dashboard file is incremented in length by one; if it
      is not, then we should re-pickle the entire dashboard file.

  Returns:
    Epoch recorded in the dashboard message
  """
  versions = (VERSION_MESSAGEBOARD, VERSION_ARDUINO)
  if subsystem:
    subsystem = subsystem[0]
  ts = time.time()
  # Bug fix: pickle the captured ts rather than calling time.time() a
  # second time, so the returned epoch matches the epoch actually stored
  # in the dashboard record.
  PickleObjectToFile((
      ts, subsystem, value, versions,
      failure_message, INSTANCE_START_TIME,
      iteration, gpiozero.CPUTemperature().temperature),
      PICKLE_SYSTEM_DASHBOARD, True, verify=verify)
  return ts


def RemoveFile(file):
  """Removes a file, returning a boolean indicating if it had existed.

  Args:
    file: path of the file to remove.

  Returns:
    True if the file existed and was removed; False if it did not exist
    or could not be removed due to insufficient permissions.
  """
  # EAFP: attempt the removal directly rather than checking
  # os.path.exists() first; the check-then-remove sequence had a TOCTOU
  # race (another process/thread could delete the file in between,
  # raising an unhandled FileNotFoundError).
  try:
    os.remove(file)
  except FileNotFoundError:
    return False
  except PermissionError:
    return False
  return True


def ConfirmNewFlight(flight, flights):
  """Replaces last-seen flight with new flight if identifiers overlap.

  Flights are identified by the radio over time by a tuple of identifiers:
  flight_number and squawk.  Due to unknown communication issues, one or the




                            <----SKIPPED LINES---->





  args = (PICKLE_FLIGHTS, not SIMULATION, max_days)
  saved_flights = UnpickleObjectFromFile(*args)[:-1]
  files_to_overwrite = UnpickleObjectFromFile(*args, filenames=True)

  for file in files_to_overwrite:
    os.remove(file)
  for f in saved_flights:
    # we would like to use verify=True, but that's too slow without further
    # optimizing the verification step for a loop of data
    PickleObjectToFile(
        f, PICKLE_FLIGHTS, True, timestamp=f['now'], verify=False)

  return False


def HeartbeatRestart():
  """Logs system down / system up pair of heartbeats as system starts.

  Returns:
    Epoch of the system-up heartbeat, or 0 when running in simulation mode.
  """
  if SIMULATION:
    return 0
  # An unexpected restart can leave the dashboard pickle corrupted, so
  # request verification while recording that we were just down ...
  UpdateDashboard(True, verify=True)
  # ... then record that we are running again, returning that epoch.
  return UpdateDashboard(False)



def Heartbeat(last_heartbeat_time=None, iteration=None):
  """Logs a system up heartbeat if one is due.

  Args:
    last_heartbeat_time: epoch of the most recent heartbeat, or None if
      none has been sent yet.
    iteration: integer count of completed main-loop passes, forwarded to
      the dashboard record.

  Returns:
    Epoch of the most recent heartbeat (possibly the one just sent).
  """
  if SIMULATION:
    return last_heartbeat_time
  current_time = time.time()
  heartbeat_due = (
      not last_heartbeat_time or
      current_time - last_heartbeat_time > HEARTBEAT_SECONDS)
  if heartbeat_due:
    # value=False is the all-clear signal to the dashboard.
    UpdateDashboard(False, iteration=iteration)
    last_heartbeat_time = current_time
  return last_heartbeat_time


def VersionControl():
  """Copies current instances of messageboard.py and arduino.py into repository.

  To aid debugging, we want to keep past versions of the code easily
  accessible, and linked to the errors that have been logged. This function
  copies the python code into a version control directory after adding in a
  date / time stamp to the file name.




                            <----SKIPPED LINES---->