import datetime
import json
import logging
import logging.handlers
import math
import os
import random
import sys

import requests
import torch.distributed as dist

from llava.constants import LOGDIR
|
|
# Canned user-facing messages shown by the serving frontend.
server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."

# Shared TimedRotatingFileHandler, created lazily by build_logger() and kept
# global so repeated build_logger() calls reuse a single log file handler.
handler = None
|
|
|
|
def build_logger(logger_name, logger_filename):
    """Create/return a logger that also captures stdout/stderr into a
    time-rotated log file under LOGDIR.

    Side effects (process-wide):
      - installs a root handler via basicConfig on first call,
      - replaces sys.stdout / sys.stderr with StreamToLogger proxies,
      - on first call, attaches one shared TimedRotatingFileHandler (the
        module-level `handler`) to every logger registered so far.

    NOTE(review): loggers created *after* the first call do not get the
    file handler attached here — presumably they inherit it via the root
    logger; confirm before relying on per-logger handlers.
    """
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Configure the root handler exactly once (basicConfig is a no-op if
    # handlers already exist, so guard and then restyle its formatter).
    if not logging.getLogger().handlers:
        logging.basicConfig(level=logging.INFO)
        logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to dedicated loggers so that bare print()
    # output and tracebacks also land in the log file.
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # The logger actually handed back to the caller.
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Lazily create the shared daily-rotating file handler and attach it to
    # all loggers known to the logging machinery at this point.
    if handler is None:
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(
            filename, when='D', utc=True, encoding='UTF-8')
        handler.setFormatter(formatter)

        for name, item in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)

    return logger
|
|
|
|
class StreamToLogger(object):
    """
    File-like proxy that forwards everything written to it into a logger.

    Complete lines are logged immediately; a trailing partial line is kept
    in an internal buffer until the newline (or a flush) arrives.
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def __getattr__(self, attr):
        # Delegate anything we don't implement (isatty, fileno, ...) to the
        # real stdout captured at construction time.
        return getattr(self.terminal, attr)

    def write(self, buf):
        pending = self.linebuf + buf
        self.linebuf = ''
        for piece in pending.splitlines(True):
            if piece.endswith('\n'):
                # Complete line: emit it without trailing whitespace.
                self.logger.log(self.log_level, piece.rstrip())
            else:
                # Partial tail: hold it until more data or flush() arrives.
                self.linebuf += piece

    def flush(self):
        if self.linebuf != '':
            self.logger.log(self.log_level, self.linebuf.rstrip())
        self.linebuf = ''
|
|
|
|
def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.

    Replaces Linear/LayerNorm ``reset_parameters`` with a no-op, so freshly
    constructed modules skip their (soon-to-be-overwritten) random init.
    """
    import torch
    torch.nn.Linear.reset_parameters = lambda self: None
    torch.nn.LayerNorm.reset_parameters = lambda self: None
|
|
|
|
def violates_moderation(text):
    """
    Check whether the text violates OpenAI moderation API.

    Returns True only when the API responds and flags the input; any request
    failure or malformed response is treated as "not flagged" (best-effort).
    Requires the OPENAI_API_KEY environment variable to be set.
    """
    url = "https://api.openai.com/v1/moderations"
    headers = {"Content-Type": "application/json",
               "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
    text = text.replace("\n", "")
    # BUGFIX: the payload was previously assembled by string concatenation,
    # which produced invalid JSON whenever `text` contained a double quote
    # or backslash. json.dumps escapes the text correctly.
    data = json.dumps({"input": text}).encode("utf-8")
    try:
        ret = requests.post(url, headers=headers, data=data, timeout=5)
        flagged = ret.json()["results"][0]["flagged"]
    except (requests.exceptions.RequestException, KeyError):
        # Network trouble or unexpected response shape: fail open.
        flagged = False

    return flagged
|
|
|
|
def pretty_print_semaphore(semaphore):
    """Return a short human-readable summary of *semaphore* ("None" if absent)."""
    if semaphore is None:
        return "None"
    # Reads the private _value counter plus locked() state for debugging output.
    value, locked = semaphore._value, semaphore.locked()
    return f"Semaphore(value={value}, locked={locked})"
|
|
def master_print(*args):
    """Print *args* only when the current CUDA device is device 0.

    NOTE(review): gates on torch.cuda.current_device(), not the distributed
    rank — on multi-node setups every node's device-0 process prints.
    """
    import torch
    if torch.cuda.current_device() != 0:
        return
    print(*args)
|
|
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
|
def get_world_size():
    """Return the distributed world size, or 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
|
|
|
def get_rank():
    """Return this process's distributed rank, or 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
|
def is_main_process():
    """Return True on the rank-0 process (always True without distributed init)."""
    rank = get_rank()
    return rank == 0
|
|
|
|
class DatasetIter(object):
    """Shuffled per-rank index sampler over a dataset of ``size`` items.

    Each rank owns a contiguous slice of floor(size / world_size) indices
    (trailing remainder indices are dropped). ``increment`` pops indices one
    at a time and reshuffles the slice once it is exhausted, so iteration
    repeats indefinitely in epoch-like shuffled passes.
    """

    def __init__(self, size, world_size, local_rank, num_workers=1):
        self.size = size
        self.world_size = world_size
        self.local_rank = local_rank

        # Only the single-worker case is supported for now.
        assert num_workers == 1, 'num workers must be 1'
        self.num_workers = num_workers
        self.per_worker = int(math.floor(self.size / float(self.world_size * self.num_workers)))
        self.worker_indexs = dict()
        for wid in range(self.num_workers):
            self.init_worker_index(wid)

    def init_worker_index(self, worker_id):
        """(Re)build the shuffled index list owned by ``worker_id`` on this rank."""
        first = self.per_worker * (self.local_rank * self.num_workers + worker_id)
        last = min(first + self.per_worker, self.size)
        indices = list(range(first, last))
        random.shuffle(indices)

        self.worker_indexs[worker_id] = indices

    def increment(self, worker_id):
        """Return the next index for ``worker_id``, reshuffling when drained."""
        if not self.worker_indexs[worker_id]:
            self.init_worker_index(worker_id)
        return self.worker_indexs[worker_id].pop(0)